Added Fuel agent

Fuel agent is a set of tools intended to be
placed on the bootstrap image and used for
node discovery and image-based
provisioning.

Implements: blueprint image-based-provisioning
Change-Id: I946decd50c51e6db767401682d9effbe3cf42bed
This commit is contained in:
Vladimir Kozhukalov 2014-06-26 12:39:30 +04:00
commit 7730a743bb
58 changed files with 6467 additions and 0 deletions

5
.testr.conf Normal file
View File

@ -0,0 +1,5 @@
[DEFAULT]
test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_TEST_TIMEOUT=60 ${PYTHON:-python} -m subunit.run discover -s fuel_agent/tests -p "*.py" $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list
test_run_concurrency=echo 1

View File

@ -0,0 +1,74 @@
#cloud-boothook
#!/bin/bash
# Append a line to a file unless a line starting with the marker is present.
#   $1 - file to modify
#   $2 - marker string; matched anchored at line start (optional leading
#        spaces allowed), interpreted as a grep basic-regex fragment
#   $3 - full line to append when the marker is absent
add_str_to_file_if_not_exists() {
  local file=$1
  local str=$2
  local val=$3
  # Quote all expansions so paths/values with spaces work; printf keeps
  # the appended line byte-exact (echo would re-split and re-join words).
  if ! grep -q "^ *${str}" "$file"; then
    printf '%s\n' "$val" >> "$file"
  fi
}
# configure udev rules
# udev persistent net: stop networking while rules are (re)generated
cloud-init-per instance udev_persistent_net1 /etc/init.d/networking stop
ADMIN_MAC={{ common.admin_mac }}
ADMIN_IF=$(echo {{ common.udevrules }} | sed 's/[,=]/\n/g' | grep "$ADMIN_MAC" | cut -d_ -f2 | head -1)
INSTALL_IF=$(ifconfig | grep "$ADMIN_MAC" | head -1 | cut -d' ' -f1)
# Check if we do not already have static config (or interface seems unconfigured)
NETADDR=( $(ifconfig $INSTALL_IF | grep -oP "[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}") )
if [ ! -z "$(grep $ADMIN_IF /etc/network/interfaces.d/ifcfg-$ADMIN_IF | grep dhcp)" ] ; then
  echo -e "auto $ADMIN_IF\niface $ADMIN_IF inet static\n\taddress ${NETADDR[0]}\n\tnetmask ${NETADDR[2]}\n\tbroadcast ${NETADDR[1]}" > /etc/network/interfaces.d/ifcfg-"$ADMIN_IF"
fi
# Add static udev rules
cloud-init-per instance udev_persistent_net2 echo {{ common.udevrules }} | tr ' ' '\n' | grep udevrules | tr '[:upper:]' '[:lower:]' | sed -e 's/udevrules=//g' -e 's/,/\n/g' | sed -e "s/^/SUBSYSTEM==\"net\",\ ACTION==\"add\",\ DRIVERS==\"?*\",\ ATTR{address}==\"/g" -e "s/_/\",\ ATTR{type}==\"1\",\ KERNEL==\"eth*\",\ NAME=\"/g" -e "s/$/\"/g" | tee /etc/udev/rules.d/70-persistent-net.rules
cloud-init-per instance udev_persistent_net3 udevadm control --reload-rules
cloud-init-per instance udev_persistent_net4 udevadm trigger --attr-match=subsystem=net
cloud-init-per instance udev_persistent_net5 /etc/init.d/networking start
# end of udev
# configure black module lists
# virt-what should be installed
if [ ! -f /etc/modprobe.d/blacklist-i2c_piix4.conf ]; then
  ([[ $(virt-what) = "virtualbox" ]] && echo "blacklist i2c_piix4" >> /etc/modprobe.d/blacklist-i2c_piix4.conf || :) && update-initramfs -u -k all
  modprobe -r i2c_piix4
fi
# conntrack modules and limits for the target node
cloud-init-per instance conntrack_ipv4 echo nf_conntrack_ipv4 | tee -a /etc/modules
cloud-init-per instance conntrack_ipv6 echo nf_conntrack_ipv6 | tee -a /etc/modules
cloud-init-per instance conntrack_max echo "net.nf_conntrack_max=1048576" | tee -a /etc/sysctl.conf
cloud-init-per instance conntrack_ipv4_load modprobe nf_conntrack_ipv4
cloud-init-per instance conntrack_ipv6_load modprobe nf_conntrack_ipv6
cloud-init-per instance conntrack_max_set sysctl -w "net.nf_conntrack_max=1048576"
cloud-init-per instance dhclient echo 'supersede routers 0;' | tee /etc/dhcp/dhclient.conf
# ntp sync
# BUGFIX: the original line read "cloud-init-per instance service ntp stop",
# which made "service" the semaphore name and tried to execute "ntp stop".
cloud-init-per instance stop_ntp service ntp stop | tee /dev/null
cloud-init-per instance sync_date ntpdate -t 4 -b {{ common.master_ip }}
cloud-init-per instance sync_hwclock hwclock --systohc
cloud-init-per instance edit_ntp_conf1 sed -i '/^\s*tinker panic/ d' /etc/ntp.conf
cloud-init-per instance edit_ntp_conf2 sed -i '1 i tinker panic 0' /etc/ntp.conf
cloud-init-per instance edit_ntp_conf3 echo 0 > /var/lib/ntp/drift
# BUGFIX: renamed from a duplicate "edit_ntp_conf3"; cloud-init-per keys its
# run-once semaphore on the name, so a second command reusing a name never runs.
cloud-init-per instance edit_ntp_conf5 sed -i '/^\s*server/ d' /etc/ntp.conf
cloud-init-per instance edit_ntp_conf4 echo "server {{ common.master_ip }} burst iburst" | tee -a /etc/ntp.conf
# sshd: force UseDNS off and drop GSSAPI options
cloud-init-per instance removeUseDNS sed -i --follow-symlinks -e '/UseDNS/d' /etc/ssh/sshd_config
add_str_to_file_if_not_exists /etc/ssh/sshd_config 'UseDNS' 'UseDNS no'
cloud-init-per instance gssapi_disable sed -i -e "/^\s*GSSAPICleanupCredentials yes/d" -e "/^\s*GSSAPIAuthentication yes/d" /etc/ssh/sshd_config
cloud-init-per instance nailgun_agent echo 'flock -w 0 -o /var/lock/agent.lock -c "/opt/nailgun/bin/agent >> /var/log/nailgun-agent.log 2>&1"' | tee /etc/rc.local
# Copying default bash settings to the root directory
cloud-init-per instance skel_bash cp -f /etc/skel/.bash* /root/
cloud-init-per instance hiera_puppet mkdir -p /etc/puppet /var/lib/hiera
cloud-init-per instance touch_puppet touch /var/lib/hiera/common.yaml /etc/puppet/hiera.yaml

View File

@ -0,0 +1,91 @@
#cloud-config
disable_ec2_metadata: true
disable_root: false
ssh_authorized_keys:
- {{ common.ssh_auth_key }}
# set the locale to a given locale
# default: en_US.UTF-8
locale: en_US.UTF-8
timezone: {{ common.timezone }}
hostname: {{ common.hostname }}
fqdn: {{ common.fqdn }}
# TODO(kozhukalov) name_servers is set as "1.2.3.4,1.2.3.5"
resolv_conf:
nameservers: [ {{ common.name_servers }} ]
searchdomains:
- {{ common.search_domain }}
# domain: {{ domain }}
# options:
# rotate: true
# timeout: 1
# add entries to rsyslog configuration
rsyslog:
- filename: 10-log2master.conf
content: |
$template LogToMaster, "<%%PRI%>1 %$NOW%T%TIMESTAMP:8:$%Z %HOSTNAME% %APP-NAME% %PROCID% %MSGID% -%msg%\n"
*.* @{{ common.master_ip }};LogToMaster
# that module's missing in 0.6.3, but existent for >= 0.7.3
write_files:
- content: |
---
url: {{ common.master_url }}
path: /etc/nailgun-agent/config.yaml
- content: target
path: /etc/nailgun_systemtype
- content: APT::Get::AllowUnauthenticated 1;
path: /etc/apt/apt.conf.d/02mirantis-allow-unsigned
apt_sources:
- source: deb http://{{ common.master_ip }}:8080/ubuntu/fuelweb/x86_64 precise main
mcollective:
conf:
main_collective: mcollective
collectives: mcollective
libdir: /usr/share/mcollective/plugins
logfile: /var/log/mcollective.log
loglevel: debug
daemonize: 1
direct_addressing: 0
ttl: 4294957
securityprovider: psk
plugin.psk: {{ mcollective.pskey }}
connector: {{ mcollective.connector }}
plugin.rabbitmq.vhost: {{ mcollective.vhost }}
plugin.rabbitmq.pool.size: 1
plugin.rabbitmq.pool.1.host: {{ mcollective.host }}
plugin.rabbitmq.pool.1.port: {{ mcollective.port|default(61613) }}
plugin.rabbitmq.pool.1.user: {{ mcollective.user }}
plugin.rabbitmq.pool.1.password: {{ mcollective.password }}
plugin.rabbitmq.heartbeat_interval: 30
factsource: yaml
plugin.yaml: /etc/mcollective/facts.yaml
puppet:
conf:
main:
logdir: /var/log/puppet
rundir: /var/run/puppet
ssldir: $vardir/ssl
pluginsync: true
agent:
classfile: $vardir/classes.txt
localconfig: $vardir/localconfig
server: {{ puppet.master }}
report: false
configtimeout: 600
final_message: "YAY! The system is finally up, after $UPTIME seconds"

View File

@ -0,0 +1,10 @@
# instance-id will be autogenerated
# instance-id: iid-abcdefg
network-interfaces: |
iface {{ common.admin_iface_name|default("eth0") }} inet static
address {{ common.admin_ip }}
# network 192.168.1.0
netmask {{ common.admin_mask }}
# broadcast 192.168.1.255
# gateway 192.168.1.254
hostname: {{ common.hostname }}

View File

@ -0,0 +1,10 @@
# instance-id will be autogenerated
# instance-id: iid-abcdefg
network-interfaces: |
iface {{ common.admin_iface_name|default("eth0") }} inet static
address {{ common.admin_ip }}
# network 192.168.1.0
netmask {{ common.admin_mask }}
# broadcast 192.168.1.255
# gateway 192.168.1.254
hostname: {{ common.hostname }}

View File

@ -0,0 +1,115 @@
[DEFAULT]
#
# Options defined in fuel_agent.partition
#
# Partitioning data driver (string value)
#partition_data_driver=ks_spaces
# Get data driver (string value)
#get_data_driver=read_file
#
# Options defined in fuel_agent.drivers.read_file
#
# Provision data file (string value)
#provision_data_file=/tmp/provision.json
#
# Options defined in fuel_agent.openstack.common.log
#
# Print debugging output (set logging level to DEBUG instead
# of default WARNING level). (boolean value)
#debug=false
# Print more verbose output (set logging level to INFO instead
# of default WARNING level). (boolean value)
#verbose=false
# Log output to standard error. (boolean value)
#use_stderr=true
# Format string to use for log messages with context. (string
# value)
#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
# Format string to use for log messages without context.
# (string value)
#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
# Data to append to log format when level is DEBUG. (string
# value)
#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
# Prefix each line of exception output with this format.
# (string value)
#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
# List of logger=LEVEL pairs. (list value)
#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN
# Enables or disables publication of error events. (boolean
# value)
#publish_errors=false
# Enables or disables fatal status of deprecations. (boolean
# value)
#fatal_deprecations=false
# The format for an instance that is passed with the log
# message. (string value)
#instance_format="[instance: %(uuid)s] "
# The format for an instance UUID that is passed with the log
# message. (string value)
#instance_uuid_format="[instance: %(uuid)s] "
# The name of a logging configuration file. This file is
# appended to any existing logging configuration files. For
# details about logging configuration files, see the Python
# logging module documentation. (string value)
# Deprecated group/name - [DEFAULT]/log_config
#log_config_append=<None>
# DEPRECATED. A logging.Formatter log message format string
# which may use any of the available logging.LogRecord
# attributes. This option is deprecated. Please use
# logging_context_format_string and
# logging_default_format_string instead. (string value)
#log_format=<None>
# Format string for %%(asctime)s in log records. Default:
# %(default)s . (string value)
#log_date_format=%Y-%m-%d %H:%M:%S
# (Optional) Name of log file to output to. If no default is
# set, logging will go to stdout. (string value)
# Deprecated group/name - [DEFAULT]/logfile
#log_file=<None>
# (Optional) The base directory used for relative --log-file
# paths. (string value)
# Deprecated group/name - [DEFAULT]/logdir
#log_dir=<None>
# Use syslog for logging. Existing syslog format is DEPRECATED
# during I, and will change in J to honor RFC5424. (boolean
# value)
#use_syslog=false
# (Optional) Enables or disables syslog rfc5424 format for
# logging. If enabled, prefixes the MSG part of the syslog
# message with APP-NAME (RFC5424). The format without the APP-
# NAME is deprecated in I, and will be removed in J. (boolean
# value)
#use_syslog_rfc_format=false
# Syslog facility to receive log lines. (string value)
#syslog_log_facility=LOG_USER

13
fuel_agent/__init__.py Normal file
View File

@ -0,0 +1,13 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -0,0 +1,13 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

21
fuel_agent/cmd/agent.py Normal file
View File

@ -0,0 +1,21 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def main():
    # Placeholder entry point; provisioning logic is not implemented here.
    pass


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,49 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import sys
from oslo.config import cfg
from fuel_agent import manager as manager
from fuel_agent.openstack.common import log
from fuel_agent import version
# Command-line/config options for the fuel-agent entry point.
opts = [
    cfg.StrOpt(
        'provision_data_file',
        default='/tmp/provision.json',
        help='Provision data file'
    ),
]

CONF = cfg.CONF
CONF.register_opts(opts)


def main():
    # Parse CLI and config-file options, then set up logging before any work.
    CONF(sys.argv[1:], project='fuel-agent',
         version=version.version_info.release_string())
    log.setup('fuel-agent')
    # Provisioning data is a JSON document dropped onto the node
    # (path controlled by the provision_data_file option).
    with open(CONF.provision_data_file) as f:
        data = json.load(f)
    provision_manager = manager.Manager(data)
    provision_manager.do_provisioning()


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,13 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -0,0 +1,145 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jsonschema
from fuel_agent import errors
# JSON schema for the "ks_spaces" partitioning input: a non-empty array whose
# items are either "disk" objects (a physical disk plus the volumes placed on
# it) or "vg" objects (an LVM volume group plus its logical volumes).
KS_SPACES_SCHEMA = {
    '$schema': 'http://json-schema.org/draft-04/schema#',
    'title': 'Partition scheme',
    'type': 'array',
    'minItems': 1,
    'uniqueItems': True,
    'items': {
        'anyOf': [
            {
                # A physical disk and its volumes.
                'type': 'object',
                'required': ['type', 'id', 'volumes', 'name',
                             'size', 'extra', 'free_space'],
                'properties': {
                    'type': {'enum': ['disk']},
                    'id': {'type': 'string'},
                    'name': {'type': 'string'},
                    'size': {'type': 'integer'},
                    'free_space': {'type': 'integer'},
                    # Extra device identifiers (e.g. by-id link suffixes).
                    'extra': {
                        'type': 'array',
                        'items': {'type': 'string'},
                    },
                    'volumes': {
                        'type': 'array',
                        'items': {
                            'anyOf': [
                                {
                                    # LVM physical volume.
                                    'type': 'object',
                                    'required': ['type', 'size',
                                                 'lvm_meta_size', 'vg'],
                                    'properties': {
                                        'type': {'enum': ['pv']},
                                        'size': {'type': 'integer'},
                                        'lvm_meta_size': {'type': 'integer'},
                                        'vg': {'type': 'string'}
                                    }
                                },
                                {
                                    # Plain partition or software-RAID member.
                                    # NOTE: 'mount' is optional here.
                                    'type': 'object',
                                    'required': ['type', 'size'],
                                    'properties': {
                                        'type': {'enum': ['raid',
                                                          'partition']},
                                        'size': {'type': 'integer'},
                                        'mount': {'type': 'string'},
                                        'file_system': {'type': 'string'},
                                        'name': {'type': 'string'}
                                    }
                                },
                                {
                                    # Boot partition.
                                    'type': 'object',
                                    'required': ['type', 'size'],
                                    'properties': {
                                        'type': {'enum': ['boot']},
                                        'size': {'type': 'integer'}
                                    }
                                },
                                {
                                    # Space reserved for LVM metadata.
                                    'type': 'object',
                                    'required': ['type', 'size'],
                                    'properties': {
                                        'type': {'enum': ['lvm_meta_pool']},
                                        'size': {'type': 'integer'}
                                    }
                                },
                            ]
                        }
                    }
                }
            },
            {
                # An LVM volume group and its logical volumes.
                'type': 'object',
                'required': ['type', 'id', 'volumes'],
                'properties': {
                    'type': {'enum': ['vg']},
                    'id': {'type': 'string'},
                    'label': {'type': 'string'},
                    'min_size': {'type': 'integer'},
                    '_allocate_size': {'type': 'string'},
                    'volumes': {
                        'type': 'array',
                        'items': {
                            'type': 'object',
                            'required': ['type', 'size', 'name'],
                            'properties': {
                                'type': {'enum': ['lv']},
                                'size': {'type': 'integer'},
                                'name': {'type': 'string'},
                                'mount': {'type': 'string'},
                                'file_system': {'type': 'string'},
                            }
                        }
                    }
                }
            }
        ]
    }
}
def validate(scheme):
    """Validate a given partition scheme using jsonschema.

    :param scheme: partition scheme to validate (ks_spaces list)
    :raises: WrongPartitionSchemeError when the scheme does not match
        KS_SPACES_SCHEMA, contains no disks, or asks for a root file
        system larger than 16T.
    """
    try:
        checker = jsonschema.FormatChecker()
        jsonschema.validate(scheme, KS_SPACES_SCHEMA,
                            format_checker=checker)
    except Exception as exc:
        raise errors.WrongPartitionSchemeError(str(exc))

    # scheme is not valid if the number of disks is 0
    if not any(space['type'] == 'disk' for space in scheme):
        raise errors.WrongPartitionSchemeError(
            'Partition scheme seems empty')

    # The schema does not make 'mount' mandatory, so guard the lookup:
    # the original volume['mount'] raised KeyError for unmounted volumes.
    for space in scheme:
        for volume in space.get('volumes', []):
            if volume['size'] > 16777216 and volume.get('mount') == '/':
                raise errors.WrongPartitionSchemeError(
                    'Root file system must be less than 16T')

    # TODO(kozhukalov): need to have additional logical verifications
    # maybe sizes and format of string values

View File

@ -0,0 +1,219 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fuel_agent.drivers import ks_spaces_validator
from fuel_agent import errors
from fuel_agent import objects
from fuel_agent.openstack.common import log as logging
from fuel_agent.utils import hardware_utils as hu
LOG = logging.getLogger(__name__)
def match_device(hu_disk, ks_disk):
    """Decide whether two disk descriptions refer to the same device.

    Simplified counterpart of hu.match_device.

    :param hu_disk: disk dict as returned by hu.list_block_devices
    :param ks_disk: disk dict in ks_spaces format
    :returns: True if both describe the same disk device, else False.
    """
    devlinks = set(hu_disk['uspec'].get('DEVLINKS', ()))

    # Match when at least one /dev/disk/by-id symlink from 'extra'
    # also appears among the device's udev links.
    if 'extra' in ks_disk:
        shared = devlinks & set('/dev/%s' % link for link in ks_disk['extra'])
        if any(link.startswith('/dev/disk/by-id') for link in shared):
            return True

    # Match when one of the udev links equals the ks_disk id.
    if 'id' in ks_disk and '/dev/%s' % ks_disk['id'] in devlinks:
        return True

    return False
class Nailgun(object):
    """Data driver converting raw nailgun provisioning data into
    partition, configdrive and image schemes.
    """

    def __init__(self, data):
        # Here data is expected to be raw provisioning data
        # how it is given by nailgun
        self.data = data

    def partition_data(self):
        """Return the raw ks_spaces partitioning list."""
        return self.data['ks_meta']['pm_data']['ks_spaces']

    @property
    def ks_disks(self):
        """Disks from ks_spaces having a positive size."""
        return [space for space in self.partition_data()
                if space['type'] == 'disk' and space['size'] > 0]

    @property
    def ks_vgs(self):
        """Volume groups from ks_spaces.

        NOTE: the original assigned the filtered list to self.ks_vgs
        (the property itself) instead of returning it, which raised
        AttributeError and produced no value.
        """
        return [space for space in self.partition_data()
                if space['type'] == 'vg']

    @property
    def hu_disks(self):
        """Actual disks which are available on this node

        it is a list of dicts which are formatted other way than
        ks_spaces disks. To match both of those formats use
        _match_device method.
        """
        if not getattr(self, '_hu_disks', None):
            self._hu_disks = hu.list_block_devices(disks=True)
        return self._hu_disks

    def _disk_dev(self, ks_disk):
        """Resolve a ks_spaces disk to its /dev device path.

        :raises: DiskNotFoundError when zero or several devices match.
        """
        # first we try to find a device that matches ks_disk
        # comparing by-id and by-path links
        matched = [hu_disk['device'] for hu_disk in self.hu_disks
                   if match_device(hu_disk, ks_disk)]
        # if we can not find a device by its by-id and by-path links
        # we try to find a device by its name
        fallback = [hu_disk['device'] for hu_disk in self.hu_disks
                    if '/dev/%s' % ks_disk['name'] == hu_disk['device']]
        found = matched or fallback
        if not found or len(found) > 1:
            raise errors.DiskNotFoundError(
                'Disk not found: %s' % ks_disk['name'])
        return found[0]

    def _getlabel(self, label):
        if not label:
            return ''
        # XFS will refuse to format a partition if the
        # disk label is > 12 characters.
        return ' -L {0} '.format(label[:12])

    def partition_scheme(self):
        """Build a PartitionScheme object from validated ks_spaces data."""
        data = self.partition_data()
        ks_spaces_validator.validate(data)
        partition_scheme = objects.PartitionScheme()

        # NOTE: iterate over the disks themselves; the original looped
        # over enumerate() and therefore received (index, disk) tuples.
        for disk in self.ks_disks:
            parted = partition_scheme.add_parted(
                name=self._disk_dev(disk), label='gpt')
            # legacy boot partition
            parted.add_partition(size=24, flags=['bios_grub'])
            # uefi partition (for future use)
            parted.add_partition(size=200)

            for volume in disk['volumes']:
                if volume['size'] <= 0:
                    continue

                if volume['type'] in ('partition', 'pv', 'raid'):
                    prt = parted.add_partition(size=volume['size'])

                if volume['type'] == 'partition':
                    if 'partition_guid' in volume:
                        prt.set_guid(volume['partition_guid'])
                    if 'mount' in volume and volume['mount'] != 'none':
                        partition_scheme.add_fs(
                            device=prt.name, mount=volume['mount'],
                            fs_type=volume.get('file_system', 'xfs'),
                            fs_label=self._getlabel(volume.get('disk_label')))

                if volume['type'] == 'pv':
                    partition_scheme.vg_attach_by_name(
                        pvname=prt.name, vgname=volume['vg'])

                if volume['type'] == 'raid':
                    if 'mount' in volume and volume['mount'] != 'none':
                        partition_scheme.md_attach_by_mount(
                            device=prt.name, mount=volume['mount'],
                            fs_type=volume.get('file_system', 'xfs'),
                            fs_label=self._getlabel(volume.get('disk_label')))

            # this partition will be used to put there configdrive image
            if partition_scheme.configdrive_device() is None:
                parted.add_partition(size=20, configdrive=True)

        # NOTE: same enumerate() bug fixed here for volume groups.
        for vg in self.ks_vgs:
            for volume in vg['volumes']:
                if volume['size'] <= 0:
                    continue

                if volume['type'] == 'lv':
                    lv = partition_scheme.add_lv(name=volume['name'],
                                                 vgname=vg['id'],
                                                 size=volume['size'])

                    if 'mount' in volume and volume['mount'] != 'none':
                        partition_scheme.add_fs(
                            device=lv.device_name, mount=volume['mount'],
                            fs_type=volume.get('file_system', 'xfs'),
                            fs_label=self._getlabel(volume.get('disk_label')))

        return partition_scheme

    def configdrive_scheme(self):
        """Build a ConfigDriveScheme object from provisioning data."""
        data = self.data
        configdrive_scheme = objects.ConfigDriveScheme()

        admin_mac = data['kernel_options']['netcfg/choose_interface']
        # Find the admin interface spec and remember its name. The
        # original filtered a list built from dict.update() calls, i.e.
        # a list of None values, which could never yield a match.
        admin_interface = None
        for name, spec in data['interfaces'].items():
            if spec.get('mac_address') == admin_mac:
                spec['name'] = name
                admin_interface = spec
                break
        if admin_interface is None:
            raise errors.WrongConfigDriveDataError(
                'Admin interface not found for mac %s' % admin_mac)

        configdrive_scheme.set_common(
            ssh_auth_key=data['ks_meta']['auth_key'],
            hostname=data['hostname'],
            fqdn=data['hostname'],
            name_servers=data['name_servers'],
            search_domain=data['name_servers_search'],
            master_ip=data['ks_meta']['master_ip'],
            # 'http:/' was missing a slash; master_ip lives under ks_meta
            # (see the line above), not at the top level of the data.
            master_url='http://%s:8000/api' % data['ks_meta']['master_ip'],
            udevrules=data['kernel_options']['udevrules'],
            admin_mac=admin_mac,
            admin_ip=admin_interface['ip_address'],
            admin_mask=admin_interface['netmask'],
            admin_iface_name=admin_interface['name'],
            timezone=data['ks_meta']['timezone'],
        )
        configdrive_scheme.set_puppet(
            master=data['ks_meta']['puppet_master']
        )
        configdrive_scheme.set_mcollective(
            pskey=data['ks_meta']['mco_pskey'],
            vhost=data['ks_meta']['mco_vhost'],
            host=data['ks_meta']['mco_host'],
            # was mco_host: obvious copy/paste error for the user field
            user=data['ks_meta']['mco_user'],
            password=data['ks_meta']['mco_password'],
            connector=data['ks_meta']['mco_connector']
        )
        configdrive_scheme.set_profile(profile=data['profile'].split('_')[0])
        return configdrive_scheme

    def image_scheme(self, partition_scheme):
        """Build an ImageScheme targeting the root device of the scheme."""
        data = self.data
        image_scheme = objects.ImageScheme()
        image_scheme.add_image(
            uri=data['ks_meta']['image_uri'],
            target_device=partition_scheme.root_device(),
            image_format=data['ks_meta']['image_format'],
            container=data['ks_meta']['image_container'],
            size=data['ks_meta'].get('image_size'),
        )
        return image_scheme

95
fuel_agent/errors.py Normal file
View File

@ -0,0 +1,95 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fuel_agent.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class BaseError(Exception):
    """Base class for fuel-agent errors; logs the message on creation."""

    def __init__(self, *args, **kwargs):
        # The original called super(BaseException, self).__init__, which
        # skipped Exception's initializer entirely (resolving past
        # BaseException in the MRO), leaving the exception uninitialized.
        super(BaseError, self).__init__(*args, **kwargs)
        LOG.error(self.message)
# --- Partitioning / disk errors --------------------------------------------

class WrongPartitionSchemeError(BaseError):
    pass


class WrongPartitionLabelError(BaseError):
    pass


class PartitionNotFoundError(BaseError):
    pass


class DiskNotFoundError(BaseError):
    pass


class NotEnoughSpaceError(BaseError):
    pass


# --- LVM errors (physical volumes, volume groups, logical volumes) ---------

class PVAlreadyExistsError(BaseError):
    pass


class PVNotFoundError(BaseError):
    pass


class PVBelongsToVGError(BaseError):
    pass


class VGAlreadyExistsError(BaseError):
    pass


class VGNotFoundError(BaseError):
    pass


class LVAlreadyExistsError(BaseError):
    pass


class LVNotFoundError(BaseError):
    pass


# --- Software RAID (md) errors ---------------------------------------------

class MDAlreadyExistsError(BaseError):
    pass


class MDNotFoundError(BaseError):
    pass


class MDDeviceDuplicationError(BaseError):
    pass


class MDWrongSpecError(BaseError):
    pass


# --- Config drive / image errors -------------------------------------------

class WrongConfigDriveDataError(BaseError):
    pass


class WrongImageDataError(BaseError):
    pass

144
fuel_agent/manager.py Normal file
View File

@ -0,0 +1,144 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from oslo.config import cfg
from fuel_agent import errors
from fuel_agent.utils import fs_utils as fu
from fuel_agent.utils import lvm_utils as lu
from fuel_agent.utils import md_utils as mu
from fuel_agent.utils import partition_utils as pu
from fuel_agent.utils import utils
# Configuration options controlling the provisioning manager: which data
# driver to use and where templates / temporary artifacts live.
opts = [
    cfg.StrOpt(
        'data_driver',
        default='nailgun',
        help='Data driver'
    ),
    cfg.StrOpt(
        'nc_template_path',
        default='/usr/share/fuel-agent/cloud-init-templates',
        help='Path to directory with cloud init templates',
    ),
    cfg.StrOpt(
        'tmp_path',
        default='/tmp',
        help='Temporary directory for file manipulations',
    ),
    cfg.StrOpt(
        'config_drive_path',
        default='/tmp/config-drive.img',
        help='Path where to store generated config drive image',
    ),
]

CONF = cfg.CONF
CONF.register_opts(opts)
class Manager(object):
    """Drives the provisioning flow: parse input data, partition disks,
    build a config drive image, copy the OS image and install a
    bootloader (the last two steps are not implemented yet).
    """

    def __init__(self, data):
        # The data driver (selected via the data_driver option) converts
        # raw provisioning data into the three schemes below.
        self.driver = utils.get_driver(CONF.data_driver)(data)
        self.partition_scheme = None
        self.configdrive_scheme = None
        self.image_scheme = None

    def do_parsing(self):
        # Build all schemes up front; image_scheme needs the partition
        # scheme to determine the target (root) device.
        self.partition_scheme = self.driver.partition_scheme()
        self.configdrive_scheme = self.driver.configdrive_scheme()
        self.image_scheme = self.driver.image_scheme(self.partition_scheme)

    def do_partitioning(self):
        # Create partition tables and partitions, disk by disk.
        for parted in self.partition_scheme.parteds:
            pu.make_label(parted.name, parted.label)
            # NOTE(review): 'partititons' looks like a misspelling of
            # 'partitions' — confirm against the Parted object definition.
            for prt in parted.partititons:
                pu.make_partition(prt.device, prt.begin, prt.end, prt.type)
                for flag in prt.flags:
                    pu.set_partition_flag(prt.device, prt.count, flag)
        # creating meta disks
        for md in self.partition_scheme.mds:
            mu.mdcreate(md.name, md.level, *md.devices)
        # creating physical volumes
        for pv in self.partition_scheme.pvs:
            lu.pvcreate(pv.name)
        # creating volume groups
        for vg in self.partition_scheme.vgs:
            lu.vgcreate(vg.name, *vg.pvnames)
        # creating logical volumes
        for lv in self.partition_scheme.lvs:
            lu.lvcreate(lv.vgname, lv.name, lv.size)
        # making file systems
        for fs in self.partition_scheme.fss:
            fu.make_fs(fs.type, fs.options, fs.label, fs.device)

    def do_configdrive(self):
        # Render cloud-init templates, merge them into multipart
        # user-data and pack everything into an ISO9660 image.
        cc_output_path = os.path.join(CONF.tmp_path, 'cloud_config.txt')
        bh_output_path = os.path.join(CONF.tmp_path, 'boothook.txt')
        # NOTE:file should be strictly named as 'user-data'
        ud_output_path = os.path.join(CONF.tmp_path, 'user-data')
        md_output_path = os.path.join(CONF.tmp_path, 'meta-data')
        tmpl_dir = CONF.nc_template_path
        utils.render_and_save(
            tmpl_dir, self.configdrive_scheme.template_name('cloud_config'),
            self.configdrive_scheme.template_data(), cc_output_path
        )
        utils.render_and_save(
            tmpl_dir, self.configdrive_scheme.template_name('boothook'),
            self.configdrive_scheme.template_data(), bh_output_path
        )
        # NOTE(review): 'meta-data' uses a dash while the other template
        # keys use underscores — verify the key template_name expects.
        utils.render_and_save(
            tmpl_dir, self.configdrive_scheme.template_name('meta-data'),
            self.configdrive_scheme.template_data(), md_output_path
        )
        utils.execute('write-mime-multipart', '--output=%s' % ud_output_path,
                      '%s:text/cloud-boothook' % bh_output_path,
                      '%s:text/cloud-config' % cc_output_path)
        utils.execute('genisoimage', '-output', CONF.config_drive_path,
                      '-volid', 'cidata', '-joliet', '-rock', ud_output_path,
                      md_output_path)
        configdrive_device = self.partition_scheme.configdrive_device()
        if configdrive_device is None:
            raise errors.WrongPartitionSchemeError(
                'Error while trying to get configdrive device: '
                'configdrive device not found')
        # Register the generated image so do_copyimage can write it onto
        # the dedicated configdrive partition.
        self.image_scheme.add_configdrive_image(
            uri='file://%s' % CONF.config_drive_path,
            target_device=configdrive_device,
            image_format='iso9660',
            container='raw',
        )

    def do_copyimage(self):
        # Not implemented yet.
        pass

    def do_bootloader(self):
        # Not implemented yet.
        pass

    def do_provisioning(self):
        # Full provisioning pipeline, in strict order.
        self.do_parsing()
        self.do_partitioning()
        self.do_configdrive()
        self.do_copyimage()
        self.do_bootloader()

View File

@ -0,0 +1,32 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fuel_agent.objects.configdrive import ConfigDriveCommon
from fuel_agent.objects.configdrive import ConfigDriveMcollective
from fuel_agent.objects.configdrive import ConfigDrivePuppet
# BUGFIX: ConfigDriveScheme was listed in __all__ but never imported,
# breaking `from fuel_agent.objects import *` and objects.ConfigDriveScheme.
from fuel_agent.objects.configdrive import ConfigDriveScheme
from fuel_agent.objects.image import Image
from fuel_agent.objects.image import ImageScheme
from fuel_agent.objects.partition import Fs
from fuel_agent.objects.partition import Lv
from fuel_agent.objects.partition import Md
from fuel_agent.objects.partition import Partition
from fuel_agent.objects.partition import PartitionScheme
from fuel_agent.objects.partition import Pv
from fuel_agent.objects.partition import Vg

__all__ = [
    'Partition', 'Pv', 'Vg', 'Lv', 'Md', 'Fs', 'PartitionScheme',
    'ConfigDriveCommon', 'ConfigDrivePuppet', 'ConfigDriveMcollective',
    'ConfigDriveScheme', 'Image', 'ImageScheme',
]

View File

@ -0,0 +1,87 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fuel_agent import errors
class ConfigDriveCommon(object):
    """Common node settings rendered into every cloud-init template."""

    def __init__(self, ssh_auth_key, hostname, fqdn, name_servers,
                 search_domain, master_ip, master_url, timezone):
        # Store everything as plain attributes; jinja2 templates read
        # them directly (e.g. ``{{ common.hostname }}``).
        self.__dict__.update(
            ssh_auth_key=ssh_auth_key,
            hostname=hostname,
            fqdn=fqdn,
            name_servers=name_servers,
            search_domain=search_domain,
            master_ip=master_ip,
            master_url=master_url,
            timezone=timezone,
        )
class ConfigDrivePuppet(object):
    """Puppet settings for cloud-init: the puppet master to point at."""

    def __init__(self, master):
        self.master = master
class ConfigDriveMcollective(object):
    """Mcollective connection settings rendered into cloud-init templates."""

    def __init__(self, pskey, vhost, host, user, password, connector):
        # Plain data holder: one attribute per constructor argument.
        self.__dict__.update(
            pskey=pskey,
            vhost=vhost,
            host=host,
            user=user,
            password=password,
            connector=connector,
        )
class ConfigDriveScheme(object):
    """Aggregates all data needed to render cloud-init config drive templates.

    Holds a mandatory ``common`` section plus optional ``puppet`` and
    ``mcollective`` sections, and tracks the distro profile that selects
    which jinja2 template set is used.
    """

    # TODO(kozhukalov) make it possible to validate scheme according to
    # chosen profile which means chosen set of cloud-init templates.
    # In other words make this templating scheme easily extendable.

    def __init__(self, common=None, puppet=None,
                 mcollective=None, profile=None):
        self.common = common
        self.puppet = puppet
        self.mcollective = mcollective
        # Default template profile when none was given.
        self._profile = profile or 'ubuntu'

    def set_common(self, **kwargs):
        """Build the common section from keyword arguments."""
        self.common = ConfigDriveCommon(**kwargs)

    def set_puppet(self, **kwargs):
        """Build the puppet section from keyword arguments."""
        self.puppet = ConfigDrivePuppet(**kwargs)

    def set_mcollective(self, **kwargs):
        """Build the mcollective section from keyword arguments."""
        self.mcollective = ConfigDriveMcollective(**kwargs)

    def template_data(self):
        """Return the context dict passed to template rendering.

        :raises: errors.WrongConfigDriveDataError if common is not set.
        """
        if self.common is None:
            raise errors.WrongConfigDriveDataError(
                'Common attribute should be defined, but it is not')
        data = {'common': self.common}
        # Optional sections are included only when they were provided.
        for section in ('puppet', 'mcollective'):
            value = getattr(self, section)
            if value is not None:
                data[section] = value
        return data

    def set_profile(self, profile):
        # TODO(kozhukalov) validate profile
        self._profile = profile

    @property
    def profile(self):
        return self._profile

    def template_name(self, what):
        """Return the jinja2 template file name for the given section."""
        return '%s_%s.jinja2' % (what, self._profile)

View File

@ -0,0 +1,43 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fuel_agent import errors
class Image(object):
    """A single OS image to be written onto a target block device."""

    # Containers (outer formats) the agent knows how to unpack.
    SUPPORTED_CONTAINERS = ['raw']

    def __init__(self, uri, target_device,
                 image_format, container, size=None):
        """Describe an image.

        :param uri: source location, e.g. http://host:port/path/to/image.img
                    or file:///tmp/image.img
        :param target_device: block device the image is written to
        :param image_format: inner fs format: one of 'iso9660',
                             'ext[234]', 'xfs'
        :param container: outer format; must be in SUPPORTED_CONTAINERS
        :param size: optional image size
        :raises: errors.WrongImageDataError on unsupported container
        """
        # Validate up front; rejecting early leaves no partial state behind.
        if container not in self.SUPPORTED_CONTAINERS:
            raise errors.WrongImageDataError(
                'Error while image initialization: '
                'unsupported image container')
        self.uri = uri
        self.target_device = target_device
        self.image_format = image_format
        self.container = container
        self.size = size
class ImageScheme(object):
    """Ordered collection of Image objects to deploy on a node."""

    def __init__(self, images=None):
        self.images = images or []

    def add_image(self, **kwargs):
        """Create an Image from kwargs and append it to the scheme."""
        self.images.append(Image(**kwargs))

View File

@ -0,0 +1,297 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fuel_agent import errors
from fuel_agent.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class Parted(object):
    """A disk with a partition table (label) and its list of partitions."""

    def __init__(self, name, label):
        # name: block device path, e.g. '/dev/sda'
        # label: partition table type, 'gpt' or 'msdos'
        self.name = name
        self.label = label
        self.partitions = []

    def add_partition(self, **kwargs):
        """Create the next Partition on this disk and append it.

        Accepts Partition kwargs; 'name', 'count', 'device', 'begin',
        'end' and 'partition_type' are filled in automatically when not
        given. Either 'end' or 'size' must be provided.
        """
        # TODO(kozhukalov): validate before appending
        # calculating partition name based on device name and partition count
        kwargs['name'] = self.next_name()
        kwargs['count'] = self.next_count()
        kwargs['device'] = self.name
        # if begin is given use its value else use end of last partition
        kwargs['begin'] = kwargs.get('begin', self.next_begin())
        # if end is given use its value else calculate it from 'size'
        # (kwargs.pop('size') raises KeyError when neither is given)
        kwargs['end'] = kwargs.get('end') or \
            kwargs['begin'] + kwargs.pop('size')
        # BUG FIX: when 'end' was given, 'size' was left in kwargs and
        # leaked into Partition(**kwargs), raising TypeError. Drop it.
        kwargs.pop('size', None)
        # if partition_type is given use its value else
        # try to calculate it automatically
        kwargs['partition_type'] = \
            kwargs.get('partition_type', self.next_type())
        partition = Partition(**kwargs)
        self.partitions.append(partition)
        return partition

    @property
    def logical(self):
        # BUG FIX: was filter(...), whose py3 iterator breaks len()/indexing.
        return [x for x in self.partitions if x.type == 'logical']

    @property
    def primary(self):
        return [x for x in self.partitions if x.type == 'primary']

    @property
    def extended(self):
        """Return the extended partition, or None if there is none."""
        found = [x for x in self.partitions if x.type == 'extended']
        if found:
            return found[0]

    def next_type(self):
        """Type for the next partition: gpt disks only have primaries;
        msdos disks get 3 primaries, then an extended, then logicals."""
        if self.label == 'gpt':
            return 'primary'
        elif self.label == 'msdos':
            if self.extended:
                return 'logical'
            elif len(self.partitions) < 3 and not self.extended:
                return 'primary'
            elif len(self.partitions) == 3 and not self.extended:
                return 'extended'
            else:
                return 'logical'

    def next_count(self, next_type=None):
        """Number of the next partition (logical numbering starts at 5)."""
        next_type = next_type or self.next_type()
        if next_type == 'logical':
            return len(self.logical) + 5
        return len(self.partitions) + 1

    def next_begin(self):
        """Start position for the next partition: end of the last one
        (or its begin, when the last one is the extended container)."""
        if not self.partitions:
            return 0
        if self.partitions[-1] == self.extended:
            return self.partitions[-1].begin
        return self.partitions[-1].end

    def next_name(self):
        """Device node name for the next partition, e.g. '/dev/sda1'.

        Extended partitions get no name; cciss/loop devices use a 'p'
        separator (e.g. '/dev/loop0p1').
        """
        if self.next_type() == 'extended':
            return None
        separator = ''
        if self.name.find('cciss') >= 0 or self.name.find('loop') >= 0:
            separator = 'p'
        return '%s%s%s' % (self.name, separator, self.next_count())
class Partition(object):
    """A single partition on a disk."""

    def __init__(self, name, count, device, begin, end, partition_type,
                 flags=None, guid=None, configdrive=False):
        """Describe a partition.

        :param name: device node, e.g. '/dev/sda1' (None for extended)
        :param count: partition number on the disk
        :param device: parent disk, e.g. '/dev/sda'
        :param begin: start position
        :param end: end position
        :param partition_type: 'primary', 'extended' or 'logical'
        :param flags: optional list of parted flags (e.g. 'boot')
        :param guid: optional GPT GUID
        :param configdrive: True if this partition holds the config drive
        """
        # BUG FIX: 'self.name = name' was assigned twice; keep one.
        self.name = name
        self.count = count
        self.device = device
        self.begin = begin
        self.end = end
        self.type = partition_type
        self.flags = flags or []
        self.guid = guid
        self.configdrive = configdrive

    def set_flag(self, flag):
        """Add a parted flag once; duplicates are ignored."""
        if flag not in self.flags:
            self.flags.append(flag)

    def set_guid(self, guid):
        """Set the partition GUID (GPT only)."""
        self.guid = guid
class Pv(object):
    """LVM physical volume, identified by its device name."""

    def __init__(self, name):
        self.name = name
class Vg(object):
    """LVM volume group built from one or more physical volumes."""

    def __init__(self, name, pvnames=None):
        self.name = name
        self.pvnames = pvnames or []

    def add_pv(self, pvname):
        """Attach a pv by name; attaching twice keeps a single entry."""
        if pvname not in self.pvnames:
            self.pvnames.append(pvname)
class Lv(object):
    """LVM logical volume inside a volume group."""

    def __init__(self, name, vgname, size):
        self.name = name
        self.vgname = vgname
        self.size = size

    @property
    def device_name(self):
        """Device-mapper node for this lv.

        devmapper escapes '-' in vg/lv names by doubling it, then joins
        the two with a single '-'.
        """
        escaped_vg = self.vgname.replace('-', '--')
        escaped_lv = self.name.replace('-', '--')
        return '/dev/mapper/%s-%s' % (escaped_vg, escaped_lv)
class Md(object):
    """Software RAID (mdadm) device: a level plus member/spare devices."""

    def __init__(self, name, level,
                 devices=None, spares=None):
        self.name = name
        self.level = level
        self.devices = devices or []
        self.spares = spares or []

    def _ensure_not_attached(self, device):
        # A device may appear at most once across members and spares.
        if device in self.devices or device in self.spares:
            raise errors.MDDeviceDuplicationError(
                'Error while attaching device to md: '
                'device %s is already attached' % device)

    def add_device(self, device):
        """Attach an active member device; raises on duplicates."""
        self._ensure_not_attached(device)
        self.devices.append(device)

    def add_spare(self, device):
        """Attach a spare device; raises on duplicates."""
        self._ensure_not_attached(device)
        self.spares.append(device)
class Fs(object):
    """A file system to create on a device and (optionally) mount."""

    def __init__(self, device, mount=None,
                 fs_type=None, fs_options=None, fs_label=None):
        self.device = device
        self.mount = mount
        # Fall back to defaults when values are not given (or falsy).
        self.type = fs_type if fs_type else 'xfs'
        self.options = fs_options if fs_options else ''
        self.label = fs_label if fs_label else ''
class PartitionScheme(object):
    """Complete disk layout for a node: parted disks, mds, LVM and fss."""

    def __init__(self):
        self.parteds = []
        self.mds = []
        self.pvs = []
        self.vgs = []
        self.lvs = []
        self.fss = []

    def add_parted(self, **kwargs):
        """Add a Parted (disk) description and return it."""
        parted = Parted(**kwargs)
        self.parteds.append(parted)
        return parted

    def add_pv(self, name):
        """Add an LVM physical volume and return it."""
        pv = Pv(name=name)
        self.pvs.append(pv)
        return pv

    def add_vg(self, **kwargs):
        """Add an LVM volume group and return it."""
        vg = Vg(**kwargs)
        self.vgs.append(vg)
        return vg

    def add_lv(self, **kwargs):
        """Add an LVM logical volume and return it."""
        lv = Lv(**kwargs)
        self.lvs.append(lv)
        return lv

    def add_fs(self, **kwargs):
        """Add a file system description and return it."""
        fs = Fs(**kwargs)
        self.fss.append(fs)
        return fs

    def add_md(self, **kwargs):
        """Add a software RAID device; name and level get defaults."""
        kwargs['name'] = kwargs.get('name') or self.md_next_name()
        kwargs['level'] = kwargs.get('level') or 'mirror'
        md = Md(**kwargs)
        self.mds.append(md)
        return md

    def md_by_name(self, name):
        """Return the md with the given name, or None."""
        # NOTE: list comprehension instead of filter() so len()/indexing
        # also work on Python 3.
        found = [x for x in self.mds if x.name == name]
        if found:
            return found[0]

    def md_by_mount(self, mount):
        """Return the md whose file system is mounted at mount, or None."""
        fs = self.fs_by_mount(mount)
        if fs:
            return self.md_by_name(fs.device)

    def md_attach_by_mount(self, device, mount, spare=False, **kwargs):
        """Attach device to the md mounted at mount, creating the md
        (and its file system) when it does not exist yet.

        Extra kwargs: fs_type/fs_options/fs_label go to the file system,
        the rest go to Md.
        """
        md = self.md_by_mount(mount)
        if not md:
            # BUG FIX: fs-related kwargs must be popped BEFORE add_md,
            # otherwise they leak into Md.__init__ and raise TypeError.
            fskwargs = {}
            # BUG FIX: was kwargs.pop('mount'), which always raised
            # KeyError because mount is a named parameter, not a kwarg.
            fskwargs['mount'] = mount
            fskwargs['fs_type'] = kwargs.pop('fs_type', None)
            fskwargs['fs_options'] = kwargs.pop('fs_options', None)
            fskwargs['fs_label'] = kwargs.pop('fs_label', None)
            md = self.add_md(**kwargs)
            fskwargs['device'] = md.name
            self.add_fs(**fskwargs)
        md.add_spare(device) if spare else md.add_device(device)
        return md

    def md_next_name(self):
        """Return the first unused name in /dev/md0 .. /dev/md127."""
        count = 0
        while True:
            name = '/dev/md%s' % count
            if name not in [md.name for md in self.mds]:
                return name
            if count > 127:
                raise errors.MDAlreadyExistsError(
                    'Error while generating md name: '
                    'names from /dev/md0 to /dev/md127 seem to be busy, '
                    'try to generate md name manually')
            # BUG FIX: count was never incremented, so the loop spun
            # forever as soon as /dev/md0 was taken.
            count += 1

    def vg_by_name(self, vgname):
        """Return the volume group with the given name, or None."""
        found = [x for x in self.vgs if x.name == vgname]
        if found:
            return found[0]

    def pv_by_name(self, pvname):
        """Return the physical volume with the given name, or None."""
        found = [x for x in self.pvs if x.name == pvname]
        if found:
            return found[0]

    def vg_attach_by_name(self, pvname, vgname):
        """Attach a pv to a vg, creating either when missing."""
        vg = self.vg_by_name(vgname) or self.add_vg(name=vgname)
        pv = self.pv_by_name(pvname) or self.add_pv(name=pvname)
        vg.add_pv(pv.name)

    def fs_by_mount(self, mount):
        """Return the file system mounted at mount, or None."""
        found = [x for x in self.fss if x.mount and x.mount == mount]
        if found:
            return found[0]

    def fs_by_device(self, device):
        """Return the file system located on device, or None."""
        found = [x for x in self.fss if x.device == device]
        if found:
            return found[0]

    def root_device(self):
        """Return the device holding '/'; raise if there is none."""
        for fs in self.fss:
            if fs.mount == '/':
                return fs.device
        raise errors.WrongPartitionSchemeError(
            'Error while trying to find root device: '
            'root file system not found')

    # Configdrive device must be a small (about 10M) partition
    # on one of node hard drives. This partition is necessary
    # only if one uses cloud-init with configdrive.
    def configdrive_device(self):
        """Return the partition name flagged as configdrive, or None."""
        for parted in self.parteds:
            # BUG FIX: attribute was misspelled 'partititons', which
            # raised AttributeError on every call with a non-empty scheme.
            for prt in parted.partitions:
                if prt.configdrive:
                    return prt.name

View File

View File

@ -0,0 +1,17 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six

# Register 'mox' as a six "moved module" so tests can do
# ``from six.moves import mox`` and transparently get mox3 on Python 3.
six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox'))

View File

@ -0,0 +1,314 @@
# Copyright 2012 SINA Corporation
# Copyright 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Extracts OpenStack config option info from module(s)."""
from __future__ import print_function
import argparse
import imp
import os
import re
import socket
import sys
import textwrap
from oslo.config import cfg
import six
import stevedore.named
from fuel_agent.openstack.common import gettextutils
from fuel_agent.openstack.common import importutils
gettextutils.install('fuel_agent')
# Option-type class names exactly as they appear in str(type(opt)).
STROPT = "StrOpt"
BOOLOPT = "BoolOpt"
INTOPT = "IntOpt"
FLOATOPT = "FloatOpt"
LISTOPT = "ListOpt"
DICTOPT = "DictOpt"
MULTISTROPT = "MultiStrOpt"

# Human-readable descriptions printed next to each option's help text.
OPT_TYPES = {
    STROPT: 'string value',
    BOOLOPT: 'boolean value',
    INTOPT: 'integer value',
    FLOATOPT: 'floating point value',
    LISTOPT: 'list value',
    DICTOPT: 'dict value',
    MULTISTROPT: 'multi valued',
}

# Matches one of the option-type class names inside str(type(opt)).
OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
                                              FLOATOPT, LISTOPT, DICTOPT,
                                              MULTISTROPT]))

PY_EXT = ".py"
# Repository root, used by _sanitize_default() to relativize paths.
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                       "../../../../"))
# Column width for wrapping option help text in the generated file.
WORDWRAP_WIDTH = 60
def raise_extension_exception(extmanager, ep, err):
    # stevedore on_load_failure_callback: invoked from inside stevedore's
    # exception handler, so a bare 'raise' re-raises the original error
    # instead of swallowing it.
    raise
def generate(argv):
    """Generate a sample config file from the options found in the given
    modules (-m), libraries' entry points (-l) and source files.

    Prints the resulting INI-style sample to stdout, DEFAULT group first.
    """
    parser = argparse.ArgumentParser(
        description='generate sample configuration file',
    )
    parser.add_argument('-m', dest='modules', action='append')
    parser.add_argument('-l', dest='libraries', action='append')
    parser.add_argument('srcfiles', nargs='*')
    parsed_args = parser.parse_args(argv)

    # Group source files by their top-level package directory.
    mods_by_pkg = dict()
    for filepath in parsed_args.srcfiles:
        pkg_name = filepath.split(os.sep)[1]
        mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
                            os.path.basename(filepath).split('.')[0]])
        mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
    # NOTE(lzyeval): place top level modules before packages
    pkg_names = sorted(pkg for pkg in mods_by_pkg if pkg.endswith(PY_EXT))
    ext_names = sorted(pkg for pkg in mods_by_pkg if pkg not in pkg_names)
    pkg_names.extend(ext_names)

    # opts_by_group is a mapping of group name to an options list
    # The options list is a list of (module, options) tuples
    opts_by_group = {'DEFAULT': []}

    # Options from explicitly named modules (-m).
    if parsed_args.modules:
        for module_name in parsed_args.modules:
            module = _import_module(module_name)
            if module:
                for group, opts in _list_opts(module):
                    opts_by_group.setdefault(group, []).append((module_name,
                                                                opts))

    # Look for entry points defined in libraries (or applications) for
    # option discovery, and include their return values in the output.
    #
    # Each entry point should be a function returning an iterable
    # of pairs with the group name (or None for the default group)
    # and the list of Opt instances for that group.
    if parsed_args.libraries:
        loader = stevedore.named.NamedExtensionManager(
            'oslo.config.opts',
            names=list(set(parsed_args.libraries)),
            invoke_on_load=False,
            on_load_failure_callback=raise_extension_exception
        )
        for ext in loader:
            for group, opts in ext.plugin():
                opt_list = opts_by_group.setdefault(group or 'DEFAULT', [])
                opt_list.append((ext.name, opts))

    # Options discovered by importing each listed source file as a module.
    for pkg_name in pkg_names:
        mods = mods_by_pkg.get(pkg_name)
        mods.sort()
        for mod_str in mods:
            if mod_str.endswith('.__init__'):
                mod_str = mod_str[:mod_str.rfind(".")]
            mod_obj = _import_module(mod_str)
            if not mod_obj:
                raise RuntimeError("Unable to import module %s" % mod_str)
            for group, opts in _list_opts(mod_obj):
                opts_by_group.setdefault(group, []).append((mod_str, opts))

    # DEFAULT group is always emitted first, then the rest alphabetically.
    print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
    for group in sorted(opts_by_group.keys()):
        print_group_opts(group, opts_by_group[group])
def _import_module(mod_str):
    """Import a module by dotted name; return it, or None on failure.

    Names starting with 'bin.' are loaded from the bin/ directory as
    source files (they have no .py extension).
    """
    try:
        if mod_str.startswith('bin.'):
            imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:]))
            return sys.modules[mod_str[4:]]
        else:
            return importutils.import_module(mod_str)
    except Exception as e:
        # Report but do not abort: caller decides whether a missing
        # module is fatal.
        sys.stderr.write("Error importing module %s: %s\n" % (mod_str, str(e)))
        return None
def _is_in_group(opt, group):
    "Check if opt is in group."
    for value in group._opts.values():
        # NOTE(llu): Temporary workaround for bug #1262148, wait until
        # newly released oslo.config support '==' operator.
        # (Deliberately 'not !=' rather than '==': only __ne__ behaves
        # correctly in affected oslo.config versions. Do not "simplify".)
        if not(value['opt'] != opt):
            return True
    return False
def _guess_groups(opt, mod_obj):
    """Return the name of the config group the option is registered in.

    :raises: RuntimeError when the option is not found in any group
             (e.g. registered twice in the same group).
    """
    # is it in the DEFAULT group?
    if _is_in_group(opt, cfg.CONF):
        return 'DEFAULT'

    # what other groups is it in?
    for value in cfg.CONF.values():
        if isinstance(value, cfg.CONF.GroupAttr):
            if _is_in_group(opt, value._group):
                return value._group.name

    raise RuntimeError(
        "Unable to find group for option %s, "
        "maybe it's defined twice in the same group?"
        % opt.name
    )
def _list_opts(obj):
    """Collect cfg.Opt instances exposed by a module.

    Scans the module's attributes for single Opt objects and lists of
    Opts (SubCommandOpt is excluded), then groups them by config group.

    :returns: iterable of (group_name, [opts]) pairs
    """
    def is_opt(o):
        return (isinstance(o, cfg.Opt) and
                not isinstance(o, cfg.SubCommandOpt))

    opts = list()
    for attr_str in dir(obj):
        attr_obj = getattr(obj, attr_str)
        if is_opt(attr_obj):
            opts.append(attr_obj)
        elif (isinstance(attr_obj, list) and
              all(map(lambda x: is_opt(x), attr_obj))):
            opts.extend(attr_obj)

    ret = {}
    for opt in opts:
        ret.setdefault(_guess_groups(opt, obj), []).append(opt)
    return ret.items()
def print_group_opts(group, opts_by_module):
    """Print one [group] section with all its options to stdout.

    :param group: config group name
    :param opts_by_module: list of (module_name, [opts]) pairs
    """
    print("[%s]" % group)
    print('')
    for mod, opts in opts_by_module:
        # Header comment naming the module the options come from.
        print('#')
        print('# Options defined in %s' % mod)
        print('#')
        print('')
        for opt in opts:
            _print_opt(opt)
        print('')
def _get_my_ip():
    """Best-effort local IP discovery; returns None on network errors.

    Connecting a UDP socket does not send packets; it only makes the OS
    pick the outgoing interface, whose address we then read back.
    """
    try:
        csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        csock.connect(('8.8.8.8', 80))
        (addr, port) = csock.getsockname()
        csock.close()
        return addr
    except socket.error:
        return None
def _sanitize_default(name, value):
    """Set up a reasonably sensible default for pybasedir, my_ip and host."""
    # NOTE(review): the elif chain is order-sensitive — each branch
    # masks the later ones. Do not reorder.
    hostname = socket.gethostname()
    fqdn = socket.getfqdn()
    if value.startswith(sys.prefix):
        # NOTE(jd) Don't use os.path.join, because it is likely to think the
        # second part is an absolute pathname and therefore drop the first
        # part.
        value = os.path.normpath("/usr/" + value[len(sys.prefix):])
    elif value.startswith(BASEDIR):
        return value.replace(BASEDIR, '/usr/lib/python/site-packages')
    elif BASEDIR in value:
        return value.replace(BASEDIR, '')
    elif value == _get_my_ip():
        return '10.0.0.1'
    elif value in (hostname, fqdn):
        if 'host' in name:
            return 'fuel_agent'
    elif value.endswith(hostname):
        return value.replace(hostname, 'fuel_agent')
    elif value.endswith(fqdn):
        return value.replace(fqdn, 'fuel_agent')
    elif value.strip() != value:
        # Quote values with leading/trailing whitespace so they survive
        # round-tripping through the config file.
        return '"%s"' % value
    return value
def _print_opt(opt):
    """Print one option as a commented sample line with wrapped help text.

    Exits the process with status 1 when the option type cannot be
    determined or the default value does not match the declared type.
    """
    opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
    if not opt_help:
        sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
        opt_help = ""
    opt_type = None
    try:
        # Infer the Opt subclass name from str(type(opt)).
        opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
    except (ValueError, AttributeError) as err:
        sys.stderr.write("%s\n" % str(err))
        sys.exit(1)
    opt_help = u'%s (%s)' % (opt_help,
                             OPT_TYPES[opt_type])
    print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
    if opt.deprecated_opts:
        for deprecated_opt in opt.deprecated_opts:
            if deprecated_opt.name:
                deprecated_group = (deprecated_opt.group if
                                    deprecated_opt.group else "DEFAULT")
                print('# Deprecated group/name - [%s]/%s' %
                      (deprecated_group,
                       deprecated_opt.name))
    try:
        # Each option type gets a type-appropriate rendering; asserts
        # catch declared-type / default-value mismatches.
        if opt_default is None:
            print('#%s=<None>' % opt_name)
        elif opt_type == STROPT:
            assert(isinstance(opt_default, six.string_types))
            print('#%s=%s' % (opt_name, _sanitize_default(opt_name,
                                                          opt_default)))
        elif opt_type == BOOLOPT:
            assert(isinstance(opt_default, bool))
            print('#%s=%s' % (opt_name, str(opt_default).lower()))
        elif opt_type == INTOPT:
            # bool is a subclass of int, so exclude it explicitly.
            assert(isinstance(opt_default, int) and
                   not isinstance(opt_default, bool))
            print('#%s=%s' % (opt_name, opt_default))
        elif opt_type == FLOATOPT:
            assert(isinstance(opt_default, float))
            print('#%s=%s' % (opt_name, opt_default))
        elif opt_type == LISTOPT:
            assert(isinstance(opt_default, list))
            print('#%s=%s' % (opt_name, ','.join(opt_default)))
        elif opt_type == DICTOPT:
            assert(isinstance(opt_default, dict))
            opt_default_strlist = [str(key) + ':' + str(value)
                                   for (key, value) in opt_default.items()]
            print('#%s=%s' % (opt_name, ','.join(opt_default_strlist)))
        elif opt_type == MULTISTROPT:
            assert(isinstance(opt_default, list))
            if not opt_default:
                opt_default = ['']
            for default in opt_default:
                print('#%s=%s' % (opt_name, default))
        print('')
    except Exception:
        sys.stderr.write('Error in option "%s"\n' % opt_name)
        sys.exit(1)
def main():
    # CLI entry point: forward all command-line args to generate().
    generate(sys.argv[1:])

if __name__ == '__main__':
    main()

View File

@ -0,0 +1,498 @@
# Copyright 2012 Red Hat, Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
gettext for openstack-common modules.
Usual usage in an openstack.common module:
from fuel_agent.openstack.common.gettextutils import _
"""
import copy
import functools
import gettext
import locale
from logging import handlers
import os
from babel import localedata
import six
# Per-domain cache used by get_available_languages().
_AVAILABLE_LANGUAGES = {}

# FIXME(dhellmann): Remove this when moving to oslo.i18n.
# Tracks whether enable_lazy() has switched _() to lazy Message objects.
USE_LAZY = False
class TranslatorFactory(object):
    """Create translator functions
    """

    def __init__(self, domain, lazy=False, localedir=None):
        """Establish a set of translation functions for the domain.

        :param domain: Name of translation domain,
                       specifying a message catalog.
        :type domain: str
        :param lazy: Delays translation until a message is emitted.
                     Defaults to False.
        :type lazy: Boolean
        :param localedir: Directory with translation catalogs.
        :type localedir: str
        """
        self.domain = domain
        self.lazy = lazy
        if localedir is None:
            # e.g. FUEL_AGENT_LOCALEDIR overrides the catalog location.
            localedir = os.environ.get(domain.upper() + '_LOCALEDIR')
        self.localedir = localedir

    def _make_translation_func(self, domain=None):
        """Return a new translation function ready for use.

        Takes into account whether or not lazy translation is being
        done.

        The domain can be specified to override the default from the
        factory, but the localedir from the factory is always used
        because we assume the log-level translation catalogs are
        installed in the same directory as the main application
        catalog.
        """
        if domain is None:
            domain = self.domain
        if self.lazy:
            # Lazy mode: defer actual lookup by returning Message objects.
            return functools.partial(Message, domain=domain)
        t = gettext.translation(
            domain,
            localedir=self.localedir,
            fallback=True,
        )
        if six.PY3:
            return t.gettext
        return t.ugettext

    @property
    def primary(self):
        "The default translation function."
        return self._make_translation_func()

    def _make_log_translation_func(self, level):
        # Log-level catalogs live in '<domain>-log-<level>' domains.
        return self._make_translation_func(self.domain + '-log-' + level)

    @property
    def log_info(self):
        "Translate info-level log messages."
        return self._make_log_translation_func('info')

    @property
    def log_warning(self):
        "Translate warning-level log messages."
        return self._make_log_translation_func('warning')

    @property
    def log_error(self):
        "Translate error-level log messages."
        return self._make_log_translation_func('error')

    @property
    def log_critical(self):
        "Translate critical-level log messages."
        return self._make_log_translation_func('critical')
# NOTE(dhellmann): When this module moves out of the incubator into
# oslo.i18n, these global variables can be moved to an integration
# module within each application.

# Create the global translation functions.
_translators = TranslatorFactory('fuel_agent')

# The primary translation function using the well-known name "_"
_ = _translators.primary

# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical

# NOTE(dhellmann): End of globals that will move to the application's
# integration module.
def enable_lazy():
    """Convenience function for configuring _() to use lazy gettext

    Call this at the start of execution to enable the gettextutils._
    function to use lazy gettext functionality. This is useful if
    your project is importing _ directly instead of using the
    gettextutils.install() way of importing the _ function.
    """
    # FIXME(dhellmann): This function will be removed in oslo.i18n,
    # because the TranslatorFactory makes it superfluous.
    # Rebinds the module-level translation functions to lazy variants.
    global _, _LI, _LW, _LE, _LC, USE_LAZY
    tf = TranslatorFactory('fuel_agent', lazy=True)
    _ = tf.primary
    _LI = tf.log_info
    _LW = tf.log_warning
    _LE = tf.log_error
    _LC = tf.log_critical
    USE_LAZY = True
def install(domain, lazy=False):
    """Install a _() function using the given translation domain.

    Given a translation domain, install a _() function using gettext's
    install() function.

    The main difference from gettext.install() is that we allow
    overriding the default localedir (e.g. /usr/share/locale) using
    a translation-domain-specific environment variable (e.g.
    NOVA_LOCALEDIR).

    :param domain: the translation domain
    :param lazy: indicates whether or not to install the lazy _() function.
                 The lazy _() introduces a way to do deferred translation
                 of messages by installing a _ that builds Message objects,
                 instead of strings, which can then be lazily translated into
                 any available locale.
    """
    if lazy:
        from six import moves
        tf = TranslatorFactory(domain, lazy=True)
        # Install into builtins so _() is available everywhere.
        moves.builtins.__dict__['_'] = tf.primary
    else:
        localedir = '%s_LOCALEDIR' % domain.upper()
        if six.PY3:
            gettext.install(domain,
                            localedir=os.environ.get(localedir))
        else:
            # Python 2 needs unicode=True so _() returns unicode strings.
            gettext.install(domain,
                            localedir=os.environ.get(localedir),
                            unicode=True)
class Message(six.text_type):
    """A Message object is a unicode object that can be translated.

    Translation of Message is done explicitly using the translate() method.
    For all non-translation intents and purposes, a Message is simply unicode,
    and can be treated as such.
    """

    def __new__(cls, msgid, msgtext=None, params=None,
                domain='fuel_agent', *args):
        """Create a new Message object.

        In order for translation to work gettext requires a message ID, this
        msgid will be used as the base unicode text. It is also possible
        for the msgid and the base unicode text to be different by passing
        the msgtext parameter.
        """
        # If the base msgtext is not given, we use the default translation
        # of the msgid (which is in English) just in case the system locale is
        # not English, so that the base text will be in that locale by default.
        if not msgtext:
            msgtext = Message._translate_msgid(msgid, domain)
        # We want to initialize the parent unicode with the actual object that
        # would have been plain unicode if 'Message' was not enabled.
        msg = super(Message, cls).__new__(cls, msgtext)
        msg.msgid = msgid
        msg.domain = domain
        msg.params = params
        return msg

    def translate(self, desired_locale=None):
        """Translate this message to the desired locale.

        :param desired_locale: The desired locale to translate the message to,
                               if no locale is provided the message will be
                               translated to the system's default locale.

        :returns: the translated message in unicode
        """
        translated_message = Message._translate_msgid(self.msgid,
                                                      self.domain,
                                                      desired_locale)
        if self.params is None:
            # No need for more translation
            return translated_message

        # This Message object may have been formatted with one or more
        # Message objects as substitution arguments, given either as a single
        # argument, part of a tuple, or as one or more values in a dictionary.
        # When translating this Message we need to translate those Messages too
        translated_params = _translate_args(self.params, desired_locale)

        translated_message = translated_message % translated_params

        return translated_message

    @staticmethod
    def _translate_msgid(msgid, domain, desired_locale=None):
        """Look up msgid in the domain's catalog for the given locale.

        Falls back to en_US when no locale is given and none can be
        determined from the environment; falls back to msgid itself when
        no catalog is found (gettext fallback=True).
        """
        if not desired_locale:
            system_locale = locale.getdefaultlocale()
            # If the system locale is not available to the runtime use English
            if not system_locale[0]:
                desired_locale = 'en_US'
            else:
                desired_locale = system_locale[0]

        locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
        lang = gettext.translation(domain,
                                   localedir=locale_dir,
                                   languages=[desired_locale],
                                   fallback=True)
        if six.PY3:
            translator = lang.gettext
        else:
            translator = lang.ugettext

        translated_message = translator(msgid)
        return translated_message

    def __mod__(self, other):
        # When we mod a Message we want the actual operation to be performed
        # by the parent class (i.e. unicode()), the only thing we do here is
        # save the original msgid and the parameters in case of a translation
        params = self._sanitize_mod_params(other)
        unicode_mod = super(Message, self).__mod__(params)
        modded = Message(self.msgid,
                         msgtext=unicode_mod,
                         params=params,
                         domain=self.domain)
        return modded

    def _sanitize_mod_params(self, other):
        """Sanitize the object being modded with this Message.

        - Add support for modding 'None' so translation supports it
        - Trim the modded object, which can be a large dictionary, to only
          those keys that would actually be used in a translation
        - Snapshot the object being modded, in case the message is
          translated, it will be used as it was when the Message was created
        """
        if other is None:
            # Wrap in a tuple so '%s' % None works as expected.
            params = (other,)
        elif isinstance(other, dict):
            # Merge the dictionaries
            # Copy each item in case one does not support deep copy.
            params = {}
            if isinstance(self.params, dict):
                for key, val in self.params.items():
                    params[key] = self._copy_param(val)
            for key, val in other.items():
                params[key] = self._copy_param(val)
        else:
            params = self._copy_param(other)
        return params

    def _copy_param(self, param):
        try:
            return copy.deepcopy(param)
        except Exception:
            # Fallback to casting to unicode this will handle the
            # python code-like objects that can't be deep-copied
            return six.text_type(param)

    def __add__(self, other):
        msg = _('Message objects do not support addition.')
        raise TypeError(msg)

    def __radd__(self, other):
        return self.__add__(other)

    if six.PY2:
        def __str__(self):
            # NOTE(luisg): Logging in python 2.6 tries to str() log records,
            # and it expects specifically a UnicodeError in order to proceed.
            msg = _('Message objects do not support str() because they may '
                    'contain non-ascii characters. '
                    'Please use unicode() or translate() instead.')
            raise UnicodeError(msg)
def get_available_languages(domain):
    """Lists the available languages for the given translation domain.

    :param domain: the domain to get languages for
    """
    # Results are cached per domain; return a copy so callers can't
    # mutate the cache.
    if domain in _AVAILABLE_LANGUAGES:
        return copy.copy(_AVAILABLE_LANGUAGES[domain])

    localedir = '%s_LOCALEDIR' % domain.upper()
    find = lambda x: gettext.find(domain,
                                  localedir=os.environ.get(localedir),
                                  languages=[x])

    # NOTE(mrodden): en_US should always be available (and first in case
    # order matters) since our in-line message strings are en_US
    language_list = ['en_US']
    # NOTE(luisg): Babel <1.0 used a function called list(), which was
    # renamed to locale_identifiers() in >=1.0, the requirements master list
    # requires >=0.9.6, uncapped, so defensively work with both. We can remove
    # this check when the master list updates to >=1.0, and update all projects
    list_identifiers = (getattr(localedata, 'list', None) or
                        getattr(localedata, 'locale_identifiers'))
    locale_identifiers = list_identifiers()

    for i in locale_identifiers:
        if find(i) is not None:
            language_list.append(i)

    # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
    # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
    # are perfectly legitimate locales:
    #     https://github.com/mitsuhiko/babel/issues/37
    # In Babel 1.3 they fixed the bug and they support these locales, but
    # they are still not explicitly "listed" by locale_identifiers().
    # That is why we add the locales here explicitly if necessary so that
    # they are listed as supported.
    aliases = {'zh': 'zh_CN',
               'zh_Hant_HK': 'zh_HK',
               'zh_Hant': 'zh_TW',
               'fil': 'tl_PH'}
    for (locale, alias) in six.iteritems(aliases):
        if locale in language_list and alias not in language_list:
            language_list.append(alias)

    _AVAILABLE_LANGUAGES[domain] = language_list
    return copy.copy(language_list)
def translate(obj, desired_locale=None):
    """Gets the translated unicode representation of the given object.

    If the object is not translatable it is returned as-is.
    If the locale is None the object is translated to the system locale.

    :param obj: the object to translate
    :param desired_locale: the locale to translate the message to, if None the
                           default system locale will be used
    :returns: the translated object in unicode, or the original object if
              it could not be translated
    """
    if isinstance(obj, Message):
        # Already a translatable Message: translate it directly.
        return obj.translate(desired_locale)
    # Not a Message itself; its unicode form might still be one (Message
    # subclasses unicode), in which case that can be translated instead.
    as_unicode = six.text_type(obj)
    if isinstance(as_unicode, Message):
        return as_unicode.translate(desired_locale)
    return obj
def _translate_args(args, desired_locale=None):
    """Translates all the translatable elements of the given arguments object.

    This method is used for translating the translatable values in method
    arguments which include values of tuples or dictionaries.
    If the object is not a tuple or a dictionary the object itself is
    translated if it is translatable.

    If the locale is None the object is translated to the system locale.

    :param args: the args to translate
    :param desired_locale: the locale to translate the args to, if None the
                           default system locale will be used
    :returns: a new args object with the translated contents of the original
    """
    if isinstance(args, tuple):
        return tuple(translate(v, desired_locale) for v in args)
    if isinstance(args, dict):
        return dict((k, translate(v, desired_locale))
                    for k, v in six.iteritems(args))
    return translate(args, desired_locale)
class TranslationHandler(handlers.MemoryHandler):
    """Handler that translates records before logging them.

    The TranslationHandler takes a locale and a target logging.Handler object
    to forward LogRecord objects to after translating them. This handler
    depends on Message objects being logged, instead of regular strings.

    The handler can be configured declaratively in the logging.conf as
    follows:

        [handlers]
        keys = translatedlog, translator

        [handler_translatedlog]
        class = handlers.WatchedFileHandler
        args = ('/var/log/api-localized.log',)
        formatter = context

        [handler_translator]
        class = openstack.common.log.TranslationHandler
        target = translatedlog
        args = ('zh_CN',)

    If the specified locale is not available in the system, the handler will
    log in the default locale.
    """

    def __init__(self, locale=None, target=None):
        """Initialize a TranslationHandler

        :param locale: locale to use for translating messages
        :param target: logging.Handler object to forward
                       LogRecord objects to after translation
        """
        # NOTE(luisg): In order to allow this handler to be a wrapper for
        # other handlers, such as a FileHandler, and still be able to
        # configure it using logging.conf, this handler has to extend
        # MemoryHandler because only the MemoryHandlers' logging.conf
        # parsing is implemented such that it accepts a target handler.
        handlers.MemoryHandler.__init__(self, capacity=0, target=target)
        self.locale = locale

    def setFormatter(self, fmt):
        # Formatting happens in the wrapped target handler; delegate to it.
        self.target.setFormatter(fmt)

    def emit(self, record):
        # Translation mutates record.msg/record.args in place, so remember
        # the originals and restore them afterwards; other handlers looking
        # at the same record must not observe the translated values.
        saved_msg, saved_args = record.msg, record.args
        try:
            self._translate_and_log_record(record)
        finally:
            record.msg, record.args = saved_msg, saved_args

    def _translate_and_log_record(self, record):
        record.msg = translate(record.msg, self.locale)
        # In addition to translating the message, we also need to translate
        # arguments that were passed to the log method that were not part
        # of the main message e.g., log.info(_('Some message %s'), this_one))
        record.args = _translate_args(record.args, self.locale)
        self.target.emit(record)

View File

@ -0,0 +1,73 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Import related utilities and helper functions.
"""
import sys
import traceback
def import_class(import_str):
    """Returns a class from a string including module and class.

    :param import_str: dotted path such as 'package.module.ClassName'
    :returns: the class (or any module attribute) named by ``import_str``
    :raises ImportError: if the attribute is missing from the module, or if
                         ``import_str`` contains no module part at all
    """
    mod_str, _sep, class_str = import_str.rpartition('.')
    try:
        # A string without a dot yields an empty module name, for which
        # __import__ raises ValueError; normalize that to ImportError so
        # callers (e.g. import_object_ns) only have one failure mode.
        __import__(mod_str)
        return getattr(sys.modules[mod_str], class_str)
    except (ValueError, AttributeError):
        raise ImportError('Class %s cannot be found (%s)' %
                          (class_str,
                           traceback.format_exception(*sys.exc_info())))
def import_object(import_str, *args, **kwargs):
    """Import a class and return an instance of it.

    :param import_str: dotted path to the class
    :param args: positional arguments forwarded to the constructor
    :param kwargs: keyword arguments forwarded to the constructor
    """
    cls = import_class(import_str)
    return cls(*args, **kwargs)
def import_object_ns(name_space, import_str, *args, **kwargs):
    """Tries to import object from default namespace.

    Imports a class and return an instance of it, first by trying
    to find the class in a default namespace, then failing back to
    a full path if not found in the default namespace.
    """
    namespaced_path = '.'.join((name_space, import_str))
    try:
        return import_class(namespaced_path)(*args, **kwargs)
    except ImportError:
        # Not found under the default namespace; treat import_str as a
        # fully qualified path instead.
        return import_class(import_str)(*args, **kwargs)
def import_module(import_str):
    """Import a module.

    :param import_str: dotted module path
    :returns: the imported (leaf) module object
    """
    # __import__ returns the top-level package, so fetch the actual leaf
    # module from sys.modules afterwards.
    __import__(import_str)
    return sys.modules[import_str]
def import_versioned_module(version, submodule=None):
    """Import a versioned fuel_agent module (fuel_agent.v<version>[.sub]).

    :param version: version component, e.g. 1 for fuel_agent.v1
    :param submodule: optional submodule name below the versioned package
    """
    module_path = 'fuel_agent.v%s' % version
    if submodule:
        module_path = '.'.join((module_path, submodule))
    return import_module(module_path)
def try_import(import_str, default=None):
    """Try to import a module and if it fails return default."""
    try:
        imported = import_module(import_str)
    except ImportError:
        # Module unavailable: fall back to the caller-provided default.
        return default
    return imported

View File

@ -0,0 +1,186 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
JSON related utilities.
This module provides a few things:
1) A handy function for getting an object down to something that can be
JSON serialized. See to_primitive().
2) Wrappers around loads() and dumps(). The dumps() wrapper will
automatically use to_primitive() for you if needed.
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
is available.
'''
import codecs
import datetime
import functools
import inspect
import itertools
import sys
if sys.version_info < (2, 7):
# On Python <= 2.6, json module is not C boosted, so try to use
# simplejson module if available
try:
import simplejson as json
except ImportError:
import json
else:
import json
import six
import six.moves.xmlrpc_client as xmlrpclib
from fuel_agent.openstack.common import gettextutils
from fuel_agent.openstack.common import importutils
from fuel_agent.openstack.common import strutils
from fuel_agent.openstack.common import timeutils
# netaddr is an optional dependency: when it is absent this is None and
# to_primitive() simply skips its IPAddress special-casing.
netaddr = importutils.try_import("netaddr")
# Predicates identifying objects that must never be iterated or recursed
# into by to_primitive() (modules, classes, frames, generators, ...);
# such values are stringified instead.
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
                     inspect.isfunction, inspect.isgeneratorfunction,
                     inspect.isgenerator, inspect.istraceback, inspect.isframe,
                     inspect.iscode, inspect.isbuiltin, inspect.isroutine,
                     inspect.isabstract]

# Types that are already JSON-friendly and are returned unchanged.
_simple_types = (six.string_types + six.integer_types
                 + (type(None), bool, float))
def to_primitive(value, convert_instances=False, convert_datetime=True,
                 level=0, max_depth=3):
    """Convert a complex object into primitives.

    Handy for JSON serialization. We can optionally handle instances,
    but since this is a recursive function, we could have cyclical
    data structures.

    To handle cyclical data structures we could track the actual objects
    visited in a set, but not all objects are hashable. Instead we just
    track the depth of the object inspections and don't go too deep.

    Therefore, convert_instances=True is lossy ... be aware.

    :param value: object to convert
    :param convert_instances: if True, recurse into arbitrary instances'
                              __dict__ (lossy)
    :param convert_datetime: if True, render datetimes via timeutils.strtime()
    :param level: current recursion depth (internal)
    :param max_depth: depth at which conversion gives up and returns '?'
    """
    # handle obvious types first - order of basic types determined by running
    # full tests on nova project, resulting in the following counts:
    # 572754 <type 'NoneType'>
    # 460353 <type 'int'>
    # 379632 <type 'unicode'>
    # 274610 <type 'str'>
    # 199918 <type 'dict'>
    # 114200 <type 'datetime.datetime'>
    #  51817 <type 'bool'>
    #  26164 <type 'list'>
    #   6491 <type 'float'>
    #    283 <type 'tuple'>
    #     19 <type 'long'>
    if isinstance(value, _simple_types):
        return value

    if isinstance(value, datetime.datetime):
        if convert_datetime:
            return timeutils.strtime(value)
        else:
            return value

    # value of itertools.count doesn't get caught by nasty_type_tests
    # and results in infinite loop when list(value) is called.
    if type(value) == itertools.count:
        return six.text_type(value)

    # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
    #              tests that raise an exception in a mocked method that
    #              has a @wrap_exception with a notifier will fail. If
    #              we up the dependency to 0.5.4 (when it is released) we
    #              can remove this workaround.
    if getattr(value, '__module__', None) == 'mox':
        return 'mock'

    # Depth guard: refuse to recurse further; lossy but cycle-safe.
    if level > max_depth:
        return '?'

    # The try block may not be necessary after the class check above,
    # but just in case ...
    try:
        recursive = functools.partial(to_primitive,
                                      convert_instances=convert_instances,
                                      convert_datetime=convert_datetime,
                                      level=level,
                                      max_depth=max_depth)
        if isinstance(value, dict):
            return dict((k, recursive(v)) for k, v in six.iteritems(value))
        elif isinstance(value, (list, tuple)):
            # NOTE(review): list/tuple recursion reuses the current `level`;
            # only the dict-like and instance branches below bump it — confirm
            # this matches the intended depth accounting.
            return [recursive(lv) for lv in value]

        # It's not clear why xmlrpclib created their own DateTime type, but
        # for our purposes, make it a datetime type which is explicitly
        # handled
        if isinstance(value, xmlrpclib.DateTime):
            value = datetime.datetime(*tuple(value.timetuple())[:6])

        if convert_datetime and isinstance(value, datetime.datetime):
            return timeutils.strtime(value)
        elif isinstance(value, gettextutils.Message):
            return value.data
        elif hasattr(value, 'iteritems'):
            return recursive(dict(value.iteritems()), level=level + 1)
        elif hasattr(value, '__iter__'):
            return recursive(list(value))
        elif convert_instances and hasattr(value, '__dict__'):
            # Likely an instance of something. Watch for cycles.
            # Ignore class member vars.
            return recursive(value.__dict__, level=level + 1)
        elif netaddr and isinstance(value, netaddr.IPAddress):
            return six.text_type(value)
        else:
            if any(test(value) for test in _nasty_type_tests):
                return six.text_type(value)
            return value
    except TypeError:
        # Class objects are tricky since they may define something like
        # __iter__ defined but it isn't callable as list().
        return six.text_type(value)
def dumps(value, default=to_primitive, **kwargs):
    """Serialize ``value`` to a JSON string.

    Unserializable objects are handled by ``default`` (to_primitive by
    default); any extra keyword arguments are passed through to json.dumps.
    """
    serializer_kwargs = dict(kwargs, default=default)
    return json.dumps(value, **serializer_kwargs)
def loads(s, encoding='utf-8'):
    """Deserialize a JSON document, safely decoding bytes input first."""
    decoded = strutils.safe_decode(s, encoding)
    return json.loads(decoded)
def load(fp, encoding='utf-8'):
    """Deserialize a JSON document read from the file-like object ``fp``.

    The byte stream is wrapped in a codecs reader so that encodings other
    than the json module's default can be handled.
    """
    reader_factory = codecs.getreader(encoding)
    return json.load(reader_factory(fp))
# Register our dumps/loads/load wrappers as an anyjson implementation so
# code going through anyjson picks up to_primitive()-aware serialization.
# Best-effort: if anyjson is not installed, skip silently.
try:
    import anyjson
except ImportError:
    pass
else:
    anyjson._modules.append((__name__, 'dumps', TypeError,
                             'loads', ValueError, 'load'))
    anyjson.force_implementation(__name__)

View File

@ -0,0 +1,45 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Local storage of variables using weak references"""
import threading
import weakref
class WeakLocal(threading.local):
    """Thread-local storage whose values are held via weak references.

    Attributes set on an instance do not keep their referents alive: once
    the original object is garbage-collected, reading the attribute
    returns None.
    """

    def __getattribute__(self, attr):
        ref = super(WeakLocal, self).__getattribute__(attr)
        if not ref:
            return ref
        # NOTE(mikal): this bit is confusing. What is stored is a weak
        # reference, not the value itself. We therefore need to lookup
        # the weak reference and return the inner value here.
        return ref()

    def __setattr__(self, attr, value):
        # Store only a weak reference; dereferenced again on read.
        return super(WeakLocal, self).__setattr__(attr, weakref.ref(value))
# Module-level thread-local stores shared across the process.
# NOTE(mikal): the name "store" should be deprecated in the future
store = WeakLocal()

# A "weak" store uses weak references and allows an object to fall out of scope
# when it falls out of scope in the code that uses the thread local storage. A
# "strong" store will hold a reference to the object so that it never falls out
# of scope.
weak_store = WeakLocal()
strong_store = threading.local()

View File

@ -0,0 +1,723 @@
# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""OpenStack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import re
import sys
import traceback
from oslo.config import cfg
import six
from six import moves
from fuel_agent.openstack.common.gettextutils import _
from fuel_agent.openstack.common import importutils
from fuel_agent.openstack.common import jsonutils
from fuel_agent.openstack.common import local
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"

# Keys whose values must be masked out of logged messages by mask_password().
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']

# NOTE(ldbragst): Let's build a list of regex objects using the list of
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
# to the list of _SANITIZE_KEYS and we can generate regular expressions
# for XML and JSON automatically.
_SANITIZE_PATTERNS = []
# Each template is instantiated per key; group 1 and 2 capture the text
# surrounding the secret so mask_password() can keep them and replace
# only the value in between.
_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
                    r'(<%(key)s>).*?(</%(key)s>)',
                    r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
                    r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])',
                    r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?[\'"])'
                    '.*?([\'"])',
                    r'(%(key)s\s*--?[A-z]+\s*).*?([\s])']

for key in _SANITIZE_KEYS:
    for pattern in _FORMAT_PATTERNS:
        reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
        _SANITIZE_PATTERNS.append(reg_ex)
# Command-line verbosity switches accepted by every service.
common_cli_opts = [
    cfg.BoolOpt('debug',
                short='d',
                default=False,
                help='Print debugging output (set logging level to '
                     'DEBUG instead of default WARNING level).'),
    cfg.BoolOpt('verbose',
                short='v',
                default=False,
                help='Print more verbose output (set logging level to '
                     'INFO instead of default WARNING level).'),
]

# Command-line options selecting the logging backend and output location.
logging_cli_opts = [
    cfg.StrOpt('log-config-append',
               metavar='PATH',
               deprecated_name='log-config',
               help='The name of a logging configuration file. This file '
                    'is appended to any existing logging configuration '
                    'files. For details about logging configuration files, '
                    'see the Python logging module documentation.'),
    cfg.StrOpt('log-format',
               metavar='FORMAT',
               help='DEPRECATED. '
                    'A logging.Formatter log message format string which may '
                    'use any of the available logging.LogRecord attributes. '
                    'This option is deprecated. Please use '
                    'logging_context_format_string and '
                    'logging_default_format_string instead.'),
    cfg.StrOpt('log-date-format',
               default=_DEFAULT_LOG_DATE_FORMAT,
               metavar='DATE_FORMAT',
               help='Format string for %%(asctime)s in log records. '
                    'Default: %(default)s .'),
    cfg.StrOpt('log-file',
               metavar='PATH',
               deprecated_name='logfile',
               help='(Optional) Name of log file to output to. '
                    'If no default is set, logging will go to stdout.'),
    cfg.StrOpt('log-dir',
               deprecated_name='logdir',
               help='(Optional) The base directory used for relative '
                    '--log-file paths.'),
    cfg.BoolOpt('use-syslog',
                default=False,
                help='Use syslog for logging. '
                     'Existing syslog format is DEPRECATED during I, '
                     'and will change in J to honor RFC5424.'),
    cfg.BoolOpt('use-syslog-rfc-format',
                # TODO(bogdando) remove or use True after existing
                # syslog format deprecation in J
                default=False,
                help='(Optional) Enables or disables syslog rfc5424 format '
                     'for logging. If enabled, prefixes the MSG part of the '
                     'syslog message with APP-NAME (RFC5424). The '
                     'format without the APP-NAME is deprecated in I, '
                     'and will be removed in J.'),
    cfg.StrOpt('syslog-log-facility',
               default='LOG_USER',
               help='Syslog facility to receive log lines.')
]

generic_log_opts = [
    cfg.BoolOpt('use_stderr',
                default=True,
                help='Log output to standard error.')
]

# Config-file-only options controlling record formats and per-logger levels.
log_opts = [
    cfg.StrOpt('logging_context_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [%(request_id)s %(user_identity)s] '
                       '%(instance)s%(message)s',
               help='Format string to use for log messages with context.'),
    cfg.StrOpt('logging_default_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [-] %(instance)s%(message)s',
               help='Format string to use for log messages without context.'),
    cfg.StrOpt('logging_debug_format_suffix',
               default='%(funcName)s %(pathname)s:%(lineno)d',
               help='Data to append to log format when level is DEBUG.'),
    cfg.StrOpt('logging_exception_prefix',
               default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
                       '%(instance)s',
               help='Prefix each line of exception output with this format.'),
    cfg.ListOpt('default_log_levels',
                default=[
                    'amqp=WARN',
                    'amqplib=WARN',
                    'boto=WARN',
                    'qpid=WARN',
                    'sqlalchemy=WARN',
                    'suds=INFO',
                    'oslo.messaging=INFO',
                    'iso8601=WARN',
                    'requests.packages.urllib3.connectionpool=WARN'
                ],
                help='List of logger=LEVEL pairs.'),
    cfg.BoolOpt('publish_errors',
                default=False,
                help='Enables or disables publication of error events.'),
    cfg.BoolOpt('fatal_deprecations',
                default=False,
                help='Enables or disables fatal status of deprecations.'),

    # NOTE(mikal): there are two options here because sometimes we are handed
    # a full instance (and could include more information), and other times we
    # are just handed a UUID for the instance.
    cfg.StrOpt('instance_format',
               default='[instance: %(uuid)s] ',
               help='The format for an instance that is passed with the log '
                    'message. '),
    cfg.StrOpt('instance_uuid_format',
               default='[instance: %(uuid)s] ',
               help='The format for an instance UUID that is passed with the '
                    'log message. '),
]

# Register everything on the global config object at import time.
CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')

try:
    NullHandler = logging.NullHandler
except AttributeError:  # NOTE(jkoelker) NullHandler added in Python 2.7
    # Minimal no-op fallback handler for Python 2.6.
    class NullHandler(logging.Handler):
        def handle(self, record):
            pass

        def emit(self, record):
            pass

        def createLock(self):
            # No lock is needed since handle()/emit() do nothing.
            self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
    """Resolve the log file path from CONF.log_file / CONF.log_dir.

    :param binary: program name used when only log_dir is set; defaults
                   to the running binary's name
    :returns: the path to log to, or None when file logging is disabled
    """
    logfile = CONF.log_file
    logdir = CONF.log_dir

    if logfile:
        return os.path.join(logdir, logfile) if logdir else logfile

    if logdir:
        binary = binary or _get_binary_name()
        return '%s.log' % (os.path.join(logdir, binary),)

    return None
def mask_password(message, secret="***"):
    """Replace password with 'secret' in message.

    :param message: The string which includes security information.
    :param secret: value with which to replace passwords.
    :returns: The unicode value of message with the password fields masked.

    For example:

    >>> mask_password("'adminPass' : 'aaaaa'")
    "'adminPass' : '***'"
    >>> mask_password("'admin_pass' : 'aaaaa'")
    "'admin_pass' : '***'"
    >>> mask_password('"password" : "aaaaa"')
    '"password" : "***"'
    >>> mask_password("'original_password' : 'aaaaa'")
    "'original_password' : '***'"
    >>> mask_password("u'original_password' : u'aaaaa'")
    "u'original_password' : u'***'"
    """
    message = six.text_type(message)

    # NOTE(ldbragst): Quick containment check first so the common case
    # (no sensitive key present at all) skips the regex passes entirely.
    if not any(sanitize_key in message for sanitize_key in _SANITIZE_KEYS):
        return message

    # Keep the captured delimiters (groups 1 and 2) and replace only the
    # secret value between them.
    replacement = r'\g<1>' + secret + r'\g<2>'
    for regex in _SANITIZE_PATTERNS:
        message = regex.sub(replacement, message)
    return message
class BaseLoggerAdapter(logging.LoggerAdapter):
    # Extends the standard adapter with the synthesized AUDIT level
    # (logging.AUDIT, registered above as INFO + 1).

    def audit(self, msg, *args, **kwargs):
        # Delegate through log() so the adapter's process() hook still runs.
        self.log(logging.AUDIT, msg, *args, **kwargs)
class LazyAdapter(BaseLoggerAdapter):
    """Adapter that defers creation of the real logger until first use."""

    def __init__(self, name='unknown', version='unknown'):
        # Real adapter is built lazily by the `logger` property below.
        self._logger = None
        self.extra = {}
        self.name = name
        self.version = version

    @property
    def logger(self):
        # Build and cache the real ContextAdapter on first access.
        if not self._logger:
            self._logger = getLogger(self.name, self.version)
        return self._logger
class ContextAdapter(BaseLoggerAdapter):
    """Logger adapter that injects request-context data into log records."""

    # Alias so callers may use .warn() as well as .warning().
    warn = logging.LoggerAdapter.warning

    def __init__(self, logger, project_name, version_string):
        self.logger = logger
        self.project = project_name
        self.version = version_string
        # Maps a deprecation message to the list of args it has already been
        # logged with, so each unique message/args pair is warned only once.
        self._deprecated_messages_sent = dict()

    @property
    def handlers(self):
        return self.logger.handlers

    def deprecated(self, msg, *args, **kwargs):
        """Call this method when a deprecated feature is used.

        If the system is configured for fatal deprecations then the message
        is logged at the 'critical' level and :class:`DeprecatedConfig` will
        be raised.

        Otherwise, the message will be logged (once) at the 'warn' level.

        :raises: :class:`DeprecatedConfig` if the system is configured for
                 fatal deprecations.
        """
        stdmsg = _("Deprecated: %s") % msg
        if CONF.fatal_deprecations:
            self.critical(stdmsg, *args, **kwargs)
            raise DeprecatedConfig(msg=stdmsg)

        # Using a list because a tuple with dict can't be stored in a set.
        sent_args = self._deprecated_messages_sent.setdefault(msg, list())

        if args in sent_args:
            # Already logged this message, so don't log it again.
            return

        sent_args.append(args)
        self.warn(stdmsg, *args, **kwargs)

    def process(self, msg, kwargs):
        # NOTE(mrodden): catch any Message/other object and
        #                coerce to unicode before they can get
        #                to the python logging and possibly
        #                cause string encoding trouble
        if not isinstance(msg, six.string_types):
            msg = six.text_type(msg)

        if 'extra' not in kwargs:
            kwargs['extra'] = {}
        extra = kwargs['extra']

        # Prefer an explicitly passed context, else the thread-local one.
        context = kwargs.pop('context', None)
        if not context:
            context = getattr(local.store, 'context', None)
        if context:
            extra.update(_dictify_context(context))

        instance = kwargs.pop('instance', None)
        instance_uuid = (extra.get('instance_uuid') or
                         kwargs.pop('instance_uuid', None))
        instance_extra = ''
        if instance:
            instance_extra = CONF.instance_format % instance
        elif instance_uuid:
            instance_extra = (CONF.instance_uuid_format
                              % {'uuid': instance_uuid})
        extra['instance'] = instance_extra

        extra.setdefault('user_identity', kwargs.pop('user_identity', None))

        extra['project'] = self.project
        extra['version'] = self.version
        # Keep a snapshot of all additional fields under 'extra' so
        # formatters (e.g. JSONFormatter) can see them as one mapping.
        extra['extra'] = extra.copy()
        return msg, kwargs
class JSONFormatter(logging.Formatter):
    """Formatter that serializes the full LogRecord as a JSON document."""

    def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but its still there
        #                since logging.config.fileConfig passes it.
        self.datefmt = datefmt

    def formatException(self, ei, strip_newlines=True):
        """Format exception info; by default as a flat list of lines."""
        lines = traceback.format_exception(*ei)
        if strip_newlines:
            # Each traceback chunk may contain embedded newlines; split
            # them out and drop empty strings so we get one line per entry.
            lines = [moves.filter(
                lambda x: x,
                line.rstrip().splitlines()) for line in lines]
            lines = list(itertools.chain(*lines))
        return lines

    def format(self, record):
        # Expose (nearly) every LogRecord attribute as a JSON field.
        message = {'message': record.getMessage(),
                   'asctime': self.formatTime(record, self.datefmt),
                   'name': record.name,
                   'msg': record.msg,
                   'args': record.args,
                   'levelname': record.levelname,
                   'levelno': record.levelno,
                   'pathname': record.pathname,
                   'filename': record.filename,
                   'module': record.module,
                   'lineno': record.lineno,
                   'funcname': record.funcName,
                   'created': record.created,
                   'msecs': record.msecs,
                   'relative_created': record.relativeCreated,
                   'thread': record.thread,
                   'thread_name': record.threadName,
                   'process_name': record.processName,
                   'process': record.process,
                   'traceback': None}

        # Extra fields injected by ContextAdapter.process(), if any.
        if hasattr(record, 'extra'):
            message['extra'] = record.extra

        if record.exc_info:
            message['traceback'] = self.formatException(record.exc_info)

        return jsonutils.dumps(message)
def _create_logging_excepthook(product_name):
    """Build a sys.excepthook that logs uncaught exceptions as CRITICAL."""
    def logging_excepthook(exc_type, value, tb):
        # One-line summary as the message; full traceback via exc_info.
        summary = "".join(traceback.format_exception_only(exc_type, value))
        getLogger(product_name).critical(summary,
                                         exc_info=(exc_type, value, tb))
    return logging_excepthook
class LogConfigError(Exception):
    """Raised when a logging configuration file fails to load or parse."""

    message = _('Error loading logging config %(log_config)s: %(err_msg)s')

    def __init__(self, log_config, err_msg):
        self.log_config = log_config
        self.err_msg = err_msg

    def __str__(self):
        return self.message % {'log_config': self.log_config,
                               'err_msg': self.err_msg}
def _load_log_config(log_config_append):
    """Append the given logging config file, wrapping parse errors.

    :raises LogConfigError: if the file cannot be parsed
    """
    try:
        logging.config.fileConfig(log_config_append,
                                  disable_existing_loggers=False)
    except moves.configparser.Error as exc:
        err_msg = six.text_type(exc)
        raise LogConfigError(log_config_append, err_msg)
def setup(product_name, version='unknown'):
    """Setup logging for the process and install the logging excepthook.

    When CONF.log_config_append is set the configuration is read from
    that file; otherwise it is assembled from the individual options.
    """
    config_file = CONF.log_config_append
    if config_file:
        _load_log_config(config_file)
    else:
        _setup_logging_from_conf(product_name, version)
    sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string):
    """Override the default of the logging_context_format_string option.

    Lets consuming projects inject their own context-aware format string.
    """
    cfg.set_defaults(log_opts,
                     logging_context_format_string=
                     logging_context_format_string)
def _find_facility_from_conf():
    """Resolve CONF.syslog_log_facility to a SysLogHandler facility code.

    :raises TypeError: if the configured facility name is not recognized
    """
    facility_names = logging.handlers.SysLogHandler.facility_names
    # Try the value as a SysLogHandler attribute first (e.g. 'LOG_USER'),
    # then fall back to the facility_names mapping (e.g. 'user').
    facility = getattr(logging.handlers.SysLogHandler,
                       CONF.syslog_log_facility,
                       None)

    if facility is None and CONF.syslog_log_facility in facility_names:
        facility = facility_names.get(CONF.syslog_log_facility)

    if facility is None:
        valid_facilities = facility_names.keys()
        # NOTE(review): 'LOG_AUTH' appears twice in this list; harmless,
        # since it is only used to build the error message below.
        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
                  'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
        valid_facilities.extend(consts)
        raise TypeError(_('syslog facility must be one of: %s') %
                        ', '.join("'%s'" % fac
                                  for fac in valid_facilities))

    return facility
class RFCSysLogHandler(logging.handlers.SysLogHandler):
    """SysLogHandler that prefixes messages with APP-NAME (RFC 5424 style)."""

    def __init__(self, *args, **kwargs):
        self.binary_name = _get_binary_name()
        # Do not use super() unless type(logging.handlers.SysLogHandler)
        # is 'type' (Python 2.7).
        # Use old style calls, if the type is 'classobj' (Python 2.6)
        logging.handlers.SysLogHandler.__init__(self, *args, **kwargs)

    def format(self, record):
        # Do not use super() unless type(logging.handlers.SysLogHandler)
        # is 'type' (Python 2.7).
        # Use old style calls, if the type is 'classobj' (Python 2.6)
        msg = logging.handlers.SysLogHandler.format(self, record)
        # Prepend the program name so syslog consumers can identify the app.
        msg = self.binary_name + ' ' + msg
        return msg
def _setup_logging_from_conf(project, version):
    """Wire up the root logger's handlers, formatters and levels from CONF."""
    log_root = getLogger(None).logger
    # Start from a clean slate so repeated setup() calls don't stack handlers.
    for handler in log_root.handlers:
        log_root.removeHandler(handler)

    if CONF.use_syslog:
        facility = _find_facility_from_conf()
        # TODO(bogdando) use the format provided by RFCSysLogHandler
        #   after existing syslog format deprecation in J
        if CONF.use_syslog_rfc_format:
            syslog = RFCSysLogHandler(address='/dev/log',
                                      facility=facility)
        else:
            syslog = logging.handlers.SysLogHandler(address='/dev/log',
                                                    facility=facility)
        log_root.addHandler(syslog)

    logpath = _get_log_file_path()
    if logpath:
        filelog = logging.handlers.WatchedFileHandler(logpath)
        log_root.addHandler(filelog)

    if CONF.use_stderr:
        streamlog = ColorHandler()
        log_root.addHandler(streamlog)

    elif not logpath:
        # Nothing else configured: fall back to plain stdout logging.
        # pass sys.stdout as a positional argument
        # python2.6 calls the argument strm, in 2.7 it's stream
        streamlog = logging.StreamHandler(sys.stdout)
        log_root.addHandler(streamlog)

    if CONF.publish_errors:
        # Imported lazily by dotted path so log_handler stays optional.
        handler = importutils.import_object(
            "fuel_agent.openstack.common.log_handler.PublishErrorsHandler",
            logging.ERROR)
        log_root.addHandler(handler)

    datefmt = CONF.log_date_format
    for handler in log_root.handlers:
        # NOTE(alaski): CONF.log_format overrides everything currently. This
        # should be deprecated in favor of context aware formatting.
        if CONF.log_format:
            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                   datefmt=datefmt))
            log_root.info('Deprecated: log_format is now deprecated and will '
                          'be removed in the next release')
        else:
            handler.setFormatter(ContextFormatter(project=project,
                                                  version=version,
                                                  datefmt=datefmt))

    if CONF.debug:
        log_root.setLevel(logging.DEBUG)
    elif CONF.verbose:
        log_root.setLevel(logging.INFO)
    else:
        log_root.setLevel(logging.WARNING)

    for pair in CONF.default_log_levels:
        mod, _sep, level_name = pair.partition('=')
        logger = logging.getLogger(mod)
        # NOTE(AAzza) in python2.6 Logger.setLevel doesn't convert string name
        # to integer code.
        if sys.version_info < (2, 7):
            level = logging.getLevelName(level_name)
            logger.setLevel(level)
        else:
            logger.setLevel(level_name)
# Cache of ContextAdapter instances, keyed by logger name.
_loggers = {}


def getLogger(name='unknown', version='unknown'):
    """Return (and cache) a ContextAdapter for ``name``.

    NOTE: the version of a previously cached adapter wins; a later call
    with a different version returns the cached instance unchanged.
    """
    try:
        return _loggers[name]
    except KeyError:
        adapter = ContextAdapter(logging.getLogger(name), name, version)
        _loggers[name] = adapter
        return adapter
def getLazyLogger(name='unknown', version='unknown'):
    """Returns lazy logger.

    Creates a pass-through logger that does not create the real logger
    until it is really needed and delegates all calls to the real logger
    once it is created.

    :param name: logger name, later used as the cache key by getLogger()
    :param version: project version reported in log records
    """
    return LazyAdapter(name, version)
class WritableLogger(object):
    """A thin wrapper that responds to `write` and logs.

    Lets a logger stand in where a file-like object is expected (for
    example as an error stream handed to a WSGI server).
    """

    def __init__(self, logger, level=logging.INFO):
        self.logger = logger
        self.level = level

    def write(self, msg):
        # Strip trailing whitespace/newlines from print-style callers so
        # we don't emit blank log records.
        stripped = msg.rstrip()
        self.logger.log(self.level, stripped)
class ContextFormatter(logging.Formatter):
    """A context.RequestContext aware formatter configured through flags.

    The flags used to set format strings are: logging_context_format_string
    and logging_default_format_string. You can also specify
    logging_debug_format_suffix to append extra formatting if the log level
    is debug.

    For information about what variables are available for the formatter see:
    http://docs.python.org/library/logging.html#formatter

    If available, uses the context value stored in TLS - local.store.context
    """

    def __init__(self, *args, **kwargs):
        """Initialize ContextFormatter instance

        Takes additional keyword arguments which can be used in the message
        format string.

        :keyword project: project name
        :type project: string
        :keyword version: project version
        :type version: string
        """
        self.project = kwargs.pop('project', 'unknown')
        self.version = kwargs.pop('version', 'unknown')

        logging.Formatter.__init__(self, *args, **kwargs)

    def format(self, record):
        """Uses contextstring if request_id is set, otherwise default."""
        # store project info
        record.project = self.project
        record.version = self.version

        # store request info
        context = getattr(local.store, 'context', None)
        if context:
            d = _dictify_context(context)
            for k, v in d.items():
                setattr(record, k, v)

        # NOTE(sdague): default the fancier formatting params
        # to an empty string so we don't throw an exception if
        # they get used
        for key in ('instance', 'color', 'user_identity'):
            if key not in record.__dict__:
                record.__dict__[key] = ''

        # Choose the context-aware format only when a request_id is present;
        # the format string is re-selected per record.
        if record.__dict__.get('request_id'):
            self._fmt = CONF.logging_context_format_string
        else:
            self._fmt = CONF.logging_default_format_string

        if (record.levelno == logging.DEBUG and
                CONF.logging_debug_format_suffix):
            self._fmt += " " + CONF.logging_debug_format_suffix

        # Cache this on the record, Logger will respect our formatted copy
        if record.exc_info:
            record.exc_text = self.formatException(record.exc_info, record)
        return logging.Formatter.format(self, record)

    def formatException(self, exc_info, record=None):
        """Format exception output with CONF.logging_exception_prefix."""
        if not record:
            return logging.Formatter.formatException(self, exc_info)

        stringbuffer = moves.StringIO()
        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
                                  None, stringbuffer)
        lines = stringbuffer.getvalue().split('\n')
        stringbuffer.close()

        # Only compute asctime when the prefix actually references it.
        if CONF.logging_exception_prefix.find('%(asctime)') != -1:
            record.asctime = self.formatTime(record, self.datefmt)

        formatted_lines = []
        for line in lines:
            pl = CONF.logging_exception_prefix % record.__dict__
            fl = '%s%s' % (pl, line)
            formatted_lines.append(fl)
        return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
    """Stream handler that exposes an ANSI colour code per severity.

    The colour is attached to the record as ``record.color`` so format
    strings can reference %(color)s.
    """

    # ANSI escape sequences keyed by level number.
    # NOTE(review): logging.AUDIT is not a stdlib level; it is presumably
    # registered elsewhere in this module — confirm before reuse.
    LEVEL_COLORS = {
        logging.DEBUG: '\033[00;32m',  # GREEN
        logging.INFO: '\033[00;36m',  # CYAN
        logging.AUDIT: '\033[01;36m',  # BOLD CYAN
        logging.WARN: '\033[01;33m',  # BOLD YELLOW
        logging.ERROR: '\033[01;31m',  # BOLD RED
        logging.CRITICAL: '\033[01;31m',  # BOLD RED
    }

    def format(self, record):
        # Raises KeyError for level numbers outside LEVEL_COLORS.
        record.color = self.LEVEL_COLORS[record.levelno]
        return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
    """Raised on a fatal use of a deprecated configuration option.

    :param msg: description of the deprecated option that was used.
    """
    message = _("Fatal call to deprecated config: %(msg)s")

    def __init__(self, msg):
        # Fix: pass the subclass (not Exception) to super() so the MRO is
        # walked from DeprecatedConfig.  The previous super(Exception, self)
        # skipped Exception in the MRO and invoked BaseException.__init__
        # directly.
        super(DeprecatedConfig, self).__init__(self.message % dict(msg=msg))

View File

@ -0,0 +1,272 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import errno
import logging as stdlib_logging
import os
import random
import shlex
import signal
from eventlet.green import subprocess
from eventlet import greenthread
import six
from fuel_agent.openstack.common.gettextutils import _
from fuel_agent.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class InvalidArgumentError(Exception):
    """Raised when a caller supplies an argument that cannot be honoured."""

    def __init__(self, message=None):
        super(InvalidArgumentError, self).__init__(message)
class UnknownArgumentError(Exception):
    """Raised when execute() receives keyword arguments it does not know."""

    def __init__(self, message=None):
        super(UnknownArgumentError, self).__init__(message)
class ProcessExecutionError(Exception):
    """Raised when a spawned command exits with an unexpected status.

    The command line, exit code and captured stdout/stderr are kept as
    attributes and also folded into the exception message.
    """

    def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
                 description=None):
        # Preserve the raw values so callers can inspect them directly.
        self.exit_code = exit_code
        self.stderr = stderr
        self.stdout = stdout
        self.cmd = cmd
        self.description = description

        if description is None:
            description = _("Unexpected error while running command.")
        if exit_code is None:
            exit_code = '-'
        details = {'description': description,
                   'cmd': cmd,
                   'exit_code': exit_code,
                   'stdout': stdout,
                   'stderr': stderr}
        message = _('%(description)s\n'
                    'Command: %(cmd)s\n'
                    'Exit code: %(exit_code)s\n'
                    'Stdout: %(stdout)r\n'
                    'Stderr: %(stderr)r') % details
        super(ProcessExecutionError, self).__init__(message)
class NoRootWrapSpecified(Exception):
    """Raised when run_as_root is requested without a root_helper command."""

    def __init__(self, message=None):
        super(NoRootWrapSpecified, self).__init__(message)
def _subprocess_setup():
    """Restore default SIGPIPE handling in a child process.

    Passed as Popen's ``preexec_fn`` on POSIX (see execute() below).
    """
    # Python installs a SIGPIPE handler by default. This is usually not what
    # non-Python subprocesses expect.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def execute(*cmd, **kwargs):
    """Helper method to shell out and execute a command through subprocess.

    Allows optional retry.

    :param cmd: Passed to subprocess.Popen.
    :type cmd: string
    :param process_input: Send to opened process.
    :type process_input: string
    :param env_variables: Environment variables and their values that
                          will be set for the process.
    :type env_variables: dict
    :param check_exit_code: Single bool, int, or list of allowed exit
                            codes.  Defaults to [0].  Raise
                            :class:`ProcessExecutionError` unless
                            program exits with one of these code.
    :type check_exit_code: boolean, int, or [int]
    :param delay_on_retry: True | False. Defaults to True. If set to True,
                           wait a short amount of time before retrying.
    :type delay_on_retry: boolean
    :param attempts: How many times to retry cmd.
    :type attempts: int
    :param run_as_root: True | False. Defaults to False. If set to True,
                        the command is prefixed by the command specified
                        in the root_helper kwarg.
    :type run_as_root: boolean
    :param root_helper: command to prefix to commands called with
                        run_as_root=True
    :type root_helper: string
    :param shell: whether or not there should be a shell used to
                  execute this command. Defaults to false.
    :type shell: boolean
    :param loglevel: log level for execute commands.
    :type loglevel: int.  (Should be stdlib_logging.DEBUG or
                    stdlib_logging.INFO)
    :returns: (stdout, stderr) from process execution
    :raises: :class:`UnknownArgumentError` on
             receiving unknown arguments
    :raises: :class:`ProcessExecutionError`
    """
    process_input = kwargs.pop('process_input', None)
    env_variables = kwargs.pop('env_variables', None)
    check_exit_code = kwargs.pop('check_exit_code', [0])
    ignore_exit_code = False
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    run_as_root = kwargs.pop('run_as_root', False)
    root_helper = kwargs.pop('root_helper', '')
    shell = kwargs.pop('shell', False)
    loglevel = kwargs.pop('loglevel', stdlib_logging.DEBUG)

    # Normalize check_exit_code to a list of acceptable codes.
    if isinstance(check_exit_code, bool):
        ignore_exit_code = not check_exit_code
        check_exit_code = [0]
    elif isinstance(check_exit_code, int):
        check_exit_code = [check_exit_code]

    # Any leftover kwargs are typos/unsupported options — fail loudly.
    if kwargs:
        raise UnknownArgumentError(_('Got unknown keyword args '
                                     'to utils.execute: %r') % kwargs)

    if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
        if not root_helper:
            raise NoRootWrapSpecified(
                message=_('Command requested root, but did not '
                          'specify a root helper.'))
        cmd = shlex.split(root_helper) + list(cmd)

    # Fix: build a real list instead of map(str, cmd).  Under Python 3,
    # map() returns a one-shot iterator which the logging ' '.join below
    # would exhaust, leaving Popen (and any retry) with an empty argv.
    cmd = [str(c) for c in cmd]

    while attempts > 0:
        attempts -= 1
        try:
            LOG.log(loglevel, 'Running cmd (subprocess): %s',
                    ' '.join(logging.mask_password(cmd)))
            _PIPE = subprocess.PIPE  # pylint: disable=E1101

            if os.name == 'nt':
                preexec_fn = None
                close_fds = False
            else:
                preexec_fn = _subprocess_setup
                close_fds = True

            obj = subprocess.Popen(cmd,
                                   stdin=_PIPE,
                                   stdout=_PIPE,
                                   stderr=_PIPE,
                                   close_fds=close_fds,
                                   preexec_fn=preexec_fn,
                                   shell=shell,
                                   env=env_variables)
            result = None
            for _i in six.moves.range(20):
                # NOTE(russellb) 20 is an arbitrary number of retries to
                # prevent any chance of looping forever here.
                try:
                    if process_input is not None:
                        result = obj.communicate(process_input)
                    else:
                        result = obj.communicate()
                except OSError as e:
                    # Retry communicate() on transient signal/EAGAIN errors.
                    if e.errno in (errno.EAGAIN, errno.EINTR):
                        continue
                    raise
                break
            obj.stdin.close()  # pylint: disable=E1101
            _returncode = obj.returncode  # pylint: disable=E1101
            # Use lazy %-interpolation so the string is only built when the
            # message is actually emitted at `loglevel`.
            LOG.log(loglevel, 'Result was %s', _returncode)
            if not ignore_exit_code and _returncode not in check_exit_code:
                (stdout, stderr) = result
                raise ProcessExecutionError(exit_code=_returncode,
                                            stdout=stdout,
                                            stderr=stderr,
                                            cmd=' '.join(cmd))
            return result
        except ProcessExecutionError:
            if not attempts:
                raise
            else:
                LOG.log(loglevel, '%r failed. Retrying.', cmd)
                if delay_on_retry:
                    greenthread.sleep(random.randint(20, 200) / 100.0)
        finally:
            # NOTE(termie): this appears to be necessary to let the subprocess
            #               call clean something up in between calls, without
            #               it two execute calls in a row hangs the second one
            greenthread.sleep(0)
def trycmd(*args, **kwargs):
    """A wrapper around execute() to more easily handle warnings and errors.

    Returns an (out, err) tuple of strings containing the output of
    the command's stdout and stderr.  If 'err' is not empty then the
    command can be considered to have failed.

    :discard_warnings   True | False. Defaults to False. If set to True,
                        then for succeeding commands, stderr is cleared
    """
    discard_warnings = kwargs.pop('discard_warnings', False)

    failed = False
    try:
        out, err = execute(*args, **kwargs)
    except ProcessExecutionError as exn:
        failed = True
        out, err = '', six.text_type(exn)

    # A command may write warnings to stderr yet still exit 0; optionally
    # treat that as clean output.
    if discard_warnings and err and not failed:
        err = ''

    return out, err
def ssh_execute(ssh, cmd, process_input=None,
                addl_env=None, check_exit_code=True):
    """Run *cmd* over an SSH connection and return (stdout, stderr).

    :param ssh: connection object exposing ``exec_command`` (e.g. a
                paramiko-style client — assumption, confirm with callers)
    :param cmd: command string to run remotely
    :raises InvalidArgumentError: if addl_env or process_input is given
    :raises ProcessExecutionError: on non-zero exit when check_exit_code
    """
    LOG.debug('Running cmd (SSH): %s', cmd)
    if addl_env:
        raise InvalidArgumentError(_('Environment not supported over SSH'))
    if process_input:
        # This is (probably) fixable if we need it...
        raise InvalidArgumentError(_('process_input not supported over SSH'))

    stdin, stdout_stream, stderr_stream = ssh.exec_command(cmd)
    channel = stdout_stream.channel

    # NOTE(justinsb): This seems suspicious...
    # ...other SSH clients have buffering issues with this approach
    stdout = stdout_stream.read()
    stderr = stderr_stream.read()
    stdin.close()

    exit_status = channel.recv_exit_status()

    # exit_status == -1 if no exit code was returned
    if exit_status != -1:
        LOG.debug('Result was %s' % exit_status)
        if check_exit_code and exit_status != 0:
            raise ProcessExecutionError(exit_code=exit_status,
                                        stdout=stdout,
                                        stderr=stderr,
                                        cmd=cmd)

    return (stdout, stderr)

View File

@ -0,0 +1,239 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import math
import re
import sys
import unicodedata
import six
from fuel_agent.openstack.common.gettextutils import _
# Exponent applied to a unit system's base for each size prefix,
# e.g. 'Mi' -> base ** 2.
UNIT_PREFIX_EXPONENT = {
    'k': 1,
    'K': 1,
    'Ki': 1,
    'M': 2,
    'Mi': 2,
    'G': 3,
    'Gi': 3,
    'T': 4,
    'Ti': 4,
}
# Unit system name -> (base, regex); the regex splits a size string into
# (magnitude, prefix, bit-or-byte suffix).  Used by string_to_bytes().
UNIT_SYSTEM_INFO = {
    'IEC': (1024, re.compile(r'(^[-+]?\d*\.?\d+)([KMGT]i?)?(b|bit|B)$')),
    'SI': (1000, re.compile(r'(^[-+]?\d*\.?\d+)([kMGT])?(b|bit|B)$')),
}

# Lower-cased values recognized as booleans by bool_from_string().
TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')

# Patterns used by to_slug(): strip non-word characters, then collapse
# whitespace/hyphen runs into single hyphens.
SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]")
SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+")
def int_from_bool_as_string(subject):
    """Interpret a string as a boolean and return either 1 or 0.

    Any string value in:

        ('True', 'true', 'On', 'on', '1')

    is interpreted as a boolean True.

    Useful for JSON-decoded stuff and config file parsing
    """
    if bool_from_string(subject):
        return 1
    return 0
def bool_from_string(subject, strict=False, default=False):
    """Interpret a string as a boolean.

    A case-insensitive match is performed such that strings matching 't',
    'true', 'on', 'y', 'yes', or '1' are considered True and, when
    `strict=False`, anything else returns the value specified by 'default'.

    Useful for JSON-decoded stuff and config file parsing.

    If `strict=True`, unrecognized values, including None, will raise a
    ValueError which is useful when parsing values passed in from an API call.
    Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
    """
    # Coerce non-strings (including None) to text so .strip()/.lower() work.
    if not isinstance(subject, six.string_types):
        subject = six.text_type(subject)

    lowered = subject.strip().lower()

    if lowered in TRUE_STRINGS:
        return True
    if lowered in FALSE_STRINGS:
        return False
    if not strict:
        return default

    acceptable = ', '.join(
        "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
    msg = _("Unrecognized value '%(val)s', acceptable values are:"
            " %(acceptable)s") % {'val': subject,
                                  'acceptable': acceptable}
    raise ValueError(msg)
def safe_decode(text, incoming=None, errors='strict'):
    """Decodes incoming text/bytes string using `incoming` if they're not
       already unicode.

    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a unicode `incoming` encoded
                representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, (six.string_types, six.binary_type)):
        raise TypeError("%s can't be decoded" % type(text))

    # Already unicode: nothing to do.
    if isinstance(text, six.text_type):
        return text

    encoding = incoming or (sys.stdin.encoding or
                            sys.getdefaultencoding())

    try:
        return text.decode(encoding, errors)
    except UnicodeDecodeError:
        # Note(flaper87) If we get here, it means that
        # sys.stdin.encoding / sys.getdefaultencoding
        # didn't return a suitable encoding to decode
        # text. This happens mostly when global LANG
        # var is not set correctly and there's no
        # default encoding. In this case, most likely
        # python will use ASCII or ANSI encoders as
        # default encodings but they won't be capable
        # of decoding non-ASCII characters.
        #
        # Also, UTF-8 is being used since it's an ASCII
        # extension.
        return text.decode('utf-8', errors)
def safe_encode(text, incoming=None,
                encoding='utf-8', errors='strict'):
    """Encodes incoming text/bytes string using `encoding`.

    If incoming is not specified, text is expected to be encoded with
    current python's default encoding. (`sys.getdefaultencoding`)

    :param incoming: Text's current encoding
    :param encoding: Expected encoding for text (Default UTF-8)
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a bytestring `encoding` encoded
                representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, (six.string_types, six.binary_type)):
        raise TypeError("%s can't be encoded" % type(text))

    source = incoming or (sys.stdin.encoding or
                          sys.getdefaultencoding())

    if isinstance(text, six.text_type):
        return text.encode(encoding, errors)
    if text and encoding != source:
        # Bytes in a different encoding: decode with the source encoding
        # first, then re-encode with the requested one.
        decoded = safe_decode(text, source, errors)
        return decoded.encode(encoding, errors)
    # Empty bytes, or already in the requested encoding.
    return text
def string_to_bytes(text, unit_system='IEC', return_int=False):
    """Converts a string into an float representation of bytes.

    The units supported for IEC ::

        Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it)
        KB, KiB, MB, MiB, GB, GiB, TB, TiB

    The units supported for SI ::

        kb(it), Mb(it), Gb(it), Tb(it)
        kB, MB, GB, TB

    Note that the SI unit system does not support capital letter 'K'

    :param text: String input for bytes size conversion.
    :param unit_system: Unit system for byte size conversion.
    :param return_int: If True, returns integer representation of text
                       in bytes. (default: decimal)
    :returns: Numerical representation of text in bytes.
    :raises ValueError: If text has an invalid value.
    """
    if unit_system not in UNIT_SYSTEM_INFO:
        raise ValueError(_('Invalid unit system: "%s"') % unit_system)
    base, reg_ex = UNIT_SYSTEM_INFO[unit_system]

    match = reg_ex.match(text)
    if not match:
        raise ValueError(_('Invalid string format: %s') % text)

    magnitude = float(match.group(1))
    unit_prefix = match.group(2)
    # A bit suffix means the magnitude is in bits, not bytes.
    if match.group(3) in ('b', 'bit'):
        magnitude /= 8

    exponent = UNIT_PREFIX_EXPONENT[unit_prefix] if unit_prefix else 0
    res = magnitude * pow(base, exponent)
    if return_int:
        return int(math.ceil(res))
    return res
def to_slug(value, incoming=None, errors="strict"):
    """Normalize string.

    Convert to lowercase, remove non-word characters, and convert spaces
    to hyphens.

    Inspired by Django's `slugify` filter.

    :param value: Text to slugify
    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: slugified unicode representation of `value`
    :raises TypeError: If text is not an instance of str
    """
    text = safe_decode(value, incoming, errors)
    # NOTE(aababilov): no need to use safe_(encode|decode) here:
    # encodings are always "ascii", error handling is always "ignore"
    # and types are always known (first: unicode; second: str)
    text = unicodedata.normalize("NFKD", text).encode(
        "ascii", "ignore").decode("ascii")
    text = SLUGIFY_STRIP_RE.sub("", text).strip().lower()
    return SLUGIFY_HYPHENATE_RE.sub("-", text)

View File

@ -0,0 +1,210 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Time related utilities and helper functions.
"""
import calendar
import datetime
import time
import iso8601
import six
# ISO 8601 extended time format with microseconds
_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
# Lossless round-trip format used by strtime()/parse_strtime().
PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND
def isotime(at=None, subsecond=False):
    """Stringify time in ISO 8601 format.

    :param at: datetime to render; defaults to the (overridable) utcnow().
    :param subsecond: include microseconds when True.
    """
    at = at or utcnow()
    fmt = (_ISO8601_TIME_FORMAT_SUBSECOND if subsecond
           else _ISO8601_TIME_FORMAT)
    st = at.strftime(fmt)
    # Naive datetimes are treated as UTC; UTC is rendered as 'Z'.
    tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
    return st + ('Z' if tz == 'UTC' else tz)
def parse_isotime(timestr):
    """Parse time from ISO 8601 format.

    :raises ValueError: on unparsable input (including None).
    """
    try:
        return iso8601.parse_date(timestr)
    except (iso8601.ParseError, TypeError) as e:
        # Normalize both failure modes to ValueError for callers.
        raise ValueError(six.text_type(e))
def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
    """Returns formatted utcnow."""
    # Default to the (overridable) utcnow() when no datetime is supplied.
    return (at or utcnow()).strftime(fmt)
def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
    """Turn a formatted time back into a datetime."""
    # Inverse of strtime(); produces a naive datetime (no tzinfo).
    return datetime.datetime.strptime(timestr, fmt)
def normalize_time(timestamp):
    """Normalize time in arbitrary timezone to UTC naive object."""
    offset = timestamp.utcoffset()
    if offset is None:
        # Naive datetime: assume it is already UTC.
        return timestamp
    # Strip tzinfo and shift by the offset to get the UTC wall time.
    return timestamp.replace(tzinfo=None) - offset
def is_older_than(before, seconds):
    """Return True if before is older than seconds."""
    # Accept either a PERFECT_TIME_FORMAT string or a datetime.
    if isinstance(before, six.string_types):
        before = parse_strtime(before)
    before = before.replace(tzinfo=None)
    return utcnow() - before > datetime.timedelta(seconds=seconds)
def is_newer_than(after, seconds):
    """Return True if after is newer than seconds."""
    # Accept either a PERFECT_TIME_FORMAT string or a datetime.
    if isinstance(after, six.string_types):
        after = parse_strtime(after)
    after = after.replace(tzinfo=None)
    return after - utcnow() > datetime.timedelta(seconds=seconds)
def utcnow_ts():
    """Timestamp version of our utcnow function."""
    if utcnow.override_time is None:
        # NOTE(kgriffs): This is several times faster
        # than going through calendar.timegm(...)
        return int(time.time())

    # An override is active (test mode): derive the timestamp from the
    # overridden clock instead of the real one.
    return calendar.timegm(utcnow().timetuple())
def utcnow():
    """Overridable version of utils.utcnow.

    When utcnow.override_time is set (see set_time_override), return the
    override instead of the real clock: a list yields its entries one per
    call, a plain datetime is returned every time.
    """
    override = utcnow.override_time
    if override:
        try:
            return override.pop(0)
        except AttributeError:
            # Not a list: a single fixed datetime.
            return override
    return datetime.datetime.utcnow()
def iso8601_from_timestamp(timestamp):
    """Returns a iso8601 formatted date from timestamp."""
    # POSIX timestamp -> naive UTC datetime -> ISO 8601 string.
    return isotime(datetime.datetime.utcfromtimestamp(timestamp))


# Test hook: when not None, utcnow() returns this value (or pops entries
# from it if it is a list) instead of reading the real clock.
utcnow.override_time = None
def set_time_override(override_time=None):
    """Overrides utils.utcnow.

    Make it return a constant time or a list thereof, one at a time.

    :param override_time: datetime instance or list thereof. If not
                          given, defaults to the current UTC time.
    """
    # Stored on the utcnow function object; cleared by clear_time_override().
    utcnow.override_time = override_time or datetime.datetime.utcnow()
def advance_time_delta(timedelta):
    """Advance overridden time using a datetime.timedelta.

    Handles both override forms: a single datetime (advanced in place) or
    a list of datetimes (every entry advanced).
    """
    assert utcnow.override_time is not None
    try:
        # Fix: datetimes are immutable, so the previous
        # `for dt in ...: dt += timedelta` only rebound the loop variable
        # and silently left the list unchanged.  Rebuild the list instead.
        utcnow.override_time = [dt + timedelta
                                for dt in utcnow.override_time]
    except TypeError:
        # Single datetime override: not iterable, advance it directly.
        utcnow.override_time += timedelta
def advance_time_seconds(seconds):
    """Advance overridden time by seconds."""
    # Convenience wrapper around advance_time_delta().
    advance_time_delta(datetime.timedelta(0, seconds))
def clear_time_override():
    """Remove the overridden time."""
    # After this, utcnow() reads the real clock again.
    utcnow.override_time = None
def marshall_now(now=None):
    """Make an rpc-safe datetime with microseconds.

    Note: tzinfo is stripped, but not required for relative times.
    """
    now = now or utcnow()
    return {'day': now.day,
            'month': now.month,
            'year': now.year,
            'hour': now.hour,
            'minute': now.minute,
            'second': now.second,
            'microsecond': now.microsecond}
def unmarshall_time(tyme):
    """Unmarshall a datetime dict (inverse of marshall_now)."""
    fields = ('year', 'month', 'day', 'hour', 'minute', 'second',
              'microsecond')
    # Pull exactly the marshalled keys; extras in `tyme` are ignored.
    return datetime.datetime(**dict((f, tyme[f]) for f in fields))
def delta_seconds(before, after):
    """Return the difference between two timing objects.

    Compute the difference in seconds between two date, time, or
    datetime objects (as a float, to microsecond resolution).
    """
    return total_seconds(after - before)
def total_seconds(delta):
    """Return the total seconds of datetime.timedelta object.

    Compute total seconds of datetime.timedelta, datetime.timedelta
    doesn't have method total_seconds in Python2.6, calculate it manually.
    """
    if hasattr(delta, 'total_seconds'):
        return delta.total_seconds()
    # Python 2.6 fallback: derive the value from the component fields.
    return ((delta.days * 24 * 3600) + delta.seconds +
            float(delta.microseconds) / (10 ** 6))
def is_soon(dt, window):
    """Determines if time is going to happen in the next window seconds.

    :param dt: the time
    :param window: minimum seconds to remain to consider the time not soon
    :return: True if expiration is within the given duration
    """
    deadline = utcnow() + datetime.timedelta(seconds=window)
    return normalize_time(dt) <= deadline

View File

@ -0,0 +1,13 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -0,0 +1,373 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslotest import base as test_base
from fuel_agent.utils import hardware_utils as hu
from fuel_agent.utils import utils
class TestHardwareUtils(test_base.BaseTestCase):
    @mock.patch.object(utils, 'execute')
    def test_parse_dmidecode(self, exec_mock):
        """Each dmidecode section becomes a dict with lower-cased keys;
        'Characteristics' collects its continuation lines into a list.
        """
        # Canned `dmidecode -q --type <type>` output with two sections.
        exec_mock.return_value = ["""
System Slot Information
Designation: PCIEX16_1
ID: 1
Bus Address: 0000:00:01.0
Characteristics:
3.3 V is provided
PME signal is supported
System Slot Information
Type: 32-bit PCI Express
ID: 3
Characteristics:
Opening is shared
Bus Address: 0000:00:1c.4
"""]
        expected = [{"designation": "PCIEX16_1",
                     "id": "1",
                     "characteristics": ["3.3 V is provided",
                                         "PME signal is supported"],
                     "bus address": "0000:00:01.0"},
                    {"type": "32-bit PCI Express",
                     "id": "3",
                     "characteristics": ["Opening is shared"],
                     "bus address": "0000:00:1c.4"}]
        self.assertEqual(expected, hu.parse_dmidecode("fake_type"))
        exec_mock.assert_called_once_with("dmidecode", "-q", "--type",
                                          "fake_type")
    @mock.patch.object(utils, 'execute')
    def test_parse_lspci(self, exec_mock):
        """`lspci -vmm -D` output is split on 'Slot:' records into dicts
        with lower-cased keys.
        """
        exec_mock.return_value = ["""Slot: 07:00.0
Class: PCI bridge
Vendor: ASMedia Technology Inc.
Device: ASM1083/1085 PCIe to PCI Bridge
Rev: 01
ProgIf: 01
Slot: 09:00.0
Class: IDE interface
Vendor: Marvell Technology Group Ltd.
Device: 88SE6121 SATA II / PATA Controller
SVendor: ASUSTeK Computer Inc.
SDevice: Device 82a2
Rev: b2
ProgIf: 8f
"""]
        expected = [{'class': 'PCI bridge',
                     'device': 'ASM1083/1085 PCIe to PCI Bridge',
                     'progif': '01',
                     'rev': '01',
                     'slot': '07:00.0',
                     'vendor': 'ASMedia Technology Inc.'},
                    {'class': 'IDE interface',
                     'device': '88SE6121 SATA II / PATA Controller',
                     'progif': '8f',
                     'rev': 'b2',
                     'sdevice': 'Device 82a2',
                     'slot': '09:00.0',
                     'svendor': 'ASUSTeK Computer Inc.',
                     'vendor': 'Marvell Technology Group Ltd.'}]
        self.assertEqual(expected, hu.parse_lspci())
        exec_mock.assert_called_once_with('lspci', '-vmm', '-D')
    @mock.patch.object(utils, 'execute')
    def test_parse_simple_kv(self, exec_mock):
        """Flat 'key: value' command output becomes a single dict."""
        exec_mock.return_value = ["""driver: r8169
version: 2.3LK-NAPI
firmware-version: rtl_nic/rtl8168e-2.fw
bus-info: 0000:06:00.0
supports-statistics: yes
supports-test: no
supports-eeprom-access: no
supports-register-dump: yes
"""]
        expected = {'driver': 'r8169',
                    'version': '2.3LK-NAPI',
                    'firmware-version': 'rtl_nic/rtl8168e-2.fw',
                    'bus-info': '0000:06:00.0',
                    'supports-statistics': 'yes',
                    'supports-test': 'no',
                    'supports-eeprom-access': 'no',
                    'supports-register-dump': 'yes'}
        self.assertEqual(expected, hu.parse_simple_kv('fake', 'cmd'))
        exec_mock.assert_called_once_with('fake', 'cmd')
    @mock.patch.object(utils, 'execute')
    def test_udevreport(self, mock_exec):
        """udevreport() parses `udevadm info --export` output into a dict,
        splitting DEVLINKS into a list and dropping unknown keys.
        """
        # should run udevadm info OS command
        # in order to get udev properties for a device
        mock_exec.return_value = (
            'DEVLINKS=\'/dev/disk/by-id/fakeid1 /dev/disk/by-id/fakeid2\'\n'
            'DEVNAME=\'/dev/fake\'\n'
            'DEVPATH=\'/devices/fakepath\'\n'
            'DEVTYPE=\'disk\'\n'
            'MAJOR=\'11\'\n'
            'MINOR=\'0\'\n'
            'ID_BUS=\'fakebus\'\n'
            'ID_MODEL=\'fakemodel\'\n'
            'ID_SERIAL_SHORT=\'fakeserial\'\n'
            'ID_WWN=\'fakewwn\'\n'
            'ID_CDROM=\'1\'\n'
            'ANOTHER=\'another\'\n',
            ''
        )
        # Note: 'ANOTHER' is intentionally absent below — it is filtered out.
        expected = {
            'DEVLINKS': ['/dev/disk/by-id/fakeid1', '/dev/disk/by-id/fakeid2'],
            'DEVNAME': '/dev/fake',
            'DEVPATH': '/devices/fakepath',
            'DEVTYPE': 'disk',
            'MAJOR': '11',
            'MINOR': '0',
            'ID_BUS': 'fakebus',
            'ID_MODEL': 'fakemodel',
            'ID_SERIAL_SHORT': 'fakeserial',
            'ID_WWN': 'fakewwn',
            'ID_CDROM': '1'
        }
        self.assertEqual(expected, hu.udevreport('/dev/fake'))
        mock_exec.assert_called_once_with('udevadm',
                                          'info',
                                          '--query=property',
                                          '--export',
                                          '--name=/dev/fake',
                                          check_exit_code=[0])
    @mock.patch.object(utils, 'execute')
    def test_blockdevreport(self, mock_exec):
        """blockdevreport() zips blockdev --get* flags with the newline-
        separated output values.
        """
        # should run blockdev OS command
        # in order to get block device properties
        cmd = ['blockdev', '--getsz', '--getro', '--getss', '--getpbsz',
               '--getsize64', '--getiomin', '--getioopt', '--getra',
               '--getalignoff', '--getmaxsect', '/dev/fake']
        # One output line per --get* flag, in the same order.
        mock_exec.return_value = (
            '625142448\n0\n512\n4096\n320072933376\n4096\n0\n256\n0\n1024',
            ''
        )
        expected = {
            'sz': '625142448',
            'ro': '0',
            'ss': '512',
            'pbsz': '4096',
            'size64': '320072933376',
            'iomin': '4096',
            'ioopt': '0',
            'ra': '256',
            'alignoff': '0',
            'maxsect': '1024'
        }
        self.assertEqual(expected, hu.blockdevreport('/dev/fake'))
        mock_exec.assert_called_once_with(*cmd, check_exit_code=[0])
    @mock.patch('six.moves.builtins.open')
    def test_extrareport(self, mock_open):
        """extrareport() reads sysfs attribute files and strips trailing
        newlines from their contents.
        """
        # should read some files from sysfs e.g. /sys/block/fake/removable
        # in order to get some device properties

        def with_side_effect(arg):
            # Emulate `open(...)` used as a context manager, returning a
            # canned value per sysfs path.
            mock_with = mock.MagicMock()
            mock_with.__exit__.return_value = None
            mock_file = mock.Mock()
            if arg == '/sys/block/fake/removable':
                mock_file.read.return_value = '0\n'
            elif arg == '/sys/block/fake/device/state':
                mock_file.read.return_value = 'running\n'
            elif arg == '/sys/block/fake/device/timeout':
                mock_file.read.return_value = '30\n'
            mock_with.__enter__.return_value = mock_file
            return mock_with
        mock_open.side_effect = with_side_effect
        expected = {'removable': '0', 'state': 'running', 'timeout': '30'}
        self.assertEqual(expected, hu.extrareport('/dev/fake'))
    @mock.patch.object(hu, 'blockdevreport')
    @mock.patch.object(hu, 'udevreport')
    def test_is_disk_uspec_bspec_none(self, mock_ureport, mock_breport):
        """is_disk() gathers missing specs itself and treats empty specs
        as a disk.
        """
        # should call udevreport if uspec is None
        # should call blockdevreport if bspec is None
        # should return True if uspec and bspec are empty
        mock_ureport.return_value = {}
        mock_breport.return_value = {}
        self.assertTrue(hu.is_disk('/dev/fake'))
        mock_ureport.assert_called_once_with('/dev/fake')
        mock_breport.assert_called_once_with('/dev/fake')
    @mock.patch.object(hu, 'udevreport')
    def test_is_disk_uspec_none(self, mock_ureport):
        """A provided bspec suppresses the blockdevreport call, but a
        missing uspec is still fetched via udevreport.
        """
        # should call udevreport if uspec is None but bspec is not None
        bspec = {'key': 'value'}
        mock_ureport.return_value = {}
        hu.is_disk('/dev/fake', bspec=bspec)
        mock_ureport.assert_called_once_with('/dev/fake')
    @mock.patch.object(hu, 'blockdevreport')
    def test_is_disk_bspec_none(self, mock_breport):
        """A provided uspec suppresses the udevreport call, but a missing
        bspec is still fetched via blockdevreport.
        """
        # should call blockdevreport if bspec is None but uspec is not None
        uspec = {'key': 'value'}
        mock_breport.return_value = {}
        hu.is_disk('/dev/fake', uspec=uspec)
        mock_breport.assert_called_once_with('/dev/fake')
    @mock.patch.object(hu, 'blockdevreport')
    def test_is_disk_cdrom(self, mock_breport):
        """CD-ROM devices are not disks."""
        # should return False if udev ID_CDROM is set to 1
        mock_breport.return_value = {}
        uspec = {
            'ID_CDROM': '1'
        }
        self.assertFalse(hu.is_disk('/dev/fake', uspec=uspec))
    @mock.patch.object(hu, 'blockdevreport')
    def test_is_disk_partition(self, mock_breport):
        """Partitions are not disks."""
        # should return False if udev DEVTYPE is partition
        mock_breport.return_value = {}
        uspec = {
            'DEVTYPE': 'partition'
        }
        self.assertFalse(hu.is_disk('/dev/fake', uspec=uspec))
    @mock.patch.object(hu, 'blockdevreport')
    def test_is_disk_major(self, mock_breport):
        """Devices whose major number is not a known disk major are
        rejected.
        """
        # should return False if udev MAJOR is not in a list of
        # major numbers which are used for disks
        # look at kernel/Documentation/devices.txt
        mock_breport.return_value = {}
        valid_majors = [3, 8, 65, 66, 67, 68, 69, 70, 71, 104, 105,
                        106, 107, 108, 109, 110, 111, 202, 252, 253]
        # Exhaustively check every non-disk major in 1..260.
        for major in (set(range(1, 261)) - set(valid_majors)):
            uspec = {
                'MAJOR': str(major)
            }
            self.assertFalse(hu.is_disk('/dev/fake', uspec=uspec))
    @mock.patch.object(hu, 'udevreport')
    def test_is_disk_readonly(self, mock_ureport):
        """Read-only block devices are not usable disks."""
        # should return False if device is read only
        mock_ureport.return_value = {}
        bspec = {
            'ro': '1'
        }
        self.assertFalse(hu.is_disk('/dev/fake', bspec=bspec))
    @mock.patch.object(hu, 'is_disk')
    @mock.patch.object(hu, 'extrareport')
    @mock.patch.object(hu, 'blockdevreport')
    @mock.patch.object(hu, 'udevreport')
    @mock.patch.object(utils, 'execute')
    def test_list_block_devices(self, mock_exec, mock_ureport, mock_breport,
                                mock_ereport, mock_isdisk):
        """list_block_devices() parses `blockdev --report`, gathers the
        three per-device reports, and keeps only devices is_disk() accepts.
        """
        # should run blockdev --report command
        # in order to get a list of block devices
        # should call report methods to get device info
        # should call is_disk method to filter out
        # those block devices which are not disks
        mock_exec.return_value = (
            'RO    RA   SSZ   BSZ   StartSec            Size   Device\n'
            'rw   256   512  4096          0    320072933376   /dev/fake\n'
            'rw   256   512  4096       2048      7998537728   /dev/fake1\n'
            'rw   256   512   512          0      1073741312   /dev/sr0\n',
            ''
        )

        def isdisk_side_effect(arg, uspec=None, bspec=None):
            # Only /dev/fake is treated as a disk; the partition and the
            # cdrom are filtered out.
            if arg == '/dev/fake':
                return True
            elif arg in ('/dev/fake1', '/dev/sr0'):
                return False
        mock_isdisk.side_effect = isdisk_side_effect
        mock_ureport.return_value = {'key0': 'value0'}
        mock_breport.return_value = {'key1': 'value1'}
        mock_ereport.return_value = {'key2': 'value2'}

        expected = [{
            'device': '/dev/fake',
            'startsec': '0',
            'size': 320072933376,
            'uspec': {'key0': 'value0'},
            'bspec': {'key1': 'value1'},
            'espec': {'key2': 'value2'}
        }]
        self.assertEqual(hu.list_block_devices(), expected)
        mock_exec.assert_called_once_with('blockdev', '--report',
                                          check_exit_code=[0])
        # The reports are gathered for every listed device, even the ones
        # that is_disk() later rejects.
        self.assertEqual(mock_ureport.call_args_list, [mock.call('/dev/fake'),
                         mock.call('/dev/fake1'), mock.call('/dev/sr0')])
        self.assertEqual(mock_breport.call_args_list, [mock.call('/dev/fake'),
                         mock.call('/dev/fake1'), mock.call('/dev/sr0')])
        self.assertEqual(mock_ereport.call_args_list, [mock.call('/dev/fake'),
                         mock.call('/dev/fake1'), mock.call('/dev/sr0')])
def test_match_device_devlinks(self):
    """match_device(): one shared /dev/disk/by-id link is enough."""
    left = {'DEVLINKS': ['/dev/disk/by-path/fakepath',
                         '/dev/disk/by-id/fakeid1',
                         '/dev/disk/by-id/fakeid2']}
    right = {'DEVLINKS': ['/dev/disk/by-id/fakeid2',
                          '/dev/disk/by-id/fakeid3']}
    self.assertTrue(hu.match_device(left, right))
def test_match_device_wwn(self):
    """match_device(): equal ID_WWN matches whole disks, and matches
    partitions when MINOR is equal as well."""
    disk_spec = {'ID_WWN': 'fakewwn',
                 'DEVTYPE': 'disk'}
    self.assertTrue(hu.match_device(disk_spec, disk_spec))
    partition_spec = {'ID_WWN': 'fakewwn',
                      'DEVTYPE': 'partition',
                      'MINOR': '1'}
    self.assertTrue(hu.match_device(partition_spec, partition_spec))
def test_match_device_wwn_false(self):
    """match_device(): different ID_WWN values must not match."""
    self.assertFalse(hu.match_device({'ID_WWN': 'fakewwn1'},
                                     {'ID_WWN': 'fakewwn2'}))
def test_match_device_devpath(self):
    """match_device(): an identical DEVPATH means the same device."""
    spec = {'DEVPATH': '/devices/fake'}
    self.assertTrue(hu.match_device(spec, spec))
def test_match_device_serial(self):
    """match_device(): equal ID_SERIAL_SHORT matches when DEVTYPE
    is 'disk'."""
    spec = {'ID_SERIAL_SHORT': 'fakeserial',
            'DEVTYPE': 'disk'}
    self.assertTrue(hu.match_device(spec, spec))
def test_match_device_serial_false(self):
    """match_device(): different ID_SERIAL_SHORT values must not match."""
    self.assertFalse(hu.match_device({'ID_SERIAL_SHORT': 'fakeserial1'},
                                     {'ID_SERIAL_SHORT': 'fakeserial2'}))

View File

@ -0,0 +1,287 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslotest import base as test_base
from fuel_agent import errors
from fuel_agent.utils import lvm_utils as lu
from fuel_agent.utils import utils
class TestLvmUtils(test_base.BaseTestCase):
    """Unit tests for the LVM helpers in fuel_agent.utils.lvm_utils.

    Every LVM binary invocation goes through utils.execute, which is
    mocked here; the tests therefore verify the exact command lines the
    helpers build and their parsing/validation logic.
    """

    @mock.patch.object(utils, 'execute')
    def test_pvdisplay(self, mock_exec):
        # should run os command pvdisplay
        # in order to get actual pv configuration
        # Sizes come back as '<value>m' strings and are parsed into
        # integer megabytes; an empty vg_name field becomes None.
        mock_exec.return_value = (
            '/dev/fake1;vg;892.00m;1024.00m;'
            '123456-1234-1234-1234-1234-1234-000000\n'
            '/dev/fake2;;1024.00m;1024.00m;'
            '123456-1234-1234-1234-1234-1234-111111\n',
            ''
        )
        expected = [
            {
                'uuid': '123456-1234-1234-1234-1234-1234-000000',
                'vg': 'vg',
                'devsize': 1024,
                'psize': 892,
                'name': '/dev/fake1',
            },
            {
                'uuid': '123456-1234-1234-1234-1234-1234-111111',
                'vg': None,
                'devsize': 1024,
                'psize': 1024,
                'name': '/dev/fake2',
            }
        ]
        pvs = lu.pvdisplay()
        mock_exec.assert_called_once_with(
            'pvdisplay',
            '-C',
            '--noheading',
            '--units', 'm',
            '--options', 'pv_name,vg_name,pv_size,dev_size,pv_uuid',
            '--separator', ';',
            check_exit_code=[0]
        )
        # Compare irrespective of ordering.
        key = lambda x: x['name']
        self.assertEqual(sorted(expected, key=key), sorted(pvs, key=key))

    @mock.patch.object(lu, 'pvdisplay')
    @mock.patch.object(utils, 'execute')
    def test_pvcreate_ok(self, mock_exec, mock_pvdisplay):
        # should set metadatasize=64 and metadatacopies=2 if they are not set
        # should run pvcreate command
        mock_pvdisplay.return_value = [{'name': '/dev/another'}]
        lu.pvcreate('/dev/fake1', metadatasize=32, metadatacopies=1)
        lu.pvcreate('/dev/fake2', metadatacopies=1)
        lu.pvcreate('/dev/fake3', metadatasize=32)
        lu.pvcreate('/dev/fake4')
        expected_calls = [
            mock.call('pvcreate',
                      '--metadatacopies', '1',
                      '--metadatasize', '32m',
                      '/dev/fake1',
                      check_exit_code=[0]),
            mock.call('pvcreate',
                      '--metadatacopies', '1',
                      '--metadatasize', '64m',
                      '/dev/fake2',
                      check_exit_code=[0]),
            mock.call('pvcreate',
                      '--metadatacopies', '2',
                      '--metadatasize', '32m',
                      '/dev/fake3',
                      check_exit_code=[0]),
            mock.call('pvcreate',
                      '--metadatacopies', '2',
                      '--metadatasize', '64m',
                      '/dev/fake4',
                      check_exit_code=[0])
        ]
        self.assertEqual(mock_exec.call_args_list, expected_calls)

    @mock.patch.object(lu, 'pvdisplay')
    def test_pvcreate_duplicate(self, mock_pvdisplay):
        # should check if pv exists
        # then raise exception if it exists
        mock_pvdisplay.return_value = [{'name': '/dev/fake'}]
        self.assertRaises(
            errors.PVAlreadyExistsError, lu.pvcreate, '/dev/fake')

    @mock.patch.object(lu, 'pvdisplay')
    @mock.patch.object(utils, 'execute')
    def test_pvremove_ok(self, mock_exec, mock_pvdisplay):
        # should check if pv exists and is not attached to some vg
        # then should run pvremove command
        mock_pvdisplay.return_value = [{'vg': None, 'name': '/dev/fake'}]
        lu.pvremove('/dev/fake')
        mock_exec.assert_called_once_with('pvremove', '-ff', '-y', '/dev/fake',
                                          check_exit_code=[0])

    @mock.patch.object(lu, 'pvdisplay')
    def test_pvremove_attached_to_vg(self, mock_pvdisplay):
        # should check if pv exists and is not attached to some vg
        # then raise exception if it is attached to some vg
        mock_pvdisplay.return_value = [{'vg': 'some', 'name': '/dev/fake'}]
        self.assertRaises(errors.PVBelongsToVGError, lu.pvremove, '/dev/fake')

    @mock.patch.object(lu, 'pvdisplay')
    def test_pvremove_notfound(self, mock_pvdisplay):
        # should check if pv exists
        # then should raise exception if it does not exist
        mock_pvdisplay.return_value = [{'name': '/dev/another'}]
        self.assertRaises(errors.PVNotFoundError, lu.pvremove, '/dev/fake')

    @mock.patch.object(utils, 'execute')
    def test_vgdisplay(self, mock_exec):
        # should run os command vgdisplay
        # in order to get actual vg configuration
        mock_exec.return_value = (
            'vg1;123456-1234-1234-1234-1234-1234-000000;2040.00m;2040.00m\n'
            'vg2;123456-1234-1234-1234-1234-1234-111111;2040.00m;1020.00m\n',
            ''
        )
        expected = [
            {
                'uuid': '123456-1234-1234-1234-1234-1234-000000',
                'size': 2040,
                'free': 2040,
                'name': 'vg1',
            },
            {
                'uuid': '123456-1234-1234-1234-1234-1234-111111',
                'size': 2040,
                'free': 1020,
                'name': 'vg2',
            }
        ]
        vg = lu.vgdisplay()
        mock_exec.assert_called_once_with(
            'vgdisplay',
            '-C',
            '--noheading',
            '--units', 'm',
            '--options', 'vg_name,vg_uuid,vg_size,vg_free',
            '--separator', ';',
            check_exit_code=[0]
        )
        # Compare irrespective of ordering.
        key = lambda x: x['name']
        self.assertEqual(sorted(expected, key=key), sorted(vg, key=key))

    @mock.patch.object(lu, 'pvdisplay')
    @mock.patch.object(lu, 'vgdisplay')
    @mock.patch.object(utils, 'execute')
    def test_vgcreate_ok(self, mock_exec, mock_vgdisplay, mock_pvdisplay):
        # should check if vg already exists
        # should check if all necessary pv exist
        # should run vgcreate command
        mock_vgdisplay.return_value = [{'name': 'some'}, {'name': 'another'}]
        mock_pvdisplay.return_value = [{'vg': None, 'name': '/dev/fake1'},
                                       {'vg': None, 'name': '/dev/fake2'}]
        # one pvname
        lu.vgcreate('vgname', '/dev/fake1')
        # several pvnames
        lu.vgcreate('vgname', '/dev/fake1', '/dev/fake2')
        expected_calls = [
            mock.call('vgcreate', 'vgname', '/dev/fake1',
                      check_exit_code=[0]),
            mock.call('vgcreate', 'vgname', '/dev/fake1', '/dev/fake2',
                      check_exit_code=[0])
        ]
        self.assertEqual(mock_exec.call_args_list, expected_calls)

    @mock.patch.object(lu, 'vgdisplay')
    def test_vgcreate_duplicate(self, mock_vgdisplay):
        # should check if vg exists
        # should raise exception if it exists
        mock_vgdisplay.return_value = [{'name': 'vgname'}, {'name': 'some'}]
        self.assertRaises(errors.VGAlreadyExistsError,
                          lu.vgcreate, 'vgname', '/dev/fake')

    @mock.patch.object(lu, 'pvdisplay')
    @mock.patch.object(lu, 'vgdisplay')
    def test_vgcreate_pv_not_found(self, mock_vgdisplay, mock_pvdisplay):
        # should check if all necessary pv exist
        # should raise error if at least one pv does not
        # ('/dev/fake' is deliberately absent from pvdisplay output)
        mock_vgdisplay.return_value = []
        mock_pvdisplay.return_value = [{'vg': None, 'name': '/dev/fake1'},
                                       {'vg': None, 'name': '/dev/fake2'}]
        self.assertRaises(errors.PVNotFoundError,
                          lu.vgcreate, 'vgname', '/dev/fake', '/dev/fake2')

    @mock.patch.object(lu, 'pvdisplay')
    @mock.patch.object(lu, 'vgdisplay')
    def test_vgcreate_pv_attached(self, mock_vgdisplay, mock_pvdisplay):
        # should check if all necessary pv are not attached to some vg
        # should raise error if at least one pv is attached
        mock_vgdisplay.return_value = []
        mock_pvdisplay.return_value = [{'vg': None, 'name': '/dev/fake1'},
                                       {'vg': 'some', 'name': '/dev/fake2'}]
        self.assertRaises(errors.PVBelongsToVGError,
                          lu.vgcreate, 'vgname', '/dev/fake1', '/dev/fake2')

    @mock.patch.object(lu, 'pvdisplay')
    @mock.patch.object(lu, 'vgdisplay')
    @mock.patch.object(utils, 'execute')
    def test_vgextend_ok(self, mock_exec, mock_vgdisplay, mock_pvdisplay):
        # should check if vg exists
        # should check if all necessary pv exist and not attached to any vg
        # should run vgextend command
        mock_vgdisplay.return_value = [{'name': 'some'}, {'name': 'another'}]
        mock_pvdisplay.return_value = [{'vg': None, 'name': '/dev/fake1'},
                                       {'vg': None, 'name': '/dev/fake2'}]
        lu.vgextend('some', '/dev/fake1', '/dev/fake2')
        mock_exec.assert_called_once_with(
            'vgextend', 'some', '/dev/fake1', '/dev/fake2',
            check_exit_code=[0])

    @mock.patch.object(lu, 'vgdisplay')
    @mock.patch.object(utils, 'execute')
    def test_vgextend_not_found(self, mock_exec, mock_vgdisplay):
        # should check if vg exists
        # should raise error if it does not
        mock_vgdisplay.return_value = [{'name': 'some'}]
        self.assertRaises(errors.VGNotFoundError,
                          lu.vgextend, 'vgname', '/dev/fake1')

    @mock.patch.object(lu, 'pvdisplay')
    @mock.patch.object(lu, 'vgdisplay')
    def test_vgextend_pv_not_found(self, mock_vgdisplay, mock_pvdisplay):
        # should check if all necessary pv exist
        # should raise error if at least one pv does not
        # ('/dev/fake' is deliberately absent from pvdisplay output)
        mock_vgdisplay.return_value = [{'name': 'vgname'}]
        mock_pvdisplay.return_value = [{'vg': None, 'name': '/dev/fake1'},
                                       {'vg': None, 'name': '/dev/fake2'}]
        self.assertRaises(errors.PVNotFoundError,
                          lu.vgextend, 'vgname', '/dev/fake', '/dev/fake2')

    @mock.patch.object(lu, 'pvdisplay')
    @mock.patch.object(lu, 'vgdisplay')
    def test_vgextend_pv_attached(self, mock_vgdisplay, mock_pvdisplay):
        # should check if all necessary pv are not attached to some vg
        # should raise error if at least one pv is attached
        mock_vgdisplay.return_value = [{'name': 'vgname'}]
        mock_pvdisplay.return_value = [{'vg': None, 'name': '/dev/fake1'},
                                       {'vg': 'some', 'name': '/dev/fake2'}]
        self.assertRaises(errors.PVBelongsToVGError,
                          lu.vgextend, 'vgname', '/dev/fake1', '/dev/fake2')

    @mock.patch.object(lu, 'vgdisplay')
    @mock.patch.object(utils, 'execute')
    def test_vgremove_ok(self, mock_exec, mock_vgdisplay):
        # should check if vg exists
        # then run vgremove command if it exists
        mock_vgdisplay.return_value = [{'name': 'vgname'}, {'name': 'some'}]
        lu.vgremove('vgname')
        mock_exec.assert_called_once_with('vgremove', '-f', 'vgname',
                                          check_exit_code=[0])

    @mock.patch.object(lu, 'vgdisplay')
    @mock.patch.object(utils, 'execute')
    def test_vgremove_not_found(self, mock_exec, mock_vgdisplay):
        # should check if vg exists
        # then raise error if it doesn't
        mock_vgdisplay.return_value = [{'name': 'some'}]
        self.assertRaises(errors.VGNotFoundError, lu.vgremove, 'vgname')

View File

@ -0,0 +1,202 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslotest import base as test_base
import six
from fuel_agent import errors
from fuel_agent.utils import hardware_utils as hu
from fuel_agent.utils import md_utils as mu
from fuel_agent.utils import utils
if six.PY2:
OPEN_FUNCTION_NAME = '__builtin__.open'
else:
OPEN_FUNCTION_NAME = 'builtins.open'
class TestMdUtils(test_base.BaseTestCase):
    """Unit tests for the software-RAID helpers in md_utils.

    Reads of /proc/mdstat are stubbed with mock_open and every mdadm
    invocation goes through the mocked utils.execute, so the tests
    verify parsing, validation and the exact mdadm command lines.
    """

    @mock.patch.object(utils, 'execute')
    def test_mddisplay(self, mock_exec):
        # should read file /proc/mdstat
        # should get detailed description for all md devices
        # should return list of dicts representing md devices
        mock_open_data = """Personalities : [raid1]
md0 : active raid1 loop5[1] loop4[0]
102272 blocks super 1.2 [2/2] [UU]
unused devices: <none>
"""
        mock_open = mock.mock_open(read_data=mock_open_data)
        patcher = mock.patch(OPEN_FUNCTION_NAME, new=mock_open)
        patcher.start()
        # Canned 'mdadm --detail /dev/md0' output.
        mock_exec.return_value = (
            """/dev/md0:
Version : 1.2
Creation Time : Wed Jun 18 18:44:57 2014
Raid Level : raid1
Array Size : 102272 (99.89 MiB 104.73 MB)
Used Dev Size : 102272 (99.89 MiB 104.73 MB)
Raid Devices : 2
Total Devices : 2
Persistence : Superblock is persistent
Update Time : Wed Jun 18 18:45:01 2014
State : clean
Active Devices : 2
Working Devices : 2
Failed Devices : 0
Spare Devices : 0
Name : localhost.localdomain:0 (local to host
localhost.localdomain)
UUID : 12dd4cfc:6b2ac9db:94564538:a6ffee82
Events : 17
Number Major Minor RaidDevice State
0 7 4 0 active sync /dev/loop4
1 7 5 1 active sync /dev/loop5""",
            ''
        )
        expected = [{
            'name': '/dev/md0',
            'Version': '1.2',
            'Raid Level': 'raid1',
            'Raid Devices': '2',
            'Active Devices': '2',
            'Spare Devices': '0',
            'Failed Devices': '0',
            'State': 'clean',
            'UUID': '12dd4cfc:6b2ac9db:94564538:a6ffee82',
            'devices': ['/dev/loop4', '/dev/loop5']
        }]
        mds = mu.mddisplay()
        mock_exec.assert_called_once_with(
            'mdadm', '--detail', '/dev/md0', check_exit_code=[0])
        # Compare irrespective of ordering.
        key = lambda x: x['name']
        self.assertEqual(sorted(expected, key=key), sorted(mds, key=key))
        patcher.stop()

    @mock.patch.object(mu, 'mdclean')
    @mock.patch.object(hu, 'list_block_devices')
    @mock.patch.object(mu, 'mddisplay')
    @mock.patch.object(utils, 'execute')
    def test_mdcreate_ok(self, mock_exec, mock_mddisplay,
                         mock_bdevs, mock_mdclean):
        # should check if md already exists
        # should check if md level is valid
        # should check if all necessary devices exist
        # should check if all devices are not parts of some md
        # should clean md metadata which possibly are on all devices
        # should run mdadm command to create new md
        mock_mddisplay.return_value = \
            [{'name': '/dev/md10', 'devices': ['/dev/fake10']}]
        mock_bdevs.return_value = [{'device': '/dev/fake1'},
                                   {'device': '/dev/fake2'}]
        mu.mdcreate('/dev/md0', 'mirror', '/dev/fake1', '/dev/fake2')
        mock_exec.assert_called_once_with(
            'mdadm', '--force', '--create', '/dev/md0', '-e1.2',
            '--level=mirror',
            '--raid-devices=2', '/dev/fake1', '/dev/fake2',
            check_exit_code=[0])

    @mock.patch.object(mu, 'mddisplay')
    def test_mdcreate_duplicate(self, mock_mddisplay):
        # should check if md already exists
        # should raise error if it exists
        mock_mddisplay.return_value = [{'name': '/dev/md0'}]
        self.assertRaises(
            errors.MDAlreadyExistsError, mu.mdcreate,
            '/dev/md0', 'mirror', '/dev/fake')

    @mock.patch.object(mu, 'mddisplay')
    def test_mdcreate_unsupported_level(self, mock_mddisplay):
        # should check if md level is valid
        # should raise error if it is not
        mock_mddisplay.return_value = [{'name': '/dev/md10'}]
        self.assertRaises(
            errors.MDWrongSpecError, mu.mdcreate,
            '/dev/md0', 'badlevel', '/dev/fake')

    @mock.patch.object(hu, 'list_block_devices')
    @mock.patch.object(mu, 'mddisplay')
    def test_mdcreate_device_not_found(self, mock_mddisplay, mock_bdevs):
        # should check if all devices exist
        # should raise error if at least one device does not
        # ('/dev/fake2' is deliberately absent from the block devices)
        mock_mddisplay.return_value = [{'name': '/dev/md10'}]
        mock_bdevs.return_value = [{'device': '/dev/fake1'},
                                   {'device': '/dev/fake10'}]
        self.assertRaises(
            errors.MDNotFoundError, mu.mdcreate,
            '/dev/md0', 'mirror', '/dev/fake1', '/dev/fake2')

    @mock.patch.object(hu, 'list_block_devices')
    @mock.patch.object(mu, 'mddisplay')
    def test_mdcreate_device_attached(self, mock_mddisplay, mock_bdevs):
        # should check if all necessary devices are not attached to some md
        # should raise error if at least one device is attached
        mock_mddisplay.return_value = [{'name': '/dev/md10',
                                        'devices': ['/dev/fake2']}]
        mock_bdevs.return_value = [{'device': '/dev/fake1'},
                                   {'device': '/dev/fake2'}]
        self.assertRaises(
            errors.MDDeviceDuplicationError, mu.mdcreate,
            '/dev/md0', 'mirror', '/dev/fake1', '/dev/fake2')

    @mock.patch.object(utils, 'execute')
    @mock.patch.object(mu, 'mdclean')
    @mock.patch.object(hu, 'list_block_devices')
    @mock.patch.object(mu, 'mddisplay')
    def test_mdcreate_device_clean(self, mock_mddisplay,
                                   mock_bdevs, mock_mdclean, mock_exec):
        # should clean md metadata on all devices before building new md
        mock_mddisplay.return_value = []
        mock_bdevs.return_value = [{'device': '/dev/fake1'},
                                   {'device': '/dev/fake2'}]
        mu.mdcreate('/dev/md0', 'mirror', '/dev/fake1', '/dev/fake2')
        expected_calls = [mock.call('/dev/fake1'), mock.call('/dev/fake2')]
        self.assertEqual(mock_mdclean.call_args_list, expected_calls)

    @mock.patch.object(utils, 'execute')
    @mock.patch.object(mu, 'mddisplay')
    def test_mdremove_ok(self, mock_mddisplay, mock_exec):
        # should check if md exists
        # should run mdadm command to remove md device
        # (--remove tolerates exit code 1: the device node may already
        # be gone after --stop)
        mock_mddisplay.return_value = [{'name': '/dev/md0'}]
        expected_calls = [
            mock.call('mdadm', '--stop', '/dev/md0', check_exit_code=[0]),
            mock.call('mdadm', '--remove', '/dev/md0', check_exit_code=[0, 1])
        ]
        mu.mdremove('/dev/md0')
        self.assertEqual(mock_exec.call_args_list, expected_calls)

    @mock.patch.object(mu, 'mddisplay')
    def test_mdremove_notfound(self, mock_mddisplay):
        # should check if md exists
        # should raise error if it does not
        mock_mddisplay.return_value = [{'name': '/dev/md0'}]
        self.assertRaises(
            errors.MDNotFoundError, mu.mdremove, '/dev/md1')

View File

@ -0,0 +1,195 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslotest import base as test_base
from fuel_agent import errors
from fuel_agent.utils import partition_utils as pu
from fuel_agent.utils import utils
class TestPartitionUtils(test_base.BaseTestCase):
    """Unit tests for the parted wrappers in partition_utils.

    utils.execute (and, where needed, pu.info) is mocked, so the tests
    verify argument validation and the exact parted command lines.
    """

    @mock.patch.object(pu, 'make_label')
    def test_wipe(self, mock_label):
        # should run call make_label method
        # in order to create new empty table which we think
        # is equivalent to wiping the old one
        pu.wipe('/dev/fake')
        mock_label.assert_called_once_with('/dev/fake')

    @mock.patch.object(utils, 'execute')
    def test_make_label(self, mock_exec):
        # should run parted OS command
        # in order to create label on a device

        # gpt by default
        pu.make_label('/dev/fake')
        mock_exec.assert_called_once_with(
            'parted', '-s', '/dev/fake', 'mklabel', 'gpt', check_exit_code=[0])
        mock_exec.reset_mock()

        # label is set explicitly
        pu.make_label('/dev/fake', label='msdos')
        mock_exec.assert_called_once_with(
            'parted', '-s', '/dev/fake',
            'mklabel', 'msdos', check_exit_code=[0])

    def test_make_label_wrong_label(self):
        # should check if label is valid
        # should raise exception if it is not
        self.assertRaises(errors.WrongPartitionLabelError,
                          pu.make_label, '/dev/fake', 'wrong')

    @mock.patch.object(utils, 'execute')
    def test_set_partition_flag(self, mock_exec):
        # should run parted OS command
        # in order to set flag on a partition

        # default state is 'on'
        pu.set_partition_flag('/dev/fake', 1, 'boot')
        mock_exec.assert_called_once_with(
            'parted', '-s', '/dev/fake', 'set', '1', 'boot', 'on',
            check_exit_code=[0])
        mock_exec.reset_mock()

        # if state argument is given use it
        pu.set_partition_flag('/dev/fake', 1, 'boot', state='off')
        mock_exec.assert_called_once_with(
            'parted', '-s', '/dev/fake', 'set', '1', 'boot', 'off',
            check_exit_code=[0])

    @mock.patch.object(utils, 'execute')
    def test_set_partition_flag_wrong_flag(self, mock_exec):
        # should check if flag is valid
        # should raise exception if it is not
        self.assertRaises(errors.WrongPartitionSchemeError,
                          pu.set_partition_flag,
                          '/dev/fake', 1, 'wrong')

    @mock.patch.object(utils, 'execute')
    def test_set_partition_flag_wrong_state(self, mock_exec):
        # should check if state is valid
        # should raise exception if it is not
        self.assertRaises(errors.WrongPartitionSchemeError,
                          pu.set_partition_flag,
                          '/dev/fake', 1, 'boot', state='wrong')

    @mock.patch.object(pu, 'info')
    @mock.patch.object(utils, 'execute')
    def test_make_partition(self, mock_exec, mock_info):
        # should run parted OS command
        # in order to create new partition
        # (the 100-200 MiB range fits inside the single free region)
        mock_info.return_value = {
            'parts': [
                {'begin': 0, 'end': 1000, 'fstype': 'free'},
            ]
        }
        pu.make_partition('/dev/fake', 100, 200, 'primary')
        mock_exec.assert_called_once_with(
            'parted',
            '-a', 'optimal',
            '-s', '/dev/fake',
            'unit', 'MiB',
            'mkpart', 'primary', '100', '200',
            check_exit_code=[0])

    @mock.patch.object(utils, 'execute')
    def test_make_partition_wrong_ptype(self, mock_exec):
        # should check if partition type is one of
        # 'primary' or 'logical'
        # should raise exception if it is not
        self.assertRaises(errors.WrongPartitionSchemeError, pu.make_partition,
                          '/dev/fake', 200, 100, 'wrong')

    @mock.patch.object(utils, 'execute')
    def test_make_partition_begin_overlaps_end(self, mock_exec):
        # should check if begin is less than end
        # should raise exception if it isn't
        self.assertRaises(errors.WrongPartitionSchemeError, pu.make_partition,
                          '/dev/fake', 200, 100, 'primary')

    @mock.patch.object(pu, 'info')
    @mock.patch.object(utils, 'execute')
    def test_make_partition_overlaps_other_parts(self, mock_exec, mock_info):
        # should check if begin or end overlap other partitions
        # should raise exception if it does
        mock_info.return_value = {
            'parts': [
                {'begin': 0, 'end': 100, 'fstype': 'free'},
                {'begin': 100, 'end': 200, 'fstype': 'notfree'},
                {'begin': 200, 'end': 300, 'fstype': 'free'}
            ]
        }
        self.assertRaises(errors.WrongPartitionSchemeError, pu.make_partition,
                          '/dev/fake', 99, 101, 'primary')
        self.assertRaises(errors.WrongPartitionSchemeError, pu.make_partition,
                          '/dev/fake', 100, 200, 'primary')
        self.assertRaises(errors.WrongPartitionSchemeError, pu.make_partition,
                          '/dev/fake', 200, 301, 'primary')
        self.assertEqual(mock_info.call_args_list,
                         [mock.call('/dev/fake')] * 3)

    @mock.patch.object(pu, 'info')
    @mock.patch.object(utils, 'execute')
    def test_remove_partition(self, mock_exec, mock_info):
        # should run parted OS command
        # in order to remove partition
        mock_info.return_value = {
            'parts': [
                {
                    'begin': 1,
                    'end': 100,
                    'size': 100,
                    'num': 1,
                    'fstype': 'ext2'
                },
                {
                    'begin': 100,
                    'end': 200,
                    'size': 100,
                    'num': 2,
                    'fstype': 'ext2'
                }
            ]
        }
        pu.remove_partition('/dev/fake', 1)
        mock_exec.assert_called_once_with(
            'parted', '-s', '/dev/fake', 'rm', '1', check_exit_code=[0])

    @mock.patch.object(pu, 'info')
    @mock.patch.object(utils, 'execute')
    def test_remove_partition_notexists(self, mock_exec, mock_info):
        # should check if partition does exist
        # should raise exception if it doesn't
        mock_info.return_value = {
            'parts': [
                {
                    'begin': 1,
                    'end': 100,
                    'size': 100,
                    'num': 1,
                    'fstype': 'ext2'
                },
                {
                    'begin': 100,
                    'end': 200,
                    'size': 100,
                    'num': 2,
                    'fstype': 'ext2'
                }
            ]
        }
        self.assertRaises(errors.PartitionNotFoundError, pu.remove_partition,
                          '/dev/fake', 3)

View File

@ -0,0 +1,122 @@
# Copyright 2011 Justin Santa Barbara
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import tempfile
import testtools
from fuel_agent.openstack.common import processutils
from fuel_agent.utils import utils
class ExecuteTestCase(testtools.TestCase):
    """This class is partly based on the same class in openstack/ironic.

    Tests utils.execute retry semantics via small temporary shell
    scripts, plus the parse_unit/B2MiB helpers.
    """

    def test_parse_unit(self):
        # parse_unit converts a '<float><unit>' string into an int:
        # rounds up when ceil=True, truncates when ceil=False;
        # a unit mismatch (or empty string) raises ValueError.
        self.assertEqual(utils.parse_unit('1.00m', 'm', ceil=True), 1)
        self.assertEqual(utils.parse_unit('1.00m', 'm', ceil=False), 1)
        self.assertEqual(utils.parse_unit('1.49m', 'm', ceil=True), 2)
        self.assertEqual(utils.parse_unit('1.49m', 'm', ceil=False), 1)
        self.assertEqual(utils.parse_unit('1.51m', 'm', ceil=True), 2)
        self.assertEqual(utils.parse_unit('1.51m', 'm', ceil=False), 1)
        self.assertRaises(ValueError, utils.parse_unit, '1.00m', 'MiB')
        self.assertRaises(ValueError, utils.parse_unit, '', 'MiB')

    def test_B2MiB(self):
        # bytes -> MiB conversion with floor/ceil rounding
        # (1 MiB == 1048576 bytes)
        self.assertEqual(utils.B2MiB(1048575, ceil=False), 0)
        self.assertEqual(utils.B2MiB(1048576, ceil=False), 1)
        self.assertEqual(utils.B2MiB(1048575, ceil=True), 1)
        self.assertEqual(utils.B2MiB(1048576, ceil=True), 1)
        self.assertEqual(utils.B2MiB(1048577, ceil=True), 2)

    def test_retry_on_failure(self):
        # The helper script always exits 1, so utils.execute must retry
        # exactly 'attempts' times and pass stdin on every attempt.
        fd, tmpfilename = tempfile.mkstemp()
        fd2, tmpfilename2 = tempfile.mkstemp()
        # BUG FIX: the fd returned by the second mkstemp() used to be
        # discarded without being closed, leaking a file descriptor
        # on every run of this test.
        os.close(fd2)
        try:
            fp = os.fdopen(fd, 'w+')
            fp.write('''#!/bin/sh
# If stdin fails to get passed during one of the runs, make a note.
if ! grep -q foo
then
echo 'failure' > "$1"
fi
# If stdin has failed to get passed during this or a previous run, exit early.
if grep failure "$1"
then
exit 1
fi
runs="$(cat $1)"
if [ -z "$runs" ]
then
runs=0
fi
runs=$(($runs + 1))
echo $runs > "$1"
exit 1
''')
            fp.close()
            os.chmod(tmpfilename, 0o755)
            self.assertRaises(processutils.ProcessExecutionError,
                              utils.execute,
                              tmpfilename, tmpfilename2, attempts=10,
                              process_input='foo',
                              delay_on_retry=False)
            fp = open(tmpfilename2, 'r')
            runs = fp.read()
            fp.close()
            self.assertNotEqual(runs.strip(), 'failure', 'stdin did not '
                                'always get passed '
                                'correctly')
            runs = int(runs.strip())
            self.assertEqual(10, runs,
                             'Ran %d times instead of 10.' % (runs,))
        finally:
            os.unlink(tmpfilename)
            os.unlink(tmpfilename2)

    def test_unknown_kwargs_raises_error(self):
        # Passing an unsupported keyword argument must be rejected
        # explicitly rather than silently ignored.
        self.assertRaises(processutils.UnknownArgumentError,
                          utils.execute,
                          '/usr/bin/env', 'true',
                          this_is_not_a_valid_kwarg=True)

    def test_check_exit_code_boolean(self):
        # check_exit_code=False suppresses failures entirely;
        # check_exit_code=True raises on any non-zero status.
        utils.execute('/usr/bin/env', 'false', check_exit_code=False)
        self.assertRaises(processutils.ProcessExecutionError,
                          utils.execute,
                          '/usr/bin/env', 'false', check_exit_code=True)

    def test_no_retry_on_success(self):
        # The helper script exits 1 if it is ever run a second time,
        # so utils.execute must not retry after a successful attempt.
        fd, tmpfilename = tempfile.mkstemp()
        fd2, tmpfilename2 = tempfile.mkstemp()
        # BUG FIX: close the second mkstemp() fd instead of leaking it
        # (previously it was discarded via '_').
        os.close(fd2)
        try:
            fp = os.fdopen(fd, 'w+')
            fp.write('''#!/bin/sh
# If we've already run, bail out.
grep -q foo "$1" && exit 1
# Mark that we've run before.
echo foo > "$1"
# Check that stdin gets passed correctly.
grep foo
''')
            fp.close()
            os.chmod(tmpfilename, 0o755)
            utils.execute(tmpfilename,
                          tmpfilename2,
                          process_input='foo',
                          attempts=2)
        finally:
            os.unlink(tmpfilename)
            os.unlink(tmpfilename2)

View File

@ -0,0 +1,13 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -0,0 +1,29 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fuel_agent.utils import utils
def make_fs(fs_type, fs_options, fs_label, dev):
    """Creates a file system on the given device.

    :param fs_type: File system type, e.g. 'ext2', 'xfs' or 'swap'.
        'swap' runs mkswap; anything else runs 'mkfs.<fs_type>'.
    :param fs_options: Extra mkfs options as a whitespace separated
        string (may be empty).
    :param fs_label: Label option string, split and appended the same
        way as fs_options.
    :param dev: Device file to create the file system on.
    """
    # NOTE(agordeev): notice the different flag to force the fs creating
    # ext* uses -F flag, xfs/mkswap uses -f flag.
    cmd_line = []
    cmd_name = 'mkswap'
    # BUG FIX: the original used "is not", which compares object
    # identity rather than string equality; string literal interning is
    # an implementation detail, so '!=' is the correct comparison.
    if fs_type != 'swap':
        cmd_name = 'mkfs.%s' % fs_type
    cmd_line.append(cmd_name)
    for opt in (fs_options, fs_label):
        # split on spaces and drop empty tokens
        cmd_line.extend([s for s in opt.split(' ') if s])
    cmd_line.append(dev)
    utils.execute(*cmd_line)

View File

@ -0,0 +1,331 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from fuel_agent.utils import utils
# Please take a look at the linux kernel documentation
# https://github.com/torvalds/linux/blob/master/Documentation/devices.txt.
# KVM virtio volumes have major number 252 in CentOS, but 253 in Ubuntu.
# Block-device major numbers that correspond to whole disks
# (see the kernel devices.txt reference above).
VALID_MAJORS = (3, 8, 65, 66, 67, 68, 69, 70, 71, 104, 105, 106, 107, 108, 109,
                110, 111, 202, 252, 253)

# We are only interested in getting these
# properties from udevadm report
# MAJOR           major device number
# MINOR           minor device number
# DEVNAME         e.g. /dev/sda
# DEVTYPE         e.g. disk or partition for block devices
# DEVPATH         path to a device directory relative to /sys
# ID_BUS          e.g. ata, scsi
# ID_MODEL        e.g. MATSHITADVD-RAM_UJ890
# ID_SERIAL_SHORT e.g. UH00_296679
# ID_WWN          e.g. 0x50000392e9804d4b (optional)
# ID_CDROM        e.g. 1 for cdrom device (optional)
UDEV_PROPERTIES = set(['MAJOR', 'MINOR', 'DEVNAME', 'DEVTYPE', 'DEVPATH',
                       'ID_BUS', 'ID_MODEL', 'ID_SERIAL_SHORT', 'ID_WWN',
                       'ID_CDROM'])

# more details about types you can find in dmidecode's manual
SMBIOS_TYPES = {'bios': '0',
                'base_board': '2',
                'processor': '4',
                'memory_array': '16',
                'memory_device': '17'}
def parse_dmidecode(type):
    """Parses `dmidecode` output.

    :param type: A string with type of entity to display.
        NOTE: the name shadows the builtin 'type'; it is kept as-is so
        that keyword callers (type=...) keep working.

    :returns: A list with dictionaries of entities for specified type.
    """
    output = utils.execute('dmidecode', '-q', '--type', type)
    lines = output[0].split('\n')
    info = []
    multiline_values = None
    section = 0
    for line in lines:
        # A non-empty line with no leading whitespace starts a new
        # dmidecode section (i.e. a new entity dict).
        if len(line) != 0 and len(line.strip()) == len(line):
            info.append({})
            section = len(info) - 1
        try:
            k, v = (l.strip() for l in line.split(':', 1))
        except ValueError:
            # No ':' on this line: either a blank separator (which
            # terminates any multi-line value being collected) or a
            # continuation line belonging to the current multi-line key.
            k = line.strip()
            if not k:
                multiline_values = None
            if multiline_values:
                info[section][multiline_values].append(k)
        else:
            if not v:
                # 'Key:' with an empty value opens a multi-line list
                # value; subsequent continuation lines are appended.
                multiline_values = k.lower()
                info[section][multiline_values] = []
            else:
                info[section][k.lower()] = v
    return info
def parse_lspci():
    """Parse `lspci -vmm -D` output into per-device dictionaries.

    :returns: A list of dicts containing PCI devices information,
        mapping lowercased field names to their values.
    """
    stdout = utils.execute('lspci', '-vmm', '-D')[0]
    devices = [{}]
    current = 0
    # the trailing two lines of lspci output are empty -- skip them
    for record_line in stdout.split('\n')[:-2]:
        parts = record_line.split(':', 1)
        if len(parts) == 2:
            field, value = (p.strip() for p in parts)
            devices[current][field.lower()] = value
        else:
            # a line without ':' separates device records
            devices.append({})
            current += 1
    return devices
def parse_simple_kv(*command):
    """Run a command and parse its simple 'key: value' stdout.

    Parsing stops at the first line that has no ':' separator.

    :param command: A command to execute (with its arguments).
    :returns: A dict of parsed key-value data, keys lowercased.
    """
    stdout = utils.execute(*command)[0]
    parsed = {}
    # the element after the final newline is not a data line
    for raw in stdout.split('\n')[:-1]:
        chunks = raw.split(':', 1)
        if len(chunks) != 2:
            break
        key, value = (c.strip() for c in chunks)
        parsed[key.lower()] = value
    return parsed
def is_disk(dev, bspec=None, uspec=None):
    """Decide whether the given block device looks like a disk.

    :param dev: A device file, e.g. /dev/sda.
    :param bspec: Optional dict of blockdev properties
        (fetched via blockdevreport when omitted).
    :param uspec: Optional dict of udev properties
        (fetched via udevreport when omitted).
    :returns: True if device is disk else False.
    """
    if uspec is None:
        uspec = udevreport(dev)
    # CD-ROM drives and partitions are never considered disks
    if uspec.get('ID_CDROM') == '1' or uspec.get('DEVTYPE') == 'partition':
        return False
    # unexpected major number means some other kind of device
    if 'MAJOR' in uspec and int(uspec['MAJOR']) not in VALID_MAJORS:
        return False
    if bspec is None:
        bspec = blockdevreport(dev)
    # read-only devices are not usable as disks
    if bspec.get('ro') == '1':
        return False
    return True
def udevreport(dev):
    """Build a udevadm property report for a device.

    :param dev: A device file, e.g. /dev/sda.
    :returns: A dict of udev device properties
        (only keys listed in UDEV_PROPERTIES, plus DEVLINKS).
    """
    output = utils.execute('udevadm',
                           'info',
                           '--query=property',
                           '--export',
                           '--name={0}'.format(dev),
                           check_exit_code=[0])[0]
    properties = {}
    for record in output.splitlines():
        if not record:
            continue
        key, raw_value = record.split('=', 1)
        # --export wraps values in single quotes; strip them
        value = raw_value.strip('\'')
        if key == 'DEVLINKS':
            # symbolic links created for this block device
            # (e.g. /dev/disk/by-id/foobar)
            properties['DEVLINKS'] = value.split()
        if key in UDEV_PROPERTIES:
            properties[key] = value
    return properties
def blockdevreport(blockdev):
    """Build a blockdev(8) report for a block device.

    :param blockdev: A block device file, e.g. /dev/sda.
    :returns: A dict of blockdev properties keyed by the query name
        (e.g. 'sz', 'ro', 'ss', ...).
    """
    queries = [
        '--getsz',        # get size in 512-byte sectors
        '--getro',        # get read-only
        '--getss',        # get logical block (sector) size
        '--getpbsz',      # get physical block (sector) size
        '--getsize64',    # get size in bytes
        '--getiomin',     # get minimum I/O size
        '--getioopt',     # get optimal I/O size
        '--getra',        # get readahead
        '--getalignoff',  # get alignment offset in bytes
        '--getmaxsect',   # get max sectors per request
    ]
    output = utils.execute(*(['blockdev'] + queries + [blockdev]),
                           check_exit_code=[0])[0]
    # blockdev prints one answer per line, in query order
    keys = [q[5:] for q in queries]
    return dict(zip(keys, output.splitlines()))
def extrareport(dev):
    """Collect extra device properties from /sys.

    :param dev: A device file, e.g. /dev/sda.
    :returns: A dict of properties; may be empty if the corresponding
        sysfs entries are missing or unreadable.
    """
    report = {}
    devname = os.path.basename(dev)
    # NOTE: the 'removable' flag is not fully reliable -- some devices
    # (e.g. Adaptec RAID volumes) are marked removable although they
    # are actually not.
    sysfs_paths = {
        'removable': '/sys/block/{0}/removable'.format(devname),
        'state': '/sys/block/{0}/device/state'.format(devname),
        'timeout': '/sys/block/{0}/device/timeout'.format(devname),
    }
    for prop, path in sysfs_paths.items():
        try:
            with open(path) as sysfs_file:
                report[prop] = sysfs_file.read().strip()
        except Exception:
            # best effort: silently skip entries we cannot read
            pass
    return report
def list_block_devices(disks=True):
    """Gets list of block devices, tries to guess which of them are disks
    and returns list of dicts representing those disks.

    :param disks: If True, devices that do not look like disks
        (see is_disk) are filtered out.
    :returns: A list of dict representing disks available on a node.
    """
    bdevs = []
    # `blockdev --report` prints a whitespace-separated table with a
    # header row; column positions are looked up from the header.
    report = utils.execute('blockdev', '--report', check_exit_code=[0])[0]
    lines = [line.split() for line in report.splitlines() if line]
    startsec_idx = lines[0].index('StartSec')
    device_idx = lines[0].index('Device')
    size_idx = lines[0].index('Size')
    for line in lines[1:]:
        device = line[device_idx]
        uspec = udevreport(device)
        bspec = blockdevreport(device)
        espec = extrareport(device)
        # if device is not a disk, skip it
        if disks and not is_disk(device, bspec=bspec, uspec=uspec):
            continue
        bdev = {
            'device': device,
            'startsec': line[startsec_idx],
            'size': int(line[size_idx]),
            'uspec': uspec,
            'bspec': bspec,
            'espec': espec
        }
        bdevs.append(bdev)
    return bdevs
def match_device(uspec1, uspec2):
    """Try to find out whether two uspecs describe the same device.

    Only a subset of the udev properties is compared -- enough, we
    believe, to say exactly whether the uspecs belong to one device.

    :param uspec1: A dict of properties which we get from udevadm.
    :param uspec2: A dict of properties which we get from udevadm.
    :returns: True if uspecs match each other else False.
    """
    wwn1 = uspec1.get('ID_WWN')
    wwn2 = uspec2.get('ID_WWN')
    serial1 = uspec1.get('ID_SERIAL_SHORT')
    serial2 = uspec2.get('ID_SERIAL_SHORT')
    devtype1 = uspec1.get('DEVTYPE')
    devtype2 = uspec2.get('DEVTYPE')

    # mismatching ID_WWN (when present on both sides) rules the match out
    if 'ID_WWN' in uspec1 and 'ID_WWN' in uspec2 and wwn1 != wwn2:
        return False

    # likewise for ID_SERIAL_SHORT
    if ('ID_SERIAL_SHORT' in uspec1 and 'ID_SERIAL_SHORT' in uspec2
            and serial1 != serial2):
        return False

    # a shared /dev/disk/by-id symlink means the same device
    common_links = (set(uspec1.get('DEVLINKS', []))
                    & set(uspec2.get('DEVLINKS', [])))
    if any(link.startswith('/dev/disk/by-id') for link in common_links):
        return True

    # equal non-None ID_WWN on two whole disks
    if wwn1 == wwn2 is not None and devtype1 == devtype2 == 'disk':
        return True

    # equal non-None ID_WWN on two partitions with equal minor numbers
    if (wwn1 == wwn2 is not None and devtype1 == devtype2 == 'partition'
            and uspec1.get('MINOR') == uspec2.get('MINOR') is not None):
        return True

    # equal non-None ID_SERIAL_SHORT on two whole disks
    if serial1 == serial2 is not None and devtype1 == devtype2 == 'disk':
        return True

    # equal non-None DEVPATH
    if uspec1.get('DEVPATH') == uspec2.get('DEVPATH') is not None:
        return True
    return False

View File

@ -0,0 +1,13 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -0,0 +1,210 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fuel_agent import errors
from fuel_agent.openstack.common import log as logging
from fuel_agent.utils import utils
LOG = logging.getLogger(__name__)
def pvdisplay():
    """Get a report of LVM physical volumes on the node.

    :returns: A list of dicts, one per pv, with keys
        'name', 'vg', 'psize', 'devsize', 'uuid'.
    """
    # unit m means MiB (power of 2)
    output = utils.execute(
        'pvdisplay',
        '-C',
        '--noheading',
        '--units', 'm',
        '--options', 'pv_name,vg_name,pv_size,dev_size,pv_uuid',
        '--separator', ';',
        check_exit_code=[0])[0]
    pvs = []
    for raw_line in output.split('\n'):
        stripped = raw_line.strip()
        if not stripped:
            continue
        fields = stripped.split(';')
        pvs.append({
            'name': fields[0],
            # an empty vg field means the pv is not attached to any vg
            'vg': fields[1] or None,
            'psize': utils.parse_unit(fields[2], 'm'),
            'devsize': utils.parse_unit(fields[3], 'm'),
            'uuid': fields[4]
        })
    LOG.debug('Found physical volumes: {0}'.format(pvs))
    return pvs
def pvcreate(pvname, metadatasize=64, metadatacopies=2):
    """Create an LVM physical volume on the given device.

    :param pvname: Device file to initialize, e.g. /dev/sda2.
    :param metadatasize: Metadata area size in MiB.
    :param metadatacopies: Number of metadata copies to keep.
    :raises: errors.PVAlreadyExistsError if the pv already exists.
    """
    # refuse to re-create an existing pv
    if any(pv['name'] == pvname for pv in pvdisplay()):
        raise errors.PVAlreadyExistsError(
            'Error while creating pv: pv %s already exists' % pvname)
    utils.execute('pvcreate',
                  '--metadatacopies', str(metadatacopies),
                  '--metadatasize', '%sm' % metadatasize,
                  pvname, check_exit_code=[0])
def pvremove(pvname):
    """Remove an LVM physical volume.

    :param pvname: Device file of the pv, e.g. /dev/sda2.
    :raises: errors.PVNotFoundError if the pv does not exist.
    :raises: errors.PVBelongsToVGError if the pv is attached to a vg.
    """
    matched = [pv for pv in pvdisplay() if pv['name'] == pvname]
    if not matched:
        raise errors.PVNotFoundError(
            'Error while removing pv: pv %s not found' % pvname)
    # a pv still attached to a vg must be detached (vgreduce) first
    if matched[0]['vg'] is not None:
        raise errors.PVBelongsToVGError(
            'Error while removing pv: '
            'pv belongs to vg %s' % matched[0]['vg'])
    utils.execute('pvremove', '-ff', '-y', pvname, check_exit_code=[0])
def vgdisplay():
    """Get a report of LVM volume groups on the node.

    :returns: A list of dicts, one per vg, with keys
        'name', 'uuid', 'size', 'free'.
    """
    output = utils.execute(
        'vgdisplay',
        '-C',
        '--noheading',
        '--units', 'm',
        '--options', 'vg_name,vg_uuid,vg_size,vg_free',
        '--separator', ';',
        check_exit_code=[0])[0]
    vgs = []
    for raw_line in output.split('\n'):
        stripped = raw_line.strip()
        if not stripped:
            continue
        fields = stripped.split(';')
        vgs.append({
            'name': fields[0],
            'uuid': fields[1],
            'size': utils.parse_unit(fields[2], 'm'),
            # free space is rounded down so we never over-promise
            'free': utils.parse_unit(fields[3], 'm', ceil=False)
        })
    LOG.debug('Found volume groups: {0}'.format(vgs))
    return vgs
def _vg_attach_validate(pvnames):
    """Check that every pv exists and is not attached to any vg yet.

    :param pvnames: Iterable of pv device names.
    :raises: errors.PVNotFoundError if some pv does not exist.
    :raises: errors.PVBelongsToVGError if some pv is already in a vg.
    """
    pvs = pvdisplay()
    wanted = set(pvnames)
    known = set(pv['name'] for pv in pvs)
    unattached = set(pv['name'] for pv in pvs if pv['vg'] is None)
    if not wanted.issubset(known):
        raise errors.PVNotFoundError(
            'Error while creating vg: at least one of pv is not found')
    if not wanted.issubset(unattached):
        raise errors.PVBelongsToVGError(
            'Error while creating vg: at least one of pvs is '
            'already attached to some vg')
def vgcreate(vgname, pvname, *args):
    """Create an LVM volume group from one or more physical volumes.

    :param vgname: Name of the new volume group.
    :param pvname: First pv to include.
    :param args: Additional pvs to include.
    :raises: errors.VGAlreadyExistsError if the vg already exists.
    """
    if any(vg['name'] == vgname for vg in vgdisplay()):
        raise errors.VGAlreadyExistsError(
            'Error while creating vg: vg %s already exists' % vgname)
    pvnames = [pvname] + list(args)
    # all pvs must exist and be unattached
    _vg_attach_validate(pvnames)
    utils.execute('vgcreate', vgname, *pvnames, check_exit_code=[0])
def vgextend(vgname, pvname, *args):
    """Attach one or more physical volumes to an existing volume group.

    :param vgname: Name of the volume group.
    :param pvname: First pv to attach.
    :param args: Additional pvs to attach.
    :raises: errors.VGNotFoundError if the vg does not exist.
    """
    if not any(vg['name'] == vgname for vg in vgdisplay()):
        raise errors.VGNotFoundError(
            'Error while extending vg: vg %s not found' % vgname)
    new_pvs = [pvname] + list(args)
    # all pvs must exist and be unattached
    _vg_attach_validate(new_pvs)
    utils.execute('vgextend', vgname, *new_pvs, check_exit_code=[0])
def vgreduce(vgname, pvname, *args):
    """Detach one or more physical volumes from a volume group.

    :param vgname: Name of the volume group.
    :param pvname: First pv to detach.
    :param args: Additional pvs to detach.
    :raises: errors.VGNotFoundError if the vg does not exist.
    :raises: errors.PVNotFoundError if some pv is not attached to the vg.
    """
    # check if vg exists
    # NOTE: the original message wrongly said 'extending' here
    if not filter(lambda x: x['name'] == vgname, vgdisplay()):
        raise errors.VGNotFoundError(
            'Error while reducing vg: vg %s not found' % vgname)
    pvnames = [pvname] + list(args)
    # check if all necessary pv are attached to vg
    if not set(pvnames).issubset(
            set([pv['name'] for pv in pvdisplay() if pv['vg'] == vgname])):
        raise errors.PVNotFoundError(
            'Error while reducing vg: at least one of pv is '
            'not attached to vg')
    utils.execute('vgreduce', '-f', vgname, *pvnames, check_exit_code=[0])
def vgremove(vgname):
    """Remove an LVM volume group.

    :param vgname: Name of the volume group.
    :raises: errors.VGNotFoundError if the vg does not exist.
    """
    # check if vg exists
    # NOTE: the original message wrongly said 'extending' here
    if not filter(lambda x: x['name'] == vgname, vgdisplay()):
        raise errors.VGNotFoundError(
            'Error while removing vg: vg %s not found' % vgname)
    utils.execute('vgremove', '-f', vgname, check_exit_code=[0])
def lvdisplay():
    """Get a report of LVM logical volumes on the node.

    :returns: A list of dicts, one per lv, with keys
        'name', 'size', 'vg', 'uuid', 'path'.
    """
    output = utils.execute(
        'lvdisplay',
        '-C',
        '--noheading',
        '--units', 'm',
        '--options', 'lv_name,lv_size,vg_name,lv_uuid,lv_path',
        '--separator', ';',
        check_exit_code=[0])[0]
    lvs = []
    for raw_line in output.split('\n'):
        stripped = raw_line.strip()
        if not stripped:
            continue
        fields = stripped.split(';')
        lvs.append({
            'name': fields[0],
            'size': utils.parse_unit(fields[1], 'm'),
            'vg': fields[2],
            'uuid': fields[3],
            'path': fields[4]
        })
    LOG.debug('Found logical volumes: {0}'.format(lvs))
    return lvs
def lvcreate(vgname, lvname, size):
    """Create a logical volume in the given volume group.

    :param vgname: Name of the volume group.
    :param lvname: Name of the new logical volume.
    :param size: Size of the lv in MiB.
    :raises: errors.VGNotFoundError if the vg does not exist.
    :raises: errors.NotEnoughSpaceError if the vg lacks free space.
    :raises: errors.LVAlreadyExistsError if the lv already exists.
    """
    vg = filter(lambda x: x['name'] == vgname, vgdisplay())
    # check if vg exists
    # NOTE: the original message wrongly said 'Error while extending vg'
    if not vg:
        raise errors.VGNotFoundError(
            'Error while creating lv: vg %s not found' % vgname)
    # check if enough space is available
    if vg[0]['free'] < size:
        raise errors.NotEnoughSpaceError(
            'Error while creating lv: vg %s has only %s m of free space, '
            'but at least %s m is needed' % (vgname, vg[0]['free'], size))
    # check if lv already exists
    if filter(lambda x: x['name'] == lvname, lvdisplay()):
        raise errors.LVAlreadyExistsError(
            'Error while creating lv: lv %s already exists' % lvname)
    utils.execute('lvcreate', '-L', '%sm' % size, '-n', lvname,
                  vgname, check_exit_code=[0])
def lvremove(lvname):
    """Remove a logical volume.

    :param lvname: Name of the logical volume.
    :raises: errors.LVNotFoundError if the lv does not exist.
    """
    if not any(lv['name'] == lvname for lv in lvdisplay()):
        raise errors.LVNotFoundError(
            'Error while removing lv: lv %s not found' % lvname)
    utils.execute('lvremove', '-f', lvname, check_exit_code=[0])

View File

@ -0,0 +1,109 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fuel_agent import errors
from fuel_agent.openstack.common import log as logging
from fuel_agent.utils import hardware_utils as hu
from fuel_agent.utils import utils
LOG = logging.getLogger(__name__)
def mddisplay():
    """Build a report of software RAID (md) devices present on the node.

    Device names are taken from /proc/mdstat; per-device details come
    from `mdadm --detail`.

    :returns: A list of dicts, one per md device, with 'name',
        'devices' (member device files) and selected mdadm summary
        fields ('Version', 'Raid Level', 'UUID', ...).
    """
    mdnames = []
    with open('/proc/mdstat') as f:
        for line in f.read().split('\n'):
            # array description lines start with the device name, e.g. 'md0 : ...'
            if line.startswith('md'):
                mdnames.append('/dev/%s' % line.split()[0])
    mds = []
    for mdname in mdnames:
        result = utils.execute('mdadm', '--detail', mdname,
                               check_exit_code=[0])
        md = {'name': mdname}
        # mdadm output has a summary header followed by a device table;
        # the table is introduced by this literal column-header line
        h, v = result[0].split('Number Major Minor RaidDevice State')
        for line in h.split('\n'):
            line = line.strip()
            if not line:
                continue
            # keep only whitelisted summary fields; the value is the last
            # whitespace-separated token of the line.
            # NOTE(review): multi-word values (e.g. State 'clean, degraded')
            # keep only the final token -- confirm this is intended.
            for pattern in ('Version', 'Raid Level', 'Raid Devices',
                            'Active Devices', 'Spare Devices',
                            'Failed Devices', 'State', 'UUID'):
                if line.startswith(pattern):
                    md[pattern] = line.split()[-1]
        md['devices'] = []
        for line in v.split('\n'):
            line = line.strip()
            if not line:
                continue
            # last column of the device table is the member device file
            md['devices'].append(line.split()[-1])
        mds.append(md)
    LOG.debug('Found md devices: {0}'.format(mds))
    return mds
def mdcreate(mdname, level, device, *args):
    """Create a software RAID (md) device.

    :param mdname: Name of the md device to create, e.g. /dev/md0.
    :param level: RAID level; one of '0', '1', 'raid0', 'raid1',
        'stripe', 'mirror'.
    :param device: First member device.
    :param args: Additional member devices.
    :raises: errors.MDAlreadyExistsError, errors.MDWrongSpecError,
        errors.MDNotFoundError, errors.MDDeviceDuplicationError
    """
    mds = mddisplay()
    # check if md device already exists
    if filter(lambda x: x['name'] == mdname, mds):
        raise errors.MDAlreadyExistsError(
            'Error while creating md: md %s already exists' % mdname)
    # check if level argument is valid
    supported_levels = ('0', '1', 'raid0', 'raid1', 'stripe', 'mirror')
    if level not in supported_levels:
        raise errors.MDWrongSpecError(
            'Error while creating md device: '
            'level must be one of: %s' % ', '.join(supported_levels))
    devices = [device] + list(args)
    # check if all necessary devices exist
    if not set(devices).issubset(
            set([bd['device'] for bd in hu.list_block_devices(disks=False)])):
        raise errors.MDNotFoundError(
            'Error while creating md: at least one of devices is not found')
    # check if devices are not parts of some md array
    # (explicit set build instead of reduce(), which is not a builtin
    # under python 3)
    busy_devices = set()
    for md in mds:
        busy_devices.update(md['devices'])
    if set(devices) & busy_devices:
        raise errors.MDDeviceDuplicationError(
            'Error while creating md: at least one of devices is '
            'already in belongs to some md')
    # cleaning md metadata from devices; an explicit loop instead of
    # map() so the side effect is not lost to a lazy iterator
    for member in devices:
        mdclean(member)
    utils.execute('mdadm', '--force', '--create', mdname, '-e1.2',
                  '--level=%s' % level,
                  '--raid-devices=%s' % len(devices), *devices,
                  check_exit_code=[0])
def mdremove(mdname):
    """Stop and remove an md device.

    :param mdname: Name of the md device, e.g. /dev/md0.
    :raises: errors.MDNotFoundError if the md device does not exist.
    """
    existing = [md for md in mddisplay() if md['name'] == mdname]
    if not existing:
        raise errors.MDNotFoundError(
            'Error while removing md: md %s not found' % mdname)
    utils.execute('mdadm', '--stop', mdname, check_exit_code=[0])
    # exit code 1 from --remove is tolerated here
    utils.execute('mdadm', '--remove', mdname, check_exit_code=[0, 1])
def mdclean(device):
    """Wipe md metadata (superblock) from a device.

    :param device: A device file, e.g. /dev/sda1.
    """
    # we don't care if device actually exists or not
    utils.execute('mdadm', '--zero-superblock', '--force', device,
                  check_exit_code=[0])

View File

@ -0,0 +1,136 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fuel_agent import errors
from fuel_agent.utils import utils
def info(dev):
    """Get partition table information for a device via `parted -m`.

    :param dev: A device file, e.g. /dev/sda.
    :returns: A dict with 'generic' device info and a 'parts' list.
    """
    # exit code 1 is accepted: parted returns it e.g. for unrecognized
    # disk labels while still producing usable output
    result = utils.execute('parted', '-s', dev, '-m',
                           'unit', 'MiB',
                           'print', 'free',
                           check_exit_code=[0, 1])
    lines = result[0].split('\n')
    # second line of machine-readable output describes the device:
    # path:size:transport:logical-sector:physical-sector:table:model;
    generic_params = lines[1].rstrip(';').split(':')
    generic = {
        'dev': generic_params[0],
        'size': utils.parse_unit(generic_params[1], 'MiB'),
        'logical_block': int(generic_params[3]),
        'physical_block': int(generic_params[4]),
        'table': generic_params[5],
        'model': generic_params[6]
    }
    parts = []
    # remaining lines each describe one partition or free-space gap
    for line in lines[2:]:
        line = line.strip().rstrip(';')
        if not line:
            continue
        part_params = line.split(':')
        parts.append({
            'num': int(part_params[0]),
            'begin': utils.parse_unit(part_params[1], 'MiB'),
            'end': utils.parse_unit(part_params[2], 'MiB'),
            'size': utils.parse_unit(part_params[3], 'MiB'),
            # 'free' marks unallocated space; None when the field is empty
            'fstype': part_params[4] or None
        })
    return {'generic': generic, 'parts': parts}
def wipe(dev):
    """Wipe the partition table on a device.

    :param dev: A device file, e.g. /dev/sda.
    :returns: None
    """
    # making an empty new table is equivalent to wiping the old one
    make_label(dev)
def make_label(dev, label='gpt'):
    """Creates partition label on a device.

    :param dev: A device file, e.g. /dev/sda.
    :param label: Partition label type 'gpt' or 'msdos'. Optional.
    :returns: None
    :raises: errors.WrongPartitionLabelError on unsupported label type.
    """
    supported_labels = ('gpt', 'msdos')
    if label not in supported_labels:
        raise errors.WrongPartitionLabelError(
            'Wrong partition label type: %s' % label)
    utils.execute('parted', '-s', dev, 'mklabel', label,
                  check_exit_code=[0])
def set_partition_flag(dev, num, flag, state='on'):
    """Sets flag on a partition.

    :param dev: A device file, e.g. /dev/sda.
    :param num: Partition number.
    :param flag: Flag name. Must be one of 'bios_grub', 'legacy_boot',
        'boot', 'raid', 'lvm'.
    :param state: Desired flag state. 'on' or 'off'. Default is 'on'.
    :returns: None
    :raises: errors.WrongPartitionSchemeError on invalid flag or state.
    """
    # parted supports more flags but we are interested in setting only
    # this subset of them; not all of these flags are compatible with
    # one another.
    supported_flags = ('bios_grub', 'legacy_boot', 'boot', 'raid', 'lvm')
    if flag not in supported_flags:
        raise errors.WrongPartitionSchemeError(
            'Unsupported partition flag: %s' % flag)
    if state not in ('on', 'off'):
        raise errors.WrongPartitionSchemeError(
            'Wrong partition flag state: %s' % state)
    utils.execute('parted', '-s', dev, 'set', str(num),
                  flag, state, check_exit_code=[0])
def set_gpt_type(dev, num, type_guid):
    """Sets guid on a partition.

    :param dev: A device file, e.g. /dev/sda.
    :param num: Partition number.
    :param type_guid: Partition type guid. Must be one of those listed
        on this page http://en.wikipedia.org/wiki/GUID_Partition_Table.
        This method does not check whether type_guid is valid or not.
    :returns: None
    """
    # TODO(kozhukalov): check whether type_guid is valid
    typecode = '{0}:{1}'.format(num, type_guid)
    utils.execute('sgdisk', '--typecode=%s' % typecode, dev,
                  check_exit_code=[0])
def make_partition(dev, begin, end, ptype):
    """Create a primary or logical partition inside available free space.

    :param dev: A device file, e.g. /dev/sda.
    :param begin: Partition start boundary, MiB.
    :param end: Partition end boundary, MiB.
    :param ptype: Partition type: 'primary' or 'logical'.
    :returns: None
    :raises: errors.WrongPartitionSchemeError on bad type or boundaries.
    """
    if ptype not in ('primary', 'logical'):
        raise errors.WrongPartitionSchemeError(
            'Wrong partition type: %s' % ptype)
    if begin >= end:
        raise errors.WrongPartitionSchemeError(
            'Wrong boundaries: begin >= end')
    # the new partition must fit entirely into one free-space gap
    free_gaps = (p for p in info(dev)['parts'] if p['fstype'] == 'free')
    if not any(gap['begin'] <= begin and end <= gap['end']
               for gap in free_gaps):
        raise errors.WrongPartitionSchemeError(
            'Invalid boundaries: begin and end '
            'are not inside available free space'
        )
    utils.execute('parted', '-a', 'optimal', '-s', dev, 'unit', 'MiB',
                  'mkpart', ptype, str(begin), str(end), check_exit_code=[0])
def remove_partition(dev, num):
    """Remove a partition by its number.

    :param dev: A device file, e.g. /dev/sda.
    :param num: Partition number to remove.
    :raises: errors.PartitionNotFoundError if no such partition exists.
    """
    present = any(p['num'] == num and p['fstype'] != 'free'
                  for p in info(dev)['parts'])
    if not present:
        raise errors.PartitionNotFoundError('Partition %s not found' % num)
    utils.execute('parted', '-s', dev, 'rm', str(num), check_exit_code=[0])

71
fuel_agent/utils/utils.py Normal file
View File

@ -0,0 +1,71 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import locale
import math
import jinja2
import stevedore.driver
from fuel_agent.openstack.common import gettextutils as gtu
from fuel_agent.openstack.common import log as logging
from fuel_agent.openstack.common import processutils
LOG = logging.getLogger(__name__)
def execute(*cmd, **kwargs):
    """Convenience wrapper around oslo's execute() method.

    Logs the command before and after execution together with its
    stdout and stderr.

    :param cmd: Command and its arguments as separate positional args.
    :param kwargs: Passed through to processutils.execute
        (e.g. check_exit_code).
    :returns: Result of processutils.execute; result[0] is stdout and
        result[1] is stderr.
    """
    LOG.debug(gtu._('Trying to execute command: "%s"'), ' '.join(cmd))
    result = processutils.execute(*cmd, **kwargs)
    LOG.debug(gtu._('Execution completed: "%s"'),
              ' '.join(cmd))
    LOG.debug(gtu._('Command stdout: "%s"') % result[0])
    LOG.debug(gtu._('Command stderr: "%s"') % result[1])
    return result
def parse_unit(s, unit, ceil=True):
    """Converts '123.1unit' string into 124 if ceil is True
    and converts '123.9unit' into 123 if ceil is False.

    :param s: String like '123.4m'.
    :param unit: Unit suffix to strip, e.g. 'm' or 'MiB'.
    :param ceil: Round up when True, down when False.
    :returns: Integer number of units.
    """
    # the numeric prefix is interpreted with the current locale
    number = locale.atof(s.split(unit)[0])
    rounder = math.ceil if ceil else math.floor
    return int(rounder(number))
def B2MiB(b, ceil=True):
    """Convert a byte count to MiB.

    :param b: Number of bytes.
    :param ceil: Round up when True, down when False.
    :returns: Integer number of MiB.
    """
    mib = float(b) / (1024 * 1024)
    return int(math.ceil(mib)) if ceil else int(math.floor(mib))
def get_driver(name):
    """Load a driver registered under the 'fuel_agent.drivers'
    stevedore namespace.

    :param name: Driver entry point name, e.g. 'nailgun'.
    :returns: The driver object registered under that name.
    """
    return stevedore.driver.DriverManager(
        namespace='fuel_agent.drivers', name=name).driver
def render_and_save(tmpl_dir, tmpl_name, tmpl_data, file_name):
    """Render a jinja2 template and write the result to a file.

    :param tmpl_dir: Directory where templates live.
    :param tmpl_name: Template file name within tmpl_dir.
    :param tmpl_data: Data dict the template is rendered with.
    :param file_name: Destination file path.
    :raises: Exception if the rendered data cannot be written out.
    """
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_dir))
    template = env.get_template(tmpl_name)
    output = template.render(tmpl_data)
    try:
        with open(file_name, 'w') as f:
            f.write(output)
    except Exception:
        # NOTE: trailing space added -- implicit string concatenation
        # previously produced 'savetemplated data'
        raise Exception('Something goes wrong while trying to save '
                        'templated data to {0}'.format(file_name))

17
fuel_agent/version.py Normal file
View File

@ -0,0 +1,17 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pbr.version
version_info = pbr.version.VersionInfo('fuel-agent')

10
openstack-common.conf Normal file
View File

@ -0,0 +1,10 @@
[DEFAULT]
# The list of modules to copy from oslo-incubator
module=config.generator
module=gettextutils
module=log
module=processutils
# The base module to hold the copy of openstack.common
base=fuel_agent

9
requirements.txt Normal file
View File

@ -0,0 +1,9 @@
Babel>=1.3
eventlet>=0.13.0
iso8601>=0.1.9
jsonschema>=2.3.0
oslo.config>=1.2.0
six>=1.5.2
pbr>=0.7.0
Jinja2
stevedore>=0.15

41
setup.cfg Normal file
View File

@ -0,0 +1,41 @@
[metadata]
name = fuel-agent
version = 0.1.0
author = Mirantis
author-email = fuel-dev@lists.launchpad.net
summary = Fuel agent
classifier =
Development Status :: 4 - Beta
Programming Language :: Python
[files]
packages =
fuel_agent
[entry_points]
console_scripts =
agent_new = fuel_agent.cmd.agent:main
provision = fuel_agent.cmd.provision:main
partition = fuel_agent.cmd.partition:main
fuel_agent.drivers =
nailgun = fuel_agent.drivers.nailgun:Nailgun
[pbr]
autodoc_index_modules = True
# this variable is needed to avoid including files
# from other subprojects in this repository
skip_git_sdist = True
[build_sphinx]
all_files = 1
build-dir = doc/build
source-dir = doc/source
[egg_info]
tag_build =
tag_date = 0
tag_svn_revision = 0
[wheel]
universal = 1

27
setup.py Normal file
View File

@ -0,0 +1,27 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import setuptools
import pbr
import pbr.packaging
# this monkey patch is to avoid appending git version to version
pbr.packaging._get_version_from_git = lambda pre_version: pre_version
setuptools.setup(
setup_requires=['pbr'],
pbr=True)

5
test-requirements.txt Normal file
View File

@ -0,0 +1,5 @@
hacking>=0.8.0,<0.9
mock>=1.0
oslotest==1.0
testtools>=0.9.34
testrepository>=0.0.18

25
tools/config/check_uptodate.sh Executable file
View File

@ -0,0 +1,25 @@
#!/usr/bin/env bash
# Verify that the committed sample config file matches what
# tools/config/generate_sample.sh would produce from current sources.
PROJECT_NAME=${PROJECT_NAME:-fuel_agent}
CFGFILE_NAME=${PROJECT_NAME}.conf.sample
# The sample may live either in etc/<project>/ or directly in etc/.
if [ -e etc/${PROJECT_NAME}/${CFGFILE_NAME} ]; then
    CFGFILE=etc/${PROJECT_NAME}/${CFGFILE_NAME}
elif [ -e etc/${CFGFILE_NAME} ]; then
    CFGFILE=etc/${CFGFILE_NAME}
else
    echo "${0##*/}: can not find config file"
    exit 1
fi
TEMPDIR=`mktemp -d /tmp/${PROJECT_NAME}.XXXXXX`
# Remove the scratch directory on any exit path.
trap "rm -rf $TEMPDIR" EXIT
tools/config/generate_sample.sh -b ./ -p ${PROJECT_NAME} -o ${TEMPDIR}
# Non-empty diff means the committed sample is stale.
if ! diff -u ${TEMPDIR}/${CFGFILE_NAME} ${CFGFILE}
then
    echo "${0##*/}: ${PROJECT_NAME}.conf.sample is not up to date."
    echo "${0##*/}: Please run ${0%%${0##*/}}generate_sample.sh."
    exit 1
fi

126
tools/config/generate_sample.sh Executable file
View File

@ -0,0 +1,126 @@
#!/usr/bin/env bash
# Generate a sample configuration file by scanning the project's python
# modules for oslo.config option definitions (oslo-incubator tool).
print_hint() {
    echo "Try \`${0##*/} --help' for more information." >&2
}
PARSED_OPTIONS=$(getopt -n "${0##*/}" -o hb:p:m:l:o:f: \
    --long help,base-dir:,package-name:,output-dir:,output-file:,module:,library: -- "$@")
if [ $? != 0 ] ; then print_hint ; exit 1 ; fi
eval set -- "$PARSED_OPTIONS"
while true; do
    case "$1" in
        -h|--help)
            echo "${0##*/} [options]"
            echo ""
            echo "options:"
            echo "-h, --help show brief help"
            echo "-b, --base-dir=DIR project base directory"
            echo "-p, --package-name=NAME project package name"
            echo "-o, --output-dir=DIR file output directory"
            echo "-f, --output-file=FILE file output directory"
            echo "-m, --module=MOD extra python module to interrogate for options"
            echo "-l, --library=LIB extra library that registers options for discovery"
            exit 0
            ;;
        -b|--base-dir)
            shift
            BASEDIR=`echo $1 | sed -e 's/\/*$//g'`
            shift
            ;;
        -p|--package-name)
            shift
            PACKAGENAME=`echo $1`
            shift
            ;;
        -o|--output-dir)
            shift
            OUTPUTDIR=`echo $1 | sed -e 's/\/*$//g'`
            shift
            ;;
        -f|--output-file)
            shift
            OUTPUTFILE=`echo $1 | sed -e 's/\/*$//g'`
            shift
            ;;
        -m|--module)
            shift
            MODULES="$MODULES -m $1"
            shift
            ;;
        -l|--library)
            shift
            LIBRARIES="$LIBRARIES -l $1"
            shift
            ;;
        --)
            break
            ;;
    esac
done
# Default the base directory to the cwd and make it absolute.
BASEDIR=${BASEDIR:-`pwd`}
if ! [ -d $BASEDIR ]
then
    echo "${0##*/}: missing project base directory" >&2 ; print_hint ; exit 1
elif [[ $BASEDIR != /* ]]
then
    BASEDIR=$(cd "$BASEDIR" && pwd)
fi
# Derive the python package name from the directory name if not given.
PACKAGENAME=${PACKAGENAME:-${BASEDIR##*/}}
PACKAGENAME=`echo $PACKAGENAME | tr - _`
TARGETDIR=$BASEDIR/$PACKAGENAME
if ! [ -d $TARGETDIR ]
then
    echo "${0##*/}: invalid project package name" >&2 ; print_hint ; exit 1
fi
OUTPUTDIR=${OUTPUTDIR:-$BASEDIR/etc}
# NOTE(bnemec): Some projects put their sample config in etc/,
# some in etc/$PACKAGENAME/
if [ -d $OUTPUTDIR/$PACKAGENAME ]
then
    OUTPUTDIR=$OUTPUTDIR/$PACKAGENAME
elif ! [ -d $OUTPUTDIR ]
then
    echo "${0##*/}: cannot access \`$OUTPUTDIR': No such file or directory" >&2
    exit 1
fi
BASEDIRESC=`echo $BASEDIR | sed -e 's/\//\\\\\//g'`
find $TARGETDIR -type f -name "*.pyc" -delete
# Collect every source file that defines oslo.config options.
FILES=$(find $TARGETDIR -type f -name "*.py" ! -path "*/tests/*" ! -path "*/nova/*" \
    -exec grep -l "Opt(" {} + | sed -e "s/^$BASEDIRESC\///g" | sort -u)
# Optional rc file may add extra modules/libraries to scan.
RC_FILE="`dirname $0`/oslo.config.generator.rc"
if test -r "$RC_FILE"
then
    source "$RC_FILE"
fi
for mod in ${FUEL_AGENT_CONFIG_GENERATOR_EXTRA_MODULES}; do
    MODULES="$MODULES -m $mod"
done
for lib in ${FUEL_AGENT_CONFIG_GENERATOR_EXTRA_LIBRARIES}; do
    LIBRARIES="$LIBRARIES -l $lib"
done
export EVENTLET_NO_GREENDNS=yes
# Unset all OS_* variables so local credentials do not leak into the sample.
OS_VARS=$(set | sed -n '/^OS_/s/=[^=]*$//gp' | xargs)
[ "$OS_VARS" ] && eval "unset \$OS_VARS"
DEFAULT_MODULEPATH=fuel_agent.openstack.common.config.generator
MODULEPATH=${MODULEPATH:-$DEFAULT_MODULEPATH}
OUTPUTFILE=${OUTPUTFILE:-$OUTPUTDIR/$PACKAGENAME.conf.sample}
python -m $MODULEPATH $MODULES $LIBRARIES $FILES > $OUTPUTFILE
# Hook to allow projects to append custom config file snippets
CONCAT_FILES=$(ls $BASEDIR/tools/config/*.conf.sample 2>/dev/null)
for CONCAT_FILE in $CONCAT_FILES; do
    cat $CONCAT_FILE >> $OUTPUTFILE
done

View File

7
tools/with_venv.sh Executable file
View File

@ -0,0 +1,7 @@
#!/bin/bash
# Run the given command inside the project's virtualenv.
# Usage: tools/with_venv.sh <command> [args...]
# tools_path/venv_path/venv_name may be overridden via the environment.
tools_path=${tools_path:-$(dirname $0)}
venv_path=${venv_path:-${tools_path}}
venv_dir=${venv_name:-/../.venv}
TOOLS=${tools_path}
VENV=${venv:-${venv_path}/${venv_dir}}
source ${VENV}/bin/activate && "$@"

43
tox.ini Normal file
View File

@ -0,0 +1,43 @@
[tox]
minversion = 1.6
skipsdist = True
envlist = py26,py27,pep8
[testenv]
usedevelop = True
install_command = pip install --allow-external -U {opts} {packages}
setenv = VIRTUAL_ENV={envdir}
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
python setup.py testr --slowest --testr-args='{posargs:}'
[tox:jenkins]
downloadcache = ~/cache/pip
[testenv:pep8]
deps = hacking==0.7
commands =
flake8 {posargs:fuel_agent}
[testenv:cover]
setenv = VIRTUAL_ENV={envdir}
commands =
python setup.py testr --coverage {posargs:fuel_agent}
[testenv:venv]
commands = {posargs:}
[testenv:devenv]
envdir = devenv
usedevelop = True
[flake8]
ignore = H234,H302,H802
exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools,docs
show-pep8 = True
show-source = True
count = True
[hacking]
import_exceptions = fuel_agent.openstack.common.gettextutils._,testtools.matchers