Removed fuel_agent and fuel_agent_ci

We've moved fuel_agent into a separate repository,
so we no longer need this directory here.

Closes-Bug: #1471849
Change-Id: I165259020721a4f6a87dce3a419bfdf64a5d0fcb
Vladimir Kozhukalov 2015-07-16 11:25:44 +03:00
parent bb362555db
commit ea8a7f94dd
120 changed files with 1 addition and 17259 deletions

View File

@@ -1,5 +0,0 @@
[DEFAULT]
test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_TEST_TIMEOUT=60 ${PYTHON:-python} -m subunit.run discover -s fuel_agent/tests -p "*.py" $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list
test_run_concurrency=echo 1

View File

@@ -1,105 +0,0 @@
#cloud-boothook
#!/bin/bash
function add_str_to_file_if_not_exists {
file=$1
str=$2
val=$3
if ! grep -q "^ *${str}" $file; then
echo $val >> $file
fi
}
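# Note: 'cloud-init-per instance <name> <cmd>' (used throughout below)
# runs <cmd> at most once per instance, keyed by <name>, which keeps
# this boothook idempotent across reboots of the same node.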
cloud-init-per instance disable_selinux_on_the_fly setenforce 0
cloud-init-per instance disable_selinux sed -i 's/^SELINUX=.*/SELINUX=disabled/g' /etc/sysconfig/selinux
# configure udev rules
# udev persistent net
cloud-init-per instance udev_persistent_net1 service network stop
ADMIN_MAC={{ common.admin_mac }}
ADMIN_IF=$(echo {{ common.udevrules }} | sed 's/[,=]/\n/g' | grep "$ADMIN_MAC" | cut -d_ -f2 | head -1)
cloud-init-per instance configure_admin_interface /bin/sh -c "echo -e \"# FROM COBBLER SNIPPET\nDEVICE=$ADMIN_IF\nIPADDR={{ common.admin_ip }}\nNETMASK={{ common.admin_mask }}\nBOOTPROTO=none\nONBOOT=yes\nUSERCTL=no\n\" | tee /etc/sysconfig/network-scripts/ifcfg-$ADMIN_IF"
cloud-init-per instance set_gateway /bin/sh -c 'echo GATEWAY="{{ common.gw }}" | tee -a /etc/sysconfig/network'
cloud-init-per instance udev_persistent_net5 service network start
# end of udev
#FIXME(agordeev): if the operator updates DNS settings on the master node after the node has been provisioned,
# cloud-init will keep generating resolv.conf with stale data
cloud-init-per instance resolv_conf_remove rm -f /etc/resolv.conf
cloud-init-per instance resolv_conf_header /bin/sh -c 'echo "# re-generated by cloud-init boothook only at the first boot;" | tee /etc/resolv.conf'
cloud-init-per instance resolv_conf_search /bin/sh -c 'echo "search {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolv.conf'
cloud-init-per instance resolv_conf_domain /bin/sh -c 'echo "domain {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolv.conf'
cloud-init-per instance resolv_conf_nameserver /bin/sh -c 'echo nameserver {{ common.master_ip }} | tee -a /etc/resolv.conf'
# configure black module lists
# virt-what should be installed
if [ ! -f /etc/modprobe.d/blacklist-i2c_piix4.conf ]; then
([[ $(virt-what) = "virtualbox" ]] && echo "blacklist i2c_piix4" >> /etc/modprobe.d/blacklist-i2c_piix4.conf || :)
modprobe -r i2c_piix4
fi
cloud-init-per instance conntrack_ipv4 /bin/sh -c 'echo nf_conntrack_ipv4 | tee -a /etc/rc.modules'
cloud-init-per instance conntrack_ipv6 /bin/sh -c 'echo nf_conntrack_ipv6 | tee -a /etc/rc.modules'
cloud-init-per instance chmod_rc_modules chmod +x /etc/rc.modules
cloud-init-per instance conntrack_max /bin/sh -c 'echo "net.nf_conntrack_max=1048576" | tee -a /etc/sysctl.conf'
cloud-init-per instance conntrack_ipv4_load modprobe nf_conntrack_ipv4
cloud-init-per instance conntrack_ipv6_load modprobe nf_conntrack_ipv6
cloud-init-per instance conntrack_max_set sysctl -w "net.nf_conntrack_max=1048576"
cloud-init-per instance mkdir_coredump mkdir -p /var/log/coredump
cloud-init-per instance set_coredump /bin/sh -c 'echo -e "kernel.core_pattern=/var/log/coredump/core.%e.%p.%h.%t" | tee -a /etc/sysctl.conf'
cloud-init-per instance set_coredump_sysctl sysctl -w "kernel.core_pattern=/var/log/coredump/core.%e.%p.%h.%t"
cloud-init-per instance set_chmod chmod 777 /var/log/coredump
cloud-init-per instance set_limits /bin/sh -c 'echo -e "* soft core unlimited\n* hard core unlimited" | tee -a /etc/security/limits.conf'
#NOTE: disabled for centos?
#cloud-init-per instance dhclient echo 'supersede routers 0;' | tee /etc/dhcp/dhclient.conf
# ntp sync
# '| tee /dev/null' is needed to always return a zero exit code
cloud-init-per instance stop_ntpd /bin/sh -c 'service ntpd stop | tee /dev/null'
cloud-init-per instance sync_date ntpdate -t 4 -b {{ common.master_ip }}
cloud-init-per instance sync_hwclock hwclock --systohc
cloud-init-per instance edit_ntp_conf1 sed -i '/^\s*tinker panic/ d' /etc/ntp.conf
cloud-init-per instance edit_ntp_conf2 sed -i '1 i tinker panic 0' /etc/ntp.conf
cloud-init-per instance edit_ntp_conf_mkdir mkdir -p /var/lib/ntp
cloud-init-per instance edit_ntp_conf3 /bin/sh -c 'echo 0 | tee /var/lib/ntp/drift'
cloud-init-per instance edit_ntp_conf4 chown ntp: /var/lib/ntp/drift
cloud-init-per instance edit_ntp_conf5 sed -i '/^\s*server/ d' /etc/ntp.conf
cloud-init-per instance edit_ntp_conf6 /bin/sh -c 'echo "server {{ common.master_ip }} burst iburst" | tee -a /etc/ntp.conf'
# Point installed ntpd to Master node
cloud-init-per instance set_ntpdate sed -i 's/SYNC_HWCLOCK\s*=\s*no/SYNC_HWCLOCK=yes/' /etc/sysconfig/ntpdate
cloud-init-per instance set_ntpd_0 chkconfig ntpd on
cloud-init-per instance set_ntpd_1 chkconfig ntpdate on
cloud-init-per instance start_ntpd service ntpd start
cloud-init-per instance removeUseDNS sed -i --follow-symlinks -e '/UseDNS/d' /etc/ssh/sshd_config
add_str_to_file_if_not_exists /etc/ssh/sshd_config 'UseDNS' 'UseDNS no'
cloud-init-per instance gssapi_disable sed -i -e "/^\s*GSSAPICleanupCredentials yes/d" -e "/^\s*GSSAPIAuthentication yes/d" /etc/ssh/sshd_config
cloud-init-per instance nailgun_agent_0 /bin/sh -c 'echo "rm -f /etc/nailgun-agent/nodiscover" | tee /etc/rc.local'
cloud-init-per instance nailgun_agent_1 /bin/sh -c 'echo "flock -w 0 -o /var/lock/agent.lock -c \"/opt/nailgun/bin/agent >> /var/log/nailgun-agent.log 2>&1\"" | tee -a /etc/rc.local'
# Copying default bash settings to the root directory
cloud-init-per instance skel_bash cp -f /etc/skel/.bash* /root/
# Puppet config
cloud-init-per instance hiera_puppet mkdir -p /etc/puppet /var/lib/hiera
cloud-init-per instance touch_puppet touch /var/lib/hiera/common.yaml /etc/puppet/hiera.yaml
# Mcollective enable
cloud-init-per instance mcollective_enable sed -i /etc/rc.d/init.d/mcollective -e 's/\(# chkconfig:\s\+[-0-6]\+\) [0-9]\+ \([0-9]\+\)/\1 81 \2/'

View File

@@ -1,92 +0,0 @@
#cloud-boothook
#!/bin/bash
function add_str_to_file_if_not_exists {
file=$1
str=$2
val=$3
if ! grep -q "^ *${str}" $file; then
echo $val >> $file
fi
}
cloud-init-per instance wipe_sources_list_templates /bin/sh -c 'echo | tee /etc/cloud/templates/sources.list.ubuntu.tmpl'
# configure udev rules
# udev persistent net
cloud-init-per instance udev_persistent_net1 /etc/init.d/networking stop
ADMIN_MAC={{ common.admin_mac }}
ADMIN_IF=$(echo {{ common.udevrules }} | sed 's/[,=]/\n/g' | grep "$ADMIN_MAC" | cut -d_ -f2 | head -1)
# Check if we do not already have static config (or interface seems unconfigured)
if [ ! -d "/etc/network/interfaces.d" ]; then
mkdir -p /etc/network/interfaces.d
echo 'source /etc/network/interfaces.d/*' > /etc/network/interfaces
fi
if [ ! -e "/etc/network/interfaces.d/ifcfg-$ADMIN_IF" ]; then
echo -e "auto $ADMIN_IF\niface $ADMIN_IF inet static\n\taddress {{ common.admin_ip }}\n\tnetmask {{ common.admin_mask }}\n\tgateway {{ common.gw }}" > /etc/network/interfaces.d/ifcfg-"$ADMIN_IF"
fi
cloud-init-per instance udev_persistent_net5 /etc/init.d/networking start
# end of udev
#FIXME(agordeev): if the operator updates DNS settings on the master node after the node has been provisioned,
# cloud-init will keep generating resolv.conf with stale data
cloud-init-per instance resolv_conf_mkdir mkdir -p /etc/resolvconf/resolv.conf.d
cloud-init-per instance resolv_conf_remove rm -f /etc/resolv.conf
cloud-init-per instance resolv_conf_head_remove rm -f /etc/resolvconf/resolv.conf.d/head
cloud-init-per instance resolv_conf_header /bin/sh -c 'echo "# re-generated by cloud-init boothook only at the first boot;" | tee /etc/resolv.conf'
cloud-init-per instance resolv_conf_search /bin/sh -c 'echo "search {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolv.conf'
cloud-init-per instance resolv_conf_domain /bin/sh -c 'echo "domain {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolv.conf'
cloud-init-per instance resolv_conf_head_header /bin/sh -c 'echo "# re-generated by cloud-init boothook only at the first boot;" | tee /etc/resolvconf/resolv.conf.d/head'
cloud-init-per instance resolv_conf_head_search /bin/sh -c 'echo "search {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolvconf/resolv.conf.d/head'
cloud-init-per instance resolv_conf_head_domain /bin/sh -c 'echo "domain {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolvconf/resolv.conf.d/head'
cloud-init-per instance resolv_conf_nameserver /bin/sh -c 'echo nameserver {{ common.master_ip|replace('"','') }} | tee -a /etc/resolv.conf'
cloud-init-per instance resolv_conf_head_nameserver /bin/sh -c 'echo nameserver {{ common.master_ip|replace('"','') }} | tee -a /etc/resolvconf/resolv.conf.d/head'
# configure black module lists
# virt-what should be installed
if [ ! -f /etc/modprobe.d/blacklist-i2c_piix4.conf ]; then
([[ $(virt-what) = "virtualbox" ]] && echo "blacklist i2c_piix4" >> /etc/modprobe.d/blacklist-i2c_piix4.conf || :) && update-initramfs -u -k all
modprobe -r i2c_piix4
fi
cloud-init-per instance conntrack_ipv4 /bin/sh -c 'echo nf_conntrack_ipv4 | tee -a /etc/modules'
cloud-init-per instance conntrack_ipv6 /bin/sh -c 'echo nf_conntrack_ipv6 | tee -a /etc/modules'
cloud-init-per instance conntrack_max /bin/sh -c 'echo "net.nf_conntrack_max=1048576" | tee -a /etc/sysctl.conf'
cloud-init-per instance conntrack_ipv4_load modprobe nf_conntrack_ipv4
cloud-init-per instance conntrack_ipv6_load modprobe nf_conntrack_ipv6
cloud-init-per instance conntrack_max_set sysctl -w "net.nf_conntrack_max=1048576"
cloud-init-per instance dhclient /bin/sh -c 'echo "supersede routers 0;" | tee /etc/dhcp/dhclient.conf'
# ntp sync
# '| tee /dev/null' is needed to always return a zero exit code
cloud-init-per instance stop_ntp /bin/sh -c 'service ntp stop | tee /dev/null'
cloud-init-per instance sync_date ntpdate -t 4 -b {{ common.master_ip }}
cloud-init-per instance sync_hwclock hwclock --systohc
cloud-init-per instance edit_ntp_conf1 sed -i '/^\s*tinker panic/ d' /etc/ntp.conf
cloud-init-per instance edit_ntp_conf2 sed -i '1 i tinker panic 0' /etc/ntp.conf
cloud-init-per instance edit_ntp_conf_mkdir mkdir -p /var/lib/ntp
cloud-init-per instance edit_ntp_conf3 /bin/sh -c 'echo 0 | tee /var/lib/ntp/drift'
cloud-init-per instance edit_ntp_conf4 sed -i '/^\s*server/ d' /etc/ntp.conf
cloud-init-per instance edit_ntp_conf5 /bin/sh -c 'echo "server {{ common.master_ip }} burst iburst" | tee -a /etc/ntp.conf'
cloud-init-per instance start_ntp service ntp start
cloud-init-per instance removeUseDNS sed -i --follow-symlinks -e '/UseDNS/d' /etc/ssh/sshd_config
add_str_to_file_if_not_exists /etc/ssh/sshd_config 'UseDNS' 'UseDNS no'
cloud-init-per instance gssapi_disable sed -i -e "/^\s*GSSAPICleanupCredentials yes/d" -e "/^\s*GSSAPIAuthentication yes/d" /etc/ssh/sshd_config
cloud-init-per instance nailgun_agent_0 /bin/sh -c 'echo "rm -f /etc/nailgun-agent/nodiscover" | tee /etc/rc.local'
cloud-init-per instance nailgun_agent_1 /bin/sh -c 'echo "flock -w 0 -o /var/lock/agent.lock -c \"/opt/nailgun/bin/agent >> /var/log/nailgun-agent.log 2>&1\"" | tee -a /etc/rc.local'
# Copying default bash settings to the root directory
cloud-init-per instance skel_bash cp -f /etc/skel/.bash* /root/
cloud-init-per instance hiera_puppet mkdir -p /etc/puppet /var/lib/hiera
cloud-init-per instance touch_puppet touch /var/lib/hiera/common.yaml /etc/puppet/hiera.yaml

View File

@@ -1,104 +0,0 @@
#cloud-config
resize_rootfs: false
growpart:
mode: false
disable_ec2_metadata: true
disable_root: false
# password: RANDOM
# chpasswd: { expire: True }
ssh_pwauth: false
ssh_authorized_keys:
{% for key in common.ssh_auth_keys %}
- {{ key }}
{% endfor %}
# set the locale to a given locale
# default: en_US.UTF-8
locale: en_US.UTF-8
timezone: {{ common.timezone }}
hostname: {{ common.hostname }}
fqdn: {{ common.fqdn }}
# add entries to rsyslog configuration
rsyslog:
- filename: 10-log2master.conf
content: |
$template LogToMaster, "<%%PRI%>1 %$NOW%T%TIMESTAMP:8:$%Z %HOSTNAME% %APP-NAME% %PROCID% %MSGID% -%msg%\n"
*.* @{{ common.master_ip }};LogToMaster
runcmd:
{% if puppet.enable != 1 %}
- service puppet stop
- chkconfig puppet off
{% endif %}
{% if mcollective.enable != 1 %}
- service mcollective stop
- chkconfig mcollective off
{% else %}
- chkconfig mcollective on
- service mcollective restart
{% endif %}
- iptables -t filter -F INPUT
- iptables -t filter -F FORWARD
- service iptables save
# the write_files module is missing in cloud-init 0.6.3, but present in >= 0.7.3
write_files:
- content: |
---
url: {{ common.master_url }}
path: /etc/nailgun-agent/config.yaml
- content: target
path: /etc/nailgun_systemtype
mcollective:
conf:
main_collective: mcollective
collectives: mcollective
libdir: /usr/libexec/mcollective
logfile: /var/log/mcollective.log
loglevel: debug
daemonize: 1
direct_addressing: 1
ttl: 4294957
securityprovider: psk
plugin.psk: {{ mcollective.pskey }}
{% if mcollective.connector == 'stomp' %}
connector: stomp
plugin.stomp.host: {{ mcollective.host }}
plugin.stomp.port: {{ mcollective.port|default(61613) }}
plugin.stomp.user: {{ mcollective.user }}
plugin.stomp.password: {{ mcollective.password }}
{% else %}
connector: rabbitmq
plugin.rabbitmq.vhost: {{ mcollective.vhost }}
plugin.rabbitmq.pool.size: 1
plugin.rabbitmq.pool.1.host: {{ mcollective.host }}
plugin.rabbitmq.pool.1.port: {{ mcollective.port|default(61613) }}
plugin.rabbitmq.pool.1.user: {{ mcollective.user }}
plugin.rabbitmq.pool.1.password: {{ mcollective.password }}
plugin.rabbitmq.heartbeat_interval: 30
{% endif %}
factsource: yaml
plugin.yaml: /etc/mcollective/facts.yaml
puppet:
conf:
main:
logdir: /var/log/puppet
rundir: /var/run/puppet
ssldir: $vardir/ssl
pluginsync: true
agent:
classfile: $vardir/classes.txt
localconfig: $vardir/localconfig
server: {{ puppet.master }}
report: false
configtimeout: 600
final_message: "YAY! The system is finally up, after $UPTIME seconds"

View File

@@ -1,103 +0,0 @@
#cloud-config
resize_rootfs: false
growpart:
mode: false
disable_ec2_metadata: true
disable_root: false
user: root
password: r00tme
chpasswd: { expire: false }
ssh_pwauth: false
ssh_authorized_keys:
{% for key in common.ssh_auth_keys %}
- {{ key }}
{% endfor %}
# set the locale to a given locale
# default: en_US.UTF-8
locale: en_US.UTF-8
timezone: {{ common.timezone }}
hostname: {{ common.hostname }}
fqdn: {{ common.fqdn }}
# add entries to rsyslog configuration
rsyslog:
- filename: 10-log2master.conf
content: |
$template LogToMaster, "<%%PRI%>1 %$NOW%T%TIMESTAMP:8:$%Z %HOSTNAME% %APP-NAME% %PROCID% %MSGID% -%msg%\n"
*.* @{{ common.master_ip }};LogToMaster
# the write_files module is missing in cloud-init 0.6.3, but present in >= 0.7.3
write_files:
- content: |
---
url: {{ common.master_url }}
path: /etc/nailgun-agent/config.yaml
- content: target
path: /etc/nailgun_systemtype
mcollective:
conf:
main_collective: mcollective
collectives: mcollective
libdir: /usr/share/mcollective/plugins
logfile: /var/log/mcollective.log
loglevel: debug
daemonize: 0
direct_addressing: 1
ttl: 4294957
securityprovider: psk
plugin.psk: {{ mcollective.pskey }}
{% if mcollective.connector == 'stomp' %}
connector: stomp
plugin.stomp.host: {{ mcollective.host }}
plugin.stomp.port: {{ mcollective.port|default(61613) }}
plugin.stomp.user: {{ mcollective.user }}
plugin.stomp.password: {{ mcollective.password }}
{% else %}
connector: rabbitmq
plugin.rabbitmq.vhost: {{ mcollective.vhost }}
plugin.rabbitmq.pool.size: 1
plugin.rabbitmq.pool.1.host: {{ mcollective.host }}
plugin.rabbitmq.pool.1.port: {{ mcollective.port|default(61613) }}
plugin.rabbitmq.pool.1.user: {{ mcollective.user }}
plugin.rabbitmq.pool.1.password: {{ mcollective.password }}
plugin.rabbitmq.heartbeat_interval: 30
{% endif %}
factsource: yaml
plugin.yaml: /etc/mcollective/facts.yaml
puppet:
conf:
main:
logdir: /var/log/puppet
rundir: /var/run/puppet
ssldir: $vardir/ssl
pluginsync: true
agent:
classfile: $vardir/classes.txt
localconfig: $vardir/localconfig
server: {{ puppet.master }}
report: false
configtimeout: 600
runcmd:
{% if puppet.enable != 1 %}
- /usr/sbin/invoke-rc.d puppet stop
- /usr/sbin/update-rc.d -f puppet remove
{% endif %}
{% if mcollective.enable != 1 %}
- /usr/sbin/invoke-rc.d mcollective stop
- echo manual > /etc/init/mcollective.override
{% else %}
- rm -f /etc/init/mcollective.override
- service mcollective restart
{% endif %}
- iptables -t filter -F INPUT
- iptables -t filter -F FORWARD
final_message: "YAY! The system is finally up, after $UPTIME seconds"

View File

@@ -1,11 +0,0 @@
# instance-id will be autogenerated
# instance-id: iid-abcdefg
#network-interfaces: |
# auto {{ common.admin_iface_name|default("eth0") }}
# iface {{ common.admin_iface_name|default("eth0") }} inet static
# address {{ common.admin_ip }}
# # network 192.168.1.0
# netmask {{ common.admin_mask }}
# # broadcast 192.168.1.255
# # gateway 192.168.1.254
hostname: {{ common.hostname }}

View File

@@ -1,11 +0,0 @@
# instance-id will be autogenerated
# instance-id: iid-abcdefg
#network-interfaces: |
# auto {{ common.admin_iface_name|default("eth0") }}
# iface {{ common.admin_iface_name|default("eth0") }} inet static
# address {{ common.admin_ip }}
# # network 192.168.1.0
# netmask {{ common.admin_mask }}
# # broadcast 192.168.1.255
# # gateway 192.168.1.254
hostname: {{ common.hostname }}

View File

@@ -1,184 +0,0 @@
[DEFAULT]
#
# Options defined in fuel_agent.manager
#
# Data driver (string value)
#data_driver=nailgun
# Path to directory with cloud init templates (string value)
#nc_template_path=/usr/share/fuel-agent/cloud-init-templates
# Temporary directory for file manipulations (string value)
#tmp_path=/tmp
# Path where to store generated config drive image (string
# value)
#config_drive_path=/tmp/config-drive.img
# Path where to store actual rules for udev daemon (string
# value)
#udev_rules_dir=/etc/udev/rules.d
# Path where to store default rules for udev daemon (string
# value)
#udev_rules_lib_dir=/lib/udev/rules.d
# Substring to which the .rules file extension is renamed
# (string value)
#udev_rename_substr=.renamedrule
# Directory where we build images (string value)
#image_build_dir=/tmp
# Suffix which is used while creating temporary files (string value)
#image_build_suffix=.fuel-agent-image
#
# Options defined in fuel_agent.cmd.agent
#
# Input data file (string value)
#input_data_file=/tmp/provision.json
# Input data (json string) (string value)
#input_data=
#
# Options defined in fuel_agent.openstack.common.log
#
# Print debugging output (set logging level to DEBUG instead
# of default WARNING level). (boolean value)
debug=true
# Print more verbose output (set logging level to INFO instead
# of default WARNING level). (boolean value)
#verbose=false
# Log output to standard error. (boolean value)
use_stderr=false
# Format string to use for log messages with context. (string
# value)
#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
# Format string to use for log messages without context.
# (string value)
#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
# Data to append to log format when level is DEBUG. (string
# value)
logging_debug_format_suffix=
# Prefix each line of exception output with this format.
# (string value)
#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
# List of logger=LEVEL pairs. (list value)
#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN
# Enables or disables publication of error events. (boolean
# value)
#publish_errors=false
# Enables or disables fatal status of deprecations. (boolean
# value)
#fatal_deprecations=false
# The format for an instance that is passed with the log
# message. (string value)
#instance_format="[instance: %(uuid)s] "
# The format for an instance UUID that is passed with the log
# message. (string value)
#instance_uuid_format="[instance: %(uuid)s] "
# The name of a logging configuration file. This file is
# appended to any existing logging configuration files. For
# details about logging configuration files, see the Python
# logging module documentation. (string value)
# Deprecated group/name - [DEFAULT]/log_config
#log_config_append=<None>
# DEPRECATED. A logging.Formatter log message format string
# which may use any of the available logging.LogRecord
# attributes. This option is deprecated. Please use
# logging_context_format_string and
# logging_default_format_string instead. (string value)
#log_format=<None>
# Format string for %%(asctime)s in log records. Default:
# %(default)s . (string value)
#log_date_format=%Y-%m-%d %H:%M:%S
# (Optional) Name of log file to output to. If no default is
# set, logging will go to stdout. (string value)
# Deprecated group/name - [DEFAULT]/logfile
log_file=/var/log/fuel-agent.log
# (Optional) The base directory used for relative --log-file
# paths. (string value)
# Deprecated group/name - [DEFAULT]/logdir
#log_dir=<None>
# Use syslog for logging. Existing syslog format is DEPRECATED
# during I, and will change in J to honor RFC5424. (boolean
# value)
#use_syslog=false
# (Optional) Enables or disables syslog rfc5424 format for
# logging. If enabled, prefixes the MSG part of the syslog
# message with APP-NAME (RFC5424). The format without the APP-
# NAME is deprecated in I, and will be removed in J. (boolean
# value)
#use_syslog_rfc_format=false
# Syslog facility to receive log lines. (string value)
#syslog_log_facility=LOG_USER
#
# Options defined in fuel_agent.utils.artifact
#
# Size of data chunk to operate with images (integer value)
#data_chunk_size=1048576
#
# Options defined in fuel_agent.utils.build
#
# Maximum allowed loop devices count to use (integer value)
#max_loop_count=255
# Size of sparse file in MiBs (integer value)
#sparse_file_size=2048
# System-wide major number for loop device (integer value)
#loop_dev_major=7
#
# Options defined in fuel_agent.utils.utils
#
# Maximum retries count for http requests. 0 means infinite
# (integer value)
#http_max_retries=30
# Http request timeout in seconds (floating point value)
#http_request_timeout=10.0
# Delay in seconds before the next http request retry
# (floating point value)
#http_retry_delay=2.0
# Block size of data to read for calculating checksum (integer
# value)
#read_chunk_size=1048576

View File

@@ -1,103 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo.config import cfg
from oslo_serialization import jsonutils as json
import six
from fuel_agent import manager as manager
from fuel_agent.openstack.common import log as logging
from fuel_agent import version
cli_opts = [
cfg.StrOpt(
'input_data_file',
default='/tmp/provision.json',
help='Input data file'
),
cfg.StrOpt(
'input_data',
default='',
help='Input data (json string)'
),
]
CONF = cfg.CONF
CONF.register_cli_opts(cli_opts)
def provision():
main(['do_provisioning'])
def partition():
main(['do_partitioning'])
def copyimage():
main(['do_copyimage'])
def configdrive():
main(['do_configdrive'])
def bootloader():
main(['do_bootloader'])
def build_image():
main(['do_build_image'])
def print_err(line):
sys.stderr.write(six.text_type(line))
sys.stderr.write('\n')
def handle_exception(exc):
LOG = logging.getLogger(__name__)
LOG.exception(exc)
print_err('Unexpected error')
print_err(exc)
sys.exit(-1)
def main(actions=None):
CONF(sys.argv[1:], project='fuel-agent',
version=version.version_info.release_string())
logging.setup('fuel-agent')
LOG = logging.getLogger(__name__)
try:
if CONF.input_data:
data = json.loads(CONF.input_data)
else:
with open(CONF.input_data_file) as f:
data = json.load(f)
LOG.debug('Input data: %s', data)
mgr = manager.Manager(data)
if actions:
for action in actions:
getattr(mgr, action)()
except Exception as exc:
handle_exception(exc)
if __name__ == '__main__':
main()
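# Usage sketch (assuming these functions are wired up as setuptools
# console scripts, e.g. 'provision = fuel_agent.cmd.agent:provision'):
#
#   provision --input_data_file /tmp/provision.json
#   partition --input_data '{"ks_meta": {...}}'
#
# Both CLI options are registered above; a non-empty --input_data
# takes precedence over --input_data_file in main().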

View File

@@ -1,30 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import copy
import six
@six.add_metaclass(abc.ABCMeta)
class BaseDataDriver(object):
"""Data driver API
For example, data validation methods,
methods for getting object schemes, etc.
"""
def __init__(self, data):
self.data = copy.deepcopy(data)
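# A minimal concrete driver sketch (hypothetical, not part of fuel_agent):
#
# class DictDataDriver(BaseDataDriver):
#     """Expose the deep-copied input data as a plain dict."""
#     def get(self, key, default=None):
#         return self.data.get(key, default)
#
# DictDataDriver({'profile': 'ubuntu_1404_x86_64'}).get('profile')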

View File

@@ -1,149 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jsonschema
from fuel_agent import errors
from fuel_agent.openstack.common import log as logging
LOG = logging.getLogger(__name__)
KS_SPACES_SCHEMA = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'title': 'Partition scheme',
'type': 'array',
'minItems': 1,
'uniqueItems': True,
'items': {
'anyOf': [
{
'type': 'object',
'required': ['type', 'id', 'volumes', 'name',
'size', 'extra', 'free_space'],
'properties': {
'type': {'enum': ['disk']},
'id': {'type': 'string'},
'name': {'type': 'string'},
'size': {'type': 'integer'},
'free_space': {'type': 'integer'},
'extra': {
'type': 'array',
'items': {'type': 'string'},
},
'volumes': {
'type': 'array',
'items': {
'anyOf': [
{
'type': 'object',
'required': ['type', 'size',
'lvm_meta_size', 'vg'],
'properties': {
'type': {'enum': ['pv']},
'size': {'type': 'integer'},
'lvm_meta_size': {'type': 'integer'},
'vg': {'type': 'string'}
}
},
{
'type': 'object',
'required': ['type', 'size'],
'properties': {
'type': {'enum': ['raid',
'partition']},
'size': {'type': 'integer'},
'mount': {'type': 'string'},
'file_system': {'type': 'string'},
'name': {'type': 'string'}
}
},
{
'type': 'object',
'required': ['type', 'size'],
'properties': {
'type': {'enum': ['boot']},
'size': {'type': 'integer'}
}
},
{
'type': 'object',
'required': ['type', 'size'],
'properties': {
'type': {'enum': ['lvm_meta_pool']},
'size': {'type': 'integer'}
}
},
]
}
}
}
},
{
'type': 'object',
'required': ['type', 'id', 'volumes'],
'properties': {
'type': {'enum': ['vg']},
'id': {'type': 'string'},
'label': {'type': 'string'},
'min_size': {'type': 'integer'},
'_allocate_size': {'type': 'string'},
'volumes': {
'type': 'array',
'items': {
'type': 'object',
'required': ['type', 'size', 'name'],
'properties': {
'type': {'enum': ['lv']},
'size': {'type': 'integer'},
'name': {'type': 'string'},
'mount': {'type': 'string'},
'file_system': {'type': 'string'},
}
}
}
}
}
]
}
}
def validate(scheme):
"""Validates a given partition scheme using jsonschema.
:param scheme: partition scheme to validate
"""
try:
checker = jsonschema.FormatChecker()
jsonschema.validate(scheme, KS_SPACES_SCHEMA,
format_checker=checker)
except Exception as exc:
LOG.exception(exc)
raise errors.WrongPartitionSchemeError(str(exc))
# scheme is not valid if the number of disks is 0
if not [d for d in scheme if d['type'] == 'disk']:
raise errors.WrongPartitionSchemeError(
'Partition scheme seems empty')
for space in scheme:
for volume in space.get('volumes', []):
if volume.get('mount') == '/' and volume['size'] > 16777216:
raise errors.WrongPartitionSchemeError(
'Root file system must be less than 16T')
# TODO(kozhukalov): need to have additional logical verifications
# maybe sizes and format of string values
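# A minimal scheme sketch (hypothetical values) that passes validate():
#
# validate([{
#     'type': 'disk', 'id': 'sda', 'name': 'sda',
#     'size': 102400, 'free_space': 0, 'extra': [],
#     'volumes': [
#         {'type': 'boot', 'size': 300},
#         {'type': 'partition', 'size': 100000, 'mount': '/',
#          'file_system': 'ext4'},
#     ],
# }])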

View File

@@ -1,592 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import math
import os
import six
import yaml
from six.moves.urllib.parse import urljoin
from six.moves.urllib.parse import urlparse
from six.moves.urllib.parse import urlsplit
from fuel_agent.drivers.base import BaseDataDriver
from fuel_agent.drivers import ks_spaces_validator
from fuel_agent import errors
from fuel_agent import objects
from fuel_agent.openstack.common import log as logging
from fuel_agent.utils import hardware as hu
from fuel_agent.utils import utils
LOG = logging.getLogger(__name__)
def match_device(hu_disk, ks_disk):
"""Check if hu_disk and ks_disk are the same device
Tries to figure out whether hu_disk, as returned by
hu.list_block_devices, and the given ks_spaces disk refer to the
same disk device. This is a simplified version of hu.match_device.
:param hu_disk: A dict representing disk device how
it is given by list_block_devices method.
:param ks_disk: A dict representing disk device according to
ks_spaces format.
:returns: True if hu_disk matches ks_spaces_disk else False.
"""
uspec = hu_disk['uspec']
# True if at least one by-id link matches ks_disk
if ('DEVLINKS' in uspec and len(ks_disk.get('extra', [])) > 0
and any(x.startswith('/dev/disk/by-id') for x in
set(uspec['DEVLINKS']) &
set(['/dev/%s' % l for l in ks_disk['extra']]))):
return True
# True if one of DEVLINKS matches ks_disk id
if (len(ks_disk.get('extra', [])) == 0
and 'DEVLINKS' in uspec and 'id' in ks_disk
and '/dev/%s' % ks_disk['id'] in uspec['DEVLINKS']):
return True
return False
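# Example (hypothetical data): a disk is matched when one of its udev
# DEVLINKS intersects the by-id links listed in ks_disk['extra']:
#
# hu_disk = {'device': '/dev/sda',
#            'uspec': {'DEVLINKS': ['/dev/disk/by-id/wwn-0xdeadbeef',
#                                   '/dev/sda']}}
# ks_disk = {'id': 'sda', 'extra': ['disk/by-id/wwn-0xdeadbeef']}
# match_device(hu_disk, ks_disk)  # -> True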
class Nailgun(BaseDataDriver):
def __init__(self, data):
super(Nailgun, self).__init__(data)
# this var states whether the boot partition
# has already been allocated on the first matching volume
self._boot_partition_done = False
# this var is used as a flag indicating that the /boot file system
# has already been added; we need it to avoid creating an md device
# spanning all disks for the /boot partition.
self._boot_done = False
self.partition_scheme = self.parse_partition_scheme()
self.grub = self.parse_grub()
self.configdrive_scheme = self.parse_configdrive_scheme()
# parsing the image scheme requires the partition scheme to be parsed first
self.image_scheme = self.parse_image_scheme()
def partition_data(self):
return self.data['ks_meta']['pm_data']['ks_spaces']
@property
def ks_disks(self):
return filter(
lambda x: x['type'] == 'disk' and x['size'] > 0,
self.partition_data())
@property
def small_ks_disks(self):
"""Get those disks which are smaller than 2T"""
return [d for d in self.ks_disks if d['size'] <= 2097152]
@property
def ks_vgs(self):
return filter(
lambda x: x['type'] == 'vg',
self.partition_data())
@property
def hu_disks(self):
"""Actual disks which are available on this node
It is a list of dicts which are formatted differently from
ks_spaces disks. To match those two formats, use the
match_device function.
"""
if not getattr(self, '_hu_disks', None):
self._hu_disks = hu.list_block_devices(disks=True)
return self._hu_disks
def _disk_dev(self, ks_disk):
# first we try to find a device that matches ks_disk
# comparing by-id and by-path links
matched = [hu_disk['device'] for hu_disk in self.hu_disks
if match_device(hu_disk, ks_disk)]
# if we can not find a device by its by-id and by-path links
# we try to find a device by its name
fallback = [hu_disk['device'] for hu_disk in self.hu_disks
if '/dev/%s' % ks_disk['name'] == hu_disk['device']]
found = matched or fallback
if not found or len(found) > 1:
raise errors.DiskNotFoundError(
'Disk not found: %s' % ks_disk['name'])
return found[0]
def _getlabel(self, label):
if not label:
return ''
# XFS will refuse to format a partition if the
# disk label is > 12 characters.
return ' -L {0} '.format(label[:12])
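# e.g. _getlabel('very-long-disk-label') -> ' -L very-long-di '
# (truncated to the 12 characters XFS accepts)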
def _get_partition_count(self, name):
count = 0
for disk in self.ks_disks:
count += len([v for v in disk["volumes"]
if v.get('name') == name and v['size'] > 0])
return count
def _num_ceph_journals(self):
return self._get_partition_count('cephjournal')
def _num_ceph_osds(self):
return self._get_partition_count('ceph')
def parse_partition_scheme(self):
LOG.debug('--- Preparing partition scheme ---')
data = self.partition_data()
ks_spaces_validator.validate(data)
partition_scheme = objects.PartitionScheme()
ceph_osds = self._num_ceph_osds()
journals_left = ceph_osds
ceph_journals = self._num_ceph_journals()
LOG.debug('Looping over all disks in provision data')
for disk in self.ks_disks:
# skipping disk if there are no volumes with size >0
# to be allocated on it which are not boot partitions
if all((
v["size"] <= 0
for v in disk["volumes"]
if v["type"] != "boot" and v.get("mount") != "/boot"
)):
continue
LOG.debug('Processing disk %s' % disk['name'])
LOG.debug('Adding gpt table on disk %s' % disk['name'])
parted = partition_scheme.add_parted(
name=self._disk_dev(disk), label='gpt')
# we install bootloader on every disk
LOG.debug('Adding bootloader stage0 on disk %s' % disk['name'])
parted.install_bootloader = True
# legacy boot partition
LOG.debug('Adding bios_grub partition on disk %s: size=24' %
disk['name'])
parted.add_partition(size=24, flags=['bios_grub'])
# uefi partition (for future use)
LOG.debug('Adding UEFI partition on disk %s: size=200' %
disk['name'])
parted.add_partition(size=200)
LOG.debug('Looping over all volumes on disk %s' % disk['name'])
for volume in disk['volumes']:
LOG.debug('Processing volume: '
'name=%s type=%s size=%s mount=%s vg=%s' %
(volume.get('name'), volume.get('type'),
volume.get('size'), volume.get('mount'),
volume.get('vg')))
if volume['size'] <= 0:
LOG.debug('Volume size is zero. Skipping.')
continue
if volume.get('name') == 'cephjournal':
LOG.debug('Volume seems to be a CEPH journal volume. '
'Special procedure is supposed to be applied.')
# We need to allocate a journal partition for each ceph OSD
# Determine the number of journal partitions we need on
# each device
ratio = int(math.ceil(float(ceph_osds) / ceph_journals))
# No more than 10GB will be allocated to a single journal
# partition
size = volume["size"] / ratio
if size > 10240:
size = 10240
# This will attempt to evenly spread partitions across
# multiple devices e.g. 5 osds with 2 journal devices will
# create 3 partitions on the first device and 2 on the
# second
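# Worked example (assumed figures): 5 OSDs sharing 2 journal
# devices give ratio = ceil(5.0 / 2) = 3, so the first journal
# device gets 3 partitions and the second the remaining 2, each
# sized volume['size'] / 3 and capped at 10240 MiB.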
if ratio < journals_left:
end = ratio
else:
end = journals_left
for i in range(0, end):
journals_left -= 1
if volume['type'] == 'partition':
LOG.debug('Adding CEPH journal partition on '
'disk %s: size=%s' %
(disk['name'], size))
prt = parted.add_partition(size=size)
LOG.debug('Partition name: %s' % prt.name)
if 'partition_guid' in volume:
LOG.debug('Setting partition GUID: %s' %
volume['partition_guid'])
prt.set_guid(volume['partition_guid'])
continue
if volume['type'] in ('partition', 'pv', 'raid'):
if volume.get('mount') != '/boot':
LOG.debug('Adding partition on disk %s: size=%s' %
(disk['name'], volume['size']))
prt = parted.add_partition(size=volume['size'])
LOG.debug('Partition name: %s' % prt.name)
elif volume.get('mount') == '/boot' \
and not self._boot_partition_done \
and (disk in self.small_ks_disks or
not self.small_ks_disks):
# NOTE(kozhukalov): On some hardware GRUB is not able
# to see disks larger than 2T due to firmware bugs,
# so we'd better avoid placing /boot on such
# huge disks if it is possible.
LOG.debug('Adding /boot partition on disk %s: '
'size=%s', disk['name'], volume['size'])
prt = parted.add_partition(size=volume['size'])
LOG.debug('Partition name: %s', prt.name)
self._boot_partition_done = True
else:
LOG.debug('No need to create partition on disk %s. '
'Skipping.', disk['name'])
continue
if volume['type'] == 'partition':
if 'partition_guid' in volume:
LOG.debug('Setting partition GUID: %s' %
volume['partition_guid'])
prt.set_guid(volume['partition_guid'])
if 'mount' in volume and volume['mount'] != 'none':
LOG.debug('Adding file system on partition: '
'mount=%s type=%s' %
(volume['mount'],
volume.get('file_system', 'xfs')))
partition_scheme.add_fs(
device=prt.name, mount=volume['mount'],
fs_type=volume.get('file_system', 'xfs'),
fs_label=self._getlabel(volume.get('disk_label')))
if volume['mount'] == '/boot' and not self._boot_done:
self._boot_done = True
if volume['type'] == 'pv':
LOG.debug('Creating pv on partition: pv=%s vg=%s' %
(prt.name, volume['vg']))
lvm_meta_size = volume.get('lvm_meta_size', 64)
# The reason for that is to make sure that
# there will be enough space for creating logical volumes.
# The default LVM extent size is 4M. The Nailgun volume
# manager does not account for it: if the physical volume size
# is 4M * N + 3M and the LVM metadata size is 4M * L, then only
# 4M * (N-L) + 3M of space remains for allocating extents,
# i.e. only 4M * (N-L) is actually available for logical
# volumes, while the Nailgun volume manager might require a
# 4M * (N-L) + 3M logical volume. Besides, parted aligns
# partitions according to its own algorithm, so the actual
# partition might be slightly smaller than an integer number
# of mebibytes.
if lvm_meta_size < 10:
raise errors.WrongPartitionSchemeError(
'Error while creating physical volume: '
'lvm metadata size is too small')
metadatasize = int(math.floor((lvm_meta_size - 8) / 2))
metadatacopies = 2
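# Worked example (with the default above): lvm_meta_size = 64 gives
# metadatasize = floor((64 - 8) / 2) = 28 MiB per metadata copy,
# which keeps both copies within the 64 MiB reserved for metadata.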
partition_scheme.vg_attach_by_name(
pvname=prt.name, vgname=volume['vg'],
metadatasize=metadatasize,
metadatacopies=metadatacopies)
if volume['type'] == 'raid':
if 'mount' in volume and \
volume['mount'] not in ('none', '/boot'):
LOG.debug('Attaching partition to RAID '
'by its mount point %s' % volume['mount'])
partition_scheme.md_attach_by_mount(
device=prt.name, mount=volume['mount'],
fs_type=volume.get('file_system', 'xfs'),
fs_label=self._getlabel(volume.get('disk_label')))
if 'mount' in volume and volume['mount'] == '/boot' and \
not self._boot_done:
LOG.debug('Adding file system on partition: '
'mount=%s type=%s' %
(volume['mount'],
volume.get('file_system', 'ext2')))
partition_scheme.add_fs(
device=prt.name, mount=volume['mount'],
fs_type=volume.get('file_system', 'ext2'),
fs_label=self._getlabel(volume.get('disk_label')))
self._boot_done = True
# this partition will be used to put there configdrive image
if partition_scheme.configdrive_device() is None:
LOG.debug('Adding configdrive partition on disk %s: size=20' %
disk['name'])
parted.add_partition(size=20, configdrive=True)
# checking if /boot is created
if not self._boot_partition_done or not self._boot_done:
raise errors.WrongPartitionSchemeError(
'/boot partition has not been created for some reasons')
LOG.debug('Looping over all volume groups in provision data')
for vg in self.ks_vgs:
LOG.debug('Processing vg %s' % vg['id'])
LOG.debug('Looping over all logical volumes in vg %s' % vg['id'])
for volume in vg['volumes']:
LOG.debug('Processing lv %s' % volume['name'])
if volume['size'] <= 0:
LOG.debug('Lv size is zero. Skipping.')
continue
if volume['type'] == 'lv':
LOG.debug('Adding lv to vg %s: name=%s, size=%s' %
(vg['id'], volume['name'], volume['size']))
lv = partition_scheme.add_lv(name=volume['name'],
vgname=vg['id'],
size=volume['size'])
if 'mount' in volume and volume['mount'] != 'none':
LOG.debug('Adding file system on lv: '
'mount=%s type=%s' %
(volume['mount'],
volume.get('file_system', 'xfs')))
partition_scheme.add_fs(
device=lv.device_name, mount=volume['mount'],
fs_type=volume.get('file_system', 'xfs'),
fs_label=self._getlabel(volume.get('disk_label')))
return partition_scheme
def parse_configdrive_scheme(self):
LOG.debug('--- Preparing configdrive scheme ---')
data = self.data
configdrive_scheme = objects.ConfigDriveScheme()
LOG.debug('Adding common parameters')
admin_interface = filter(
lambda x: (x['mac_address'] ==
data['kernel_options']['netcfg/choose_interface']),
[dict(name=name, **spec) for name, spec
in six.iteritems(data['interfaces'])])[0]
ssh_auth_keys = data['ks_meta']['authorized_keys']
if data['ks_meta']['auth_key']:
ssh_auth_keys.append(data['ks_meta']['auth_key'])
configdrive_scheme.set_common(
ssh_auth_keys=ssh_auth_keys,
hostname=data['hostname'],
fqdn=data['hostname'],
name_servers=data['name_servers'],
search_domain=data['name_servers_search'],
master_ip=data['ks_meta']['master_ip'],
master_url='http://%s:8000/api' % data['ks_meta']['master_ip'],
udevrules=data['kernel_options']['udevrules'],
admin_mac=data['kernel_options']['netcfg/choose_interface'],
admin_ip=admin_interface['ip_address'],
admin_mask=admin_interface['netmask'],
admin_iface_name=admin_interface['name'],
timezone=data['ks_meta'].get('timezone', 'America/Los_Angeles'),
gw=data['ks_meta']['gw'],
ks_repos=data['ks_meta']['repo_setup']['repos']
)
LOG.debug('Adding puppet parameters')
configdrive_scheme.set_puppet(
master=data['ks_meta']['puppet_master'],
enable=data['ks_meta']['puppet_enable']
)
LOG.debug('Adding mcollective parameters')
configdrive_scheme.set_mcollective(
pskey=data['ks_meta']['mco_pskey'],
vhost=data['ks_meta']['mco_vhost'],
host=data['ks_meta']['mco_host'],
user=data['ks_meta']['mco_user'],
password=data['ks_meta']['mco_password'],
connector=data['ks_meta']['mco_connector'],
enable=data['ks_meta']['mco_enable']
)
LOG.debug('Setting configdrive profile %s' % data['profile'])
configdrive_scheme.set_profile(profile=data['profile'])
return configdrive_scheme
def parse_grub(self):
LOG.debug('--- Parse grub settings ---')
grub = objects.Grub()
LOG.debug('Appending kernel parameters: %s',
self.data['ks_meta']['pm_data']['kernel_params'])
grub.append_kernel_params(
self.data['ks_meta']['pm_data']['kernel_params'])
if 'centos' in self.data['profile'].lower() and \
not self.data['ks_meta'].get('kernel_lt'):
LOG.debug('Preferred kernel version is 2.6')
grub.kernel_regexp = r'^vmlinuz-2\.6.*'
grub.initrd_regexp = r'^initramfs-2\.6.*'
return grub
def parse_image_scheme(self):
LOG.debug('--- Preparing image scheme ---')
data = self.data
image_scheme = objects.ImageScheme()
# FIXME(agordeev): this piece of code for fetching additional image
# meta data should be factored out of this particular nailgun driver
# into a more common and abstract data getter which should be able to
# deal with various data sources (local file, http(s), etc.) and
# different data formats ('blob', json, yaml, etc.).
# So, the manager will combine and manipulate all those multiple data
# getter instances.
# Also, the initial data source should be set to sort out the
# chicken/egg problem. A command line option may be useful for such a case.
# BUG: https://bugs.launchpad.net/fuel/+bug/1430418
root_uri = data['ks_meta']['image_data']['/']['uri']
filename = os.path.basename(urlparse(root_uri).path).split('.')[0] + \
'.yaml'
metadata_url = urljoin(root_uri, filename)
try:
image_meta = yaml.load(
utils.init_http_request(metadata_url).text)
except Exception as e:
LOG.exception(e)
LOG.debug('Failed to fetch/decode image meta data')
image_meta = {}
# We assume that for every file system the user may provide a
# separate file system image. For example, if the partitioning scheme
# has /, /boot and /var/lib file systems, then we will try to get
# images for all those mount points. Image data is defined
# at provision.json -> ['ks_meta']['image_data']
LOG.debug('Looping over all images in provision data')
for mount_point, image_data in six.iteritems(
data['ks_meta']['image_data']):
LOG.debug('Adding image for fs %s: uri=%s format=%s container=%s' %
(mount_point, image_data['uri'],
image_data['format'], image_data['container']))
iname = os.path.basename(urlparse(image_data['uri']).path)
imeta = next(itertools.chain(
(img for img in image_meta.get('images', [])
if img['container_name'] == iname), [{}]))
image_scheme.add_image(
uri=image_data['uri'],
target_device=self.partition_scheme.fs_by_mount(
mount_point).device,
format=image_data['format'],
container=image_data['container'],
size=imeta.get('raw_size'),
md5=imeta.get('raw_md5'),
)
return image_scheme
class NailgunBuildImage(BaseDataDriver):
# TODO(kozhukalov):
# This list of packages is used by default only if another
# list isn't given in build image data. In the future
# we need to handle package list in nailgun. Even more,
# in the future, we'll be building not only ubuntu images
# and we'll likely move this list into some kind of config.
DEFAULT_TRUSTY_PACKAGES = [
"acl",
"anacron",
"bash-completion",
"bridge-utils",
"bsdmainutils",
"build-essential",
"cloud-init",
"curl",
"daemonize",
"debconf-utils",
"gdisk",
"grub-pc",
"linux-firmware",
"linux-firmware-nonfree",
"linux-headers-generic-lts-trusty",
"linux-image-generic-lts-trusty",
"lvm2",
"mcollective",
"mdadm",
"nailgun-agent",
"nailgun-mcagents",
"nailgun-net-check",
"ntp",
"openssh-client",
"openssh-server",
"puppet",
"python-amqp",
"ruby-augeas",
"ruby-ipaddress",
"ruby-json",
"ruby-netaddr",
"ruby-openstack",
"ruby-shadow",
"ruby-stomp",
"telnet",
"ubuntu-minimal",
"ubuntu-standard",
"uuid-runtime",
"vim",
"virt-what",
"vlan",
]
def __init__(self, data):
super(NailgunBuildImage, self).__init__(data)
self.parse_schemes()
self.parse_operating_system()
def parse_operating_system(self):
if self.data.get('codename').lower() != 'trusty':
raise errors.WrongInputDataError(
'Currently, only Ubuntu Trusty is supported, given '
'codename is {0}'.format(self.data.get('codename')))
packages = self.data.get('packages', self.DEFAULT_TRUSTY_PACKAGES)
repos = []
for repo in self.data['repos']:
repos.append(objects.DEBRepo(
name=repo['name'],
uri=repo['uri'],
suite=repo['suite'],
section=repo['section'],
priority=repo['priority']))
self.operating_system = objects.Ubuntu(repos=repos, packages=packages)
def parse_schemes(self):
self.image_scheme = objects.ImageScheme()
self.partition_scheme = objects.PartitionScheme()
for mount, image in six.iteritems(self.data['image_data']):
filename = os.path.basename(urlsplit(image['uri']).path)
# Loop does not allocate any loop device
# during initialization.
device = objects.Loop()
self.image_scheme.add_image(
uri='file://' + os.path.join(self.data['output'], filename),
format=image['format'],
container=image['container'],
target_device=device)
self.partition_scheme.add_fs(
device=device,
mount=mount,
fs_type=image['format'])
if mount == '/':
metadata_filename = filename.split('.', 1)[0] + '.yaml'
self.metadata_uri = 'file://' + os.path.join(
self.data['output'], metadata_filename)

View File

@@ -1,164 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class BaseError(Exception):
def __init__(self, message, *args, **kwargs):
self.message = message
super(BaseError, self).__init__(message, *args, **kwargs)
class WrongInputDataError(BaseError):
pass
class WrongPartitionSchemeError(BaseError):
pass
class WrongPartitionLabelError(BaseError):
pass
class PartitionNotFoundError(BaseError):
pass
class DiskNotFoundError(BaseError):
pass
class NotEnoughSpaceError(BaseError):
pass
class PVAlreadyExistsError(BaseError):
pass
class PVNotFoundError(BaseError):
pass
class PVBelongsToVGError(BaseError):
pass
class VGAlreadyExistsError(BaseError):
pass
class VGNotFoundError(BaseError):
pass
class LVAlreadyExistsError(BaseError):
pass
class LVNotFoundError(BaseError):
pass
class MDAlreadyExistsError(BaseError):
pass
class MDNotFoundError(BaseError):
pass
class MDDeviceDuplicationError(BaseError):
pass
class MDWrongSpecError(BaseError):
pass
class MDRemovingError(BaseError):
pass
class WrongConfigDriveDataError(BaseError):
pass
class WrongImageDataError(BaseError):
pass
class TemplateWriteError(BaseError):
pass
class ProcessExecutionError(BaseError):
def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
description=None):
self.exit_code = exit_code
self.stderr = stderr
self.stdout = stdout
self.cmd = cmd
self.description = description
if description is None:
description = ("Unexpected error while running command.")
if exit_code is None:
exit_code = '-'
message = ('%(description)s\n'
'Command: %(cmd)s\n'
'Exit code: %(exit_code)s\n'
'Stdout: %(stdout)r\n'
'Stderr: %(stderr)r') % {'description': description,
'cmd': cmd,
'exit_code': exit_code,
'stdout': stdout,
'stderr': stderr}
super(ProcessExecutionError, self).__init__(message)
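# Usage sketch (hypothetical values):
#
# raise ProcessExecutionError(
#     stdout='', stderr='device is busy', exit_code=1,
#     cmd='parted -s /dev/sda mklabel gpt')
#
# The resulting message interpolates the description, command,
# exit code and the captured output streams.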
class GrubUtilsError(BaseError):
pass
class FsUtilsError(BaseError):
pass
class HttpUrlConnectionError(BaseError):
pass
class HttpUrlInvalidContentLength(BaseError):
pass
class ImageChecksumMismatchError(BaseError):
pass
class NoFreeLoopDevices(BaseError):
pass
class WrongRepositoryError(BaseError):
pass
class WrongDeviceError(BaseError):
pass
class UnexpectedProcessError(BaseError):
pass

View File

@@ -1,21 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def setup_hook(config):
import pbr
import pbr.packaging
# this monkey patch is to avoid appending git version to version
pbr.packaging._get_version_from_git = lambda pre_version: pre_version
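# e.g. with pre_version='7.0.0' pbr would normally derive a dev
# version such as '7.0.0.dev42' from git history (assumed example);
# after this patch the version is exactly '7.0.0'.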

View File

@@ -1,725 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import signal
import tempfile
import yaml
from oslo.config import cfg
from fuel_agent import errors
from fuel_agent.openstack.common import log as logging
from fuel_agent.utils import artifact as au
from fuel_agent.utils import build as bu
from fuel_agent.utils import fs as fu
from fuel_agent.utils import grub as gu
from fuel_agent.utils import lvm as lu
from fuel_agent.utils import md as mu
from fuel_agent.utils import partition as pu
from fuel_agent.utils import utils
opts = [
cfg.StrOpt(
'nc_template_path',
default='/usr/share/fuel-agent/cloud-init-templates',
help='Path to directory with cloud init templates',
),
cfg.StrOpt(
'tmp_path',
default='/tmp',
help='Temporary directory for file manipulations',
),
cfg.StrOpt(
'config_drive_path',
default='/tmp/config-drive.img',
help='Path where to store generated config drive image',
),
cfg.StrOpt(
'udev_rules_dir',
default='/etc/udev/rules.d',
help='Path where to store actual rules for udev daemon',
),
cfg.StrOpt(
'udev_rules_lib_dir',
default='/lib/udev/rules.d',
help='Path where to store default rules for udev daemon',
),
cfg.StrOpt(
'udev_rename_substr',
default='.renamedrule',
help='Substring to which the .rules file extension is renamed',
),
cfg.StrOpt(
'udev_empty_rule',
default='empty_rule',
help='Correct empty rule for udev daemon',
),
cfg.StrOpt(
'image_build_dir',
default='/tmp',
help='Directory where the image is supposed to be built',
),
cfg.StrOpt(
'image_build_suffix',
default='.fuel-agent-image',
help='Suffix which is used while creating temporary files',
),
]
cli_opts = [
cfg.StrOpt(
'data_driver',
default='nailgun',
help='Data driver'
),
]
CONF = cfg.CONF
CONF.register_opts(opts)
CONF.register_cli_opts(cli_opts)
LOG = logging.getLogger(__name__)
class Manager(object):
def __init__(self, data):
self.driver = utils.get_driver(CONF.data_driver)(data)
def do_partitioning(self):
LOG.debug('--- Partitioning disks (do_partitioning) ---')
# If disks are not wiped out at all, it is likely they contain lvm
# and md metadata which will prevent re-creating a partition table,
# failing with a 'device is busy' error.
mu.mdclean_all()
lu.lvremove_all()
lu.vgremove_all()
lu.pvremove_all()
# udev rules blacklisting is done here by adding symlinks pointing
# at an empty rule file in /etc/udev/rules.d, shadowing the already
# existing rules in /lib/udev/rules.d.
# 'parted' generates too many udev events in a short period of time,
# so we should increase the processing speed for those events,
# otherwise partitioning is doomed.
empty_rule_path = os.path.join(CONF.udev_rules_dir,
os.path.basename(CONF.udev_empty_rule))
with open(empty_rule_path, 'w') as f:
f.write('#\n')
LOG.debug("Enabling udev's rules blacklisting")
for rule in os.listdir(CONF.udev_rules_lib_dir):
dst = os.path.join(CONF.udev_rules_dir, rule)
if os.path.isdir(dst):
continue
if dst.endswith('.rules'):
# For successful blacklisting, an already existing file in /etc
# whose name overlaps with one in /lib must be renamed before
# the symlink is created.
try:
if os.path.exists(dst):
os.rename(dst, dst[:-len('.rules')] +
CONF.udev_rename_substr)
except OSError:
LOG.debug("Skipping udev rule %s blacklising" % dst)
else:
os.symlink(empty_rule_path, dst)
utils.execute('udevadm', 'control', '--reload-rules',
check_exit_code=[0])
for parted in self.driver.partition_scheme.parteds:
for prt in parted.partitions:
# We wipe out the beginning of every new partition
# right after creating it. This allows us to avoid a possible
# interactive dialog if some data (metadata or a file system) is
# present on the new partition, and it also keeps udev from
# hanging while trying to parse this data.
utils.execute('dd', 'if=/dev/zero', 'bs=1M',
'seek=%s' % max(prt.begin - 3, 0), 'count=5',
'of=%s' % prt.device, check_exit_code=[0])
# Also wipe out the ending of every new partition.
# Different versions of md store metadata in different places.
# Exit code 1 is accepted as well, to handle the situation when
# 'no space left on device' occurs at the end of the disk.
utils.execute('dd', 'if=/dev/zero', 'bs=1M',
'seek=%s' % max(prt.end - 3, 0), 'count=5',
'of=%s' % prt.device, check_exit_code=[0, 1])
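# Illustration (assumed numbers): for a partition with begin=100
# and end=2000 (MiB), the two dd calls above zero 5 MiB starting
# at offsets 97 MiB and 1997 MiB, straddling both partition
# boundaries where fs/lvm/md metadata typically lives.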
for parted in self.driver.partition_scheme.parteds:
pu.make_label(parted.name, parted.label)
for prt in parted.partitions:
pu.make_partition(prt.device, prt.begin, prt.end, prt.type)
for flag in prt.flags:
pu.set_partition_flag(prt.device, prt.count, flag)
if prt.guid:
pu.set_gpt_type(prt.device, prt.count, prt.guid)
# If any partition to be created doesn't exist, it's an error;
# probably it's the 'device or resource busy' issue again.
if not os.path.exists(prt.name):
raise errors.PartitionNotFoundError(
'Partition %s not found after creation' % prt.name)
# disable udev's rules blacklisting
LOG.debug("Disabling udev's rules blacklisting")
for rule in os.listdir(CONF.udev_rules_dir):
src = os.path.join(CONF.udev_rules_dir, rule)
if os.path.isdir(src):
continue
if src.endswith('.rules'):
if os.path.islink(src):
try:
os.remove(src)
except OSError:
LOG.debug(
"Skipping udev rule %s de-blacklisting" % src)
elif src.endswith(CONF.udev_rename_substr):
try:
if os.path.exists(src):
os.rename(src, src[:-len(CONF.udev_rename_substr)] +
'.rules')
except OSError:
LOG.debug("Skipping udev rule %s de-blacklisting" % src)
utils.execute('udevadm', 'control', '--reload-rules',
check_exit_code=[0])
# NOTE(agordeev): re-create all the links which were skipped by udev
# while blacklisted
        # NOTE(agordeev): do subsystem match, otherwise it will get stuck
utils.execute('udevadm', 'trigger', '--subsystem-match=block',
check_exit_code=[0])
utils.execute('udevadm', 'settle', '--quiet', check_exit_code=[0])
        # If one creates partitions with the same boundaries as last time,
        # there might be md and lvm metadata on those partitions. To prevent
        # md and lvm device creation from failing, we need to make sure
        # the stale metadata is wiped out.
mu.mdclean_all()
lu.lvremove_all()
lu.vgremove_all()
lu.pvremove_all()
        # creating md devices (software RAID)
for md in self.driver.partition_scheme.mds:
mu.mdcreate(md.name, md.level, *md.devices)
# creating physical volumes
for pv in self.driver.partition_scheme.pvs:
lu.pvcreate(pv.name, metadatasize=pv.metadatasize,
metadatacopies=pv.metadatacopies)
# creating volume groups
for vg in self.driver.partition_scheme.vgs:
lu.vgcreate(vg.name, *vg.pvnames)
# creating logical volumes
for lv in self.driver.partition_scheme.lvs:
lu.lvcreate(lv.vgname, lv.name, lv.size)
# making file systems
for fs in self.driver.partition_scheme.fss:
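            # If some image is going to be put on this device, skip mkfs:
            # the image itself carries a file system which will be laid
            # down in do_copyimage.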
found_images = [img for img in self.driver.image_scheme.images
if img.target_device == fs.device]
if not found_images:
fu.make_fs(fs.type, fs.options, fs.label, fs.device)
def do_configdrive(self):
LOG.debug('--- Creating configdrive (do_configdrive) ---')
cc_output_path = os.path.join(CONF.tmp_path, 'cloud_config.txt')
bh_output_path = os.path.join(CONF.tmp_path, 'boothook.txt')
        # NOTE: the file must be named exactly 'user-data';
        # the same applies to 'meta-data' as well
ud_output_path = os.path.join(CONF.tmp_path, 'user-data')
md_output_path = os.path.join(CONF.tmp_path, 'meta-data')
tmpl_dir = CONF.nc_template_path
utils.render_and_save(
tmpl_dir,
self.driver.configdrive_scheme.template_names('cloud_config'),
self.driver.configdrive_scheme.template_data(),
cc_output_path
)
utils.render_and_save(
tmpl_dir,
self.driver.configdrive_scheme.template_names('boothook'),
self.driver.configdrive_scheme.template_data(),
bh_output_path
)
utils.render_and_save(
tmpl_dir,
self.driver.configdrive_scheme.template_names('meta-data'),
self.driver.configdrive_scheme.template_data(),
md_output_path
)
utils.execute('write-mime-multipart', '--output=%s' % ud_output_path,
'%s:text/cloud-boothook' % bh_output_path,
'%s:text/cloud-config' % cc_output_path)
utils.execute('genisoimage', '-output', CONF.config_drive_path,
'-volid', 'cidata', '-joliet', '-rock', ud_output_path,
md_output_path)
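        # The resulting ISO (volume label 'cidata', containing 'user-data'
        # and 'meta-data') is what cloud-init's NoCloud datasource looks
        # for when it scans block devices at boot.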
configdrive_device = self.driver.partition_scheme.configdrive_device()
if configdrive_device is None:
raise errors.WrongPartitionSchemeError(
'Error while trying to get configdrive device: '
'configdrive device not found')
size = os.path.getsize(CONF.config_drive_path)
md5 = utils.calculate_md5(CONF.config_drive_path, size)
self.driver.image_scheme.add_image(
uri='file://%s' % CONF.config_drive_path,
target_device=configdrive_device,
format='iso9660',
container='raw',
size=size,
md5=md5,
)
def do_copyimage(self):
LOG.debug('--- Copying images (do_copyimage) ---')
for image in self.driver.image_scheme.images:
LOG.debug('Processing image: %s' % image.uri)
processing = au.Chain()
LOG.debug('Appending uri processor: %s' % image.uri)
processing.append(image.uri)
if image.uri.startswith('http://'):
LOG.debug('Appending HTTP processor')
processing.append(au.HttpUrl)
elif image.uri.startswith('file://'):
LOG.debug('Appending FILE processor')
processing.append(au.LocalFile)
if image.container == 'gzip':
LOG.debug('Appending GZIP processor')
processing.append(au.GunzipStream)
LOG.debug('Appending TARGET processor: %s' % image.target_device)
processing.append(image.target_device)
LOG.debug('Launching image processing chain')
processing.process()
if image.size and image.md5:
LOG.debug('Trying to compare image checksum')
actual_md5 = utils.calculate_md5(image.target_device,
image.size)
if actual_md5 == image.md5:
LOG.debug('Checksum matches successfully: md5=%s' %
actual_md5)
else:
raise errors.ImageChecksumMismatchError(
                        'Actual checksum %s does not match expected %s for '
                        'file %s' % (actual_md5, image.md5,
                                     image.target_device))
else:
                LOG.debug('Skipping image checksum comparison: '
                          'either size or md5 is missing')
LOG.debug('Extending image file systems')
if image.format in ('ext2', 'ext3', 'ext4', 'xfs'):
LOG.debug('Extending %s %s' %
(image.format, image.target_device))
fu.extend_fs(image.format, image.target_device)
# TODO(kozhukalov): write tests
def mount_target(self, chroot, treat_mtab=True, pseudo=True):
"""Mount a set of file systems into a chroot
:param chroot: Directory where to mount file systems
:param treat_mtab: If mtab needs to be actualized (Default: True)
:param pseudo: If pseudo file systems
need to be mounted (Default: True)
"""
LOG.debug('Mounting target file systems: %s', chroot)
# Here we are going to mount all file systems in partition scheme.
for fs in self.driver.partition_scheme.fs_sorted_by_depth():
if fs.mount == 'swap':
continue
mount = chroot + fs.mount
utils.makedirs_if_not_exists(mount)
fu.mount_fs(fs.type, str(fs.device), mount)
if pseudo:
for path in ('/sys', '/dev', '/proc'):
utils.makedirs_if_not_exists(chroot + path)
fu.mount_bind(chroot, path)
if treat_mtab:
mtab = utils.execute(
'chroot', chroot, 'grep', '-v', 'rootfs', '/proc/mounts')[0]
mtab_path = chroot + '/etc/mtab'
if os.path.islink(mtab_path):
os.remove(mtab_path)
with open(mtab_path, 'wb') as f:
f.write(mtab)
# TODO(kozhukalov): write tests for this method
def umount_target(self, chroot, pseudo=True, try_lazy_umount=True):
LOG.debug('Umounting target file systems: %s', chroot)
if pseudo:
for path in ('/proc', '/dev', '/sys'):
fu.umount_fs(chroot + path, try_lazy_umount=try_lazy_umount)
for fs in self.driver.partition_scheme.fs_sorted_by_depth(
reverse=True):
if fs.mount == 'swap':
continue
fu.umount_fs(chroot + fs.mount, try_lazy_umount=try_lazy_umount)
# TODO(kozhukalov): write tests for this method
# https://bugs.launchpad.net/fuel/+bug/1449609
def do_bootloader(self):
LOG.debug('--- Installing bootloader (do_bootloader) ---')
chroot = '/tmp/target'
self.mount_target(chroot)
mount2uuid = {}
for fs in self.driver.partition_scheme.fss:
mount2uuid[fs.mount] = utils.execute(
'blkid', '-o', 'value', '-s', 'UUID', fs.device,
check_exit_code=[0])[0].strip()
grub = self.driver.grub
grub.version = gu.guess_grub_version(chroot=chroot)
boot_device = self.driver.partition_scheme.boot_device(grub.version)
install_devices = [d.name for d in self.driver.partition_scheme.parteds
if d.install_bootloader]
grub.append_kernel_params('root=UUID=%s ' % mount2uuid['/'])
kernel = grub.kernel_name or \
gu.guess_kernel(chroot=chroot, regexp=grub.kernel_regexp)
initrd = grub.initrd_name or \
gu.guess_initrd(chroot=chroot, regexp=grub.initrd_regexp)
if grub.version == 1:
gu.grub1_cfg(kernel=kernel, initrd=initrd,
kernel_params=grub.kernel_params, chroot=chroot)
gu.grub1_install(install_devices, boot_device, chroot=chroot)
else:
# TODO(kozhukalov): implement which kernel to use by default
# Currently only grub1_cfg accepts kernel and initrd parameters.
gu.grub2_cfg(kernel_params=grub.kernel_params, chroot=chroot)
gu.grub2_install(install_devices, chroot=chroot)
        # FIXME(agordeev): There's no convenient way to perform NIC remapping
        # in Ubuntu, so injecting files prior to the first boot should work
with open(chroot + '/etc/udev/rules.d/70-persistent-net.rules',
'w') as f:
f.write('# Generated by fuel-agent during provisioning: BEGIN\n')
# pattern is aa:bb:cc:dd:ee:ff_eth0,aa:bb:cc:dd:ee:ff_eth1
for mapping in self.driver.configdrive_scheme.\
common.udevrules.split(','):
mac_addr, nic_name = mapping.split('_')
f.write('SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", '
'ATTR{address}=="%s", ATTR{type}=="1", KERNEL=="eth*",'
' NAME="%s"\n' % (mac_addr, nic_name))
f.write('# Generated by fuel-agent during provisioning: END\n')
        # FIXME(agordeev): Disable net-generator that would add new entries
        # to 70-persistent-net.rules
with open(chroot +
'/etc/udev/rules.d/75-persistent-net-generator.rules',
'w') as f:
f.write('# Generated by fuel-agent during provisioning:\n'
'# DO NOT DELETE. It is needed to disable net-generator\n')
        # FIXME(kozhukalov): Prevent nailgun-agent from doing anything.
        # This ugly hack is to be used together with a command that removes
        # this lock file, run no earlier than /etc/rc.local.
        # The reason for this hack is to prevent nailgun-agent from
        # changing the mcollective config at the same time as cloud-init
        # does the same. Otherwise, we can end up with a corrupted mcollective
        # config. For details see https://bugs.launchpad.net/fuel/+bug/1449186
LOG.debug('Preventing nailgun-agent from doing '
'anything until it is unlocked')
utils.makedirs_if_not_exists(os.path.join(chroot, 'etc/nailgun-agent'))
with open(os.path.join(chroot, 'etc/nailgun-agent/nodiscover'), 'w'):
pass
with open(chroot + '/etc/fstab', 'wb') as f:
for fs in self.driver.partition_scheme.fss:
                # TODO(kozhukalov): Think of improving the logic so as to
                # insert a meaningful fsck order value, which is the last
                # zero on the fstab line. Currently we set it to 0, which
                # means the corresponding file system will never be checked.
                # We assume puppet or another configuration tool will take
                # care of it.
f.write('UUID=%s %s %s defaults 0 0\n' %
(mount2uuid[fs.mount], fs.mount, fs.type))
self.umount_target(chroot)
def do_reboot(self):
LOG.debug('--- Rebooting node (do_reboot) ---')
utils.execute('reboot')
def do_provisioning(self):
LOG.debug('--- Provisioning (do_provisioning) ---')
self.do_partitioning()
self.do_configdrive()
self.do_copyimage()
self.do_bootloader()
LOG.debug('--- Provisioning END (do_provisioning) ---')
# TODO(kozhukalov): Split this huge method
# into a set of smaller ones
# https://bugs.launchpad.net/fuel/+bug/1444090
def do_build_image(self):
"""Building OS images
Includes the following steps
1) create temporary sparse files for all images (truncate)
2) attach temporary files to loop devices (losetup)
3) create file systems on these loop devices
4) create temporary chroot directory
5) mount loop devices into chroot directory
6) install operating system (debootstrap and apt-get)
7) configure OS (clean sources.list and preferences, etc.)
8) umount loop devices
9) resize file systems on loop devices
10) shrink temporary sparse files (images)
11) containerize (gzip) temporary sparse files
12) move temporary gzipped files to their final location
"""
LOG.info('--- Building image (do_build_image) ---')
# TODO(kozhukalov): Implement metadata
# as a pluggable data driver to avoid any fixed format.
metadata = {}
# TODO(kozhukalov): implement this using image metadata
# we need to compare list of packages and repos
LOG.info('*** Checking if image exists ***')
if all([os.path.exists(img.uri.split('file://', 1)[1])
for img in self.driver.image_scheme.images]):
LOG.debug('All necessary images are available. '
'Nothing needs to be done.')
return
LOG.debug('At least one of the necessary images is unavailable. '
'Starting build process.')
try:
LOG.debug('Creating temporary chroot directory')
chroot = tempfile.mkdtemp(
dir=CONF.image_build_dir, suffix=CONF.image_build_suffix)
LOG.debug('Temporary chroot: %s', chroot)
proc_path = os.path.join(chroot, 'proc')
LOG.info('*** Preparing image space ***')
for image in self.driver.image_scheme.images:
                LOG.debug('Creating temporary sparse file for the '
                          'image: %s', image.uri)
img_tmp_file = bu.create_sparse_tmp_file(
dir=CONF.image_build_dir, suffix=CONF.image_build_suffix)
LOG.debug('Temporary file: %s', img_tmp_file)
                # we need to remember those files to be able
                # to shrink and move them at the end
image.img_tmp_file = img_tmp_file
LOG.debug('Looking for a free loop device')
image.target_device.name = bu.get_free_loop_device()
LOG.debug('Attaching temporary image file to free loop device')
bu.attach_file_to_loop(img_tmp_file, str(image.target_device))
# find fs with the same loop device object
# as image.target_device
fs = self.driver.partition_scheme.fs_by_device(
image.target_device)
LOG.debug('Creating file system on the image')
fu.make_fs(
fs_type=fs.type,
fs_options=fs.options,
fs_label=fs.label,
dev=str(fs.device))
if fs.type == 'ext4':
LOG.debug('Trying to disable journaling for ext4 '
'in order to speed up the build')
utils.execute('tune2fs', '-O', '^has_journal',
str(fs.device))
# mounting all images into chroot tree
self.mount_target(chroot, treat_mtab=False, pseudo=False)
LOG.info('*** Shipping image content ***')
LOG.debug('Installing operating system into image')
# FIXME(kozhukalov): !!! we need this part to be OS agnostic
# DEBOOTSTRAP
# we use first repo as the main mirror
uri = self.driver.operating_system.repos[0].uri
suite = self.driver.operating_system.repos[0].suite
            LOG.debug('Preventing services from being started')
bu.suppress_services_start(chroot)
LOG.debug('Installing base operating system using debootstrap')
bu.run_debootstrap(uri=uri, suite=suite, chroot=chroot)
# APT-GET
LOG.debug('Configuring apt inside chroot')
LOG.debug('Setting environment variables')
bu.set_apt_get_env()
LOG.debug('Allowing unauthenticated repos')
bu.pre_apt_get(chroot)
for repo in self.driver.operating_system.repos:
                LOG.debug('Adding repository source: name={name}, uri={uri}, '
                          'suite={suite}, section={section}'.format(
name=repo.name, uri=repo.uri,
suite=repo.suite, section=repo.section))
bu.add_apt_source(
name=repo.name,
uri=repo.uri,
suite=repo.suite,
section=repo.section,
chroot=chroot)
LOG.debug('Adding repository preference: '
'name={name}, priority={priority}'.format(
name=repo.name, priority=repo.priority))
if repo.priority is not None:
bu.add_apt_preference(
name=repo.name,
priority=repo.priority,
suite=repo.suite,
section=repo.section,
chroot=chroot,
uri=repo.uri)
metadata.setdefault('repos', []).append({
'type': 'deb',
'name': repo.name,
'uri': repo.uri,
'suite': repo.suite,
'section': repo.section,
'priority': repo.priority,
'meta': repo.meta})
            LOG.debug('Preventing services from being started')
bu.suppress_services_start(chroot)
packages = self.driver.operating_system.packages
metadata['packages'] = packages
# we need /proc to be mounted for apt-get success
utils.makedirs_if_not_exists(proc_path)
fu.mount_bind(chroot, '/proc')
LOG.debug('Installing packages using apt-get: %s',
' '.join(packages))
bu.run_apt_get(chroot, packages=packages)
LOG.debug('Post-install OS configuration')
bu.do_post_inst(chroot)
LOG.debug('Making sure there are no running processes '
'inside chroot before trying to umount chroot')
if not bu.stop_chrooted_processes(chroot, signal=signal.SIGTERM):
if not bu.stop_chrooted_processes(
chroot, signal=signal.SIGKILL):
                    raise errors.UnexpectedProcessError(
                        'Stopping chrooted processes failed. '
                        'There are some processes running in chroot %s' %
                        chroot)
LOG.info('*** Finalizing image space ***')
fu.umount_fs(proc_path)
# umounting all loop devices
self.umount_target(chroot, pseudo=False, try_lazy_umount=False)
for image in self.driver.image_scheme.images:
# find fs with the same loop device object
# as image.target_device
fs = self.driver.partition_scheme.fs_by_device(
image.target_device)
if fs.type == 'ext4':
LOG.debug('Trying to re-enable journaling for ext4')
utils.execute('tune2fs', '-O', 'has_journal',
str(fs.device))
                LOG.debug('Detaching loop device from file: %s',
                          image.img_tmp_file)
bu.deattach_loop(str(image.target_device))
LOG.debug('Shrinking temporary image file: %s',
image.img_tmp_file)
bu.shrink_sparse_file(image.img_tmp_file)
raw_size = os.path.getsize(image.img_tmp_file)
raw_md5 = utils.calculate_md5(image.img_tmp_file, raw_size)
LOG.debug('Containerizing temporary image file: %s',
image.img_tmp_file)
img_tmp_containerized = bu.containerize(
image.img_tmp_file, image.container)
img_containerized = image.uri.split('file://', 1)[1]
# NOTE(kozhukalov): implement abstract publisher
LOG.debug('Moving image file to the final location: %s',
img_containerized)
shutil.move(img_tmp_containerized, img_containerized)
container_size = os.path.getsize(img_containerized)
container_md5 = utils.calculate_md5(
img_containerized, container_size)
metadata.setdefault('images', []).append({
'raw_md5': raw_md5,
'raw_size': raw_size,
'raw_name': None,
'container_name': os.path.basename(img_containerized),
'container_md5': container_md5,
'container_size': container_size,
'container': image.container,
'format': image.format})
# NOTE(kozhukalov): implement abstract publisher
LOG.debug('Image metadata: %s', metadata)
with open(self.driver.metadata_uri.split('file://', 1)[1],
'w') as f:
yaml.safe_dump(metadata, stream=f)
LOG.info('--- Building image END (do_build_image) ---')
except Exception as exc:
LOG.error('Failed to build image: %s', exc)
raise
finally:
LOG.debug('Finally: stopping processes inside chroot: %s', chroot)
if not bu.stop_chrooted_processes(chroot, signal=signal.SIGTERM):
bu.stop_chrooted_processes(chroot, signal=signal.SIGKILL)
LOG.debug('Finally: umounting procfs %s', proc_path)
fu.umount_fs(proc_path)
LOG.debug('Finally: umounting chroot tree %s', chroot)
self.umount_target(chroot, pseudo=False, try_lazy_umount=False)
for image in self.driver.image_scheme.images:
LOG.debug('Finally: detaching loop device: %s',
str(image.target_device))
try:
bu.deattach_loop(str(image.target_device))
except errors.ProcessExecutionError as e:
                    LOG.warning('Error occurred while trying to detach '
                                'loop device %s. Error message: %s',
                                str(image.target_device), e)
LOG.debug('Finally: removing temporary file: %s',
image.img_tmp_file)
try:
os.unlink(image.img_tmp_file)
except OSError:
                    LOG.debug('Finally: file %s does not seem to exist '
                              'or cannot be removed', image.img_tmp_file)
LOG.debug('Finally: removing chroot directory: %s', chroot)
try:
os.rmdir(chroot)
except OSError:
                LOG.debug('Finally: directory %s does not seem to exist '
                          'or cannot be removed', chroot)

View File

@ -1,42 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fuel_agent.objects.bootloader import Grub
from fuel_agent.objects.configdrive import ConfigDriveCommon
from fuel_agent.objects.configdrive import ConfigDriveMcollective
from fuel_agent.objects.configdrive import ConfigDrivePuppet
from fuel_agent.objects.configdrive import ConfigDriveScheme
from fuel_agent.objects.device import Loop
from fuel_agent.objects.image import Image
from fuel_agent.objects.image import ImageScheme
from fuel_agent.objects.operating_system import OperatingSystem
from fuel_agent.objects.operating_system import Ubuntu
from fuel_agent.objects.partition import Fs
from fuel_agent.objects.partition import Lv
from fuel_agent.objects.partition import Md
from fuel_agent.objects.partition import Partition
from fuel_agent.objects.partition import PartitionScheme
from fuel_agent.objects.partition import Pv
from fuel_agent.objects.partition import Vg
from fuel_agent.objects.repo import DEBRepo
from fuel_agent.objects.repo import Repo
__all__ = [
'Partition', 'Pv', 'Vg', 'Lv', 'Md', 'Fs', 'PartitionScheme',
'ConfigDriveCommon', 'ConfigDrivePuppet', 'ConfigDriveMcollective',
'ConfigDriveScheme', 'Image', 'ImageScheme', 'Grub',
'OperatingSystem', 'Ubuntu',
'Repo', 'DEBRepo',
'Loop',
]

View File

@ -1,29 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Grub(object):
def __init__(self, version=None, kernel_params='',
kernel_name=None, kernel_regexp=None,
initrd_name=None, initrd_regexp=None):
self.version = version
self.kernel_params = kernel_params
self.kernel_name = kernel_name
self.initrd_name = initrd_name
self.kernel_regexp = kernel_regexp
self.initrd_regexp = initrd_regexp
def append_kernel_params(self, *kernel_params):
for kp in kernel_params:
self.kernel_params = '{0} {1}'.format(self.kernel_params, kp)

View File

@ -1,107 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fuel_agent import errors
class ConfigDriveCommon(object):
def __init__(self, ssh_auth_keys, hostname, fqdn, name_servers,
search_domain, master_ip, master_url, udevrules, admin_mac,
admin_ip, admin_mask, admin_iface_name, timezone, ks_repos,
gw):
self.ssh_auth_keys = ssh_auth_keys
self.hostname = hostname
self.fqdn = fqdn
self.name_servers = name_servers
self.search_domain = search_domain
self.master_ip = master_ip
self.master_url = master_url
self.udevrules = udevrules
self.admin_mac = admin_mac
self.admin_ip = admin_ip
self.admin_mask = admin_mask
self.admin_iface_name = admin_iface_name
self.timezone = timezone
self.ks_repos = ks_repos
self.gw = gw
class ConfigDrivePuppet(object):
def __init__(self, master, enable):
self.master = master
self.enable = enable
class ConfigDriveMcollective(object):
def __init__(self, pskey, vhost, host, user, password, connector, enable):
self.pskey = pskey
self.vhost = vhost
self.host = host
self.user = user
self.password = password
self.connector = connector
self.enable = enable
class ConfigDriveScheme(object):
def __init__(self, common=None, puppet=None,
mcollective=None, profile=None):
self.common = common
self.puppet = puppet
self.mcollective = mcollective
self._profile = profile or 'ubuntu'
# TODO(kozhukalov) make it possible to validate scheme according to
# chosen profile which means chosen set of cloud-init templates.
# In other words make this templating scheme easily extendable.
def set_common(self, **kwargs):
self.common = ConfigDriveCommon(**kwargs)
def set_puppet(self, **kwargs):
self.puppet = ConfigDrivePuppet(**kwargs)
def set_mcollective(self, **kwargs):
self.mcollective = ConfigDriveMcollective(**kwargs)
def template_data(self):
if self.common is None:
raise errors.WrongConfigDriveDataError(
'Common attribute should be defined, but it is not')
template_data = {'common': self.common}
if self.puppet is not None:
template_data.update(puppet=self.puppet)
if self.mcollective is not None:
template_data.update(mcollective=self.mcollective)
return template_data
def set_profile(self, profile):
# TODO(kozhukalov) validate profile
self._profile = profile
@property
def profile(self):
return self._profile
def template_names(self, what):
        # Such a complicated scheme is used to cover a range of profile names
        # which might be either dash- or underscore-separated, e.g.
# ubuntu_1404_x86_64
# centos-65_x86_64
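        # e.g. for profile 'ubuntu_1404_x86_64', a call like
        # template_names('cloud_config') returns (most specific first):
        #   cloud_config_ubuntu_1404_x86_64.jinja2
        #   cloud_config_ubuntu.jinja2
        #   cloud_config_ubuntu_1404_x86_64.jinja2  (no dash to split on)
        #   cloud_config.jinja2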
return [
'%s_%s.jinja2' % (what, self._profile),
'%s_%s.jinja2' % (what, self._profile.split('_')[0]),
'%s_%s.jinja2' % (what, self._profile.split('-')[0]),
'%s.jinja2' % what
]

View File

@ -1,28 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fuel_agent import errors
class Loop(object):
def __init__(self, name=None):
self.name = name
def __str__(self):
if self.name:
return self.name
raise errors.WrongDeviceError(
            'Loop device cannot be stringified. '
'Name attribute is not set. Current: '
'name={0}'.format(self.name))

View File

@ -1,44 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fuel_agent import errors
class Image(object):
SUPPORTED_CONTAINERS = ['raw', 'gzip']
def __init__(self, uri, target_device,
format, container, size=None, md5=None):
# uri is something like
# http://host:port/path/to/image.img or
# file:///tmp/image.img
self.uri = uri
self.target_device = target_device
# this must be one of 'iso9660', 'ext[234]', 'xfs'
self.format = format
if container not in self.SUPPORTED_CONTAINERS:
raise errors.WrongImageDataError(
'Error while image initialization: '
'unsupported image container')
self.container = container
self.size = size
self.md5 = md5
class ImageScheme(object):
def __init__(self, images=None):
self.images = images or []
def add_image(self, **kwargs):
self.images.append(Image(**kwargs))

View File

@ -1,23 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class OperatingSystem(object):
def __init__(self, repos, packages):
self.repos = repos
self.packages = packages
class Ubuntu(OperatingSystem):
pass

View File

@ -1,359 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from fuel_agent import errors
from fuel_agent.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class Parted(object):
def __init__(self, name, label):
self.name = name
self.label = label
self.partitions = []
self.install_bootloader = False
def add_partition(self, **kwargs):
# TODO(kozhukalov): validate before appending
# calculating partition name based on device name and partition count
kwargs['name'] = self.next_name()
kwargs['count'] = self.next_count()
kwargs['device'] = self.name
# if begin is given use its value else use end of last partition
kwargs['begin'] = kwargs.get('begin', self.next_begin())
# if end is given use its value else
# try to calculate it based on size kwarg or
# raise KeyError
        # (kwargs.pop('size') will raise KeyError if size is not set)
kwargs['end'] = kwargs.get('end') or \
kwargs['begin'] + kwargs.pop('size')
# if partition_type is given use its value else
# try to calculate it automatically
kwargs['partition_type'] = \
kwargs.get('partition_type', self.next_type())
partition = Partition(**kwargs)
self.partitions.append(partition)
return partition
@property
def logical(self):
return filter(lambda x: x.type == 'logical', self.partitions)
@property
def primary(self):
return filter(lambda x: x.type == 'primary', self.partitions)
@property
def extended(self):
found = filter(lambda x: x.type == 'extended', self.partitions)
if found:
return found[0]
def next_type(self):
if self.label == 'gpt':
return 'primary'
elif self.label == 'msdos':
if self.extended:
return 'logical'
elif len(self.partitions) < 3 and not self.extended:
return 'primary'
elif len(self.partitions) == 3 and not self.extended:
return 'extended'
# NOTE(agordeev): how to reach that condition?
else:
return 'logical'
def next_count(self, next_type=None):
next_type = next_type or self.next_type()
if next_type == 'logical':
return len(self.logical) + 5
return len(self.partitions) + 1
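    # For example, on an msdos-labelled disk the fourth partition to be
    # added becomes 'extended' and the first logical partition inside it
    # gets count 5 (i.e. /dev/sda5), matching the Linux numbering
    # convention for logical partitions.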
def next_begin(self):
if not self.partitions:
return 1
if self.partitions[-1] == self.extended:
return self.partitions[-1].begin
return self.partitions[-1].end
def next_name(self):
if self.next_type() == 'extended':
return None
separator = ''
special_devices = ('cciss', 'nvme', 'loop')
if any(n in self.name for n in special_devices):
separator = 'p'
return '%s%s%s' % (self.name, separator, self.next_count())
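    # Examples: '/dev/sda' yields '/dev/sda1', while devices such as
    # '/dev/nvme0n1' or '/dev/cciss/c0d0' get a 'p' separator,
    # e.g. '/dev/nvme0n1p1'.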
class Partition(object):
def __init__(self, name, count, device, begin, end, partition_type,
flags=None, guid=None, configdrive=False):
self.name = name
self.count = count
self.device = device
self.begin = begin
self.end = end
self.type = partition_type
self.flags = flags or []
self.guid = guid
self.configdrive = configdrive
def set_flag(self, flag):
if flag not in self.flags:
self.flags.append(flag)
def set_guid(self, guid):
self.guid = guid
class Pv(object):
def __init__(self, name, metadatasize=16, metadatacopies=2):
self.name = name
self.metadatasize = metadatasize
self.metadatacopies = metadatacopies
class Vg(object):
def __init__(self, name, pvnames=None):
self.name = name
self.pvnames = pvnames or []
def add_pv(self, pvname):
if pvname not in self.pvnames:
self.pvnames.append(pvname)
class Lv(object):
def __init__(self, name, vgname, size):
self.name = name
self.vgname = vgname
self.size = size
@property
def device_name(self):
return '/dev/mapper/%s-%s' % (self.vgname.replace('-', '--'),
self.name.replace('-', '--'))
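    # Example: vgname='os-vg', name='root' maps to '/dev/mapper/os--vg-root';
    # device-mapper doubles dashes inside VG/LV names so the separating
    # dash stays unambiguous.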
class Md(object):
def __init__(self, name, level,
devices=None, spares=None):
self.name = name
self.level = level
self.devices = devices or []
self.spares = spares or []
def add_device(self, device):
if device in self.devices or device in self.spares:
raise errors.MDDeviceDuplicationError(
'Error while attaching device to md: '
'device %s is already attached' % device)
self.devices.append(device)
def add_spare(self, device):
if device in self.devices or device in self.spares:
raise errors.MDDeviceDuplicationError(
'Error while attaching device to md: '
'device %s is already attached' % device)
self.spares.append(device)
class Fs(object):
def __init__(self, device, mount=None,
fs_type=None, fs_options=None, fs_label=None):
self.device = device
self.mount = mount
self.type = fs_type or 'xfs'
self.options = fs_options or ''
self.label = fs_label or ''
class PartitionScheme(object):
def __init__(self):
self.parteds = []
self.mds = []
self.pvs = []
self.vgs = []
self.lvs = []
self.fss = []
def add_parted(self, **kwargs):
parted = Parted(**kwargs)
self.parteds.append(parted)
return parted
def add_pv(self, **kwargs):
pv = Pv(**kwargs)
self.pvs.append(pv)
return pv
def add_vg(self, **kwargs):
vg = Vg(**kwargs)
self.vgs.append(vg)
return vg
def add_lv(self, **kwargs):
lv = Lv(**kwargs)
self.lvs.append(lv)
return lv
def add_fs(self, **kwargs):
fs = Fs(**kwargs)
self.fss.append(fs)
return fs
def add_md(self, **kwargs):
mdkwargs = {}
mdkwargs['name'] = kwargs.get('name') or self.md_next_name()
mdkwargs['level'] = kwargs.get('level') or 'mirror'
md = Md(**mdkwargs)
self.mds.append(md)
return md
def md_by_name(self, name):
found = filter(lambda x: x.name == name, self.mds)
if found:
return found[0]
def md_by_mount(self, mount):
fs = self.fs_by_mount(mount)
if fs:
return self.md_by_name(fs.device)
def md_attach_by_mount(self, device, mount, spare=False, **kwargs):
md = self.md_by_mount(mount)
if not md:
md = self.add_md(**kwargs)
fskwargs = {}
fskwargs['device'] = md.name
fskwargs['mount'] = mount
fskwargs['fs_type'] = kwargs.pop('fs_type', None)
fskwargs['fs_options'] = kwargs.pop('fs_options', None)
fskwargs['fs_label'] = kwargs.pop('fs_label', None)
self.add_fs(**fskwargs)
        if spare:
            md.add_spare(device)
        else:
            md.add_device(device)
return md
def md_next_name(self):
count = 0
while True:
name = '/dev/md%s' % count
if name not in [md.name for md in self.mds]:
return name
if count >= 127:
raise errors.MDAlreadyExistsError(
'Error while generating md name: '
'names from /dev/md0 to /dev/md127 seem to be busy, '
'try to generate md name manually')
count += 1
def vg_by_name(self, vgname):
found = filter(lambda x: (x.name == vgname), self.vgs)
if found:
return found[0]
def pv_by_name(self, pvname):
found = filter(lambda x: (x.name == pvname), self.pvs)
if found:
return found[0]
def vg_attach_by_name(self, pvname, vgname,
metadatasize=16, metadatacopies=2):
vg = self.vg_by_name(vgname) or self.add_vg(name=vgname)
pv = self.pv_by_name(pvname) or self.add_pv(
name=pvname, metadatasize=metadatasize,
metadatacopies=metadatacopies)
vg.add_pv(pv.name)
def fs_by_mount(self, mount):
found = filter(lambda x: (x.mount and x.mount == mount), self.fss)
if found:
return found[0]
def fs_by_device(self, device):
found = filter(lambda x: x.device == device, self.fss)
if found:
return found[0]
def fs_sorted_by_depth(self, reverse=False):
"""Getting file systems sorted by path length.
Shorter paths earlier.
['/', '/boot', '/var', '/var/lib/mysql']
:param reverse: Sort backward (Default: False)
"""
def key(x):
return x.mount.rstrip(os.path.sep).count(os.path.sep)
return sorted(self.fss, key=key, reverse=reverse)
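    # Example key values: '/' -> 0, '/boot' -> 1, '/var/lib/mysql' -> 3,
    # so '/' is mounted first and, with reverse=True, unmounted last.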
def lv_by_device_name(self, device_name):
found = filter(lambda x: x.device_name == device_name, self.lvs)
if found:
return found[0]
def root_device(self):
fs = self.fs_by_mount('/')
if not fs:
raise errors.WrongPartitionSchemeError(
'Error while trying to find root device: '
'root file system not found')
return fs.device
def boot_device(self, grub_version=2):
# We assume /boot is a separate partition. If it is not
# then we try to use root file system
boot_fs = self.fs_by_mount('/boot') or self.fs_by_mount('/')
if not boot_fs:
raise errors.WrongPartitionSchemeError(
'Error while trying to find boot device: '
                'boot file system not found, '
'it must be a separate mount point')
if grub_version == 1:
            # Legacy GRUB has a limitation: it is not able to mount MD
            # devices. At best it can ignore the MD metadata and mount one
            # of the devices which the MD device consists of, but that is
            # possible only if the MD device is a MIRROR.
md = self.md_by_name(boot_fs.device)
if md:
try:
return md.devices[0]
except IndexError:
raise errors.WrongPartitionSchemeError(
'Error while trying to find boot device: '
'md device %s does not have devices attached' %
md.name)
# Legacy GRUB is not able to mount LVM devices.
if self.lv_by_device_name(boot_fs.device):
raise errors.WrongPartitionSchemeError(
'Error while trying to find boot device: '
'found device is %s but legacy grub is not able to '
'mount logical volumes' %
boot_fs.device)
return boot_fs.device
def configdrive_device(self):
        # The configdrive device must be a small (about 10M) partition
        # on one of the node's hard drives. This partition is necessary
        # only if one uses cloud-init with a configdrive.
for parted in self.parteds:
for prt in parted.partitions:
if prt.configdrive:
return prt.name

View File

@ -1,28 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Repo(object):
def __init__(self, name, uri, priority=None):
self.name = name
self.uri = uri
self.priority = priority
class DEBRepo(Repo):
def __init__(self, name, uri, suite, section, meta=None, priority=None):
super(DEBRepo, self).__init__(name, uri, priority)
self.suite = suite
self.section = section
self.meta = meta

View File

@ -1,17 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox'))

View File

@ -1,314 +0,0 @@
# Copyright 2012 SINA Corporation
# Copyright 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Extracts OpenStack config option info from module(s)."""
from __future__ import print_function
import argparse
import imp
import os
import re
import socket
import sys
import textwrap
from oslo.config import cfg
import six
import stevedore.named
from fuel_agent.openstack.common import gettextutils
from fuel_agent.openstack.common import importutils
gettextutils.install('fuel_agent')
STROPT = "StrOpt"
BOOLOPT = "BoolOpt"
INTOPT = "IntOpt"
FLOATOPT = "FloatOpt"
LISTOPT = "ListOpt"
DICTOPT = "DictOpt"
MULTISTROPT = "MultiStrOpt"
OPT_TYPES = {
STROPT: 'string value',
BOOLOPT: 'boolean value',
INTOPT: 'integer value',
FLOATOPT: 'floating point value',
LISTOPT: 'list value',
DICTOPT: 'dict value',
MULTISTROPT: 'multi valued',
}
OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
FLOATOPT, LISTOPT, DICTOPT,
MULTISTROPT]))
PY_EXT = ".py"
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
"../../../../"))
WORDWRAP_WIDTH = 60
def raise_extension_exception(extmanager, ep, err):
raise
def generate(argv):
parser = argparse.ArgumentParser(
description='generate sample configuration file',
)
parser.add_argument('-m', dest='modules', action='append')
parser.add_argument('-l', dest='libraries', action='append')
parser.add_argument('srcfiles', nargs='*')
parsed_args = parser.parse_args(argv)
mods_by_pkg = dict()
for filepath in parsed_args.srcfiles:
pkg_name = filepath.split(os.sep)[1]
mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
os.path.basename(filepath).split('.')[0]])
mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
# NOTE(lzyeval): place top level modules before packages
pkg_names = sorted(pkg for pkg in mods_by_pkg if pkg.endswith(PY_EXT))
ext_names = sorted(pkg for pkg in mods_by_pkg if pkg not in pkg_names)
pkg_names.extend(ext_names)
# opts_by_group is a mapping of group name to an options list
# The options list is a list of (module, options) tuples
opts_by_group = {'DEFAULT': []}
if parsed_args.modules:
for module_name in parsed_args.modules:
module = _import_module(module_name)
if module:
for group, opts in _list_opts(module):
opts_by_group.setdefault(group, []).append((module_name,
opts))
# Look for entry points defined in libraries (or applications) for
# option discovery, and include their return values in the output.
#
# Each entry point should be a function returning an iterable
# of pairs with the group name (or None for the default group)
# and the list of Opt instances for that group.
if parsed_args.libraries:
loader = stevedore.named.NamedExtensionManager(
'oslo.config.opts',
names=list(set(parsed_args.libraries)),
invoke_on_load=False,
on_load_failure_callback=raise_extension_exception
)
for ext in loader:
for group, opts in ext.plugin():
opt_list = opts_by_group.setdefault(group or 'DEFAULT', [])
opt_list.append((ext.name, opts))
for pkg_name in pkg_names:
mods = mods_by_pkg.get(pkg_name)
mods.sort()
for mod_str in mods:
if mod_str.endswith('.__init__'):
mod_str = mod_str[:mod_str.rfind(".")]
mod_obj = _import_module(mod_str)
if not mod_obj:
raise RuntimeError("Unable to import module %s" % mod_str)
for group, opts in _list_opts(mod_obj):
opts_by_group.setdefault(group, []).append((mod_str, opts))
print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
for group in sorted(opts_by_group.keys()):
print_group_opts(group, opts_by_group[group])
def _import_module(mod_str):
try:
if mod_str.startswith('bin.'):
imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:]))
return sys.modules[mod_str[4:]]
else:
return importutils.import_module(mod_str)
except Exception as e:
sys.stderr.write("Error importing module %s: %s\n" % (mod_str, str(e)))
return None
def _is_in_group(opt, group):
"Check if opt is in group."
for value in group._opts.values():
# NOTE(llu): Temporary workaround for bug #1262148, wait until
# newly released oslo.config support '==' operator.
if not(value['opt'] != opt):
return True
return False
def _guess_groups(opt, mod_obj):
# is it in the DEFAULT group?
if _is_in_group(opt, cfg.CONF):
return 'DEFAULT'
# what other groups is it in?
for value in cfg.CONF.values():
if isinstance(value, cfg.CONF.GroupAttr):
if _is_in_group(opt, value._group):
return value._group.name
raise RuntimeError(
"Unable to find group for option %s, "
"maybe it's defined twice in the same group?"
% opt.name
)
def _list_opts(obj):
def is_opt(o):
return (isinstance(o, cfg.Opt) and
not isinstance(o, cfg.SubCommandOpt))
opts = list()
for attr_str in dir(obj):
attr_obj = getattr(obj, attr_str)
if is_opt(attr_obj):
opts.append(attr_obj)
elif (isinstance(attr_obj, list) and
all(map(lambda x: is_opt(x), attr_obj))):
opts.extend(attr_obj)
ret = {}
for opt in opts:
ret.setdefault(_guess_groups(opt, obj), []).append(opt)
return ret.items()
def print_group_opts(group, opts_by_module):
print("[%s]" % group)
print('')
for mod, opts in opts_by_module:
print('#')
print('# Options defined in %s' % mod)
print('#')
print('')
for opt in opts:
_print_opt(opt)
print('')
def _get_my_ip():
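    # Connecting a UDP socket sends no packets; it merely makes the kernel
    # choose an outgoing interface, whose address is then read back with
    # getsockname().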
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.error:
return None
def _sanitize_default(name, value):
"""Set up a reasonably sensible default for pybasedir, my_ip and host."""
hostname = socket.gethostname()
fqdn = socket.getfqdn()
if value.startswith(sys.prefix):
# NOTE(jd) Don't use os.path.join, because it is likely to think the
# second part is an absolute pathname and therefore drop the first
# part.
value = os.path.normpath("/usr/" + value[len(sys.prefix):])
elif value.startswith(BASEDIR):
return value.replace(BASEDIR, '/usr/lib/python/site-packages')
elif BASEDIR in value:
return value.replace(BASEDIR, '')
elif value == _get_my_ip():
return '10.0.0.1'
elif value in (hostname, fqdn):
if 'host' in name:
return 'fuel_agent'
elif value.endswith(hostname):
return value.replace(hostname, 'fuel_agent')
elif value.endswith(fqdn):
return value.replace(fqdn, 'fuel_agent')
elif value.strip() != value:
return '"%s"' % value
return value
def _print_opt(opt):
opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
if not opt_help:
sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
opt_help = ""
opt_type = None
try:
opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
except (ValueError, AttributeError) as err:
sys.stderr.write("%s\n" % str(err))
sys.exit(1)
opt_help = u'%s (%s)' % (opt_help,
OPT_TYPES[opt_type])
print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
if opt.deprecated_opts:
for deprecated_opt in opt.deprecated_opts:
if deprecated_opt.name:
deprecated_group = (deprecated_opt.group if
deprecated_opt.group else "DEFAULT")
print('# Deprecated group/name - [%s]/%s' %
(deprecated_group,
deprecated_opt.name))
try:
if opt_default is None:
print('#%s=<None>' % opt_name)
elif opt_type == STROPT:
assert(isinstance(opt_default, six.string_types))
print('#%s=%s' % (opt_name, _sanitize_default(opt_name,
opt_default)))
elif opt_type == BOOLOPT:
assert(isinstance(opt_default, bool))
print('#%s=%s' % (opt_name, str(opt_default).lower()))
elif opt_type == INTOPT:
assert(isinstance(opt_default, int) and
not isinstance(opt_default, bool))
print('#%s=%s' % (opt_name, opt_default))
elif opt_type == FLOATOPT:
assert(isinstance(opt_default, float))
print('#%s=%s' % (opt_name, opt_default))
elif opt_type == LISTOPT:
assert(isinstance(opt_default, list))
print('#%s=%s' % (opt_name, ','.join(opt_default)))
elif opt_type == DICTOPT:
assert(isinstance(opt_default, dict))
opt_default_strlist = [str(key) + ':' + str(value)
for (key, value) in opt_default.items()]
print('#%s=%s' % (opt_name, ','.join(opt_default_strlist)))
elif opt_type == MULTISTROPT:
assert(isinstance(opt_default, list))
if not opt_default:
opt_default = ['']
for default in opt_default:
print('#%s=%s' % (opt_name, default))
print('')
except Exception:
sys.stderr.write('Error in option "%s"\n' % opt_name)
sys.exit(1)
def main():
generate(sys.argv[1:])
if __name__ == '__main__':
main()

View File

@ -1,498 +0,0 @@
# Copyright 2012 Red Hat, Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
gettext for openstack-common modules.
Usual usage in an openstack.common module:
from fuel_agent.openstack.common.gettextutils import _
"""
import copy
import functools
import gettext
import locale
from logging import handlers
import os
from babel import localedata
import six
_AVAILABLE_LANGUAGES = {}
# FIXME(dhellmann): Remove this when moving to oslo.i18n.
USE_LAZY = False
class TranslatorFactory(object):
"""Create translator functions
"""
def __init__(self, domain, lazy=False, localedir=None):
"""Establish a set of translation functions for the domain.
:param domain: Name of translation domain,
specifying a message catalog.
:type domain: str
:param lazy: Delays translation until a message is emitted.
Defaults to False.
:type lazy: Boolean
:param localedir: Directory with translation catalogs.
:type localedir: str
"""
self.domain = domain
self.lazy = lazy
if localedir is None:
localedir = os.environ.get(domain.upper() + '_LOCALEDIR')
self.localedir = localedir
def _make_translation_func(self, domain=None):
"""Return a new translation function ready for use.
Takes into account whether or not lazy translation is being
done.
The domain can be specified to override the default from the
factory, but the localedir from the factory is always used
because we assume the log-level translation catalogs are
installed in the same directory as the main application
catalog.
"""
if domain is None:
domain = self.domain
if self.lazy:
return functools.partial(Message, domain=domain)
t = gettext.translation(
domain,
localedir=self.localedir,
fallback=True,
)
if six.PY3:
return t.gettext
return t.ugettext
@property
def primary(self):
"The default translation function."
return self._make_translation_func()
def _make_log_translation_func(self, level):
return self._make_translation_func(self.domain + '-log-' + level)
@property
def log_info(self):
"Translate info-level log messages."
return self._make_log_translation_func('info')
@property
def log_warning(self):
"Translate warning-level log messages."
return self._make_log_translation_func('warning')
@property
def log_error(self):
"Translate error-level log messages."
return self._make_log_translation_func('error')
@property
def log_critical(self):
"Translate critical-level log messages."
return self._make_log_translation_func('critical')
# NOTE(dhellmann): When this module moves out of the incubator into
# oslo.i18n, these global variables can be moved to an integration
# module within each application.
# Create the global translation functions.
_translators = TranslatorFactory('fuel_agent')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
# NOTE(dhellmann): End of globals that will move to the application's
# integration module.
def enable_lazy():
"""Convenience function for configuring _() to use lazy gettext
Call this at the start of execution to enable the gettextutils._
function to use lazy gettext functionality. This is useful if
your project is importing _ directly instead of using the
gettextutils.install() way of importing the _ function.
"""
# FIXME(dhellmann): This function will be removed in oslo.i18n,
# because the TranslatorFactory makes it superfluous.
global _, _LI, _LW, _LE, _LC, USE_LAZY
tf = TranslatorFactory('fuel_agent', lazy=True)
_ = tf.primary
_LI = tf.log_info
_LW = tf.log_warning
_LE = tf.log_error
_LC = tf.log_critical
USE_LAZY = True
def install(domain, lazy=False):
"""Install a _() function using the given translation domain.
Given a translation domain, install a _() function using gettext's
install() function.
The main difference from gettext.install() is that we allow
overriding the default localedir (e.g. /usr/share/locale) using
a translation-domain-specific environment variable (e.g.
NOVA_LOCALEDIR).
:param domain: the translation domain
:param lazy: indicates whether or not to install the lazy _() function.
The lazy _() introduces a way to do deferred translation
of messages by installing a _ that builds Message objects,
instead of strings, which can then be lazily translated into
any available locale.
"""
if lazy:
from six import moves
tf = TranslatorFactory(domain, lazy=True)
moves.builtins.__dict__['_'] = tf.primary
else:
localedir = '%s_LOCALEDIR' % domain.upper()
if six.PY3:
gettext.install(domain,
localedir=os.environ.get(localedir))
else:
gettext.install(domain,
localedir=os.environ.get(localedir),
unicode=True)
class Message(six.text_type):
"""A Message object is a unicode object that can be translated.
Translation of Message is done explicitly using the translate() method.
For all non-translation intents and purposes, a Message is simply unicode,
and can be treated as such.
"""
def __new__(cls, msgid, msgtext=None, params=None,
domain='fuel_agent', *args):
"""Create a new Message object.
In order for translation to work gettext requires a message ID, this
msgid will be used as the base unicode text. It is also possible
for the msgid and the base unicode text to be different by passing
the msgtext parameter.
"""
# If the base msgtext is not given, we use the default translation
# of the msgid (which is in English) just in case the system locale is
# not English, so that the base text will be in that locale by default.
if not msgtext:
msgtext = Message._translate_msgid(msgid, domain)
# We want to initialize the parent unicode with the actual object that
# would have been plain unicode if 'Message' was not enabled.
msg = super(Message, cls).__new__(cls, msgtext)
msg.msgid = msgid
msg.domain = domain
msg.params = params
return msg
def translate(self, desired_locale=None):
"""Translate this message to the desired locale.
:param desired_locale: The desired locale to translate the message to,
if no locale is provided the message will be
translated to the system's default locale.
:returns: the translated message in unicode
"""
translated_message = Message._translate_msgid(self.msgid,
self.domain,
desired_locale)
if self.params is None:
# No need for more translation
return translated_message
# This Message object may have been formatted with one or more
# Message objects as substitution arguments, given either as a single
# argument, part of a tuple, or as one or more values in a dictionary.
# When translating this Message we need to translate those Messages too
translated_params = _translate_args(self.params, desired_locale)
translated_message = translated_message % translated_params
return translated_message
@staticmethod
def _translate_msgid(msgid, domain, desired_locale=None):
if not desired_locale:
system_locale = locale.getdefaultlocale()
# If the system locale is not available to the runtime use English
if not system_locale[0]:
desired_locale = 'en_US'
else:
desired_locale = system_locale[0]
locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
lang = gettext.translation(domain,
localedir=locale_dir,
languages=[desired_locale],
fallback=True)
if six.PY3:
translator = lang.gettext
else:
translator = lang.ugettext
translated_message = translator(msgid)
return translated_message
def __mod__(self, other):
# When we mod a Message we want the actual operation to be performed
# by the parent class (i.e. unicode()), the only thing we do here is
# save the original msgid and the parameters in case of a translation
params = self._sanitize_mod_params(other)
unicode_mod = super(Message, self).__mod__(params)
modded = Message(self.msgid,
msgtext=unicode_mod,
params=params,
domain=self.domain)
return modded
def _sanitize_mod_params(self, other):
"""Sanitize the object being modded with this Message.
- Add support for modding 'None' so translation supports it
- Trim the modded object, which can be a large dictionary, to only
those keys that would actually be used in a translation
- Snapshot the object being modded, in case the message is
translated, it will be used as it was when the Message was created
"""
if other is None:
params = (other,)
elif isinstance(other, dict):
# Merge the dictionaries
# Copy each item in case one does not support deep copy.
params = {}
if isinstance(self.params, dict):
for key, val in self.params.items():
params[key] = self._copy_param(val)
for key, val in other.items():
params[key] = self._copy_param(val)
else:
params = self._copy_param(other)
return params
def _copy_param(self, param):
try:
return copy.deepcopy(param)
except Exception:
# Fallback to casting to unicode this will handle the
# python code-like objects that can't be deep-copied
return six.text_type(param)
def __add__(self, other):
msg = _('Message objects do not support addition.')
raise TypeError(msg)
def __radd__(self, other):
return self.__add__(other)
if six.PY2:
def __str__(self):
# NOTE(luisg): Logging in python 2.6 tries to str() log records,
# and it expects specifically a UnicodeError in order to proceed.
msg = _('Message objects do not support str() because they may '
'contain non-ascii characters. '
'Please use unicode() or translate() instead.')
raise UnicodeError(msg)
def get_available_languages(domain):
"""Lists the available languages for the given translation domain.
:param domain: the domain to get languages for
"""
if domain in _AVAILABLE_LANGUAGES:
return copy.copy(_AVAILABLE_LANGUAGES[domain])
localedir = '%s_LOCALEDIR' % domain.upper()
find = lambda x: gettext.find(domain,
localedir=os.environ.get(localedir),
languages=[x])
# NOTE(mrodden): en_US should always be available (and first in case
# order matters) since our in-line message strings are en_US
language_list = ['en_US']
# NOTE(luisg): Babel <1.0 used a function called list(), which was
# renamed to locale_identifiers() in >=1.0. The master requirements list
# requires >=0.9.6, uncapped, so defensively work with both. We can remove
# this check once the master list moves to >=1.0 and all projects update.
list_identifiers = (getattr(localedata, 'list', None) or
getattr(localedata, 'locale_identifiers'))
locale_identifiers = list_identifiers()
for i in locale_identifiers:
if find(i) is not None:
language_list.append(i)
# NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
# locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
# are perfectly legitimate locales:
# https://github.com/mitsuhiko/babel/issues/37
# In Babel 1.3 they fixed the bug and they support these locales, but
# they are still not explicitly "listed" by locale_identifiers().
# That is why we add the locales here explicitly if necessary so that
# they are listed as supported.
aliases = {'zh': 'zh_CN',
'zh_Hant_HK': 'zh_HK',
'zh_Hant': 'zh_TW',
'fil': 'tl_PH'}
for (locale, alias) in six.iteritems(aliases):
if locale in language_list and alias not in language_list:
language_list.append(alias)
_AVAILABLE_LANGUAGES[domain] = language_list
return copy.copy(language_list)
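A hedged usage sketch, assuming a FUEL_AGENT_LOCALEDIR environment variable pointing at compiled message catalogs; en_US is always first in the result:

    langs = get_available_languages('fuel_agent')
    # e.g. ['en_US', 'ru', 'zh_CN'] -- the tail depends on installed catalogs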
def translate(obj, desired_locale=None):
"""Gets the translated unicode representation of the given object.
If the object is not translatable it is returned as-is.
If the locale is None the object is translated to the system locale.
:param obj: the object to translate
:param desired_locale: the locale to translate the message to, if None the
default system locale will be used
:returns: the translated object in unicode, or the original object if
it could not be translated
"""
message = obj
if not isinstance(message, Message):
# If the object to translate is not already translatable,
# let's first get its unicode representation
message = six.text_type(obj)
if isinstance(message, Message):
# Even after converting to unicode we still need to check whether
# we are dealing with a translatable Message before translating
return message.translate(desired_locale)
return obj
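For example (illustrative only), translate() can be called on arbitrary objects; only Message instances actually get translated:

    translate(_('done'), 'de_DE')  # translated unicode, if _() yields a Message
    translate(42, 'de_DE')         # not translatable, returned as-is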
def _translate_args(args, desired_locale=None):
"""Translates all the translatable elements of the given arguments object.
This method is used for translating the translatable values in method
arguments, including values inside tuples or dictionaries.
If the object is not a tuple or a dictionary the object itself is
translated if it is translatable.
If the locale is None the object is translated to the system locale.
:param args: the args to translate
:param desired_locale: the locale to translate the args to, if None the
default system locale will be used
:returns: a new args object with the translated contents of the original
"""
if isinstance(args, tuple):
return tuple(translate(v, desired_locale) for v in args)
if isinstance(args, dict):
translated_dict = {}
for (k, v) in six.iteritems(args):
translated_v = translate(v, desired_locale)
translated_dict[k] = translated_v
return translated_dict
return translate(args, desired_locale)
class TranslationHandler(handlers.MemoryHandler):
"""Handler that translates records before logging them.
The TranslationHandler takes a locale and a target logging.Handler object
to forward LogRecord objects to after translating them. This handler
depends on Message objects being logged, instead of regular strings.
The handler can be configured declaratively in the logging.conf as follows:
[handlers]
keys = translatedlog, translator
[handler_translatedlog]
class = handlers.WatchedFileHandler
args = ('/var/log/api-localized.log',)
formatter = context
[handler_translator]
class = openstack.common.log.TranslationHandler
target = translatedlog
args = ('zh_CN',)
If the specified locale is not available in the system, the handler will
log in the default locale.
"""
def __init__(self, locale=None, target=None):
"""Initialize a TranslationHandler
:param locale: locale to use for translating messages
:param target: logging.Handler object to forward
LogRecord objects to after translation
"""
# NOTE(luisg): In order to allow this handler to be a wrapper for
# other handlers, such as a FileHandler, and still be able to
# configure it using logging.conf, this handler has to extend
# MemoryHandler, because only MemoryHandler's logging.conf
# parsing is implemented to accept a target handler.
handlers.MemoryHandler.__init__(self, capacity=0, target=target)
self.locale = locale
def setFormatter(self, fmt):
self.target.setFormatter(fmt)
def emit(self, record):
# We save the message from the original record to restore it
# after translation, so other handlers are not affected by this
original_msg = record.msg
original_args = record.args
try:
self._translate_and_log_record(record)
finally:
record.msg = original_msg
record.args = original_args
def _translate_and_log_record(self, record):
record.msg = translate(record.msg, self.locale)
# In addition to translating the message, we also need to translate
# arguments that were passed to the log method that were not part
# of the main message e.g., log.info(_('Some message %s'), this_one))
record.args = _translate_args(record.args, self.locale)
self.target.emit(record)
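Besides the declarative logging.conf wiring shown in the class docstring, the handler can also be attached programmatically. A minimal sketch, assuming Message objects are what gets logged:

    import logging
    target = logging.StreamHandler()
    handler = TranslationHandler(locale='zh_CN', target=target)
    logging.getLogger('fuel_agent').addHandler(handler)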

View File

@ -1,73 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Import related utilities and helper functions.
"""
import sys
import traceback
def import_class(import_str):
"""Returns a class from a string including module and class."""
mod_str, _sep, class_str = import_str.rpartition('.')
__import__(mod_str)
try:
return getattr(sys.modules[mod_str], class_str)
except AttributeError:
raise ImportError('Class %s cannot be found (%s)' %
(class_str,
traceback.format_exception(*sys.exc_info())))
def import_object(import_str, *args, **kwargs):
"""Import a class and return an instance of it."""
return import_class(import_str)(*args, **kwargs)
def import_object_ns(name_space, import_str, *args, **kwargs):
"""Tries to import object from default namespace.
Imports a class and returns an instance of it, first by trying
to find the class in the default namespace, then falling back to
the full path if it is not found in the default namespace.
"""
import_value = "%s.%s" % (name_space, import_str)
try:
return import_class(import_value)(*args, **kwargs)
except ImportError:
return import_class(import_str)(*args, **kwargs)
def import_module(import_str):
"""Import a module."""
__import__(import_str)
return sys.modules[import_str]
def import_versioned_module(version, submodule=None):
module = 'fuel_agent.v%s' % version
if submodule:
module = '.'.join((module, submodule))
return import_module(module)
def try_import(import_str, default=None):
"""Try to import a module and if it fails return default."""
try:
return import_module(import_str)
except ImportError:
return default
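A short sketch of the call shapes of these helpers; the dotted paths are hypothetical:

    cls = import_class('fuel_agent.drivers.nailgun.Nailgun')   # class object
    obj = import_object('fuel_agent.drivers.nailgun.Nailgun')  # instance
    obj = import_object_ns('fuel_agent.drivers', 'nailgun.Nailgun')
    json = try_import('simplejson') or try_import('json')      # first importable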

View File

@ -1,45 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Local storage of variables using weak references"""
import threading
import weakref
class WeakLocal(threading.local):
def __getattribute__(self, attr):
rval = super(WeakLocal, self).__getattribute__(attr)
if rval:
# NOTE(mikal): this bit is confusing. What is stored is a weak
# reference, not the value itself. We therefore need to look up
# the weak reference and return the inner value here.
rval = rval()
return rval
def __setattr__(self, attr, value):
value = weakref.ref(value)
return super(WeakLocal, self).__setattr__(attr, value)
# NOTE(mikal): the name "store" should be deprecated in the future
store = WeakLocal()
# A "weak" store uses weak references and allows an object to fall out of scope
# when it falls out of scope in the code that uses the thread local storage. A
# "strong" store will hold a reference to the object so that it never falls out
# of scope.
weak_store = WeakLocal()
strong_store = threading.local()
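The difference between the stores is purely object lifetime; a minimal sketch:

    class Context(object):
        pass

    weak_store.context = Context()  # nothing else references this object
    weak_store.context              # -> None once it is garbage-collected
                                    #    (immediately, in CPython)
    ctx = Context()
    strong_store.context = ctx      # a normal strong reference
    del ctx
    strong_store.context            # still returns the original object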

View File

@ -1,723 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""OpenStack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import re
import sys
import traceback
from oslo.config import cfg
from oslo_serialization import jsonutils
import six
from six import moves
from fuel_agent.openstack.common.gettextutils import _
from fuel_agent.openstack.common import importutils
from fuel_agent.openstack.common import local
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']
# NOTE(ldbragst): Let's build a list of regex objects using the list of
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
# to the list of _SANITIZE_KEYS and we can generate regular expressions
# for XML and JSON automatically.
_SANITIZE_PATTERNS = []
_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
r'(<%(key)s>).*?(</%(key)s>)',
r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])',
r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?[\'"])'
'.*?([\'"])',
r'(%(key)s\s*--?[A-z]+\s*).*?([\s])']
for key in _SANITIZE_KEYS:
for pattern in _FORMAT_PATTERNS:
reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
_SANITIZE_PATTERNS.append(reg_ex)
common_cli_opts = [
cfg.BoolOpt('debug',
short='d',
default=False,
help='Print debugging output (set logging level to '
'DEBUG instead of default WARNING level).'),
cfg.BoolOpt('verbose',
short='v',
default=False,
help='Print more verbose output (set logging level to '
'INFO instead of default WARNING level).'),
]
logging_cli_opts = [
cfg.StrOpt('log-config-append',
metavar='PATH',
deprecated_name='log-config',
help='The name of a logging configuration file. This file '
'is appended to any existing logging configuration '
'files. For details about logging configuration files, '
'see the Python logging module documentation.'),
cfg.StrOpt('log-format',
metavar='FORMAT',
help='DEPRECATED. '
'A logging.Formatter log message format string which may '
'use any of the available logging.LogRecord attributes. '
'This option is deprecated. Please use '
'logging_context_format_string and '
'logging_default_format_string instead.'),
cfg.StrOpt('log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records. '
'Default: %(default)s .'),
cfg.StrOpt('log-file',
metavar='PATH',
deprecated_name='logfile',
help='(Optional) Name of log file to output to. '
'If no default is set, logging will go to stdout.'),
cfg.StrOpt('log-dir',
deprecated_name='logdir',
help='(Optional) The base directory used for relative '
'--log-file paths.'),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging. '
'Existing syslog format is DEPRECATED during I, '
'and will change in J to honor RFC5424.'),
cfg.BoolOpt('use-syslog-rfc-format',
# TODO(bogdando) remove or use True after existing
# syslog format deprecation in J
default=False,
help='(Optional) Enables or disables syslog rfc5424 format '
'for logging. If enabled, prefixes the MSG part of the '
'syslog message with APP-NAME (RFC5424). The '
'format without the APP-NAME is deprecated in I, '
'and will be removed in J.'),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='Syslog facility to receive log lines.')
]
generic_log_opts = [
cfg.BoolOpt('use_stderr',
default=True,
help='Log output to standard error.')
]
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(request_id)s %(user_identity)s] '
'%(instance)s%(message)s',
help='Format string to use for log messages with context.'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='Format string to use for log messages without context.'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='Data to append to log format when level is DEBUG.'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
'%(instance)s',
help='Prefix each line of exception output with this format.'),
cfg.ListOpt('default_log_levels',
default=[
'amqp=WARN',
'amqplib=WARN',
'boto=WARN',
'qpid=WARN',
'sqlalchemy=WARN',
'suds=INFO',
'oslo.messaging=INFO',
'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN'
],
help='List of logger=LEVEL pairs.'),
cfg.BoolOpt('publish_errors',
default=False,
help='Enables or disables publication of error events.'),
cfg.BoolOpt('fatal_deprecations',
default=False,
help='Enables or disables fatal status of deprecations.'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
# are just handed a UUID for the instance.
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='The format for an instance that is passed with the log '
'message. '),
cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ',
help='The format for an instance UUID that is passed with the '
'log message. '),
]
CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
try:
NullHandler = logging.NullHandler
except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
logfile = CONF.log_file
logdir = CONF.log_dir
if logfile and not logdir:
return logfile
if logfile and logdir:
return os.path.join(logdir, logfile)
if logdir:
binary = binary or _get_binary_name()
return '%s.log' % (os.path.join(logdir, binary),)
return None
def mask_password(message, secret="***"):
"""Replace password with 'secret' in message.
:param message: The string which includes security information.
:param secret: value with which to replace passwords.
:returns: The unicode value of message with the password fields masked.
For example:
>>> mask_password("'adminPass' : 'aaaaa'")
"'adminPass' : '***'"
>>> mask_password("'admin_pass' : 'aaaaa'")
"'admin_pass' : '***'"
>>> mask_password('"password" : "aaaaa"')
'"password" : "***"'
>>> mask_password("'original_password' : 'aaaaa'")
"'original_password' : '***'"
>>> mask_password("u'original_password' : u'aaaaa'")
"u'original_password' : u'***'"
"""
message = six.text_type(message)
# NOTE(ldbragst): Check to see if anything in message contains any key
# specified in _SANITIZE_KEYS, if not then just return the message since
# we don't have to mask any passwords.
if not any(key in message for key in _SANITIZE_KEYS):
return message
secret = r'\g<1>' + secret + r'\g<2>'
for pattern in _SANITIZE_PATTERNS:
message = re.sub(pattern, secret, message)
return message
class BaseLoggerAdapter(logging.LoggerAdapter):
def audit(self, msg, *args, **kwargs):
self.log(logging.AUDIT, msg, *args, **kwargs)
class LazyAdapter(BaseLoggerAdapter):
def __init__(self, name='unknown', version='unknown'):
self._logger = None
self.extra = {}
self.name = name
self.version = version
@property
def logger(self):
if not self._logger:
self._logger = getLogger(self.name, self.version)
return self._logger
class ContextAdapter(BaseLoggerAdapter):
warn = logging.LoggerAdapter.warning
def __init__(self, logger, project_name, version_string):
self.logger = logger
self.project = project_name
self.version = version_string
self._deprecated_messages_sent = dict()
@property
def handlers(self):
return self.logger.handlers
def deprecated(self, msg, *args, **kwargs):
"""Call this method when a deprecated feature is used.
If the system is configured for fatal deprecations then the message
is logged at the 'critical' level and :class:`DeprecatedConfig` will
be raised.
Otherwise, the message will be logged (once) at the 'warn' level.
:raises: :class:`DeprecatedConfig` if the system is configured for
fatal deprecations.
"""
stdmsg = _("Deprecated: %s") % msg
if CONF.fatal_deprecations:
self.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg)
# Using a list because a tuple with dict can't be stored in a set.
sent_args = self._deprecated_messages_sent.setdefault(msg, list())
if args in sent_args:
# Already logged this message, so don't log it again.
return
sent_args.append(args)
self.warn(stdmsg, *args, **kwargs)
def process(self, msg, kwargs):
# NOTE(mrodden): catch any Message/other object and
# coerce to unicode before they can get
# to the python logging and possibly
# cause string encoding trouble
if not isinstance(msg, six.string_types):
msg = six.text_type(msg)
if 'extra' not in kwargs:
kwargs['extra'] = {}
extra = kwargs['extra']
context = kwargs.pop('context', None)
if not context:
context = getattr(local.store, 'context', None)
if context:
extra.update(_dictify_context(context))
instance = kwargs.pop('instance', None)
instance_uuid = (extra.get('instance_uuid') or
kwargs.pop('instance_uuid', None))
instance_extra = ''
if instance:
instance_extra = CONF.instance_format % instance
elif instance_uuid:
instance_extra = (CONF.instance_uuid_format
% {'uuid': instance_uuid})
extra['instance'] = instance_extra
extra.setdefault('user_identity', kwargs.pop('user_identity', None))
extra['project'] = self.project
extra['version'] = self.version
extra['extra'] = extra.copy()
return msg, kwargs
class JSONFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
# NOTE(jkoelker) we ignore the fmt argument, but it's still there
# since logging.config.fileConfig passes it.
self.datefmt = datefmt
def formatException(self, ei, strip_newlines=True):
lines = traceback.format_exception(*ei)
if strip_newlines:
lines = [moves.filter(
lambda x: x,
line.rstrip().splitlines()) for line in lines]
lines = list(itertools.chain(*lines))
return lines
def format(self, record):
message = {'message': record.getMessage(),
'asctime': self.formatTime(record, self.datefmt),
'name': record.name,
'msg': record.msg,
'args': record.args,
'levelname': record.levelname,
'levelno': record.levelno,
'pathname': record.pathname,
'filename': record.filename,
'module': record.module,
'lineno': record.lineno,
'funcname': record.funcName,
'created': record.created,
'msecs': record.msecs,
'relative_created': record.relativeCreated,
'thread': record.thread,
'thread_name': record.threadName,
'process_name': record.processName,
'process': record.process,
'traceback': None}
if hasattr(record, 'extra'):
message['extra'] = record.extra
if record.exc_info:
message['traceback'] = self.formatException(record.exc_info)
return jsonutils.dumps(message)
def _create_logging_excepthook(product_name):
def logging_excepthook(exc_type, value, tb):
extra = {'exc_info': (exc_type, value, tb)}
getLogger(product_name).critical(
"".join(traceback.format_exception_only(exc_type, value)),
**extra)
return logging_excepthook
class LogConfigError(Exception):
message = _('Error loading logging config %(log_config)s: %(err_msg)s')
def __init__(self, log_config, err_msg):
self.log_config = log_config
self.err_msg = err_msg
def __str__(self):
return self.message % dict(log_config=self.log_config,
err_msg=self.err_msg)
def _load_log_config(log_config_append):
try:
logging.config.fileConfig(log_config_append,
disable_existing_loggers=False)
except moves.configparser.Error as exc:
raise LogConfigError(log_config_append, six.text_type(exc))
def setup(product_name, version='unknown'):
"""Setup logging."""
if CONF.log_config_append:
_load_log_config(CONF.log_config_append)
else:
_setup_logging_from_conf(product_name, version)
sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string):
cfg.set_defaults(log_opts,
logging_context_format_string=
logging_context_format_string)
def _find_facility_from_conf():
facility_names = logging.handlers.SysLogHandler.facility_names
facility = getattr(logging.handlers.SysLogHandler,
CONF.syslog_log_facility,
None)
if facility is None and CONF.syslog_log_facility in facility_names:
facility = facility_names.get(CONF.syslog_log_facility)
if facility is None:
valid_facilities = facility_names.keys()
consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
valid_facilities.extend(consts)
raise TypeError(_('syslog facility must be one of: %s') %
', '.join("'%s'" % fac
for fac in valid_facilities))
return facility
class RFCSysLogHandler(logging.handlers.SysLogHandler):
def __init__(self, *args, **kwargs):
self.binary_name = _get_binary_name()
# Do not use super() unless type(logging.handlers.SysLogHandler)
# is 'type' (Python 2.7).
# Use old style calls, if the type is 'classobj' (Python 2.6)
logging.handlers.SysLogHandler.__init__(self, *args, **kwargs)
def format(self, record):
# Do not use super() unless type(logging.handlers.SysLogHandler)
# is 'type' (Python 2.7).
# Use old style calls, if the type is 'classobj' (Python 2.6)
msg = logging.handlers.SysLogHandler.format(self, record)
msg = self.binary_name + ' ' + msg
return msg
def _setup_logging_from_conf(project, version):
log_root = getLogger(None).logger
for handler in log_root.handlers:
log_root.removeHandler(handler)
if CONF.use_syslog:
facility = _find_facility_from_conf()
# TODO(bogdando) use the format provided by RFCSysLogHandler
# after existing syslog format deprecation in J
if CONF.use_syslog_rfc_format:
syslog = RFCSysLogHandler(address='/dev/log',
facility=facility)
else:
syslog = logging.handlers.SysLogHandler(address='/dev/log',
facility=facility)
log_root.addHandler(syslog)
logpath = _get_log_file_path()
if logpath:
filelog = logging.handlers.WatchedFileHandler(logpath)
log_root.addHandler(filelog)
if CONF.use_stderr:
streamlog = ColorHandler()
log_root.addHandler(streamlog)
elif not logpath:
# pass sys.stdout as a positional argument
# python2.6 calls the argument strm, in 2.7 it's stream
streamlog = logging.StreamHandler(sys.stdout)
log_root.addHandler(streamlog)
if CONF.publish_errors:
handler = importutils.import_object(
"fuel_agent.openstack.common.log_handler.PublishErrorsHandler",
logging.ERROR)
log_root.addHandler(handler)
datefmt = CONF.log_date_format
for handler in log_root.handlers:
# NOTE(alaski): CONF.log_format overrides everything currently. This
# should be deprecated in favor of context aware formatting.
if CONF.log_format:
handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
datefmt=datefmt))
log_root.info('Deprecated: log_format is now deprecated and will '
'be removed in the next release')
else:
handler.setFormatter(ContextFormatter(project=project,
version=version,
datefmt=datefmt))
if CONF.debug:
log_root.setLevel(logging.DEBUG)
elif CONF.verbose:
log_root.setLevel(logging.INFO)
else:
log_root.setLevel(logging.WARNING)
for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=')
logger = logging.getLogger(mod)
# NOTE(AAzza) in python2.6 Logger.setLevel doesn't convert string name
# to integer code.
if sys.version_info < (2, 7):
level = logging.getLevelName(level_name)
logger.setLevel(level)
else:
logger.setLevel(level_name)
_loggers = {}
def getLogger(name='unknown', version='unknown'):
if name not in _loggers:
_loggers[name] = ContextAdapter(logging.getLogger(name),
name,
version)
return _loggers[name]
def getLazyLogger(name='unknown', version='unknown'):
"""Returns lazy logger.
Creates a pass-through logger that does not create the real logger
until it is really needed and delegates all calls to the real logger
once it is created.
"""
return LazyAdapter(name, version)
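The typical calling pattern for this module, sketched under the assumption that oslo.config parses the CLI options registered above:

    CONF(sys.argv[1:])   # parse --debug/--verbose/--log-file and friends
    setup('fuel_agent')  # install handlers and formatters on the root logger
    LOG = getLogger(__name__)
    LOG.audit('provisioning started')  # AUDIT sits between INFO and WARNING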
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.INFO):
self.logger = logger
self.level = level
def write(self, msg):
self.logger.log(self.level, msg.rstrip())
class ContextFormatter(logging.Formatter):
"""A context.RequestContext aware formatter configured through flags.
The flags used to set format strings are: logging_context_format_string
and logging_default_format_string. You can also specify
logging_debug_format_suffix to append extra formatting if the log level is
debug.
For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter
If available, uses the context value stored in TLS - local.store.context
"""
def __init__(self, *args, **kwargs):
"""Initialize ContextFormatter instance
Takes additional keyword arguments which can be used in the message
format string.
:keyword project: project name
:type project: string
:keyword version: project version
:type version: string
"""
self.project = kwargs.pop('project', 'unknown')
self.version = kwargs.pop('version', 'unknown')
logging.Formatter.__init__(self, *args, **kwargs)
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
# store project info
record.project = self.project
record.version = self.version
# store request info
context = getattr(local.store, 'context', None)
if context:
d = _dictify_context(context)
for k, v in d.items():
setattr(record, k, v)
# NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color', 'user_identity'):
if key not in record.__dict__:
record.__dict__[key] = ''
if record.__dict__.get('request_id'):
self._fmt = CONF.logging_context_format_string
else:
self._fmt = CONF.logging_default_format_string
if (record.levelno == logging.DEBUG and
CONF.logging_debug_format_suffix):
self._fmt += " " + CONF.logging_debug_format_suffix
# Cache this on the record, Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record)
def formatException(self, exc_info, record=None):
"""Format exception output with CONF.logging_exception_prefix."""
if not record:
return logging.Formatter.formatException(self, exc_info)
stringbuffer = moves.StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
None, stringbuffer)
lines = stringbuffer.getvalue().split('\n')
stringbuffer.close()
if CONF.logging_exception_prefix.find('%(asctime)') != -1:
record.asctime = self.formatTime(record, self.datefmt)
formatted_lines = []
for line in lines:
pl = CONF.logging_exception_prefix % record.__dict__
fl = '%s%s' % (pl, line)
formatted_lines.append(fl)
return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
LEVEL_COLORS = {
logging.DEBUG: '\033[00;32m', # GREEN
logging.INFO: '\033[00;36m', # CYAN
logging.AUDIT: '\033[01;36m', # BOLD CYAN
logging.WARN: '\033[01;33m', # BOLD YELLOW
logging.ERROR: '\033[01;31m', # BOLD RED
logging.CRITICAL: '\033[01;31m', # BOLD RED
}
def format(self, record):
record.color = self.LEVEL_COLORS[record.levelno]
return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
message = _("Fatal call to deprecated config: %(msg)s")
def __init__(self, msg):
super(Exception, self).__init__(self.message % dict(msg=msg))

View File

@ -1,272 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import errno
import logging as stdlib_logging
import os
import random
import shlex
import signal
from eventlet.green import subprocess
from eventlet import greenthread
import six
from fuel_agent.openstack.common.gettextutils import _
from fuel_agent.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class InvalidArgumentError(Exception):
def __init__(self, message=None):
super(InvalidArgumentError, self).__init__(message)
class UnknownArgumentError(Exception):
def __init__(self, message=None):
super(UnknownArgumentError, self).__init__(message)
class ProcessExecutionError(Exception):
def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
description=None):
self.exit_code = exit_code
self.stderr = stderr
self.stdout = stdout
self.cmd = cmd
self.description = description
if description is None:
description = _("Unexpected error while running command.")
if exit_code is None:
exit_code = '-'
message = _('%(description)s\n'
'Command: %(cmd)s\n'
'Exit code: %(exit_code)s\n'
'Stdout: %(stdout)r\n'
'Stderr: %(stderr)r') % {'description': description,
'cmd': cmd,
'exit_code': exit_code,
'stdout': stdout,
'stderr': stderr}
super(ProcessExecutionError, self).__init__(message)
class NoRootWrapSpecified(Exception):
def __init__(self, message=None):
super(NoRootWrapSpecified, self).__init__(message)
def _subprocess_setup():
# Python installs a SIGPIPE handler by default. This is usually not what
# non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def execute(*cmd, **kwargs):
"""Helper method to shell out and execute a command through subprocess.
Allows optional retry.
:param cmd: Passed to subprocess.Popen.
:type cmd: string
:param process_input: Send to opened process.
:type process_input: string
:param env_variables: Environment variables and their values that
will be set for the process.
:type env_variables: dict
:param check_exit_code: Single bool, int, or list of allowed exit
codes. Defaults to [0]. Raise
:class:`ProcessExecutionError` unless
program exits with one of these codes.
:type check_exit_code: boolean, int, or [int]
:param delay_on_retry: True | False. Defaults to True. If set to True,
wait a short amount of time before retrying.
:type delay_on_retry: boolean
:param attempts: How many times to retry cmd.
:type attempts: int
:param run_as_root: True | False. Defaults to False. If set to True,
the command is prefixed by the command specified
in the root_helper kwarg.
:type run_as_root: boolean
:param root_helper: command to prefix to commands called with
run_as_root=True
:type root_helper: string
:param shell: whether or not there should be a shell used to
execute this command. Defaults to false.
:type shell: boolean
:param loglevel: log level for execute commands.
:type loglevel: int. (Should be stdlib_logging.DEBUG or
stdlib_logging.INFO)
:returns: (stdout, stderr) from process execution
:raises: :class:`UnknownArgumentError` on
receiving unknown arguments
:raises: :class:`ProcessExecutionError`
"""
process_input = kwargs.pop('process_input', None)
env_variables = kwargs.pop('env_variables', None)
check_exit_code = kwargs.pop('check_exit_code', [0])
ignore_exit_code = False
delay_on_retry = kwargs.pop('delay_on_retry', True)
attempts = kwargs.pop('attempts', 1)
run_as_root = kwargs.pop('run_as_root', False)
root_helper = kwargs.pop('root_helper', '')
shell = kwargs.pop('shell', False)
loglevel = kwargs.pop('loglevel', stdlib_logging.DEBUG)
if isinstance(check_exit_code, bool):
ignore_exit_code = not check_exit_code
check_exit_code = [0]
elif isinstance(check_exit_code, int):
check_exit_code = [check_exit_code]
if kwargs:
raise UnknownArgumentError(_('Got unknown keyword args '
'to utils.execute: %r') % kwargs)
if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
if not root_helper:
raise NoRootWrapSpecified(
message=_('Command requested root, but did not '
'specify a root helper.'))
cmd = shlex.split(root_helper) + list(cmd)
cmd = map(str, cmd)
while attempts > 0:
attempts -= 1
try:
LOG.log(loglevel, 'Running cmd (subprocess): %s',
logging.mask_password(' '.join(cmd)))
_PIPE = subprocess.PIPE # pylint: disable=E1101
if os.name == 'nt':
preexec_fn = None
close_fds = False
else:
preexec_fn = _subprocess_setup
close_fds = True
obj = subprocess.Popen(cmd,
stdin=_PIPE,
stdout=_PIPE,
stderr=_PIPE,
close_fds=close_fds,
preexec_fn=preexec_fn,
shell=shell,
env=env_variables)
result = None
for _i in six.moves.range(20):
# NOTE(russellb) 20 is an arbitrary number of retries to
# prevent any chance of looping forever here.
try:
if process_input is not None:
result = obj.communicate(process_input)
else:
result = obj.communicate()
except OSError as e:
if e.errno in (errno.EAGAIN, errno.EINTR):
continue
raise
break
obj.stdin.close() # pylint: disable=E1101
_returncode = obj.returncode # pylint: disable=E1101
LOG.log(loglevel, 'Result was %s' % _returncode)
if not ignore_exit_code and _returncode not in check_exit_code:
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=_returncode,
stdout=stdout,
stderr=stderr,
cmd=' '.join(cmd))
return result
except ProcessExecutionError:
if not attempts:
raise
else:
LOG.log(loglevel, '%r failed. Retrying.', cmd)
if delay_on_retry:
greenthread.sleep(random.randint(20, 200) / 100.0)
finally:
# NOTE(termie): this appears to be necessary to let the subprocess
# call clean something up in between calls; without
# it, two execute calls in a row hang the second one
greenthread.sleep(0)
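An illustrative call; the command, allowed exit codes, and retry counts are made up for the example:

    stdout, stderr = execute('parted', '-s', '/dev/sda', 'print',
                             check_exit_code=[0, 1],
                             attempts=3, delay_on_retry=True)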
def trycmd(*args, **kwargs):
"""A wrapper around execute() to more easily handle warnings and errors.
Returns an (out, err) tuple of strings containing the output of
the command's stdout and stderr. If 'err' is not empty then the
command can be considered to have failed.
:discard_warnings True | False. Defaults to False. If set to True,
then for succeeding commands, stderr is cleared
"""
discard_warnings = kwargs.pop('discard_warnings', False)
try:
out, err = execute(*args, **kwargs)
failed = False
except ProcessExecutionError as exn:
out, err = '', six.text_type(exn)
failed = True
if not failed and discard_warnings and err:
# Handle commands that output to stderr but otherwise succeed
err = ''
return out, err
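A sketch of the intended trycmd() pattern, where a non-empty err marks failure:

    out, err = trycmd('mkswap', '/dev/sda3', discard_warnings=True)
    if err:
        LOG.warn('mkswap failed: %s', err)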
def ssh_execute(ssh, cmd, process_input=None,
addl_env=None, check_exit_code=True):
LOG.debug('Running cmd (SSH): %s', cmd)
if addl_env:
raise InvalidArgumentError(_('Environment not supported over SSH'))
if process_input:
# This is (probably) fixable if we need it...
raise InvalidArgumentError(_('process_input not supported over SSH'))
stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
channel = stdout_stream.channel
# NOTE(justinsb): This seems suspicious...
# ...other SSH clients have buffering issues with this approach
stdout = stdout_stream.read()
stderr = stderr_stream.read()
stdin_stream.close()
exit_status = channel.recv_exit_status()
# exit_status == -1 if no exit code was returned
if exit_status != -1:
LOG.debug('Result was %s' % exit_status)
if check_exit_code and exit_status != 0:
raise ProcessExecutionError(exit_code=exit_status,
stdout=stdout,
stderr=stderr,
cmd=cmd)
return (stdout, stderr)

View File

@ -1,239 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import math
import re
import sys
import unicodedata
import six
from fuel_agent.openstack.common.gettextutils import _
UNIT_PREFIX_EXPONENT = {
'k': 1,
'K': 1,
'Ki': 1,
'M': 2,
'Mi': 2,
'G': 3,
'Gi': 3,
'T': 4,
'Ti': 4,
}
UNIT_SYSTEM_INFO = {
'IEC': (1024, re.compile(r'(^[-+]?\d*\.?\d+)([KMGT]i?)?(b|bit|B)$')),
'SI': (1000, re.compile(r'(^[-+]?\d*\.?\d+)([kMGT])?(b|bit|B)$')),
}
TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')
SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]")
SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+")
def int_from_bool_as_string(subject):
"""Interpret a string as a boolean and return either 1 or 0.
Any string value in:
('True', 'true', 'On', 'on', '1')
is interpreted as a boolean True.
Useful for JSON-decoded stuff and config file parsing
"""
return bool_from_string(subject) and 1 or 0
def bool_from_string(subject, strict=False, default=False):
"""Interpret a string as a boolean.
A case-insensitive match is performed such that strings matching 't',
'true', 'on', 'y', 'yes', or '1' are considered True and, when
`strict=False`, anything else returns the value specified by 'default'.
Useful for JSON-decoded stuff and config file parsing.
If `strict=True`, unrecognized values, including None, will raise a
ValueError which is useful when parsing values passed in from an API call.
Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
"""
if not isinstance(subject, six.string_types):
subject = six.text_type(subject)
lowered = subject.strip().lower()
if lowered in TRUE_STRINGS:
return True
elif lowered in FALSE_STRINGS:
return False
elif strict:
acceptable = ', '.join(
"'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
msg = _("Unrecognized value '%(val)s', acceptable values are:"
" %(acceptable)s") % {'val': subject,
'acceptable': acceptable}
raise ValueError(msg)
else:
return default
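For instance (the values follow directly from TRUE_STRINGS/FALSE_STRINGS above):

    bool_from_string('YES')                  # True
    bool_from_string('maybe')                # False -- the default
    bool_from_string('maybe', default=True)  # True
    bool_from_string('maybe', strict=True)   # raises ValueError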
def safe_decode(text, incoming=None, errors='strict'):
"""Decodes incoming text/bytes string using `incoming` if they're not
already unicode.
:param incoming: Text's current encoding
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: text or a unicode `incoming` encoded
representation of it.
:raises TypeError: If text is not an instance of str
"""
if not isinstance(text, (six.string_types, six.binary_type)):
raise TypeError("%s can't be decoded" % type(text))
if isinstance(text, six.text_type):
return text
if not incoming:
incoming = (sys.stdin.encoding or
sys.getdefaultencoding())
try:
return text.decode(incoming, errors)
except UnicodeDecodeError:
# Note(flaper87) If we get here, it means that
# sys.stdin.encoding / sys.getdefaultencoding
# didn't return a suitable encoding to decode
# text. This happens mostly when global LANG
# var is not set correctly and there's no
# default encoding. In this case, most likely
# python will use ASCII or ANSI encoders as
# default encodings but they won't be capable
# of decoding non-ASCII characters.
#
# Also, UTF-8 is being used since it's an ASCII
# extension.
return text.decode('utf-8', errors)
def safe_encode(text, incoming=None,
encoding='utf-8', errors='strict'):
"""Encodes incoming text/bytes string using `encoding`.
If incoming is not specified, text is expected to be encoded with
the current Python default encoding (`sys.getdefaultencoding()`).
:param incoming: Text's current encoding
:param encoding: Expected encoding for text (Default UTF-8)
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: text or a bytestring `encoding` encoded
representation of it.
:raises TypeError: If text is not an instance of str
"""
if not isinstance(text, (six.string_types, six.binary_type)):
raise TypeError("%s can't be encoded" % type(text))
if not incoming:
incoming = (sys.stdin.encoding or
sys.getdefaultencoding())
if isinstance(text, six.text_type):
return text.encode(encoding, errors)
elif text and encoding != incoming:
# Decode text before encoding it with `encoding`
text = safe_decode(text, incoming, errors)
return text.encode(encoding, errors)
else:
return text
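A round-trip sketch of the two helpers, assuming a UTF-8 default encoding:

    s = safe_decode(b'caf\xc3\xa9')  # u'café'
    b = safe_encode(u'café')         # b'caf\xc3\xa9'
    safe_encode(b'caf\xc3\xa9', incoming='utf-8', encoding='latin-1')
    # -> b'caf\xe9'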
def string_to_bytes(text, unit_system='IEC', return_int=False):
"""Converts a string into an float representation of bytes.
The units supported for IEC ::
Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it)
KB, KiB, MB, MiB, GB, GiB, TB, TiB
The units supported for SI ::
kb(it), Mb(it), Gb(it), Tb(it)
kB, MB, GB, TB
Note that the SI unit system does not support capital letter 'K'
:param text: String input for bytes size conversion.
:param unit_system: Unit system for byte size conversion.
:param return_int: If True, returns integer representation of text
in bytes, rounded up. (default: False, i.e. a float is returned)
:returns: Numerical representation of text in bytes.
:raises ValueError: If text has an invalid value.
"""
try:
base, reg_ex = UNIT_SYSTEM_INFO[unit_system]
except KeyError:
msg = _('Invalid unit system: "%s"') % unit_system
raise ValueError(msg)
match = reg_ex.match(text)
if match:
magnitude = float(match.group(1))
unit_prefix = match.group(2)
if match.group(3) in ['b', 'bit']:
magnitude /= 8
else:
msg = _('Invalid string format: %s') % text
raise ValueError(msg)
if not unit_prefix:
res = magnitude
else:
res = magnitude * pow(base, UNIT_PREFIX_EXPONENT[unit_prefix])
if return_int:
return int(math.ceil(res))
return res
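A few illustrative conversions:

    string_to_bytes('1KB')                    # 1024.0 (IEC is the default)
    string_to_bytes('1kB', unit_system='SI')  # 1000.0
    string_to_bytes('8Kb')                    # 1024.0 -- bit units divide by 8
    string_to_bytes('1.5GiB', return_int=True)  # 1610612736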
def to_slug(value, incoming=None, errors="strict"):
"""Normalize string.
Convert to lowercase, remove non-word characters, and convert spaces
to hyphens.
Inspired by Django's `slugify` filter.
:param value: Text to slugify
:param incoming: Text's current encoding
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: slugified unicode representation of `value`
:raises TypeError: If text is not an instance of str
"""
value = safe_decode(value, incoming, errors)
# NOTE(aababilov): no need to use safe_(encode|decode) here:
# encodings are always "ascii", error handling is always "ignore"
# and types are always known (first: unicode; second: str)
value = unicodedata.normalize("NFKD", value).encode(
"ascii", "ignore").decode("ascii")
value = SLUGIFY_STRIP_RE.sub("", value).strip().lower()
return SLUGIFY_HYPHENATE_RE.sub("-", value)
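For example (illustrative):

    to_slug(u'Fuel Agent: build #42')  # u'fuel-agent-build-42'
    to_slug(u'Déjà vu')                # u'deja-vu'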

View File

@ -1,210 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Time related utilities and helper functions.
"""
import calendar
import datetime
import time
import iso8601
import six
# ISO 8601 extended time format with microseconds
_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND
def isotime(at=None, subsecond=False):
"""Stringify time in ISO 8601 format."""
if not at:
at = utcnow()
st = at.strftime(_ISO8601_TIME_FORMAT
if not subsecond
else _ISO8601_TIME_FORMAT_SUBSECOND)
tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
st += ('Z' if tz == 'UTC' else tz)
return st
def parse_isotime(timestr):
"""Parse time from ISO 8601 format."""
try:
return iso8601.parse_date(timestr)
except iso8601.ParseError as e:
raise ValueError(six.text_type(e))
except TypeError as e:
raise ValueError(six.text_type(e))
def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
"""Returns formatted utcnow."""
if not at:
at = utcnow()
return at.strftime(fmt)
def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
"""Turn a formatted time back into a datetime."""
return datetime.datetime.strptime(timestr, fmt)
def normalize_time(timestamp):
"""Normalize time in arbitrary timezone to UTC naive object."""
offset = timestamp.utcoffset()
if offset is None:
return timestamp
return timestamp.replace(tzinfo=None) - offset
def is_older_than(before, seconds):
"""Return True if before is older than seconds."""
if isinstance(before, six.string_types):
before = parse_strtime(before).replace(tzinfo=None)
else:
before = before.replace(tzinfo=None)
return utcnow() - before > datetime.timedelta(seconds=seconds)
def is_newer_than(after, seconds):
"""Return True if after is newer than seconds."""
if isinstance(after, six.string_types):
after = parse_strtime(after).replace(tzinfo=None)
else:
after = after.replace(tzinfo=None)
return after - utcnow() > datetime.timedelta(seconds=seconds)
def utcnow_ts():
"""Timestamp version of our utcnow function."""
if utcnow.override_time is None:
# NOTE(kgriffs): This is several times faster
# than going through calendar.timegm(...)
return int(time.time())
return calendar.timegm(utcnow().timetuple())
def utcnow():
"""Overridable version of utils.utcnow."""
if utcnow.override_time:
try:
return utcnow.override_time.pop(0)
except AttributeError:
return utcnow.override_time
return datetime.datetime.utcnow()
def iso8601_from_timestamp(timestamp):
"""Returns a iso8601 formatted date from timestamp."""
return isotime(datetime.datetime.utcfromtimestamp(timestamp))
utcnow.override_time = None
def set_time_override(override_time=None):
"""Overrides utils.utcnow.
Make it return a constant time or a list thereof, one at a time.
:param override_time: datetime instance or list thereof. If not
given, defaults to the current UTC time.
"""
utcnow.override_time = override_time or datetime.datetime.utcnow()
def advance_time_delta(timedelta):
"""Advance overridden time using a datetime.timedelta."""
assert utcnow.override_time is not None
try:
for i, dt in enumerate(utcnow.override_time):
utcnow.override_time[i] = dt + timedelta
except TypeError:
utcnow.override_time += timedelta
def advance_time_seconds(seconds):
"""Advance overridden time by seconds."""
advance_time_delta(datetime.timedelta(0, seconds))
def clear_time_override():
"""Remove the overridden time."""
utcnow.override_time = None
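The override hooks are intended for tests; a minimal sketch:

    set_time_override(datetime.datetime(2015, 7, 16, 11, 25, 44))
    utcnow()                  # datetime(2015, 7, 16, 11, 25, 44)
    advance_time_seconds(60)
    utcnow()                  # datetime(2015, 7, 16, 11, 26, 44)
    clear_time_override()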
def marshall_now(now=None):
"""Make an rpc-safe datetime with microseconds.
Note: tzinfo is stripped, but not required for relative times.
"""
if not now:
now = utcnow()
return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
minute=now.minute, second=now.second,
microsecond=now.microsecond)
def unmarshall_time(tyme):
"""Unmarshall a datetime dict."""
return datetime.datetime(day=tyme['day'],
month=tyme['month'],
year=tyme['year'],
hour=tyme['hour'],
minute=tyme['minute'],
second=tyme['second'],
microsecond=tyme['microsecond'])
def delta_seconds(before, after):
"""Return the difference between two timing objects.
Compute the difference in seconds between two date, time, or
datetime objects (as a float, to microsecond resolution).
"""
delta = after - before
return total_seconds(delta)
def total_seconds(delta):
"""Return the total seconds of datetime.timedelta object.
Compute the total seconds of a datetime.timedelta; datetime.timedelta
has no total_seconds() method in Python 2.6, so calculate it manually.
"""
try:
return delta.total_seconds()
except AttributeError:
return ((delta.days * 24 * 3600) + delta.seconds +
float(delta.microseconds) / (10 ** 6))
def is_soon(dt, window):
"""Determines if time is going to happen in the next window seconds.
:param dt: the time
:param window: minimum seconds to remain to consider the time not soon
:return: True if expiration is within the given duration
"""
soon = (utcnow() + datetime.timedelta(seconds=window))
return normalize_time(dt) <= soon

View File

@ -1,119 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslotest import base as test_base
import zlib
from oslo.config import cfg
from fuel_agent import errors
from fuel_agent.utils import artifact as au
from fuel_agent.utils import utils
CONF = cfg.CONF
class TestTarget(test_base.BaseTestCase):
def setUp(self):
super(TestTarget, self).setUp()
self.tgt = au.Target()
def test_target_next(self):
self.assertRaises(StopIteration, self.tgt.next)
@mock.patch('os.fsync')
@mock.patch.object(au.Target, '__iter__')
def test_target_target(self, mock_iter, mock_os_sync):
mock_iter.return_value = iter(['chunk1', 'chunk2', 'chunk3'])
m = mock.mock_open()
with mock.patch('six.moves.builtins.open', m):
self.tgt.target()
mock_write_expected_calls = [mock.call('chunk1'), mock.call('chunk2'),
mock.call('chunk3')]
file_handle = m()
self.assertEqual(mock_write_expected_calls,
file_handle.write.call_args_list)
file_handle.flush.assert_called_once_with()
class TestLocalFile(test_base.BaseTestCase):
def setUp(self):
super(TestLocalFile, self).setUp()
self.lf = au.LocalFile('/dev/null')
def test_localfile_next(self):
self.lf.fileobj = mock.Mock()
self.lf.fileobj.read.side_effect = ['some_data', 'another_data']
self.assertEqual('some_data', self.lf.next())
self.assertEqual('another_data', self.lf.next())
self.assertRaises(StopIteration, self.lf.next)
class TestHttpUrl(test_base.BaseTestCase):
@mock.patch.object(utils, 'init_http_request')
def test_httpurl_init_ok(self, mock_req):
mock_req.return_value = mock.Mock(headers={'content-length': 123})
httpurl = au.HttpUrl('fake_url')
self.assertEqual(123, httpurl.length)
mock_req.assert_called_once_with('fake_url')
@mock.patch.object(utils, 'init_http_request')
def test_httpurl_init_invalid_content_length(self, mock_req):
mock_req.return_value = mock.Mock(headers={'content-length':
'invalid'})
self.assertRaises(errors.HttpUrlInvalidContentLength, au.HttpUrl,
'fake_url')
@mock.patch.object(utils, 'init_http_request')
def test_httpurl_next_ok(self, mock_req):
content = ['fake content #1', 'fake content #2']
req_mock = mock.Mock(headers={'content-length': 30})
req_mock.raw.read.side_effect = content
mock_req.return_value = req_mock
httpurl = au.HttpUrl('fake_url')
for data in enumerate(httpurl):
self.assertEqual(content[data[0]], data[1])
class TestGunzipStream(test_base.BaseTestCase):
def test_gunzip_stream_next(self):
content = ['fake content #1']
compressed_stream = [zlib.compress(data) for data in content]
gunzip_stream = au.GunzipStream(compressed_stream)
for data in enumerate(gunzip_stream):
self.assertEqual(content[data[0]], data[1])
class TestChain(test_base.BaseTestCase):
def setUp(self):
super(TestChain, self).setUp()
self.chain = au.Chain()
def test_append(self):
self.assertEqual(0, len(self.chain.processors))
self.chain.append('fake_processor')
self.assertIn('fake_processor', self.chain.processors)
self.assertEqual(1, len(self.chain.processors))
def test_process(self):
self.chain.processors.append('fake_uri')
fake_processor = mock.Mock(spec=au.Target)
self.chain.processors.append(fake_processor)
self.chain.processors.append('fake_target')
self.chain.process()
expected_calls = [mock.call('fake_uri')]
self.assertEqual(expected_calls, fake_processor.call_args_list)

View File

@ -1,489 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import signal
import testtools
import mock
from oslo.config import cfg
from fuel_agent import errors
from fuel_agent.utils import build as bu
from fuel_agent.utils import hardware as hu
from fuel_agent.utils import utils
CONF = cfg.CONF
class BuildUtilsTestCase(testtools.TestCase):
_fake_ubuntu_release = '''
Origin: TestOrigin
Label: TestLabel
Archive: test-archive
Codename: testcodename
'''
def setUp(self):
super(BuildUtilsTestCase, self).setUp()
@mock.patch.object(utils, 'execute', return_value=(None, None))
def test_run_debootstrap(self, mock_exec):
bu.run_debootstrap('uri', 'suite', 'chroot', 'arch', attempts=2)
mock_exec.assert_called_once_with('debootstrap', '--verbose',
'--no-check-gpg', '--arch=arch',
'suite', 'chroot', 'uri', attempts=2)
@mock.patch.object(utils, 'execute', return_value=(None, None))
def test_run_debootstrap_eatmydata(self, mock_exec):
bu.run_debootstrap('uri', 'suite', 'chroot', 'arch', eatmydata=True,
attempts=2)
mock_exec.assert_called_once_with('debootstrap', '--verbose',
'--no-check-gpg', '--arch=arch',
'--include=eatmydata', 'suite',
'chroot', 'uri', attempts=2)
@mock.patch.object(utils, 'execute', return_value=(None, None))
def test_run_apt_get(self, mock_exec):
bu.run_apt_get('chroot', ['package1', 'package2'], attempts=2)
mock_exec_expected_calls = [
mock.call('chroot', 'chroot', 'apt-get', '-y', 'update',
attempts=2),
mock.call('chroot', 'chroot', 'apt-get', '-y', 'install',
'package1 package2', attempts=2)]
self.assertEqual(mock_exec_expected_calls, mock_exec.call_args_list)
@mock.patch.object(utils, 'execute', return_value=(None, None))
def test_run_apt_get_eatmydata(self, mock_exec):
bu.run_apt_get('chroot', ['package1', 'package2'], eatmydata=True,
attempts=2)
mock_exec_expected_calls = [
mock.call('chroot', 'chroot', 'apt-get', '-y', 'update',
attempts=2),
mock.call('chroot', 'chroot', 'eatmydata', 'apt-get', '-y',
'install', 'package1 package2', attempts=2)]
self.assertEqual(mock_exec_expected_calls, mock_exec.call_args_list)
@mock.patch.object(os, 'fchmod')
@mock.patch.object(os, 'makedirs')
@mock.patch.object(os, 'path')
def test_suppress_services_start(self, mock_path, mock_mkdir, mock_fchmod):
mock_path.join.return_value = 'fake_path'
mock_path.exists.return_value = False
with mock.patch('six.moves.builtins.open', create=True) as mock_open:
file_handle_mock = mock_open.return_value.__enter__.return_value
file_handle_mock.fileno.return_value = 'fake_fileno'
bu.suppress_services_start('chroot')
mock_open.assert_called_once_with('fake_path', 'w')
expected = '#!/bin/sh\n# prevent any service from being started\n'\
'exit 101\n'
file_handle_mock.write.assert_called_once_with(expected)
mock_fchmod.assert_called_once_with('fake_fileno', 0o755)
mock_mkdir.assert_called_once_with('fake_path')
@mock.patch.object(os, 'fchmod')
@mock.patch.object(os, 'path')
def test_suppress_services_start_nomkdir(self, mock_path, mock_fchmod):
mock_path.join.return_value = 'fake_path'
mock_path.exists.return_value = True
with mock.patch('six.moves.builtins.open', create=True) as mock_open:
file_handle_mock = mock_open.return_value.__enter__.return_value
file_handle_mock.fileno.return_value = 'fake_fileno'
bu.suppress_services_start('chroot')
mock_open.assert_called_once_with('fake_path', 'w')
expected = '#!/bin/sh\n# prevent any service from being started\n'\
'exit 101\n'
file_handle_mock.write.assert_called_once_with(expected)
mock_fchmod.assert_called_once_with('fake_fileno', 0o755)
@mock.patch.object(shutil, 'rmtree')
@mock.patch.object(os, 'makedirs')
@mock.patch.object(os, 'path')
def test_clean_dirs(self, mock_path, mock_mkdir, mock_rmtree):
mock_path.isdir.return_value = True
dirs = ['dir1', 'dir2', 'dir3']
mock_path.join.side_effect = dirs
bu.clean_dirs('chroot', dirs)
for m in (mock_rmtree, mock_mkdir):
self.assertEqual([mock.call(d) for d in dirs], m.call_args_list)
@mock.patch.object(os, 'path')
def test_clean_dirs_not_isdir(self, mock_path):
mock_path.isdir.return_value = False
dirs = ['dir1', 'dir2', 'dir3']
mock_path.join.side_effect = dirs
bu.clean_dirs('chroot', dirs)
self.assertEqual([mock.call('chroot', d) for d in dirs],
mock_path.join.call_args_list)
@mock.patch.object(os, 'remove')
@mock.patch.object(os, 'path')
def test_remove_files(self, mock_path, mock_remove):
mock_path.exists.return_value = True
files = ['file1', 'file2', 'dir3']
mock_path.join.side_effect = files
bu.remove_files('chroot', files)
self.assertEqual([mock.call(f) for f in files],
mock_remove.call_args_list)
@mock.patch.object(os, 'path')
def test_remove_files_not_exists(self, mock_path):
mock_path.exists.return_value = False
files = ['file1', 'file2', 'dir3']
mock_path.join.side_effect = files
bu.remove_files('chroot', files)
self.assertEqual([mock.call('chroot', f) for f in files],
mock_path.join.call_args_list)
@mock.patch.object(bu, 'remove_files')
@mock.patch.object(bu, 'clean_dirs')
def test_clean_apt_settings(self, mock_dirs, mock_files):
bu.clean_apt_settings('chroot', 'unsigned', 'force_ipv4')
mock_dirs.assert_called_once_with(
'chroot', ['etc/apt/preferences.d', 'etc/apt/sources.list.d'])
mock_files.assert_called_once_with(
'chroot', ['etc/apt/sources.list', 'etc/apt/preferences',
'etc/apt/apt.conf.d/%s' % 'force_ipv4',
'etc/apt/apt.conf.d/%s' % 'unsigned'])
@mock.patch('fuel_agent.utils.build.open',
create=True, new_callable=mock.mock_open)
@mock.patch.object(os, 'path')
@mock.patch.object(bu, 'clean_apt_settings')
@mock.patch.object(bu, 'remove_files')
@mock.patch.object(utils, 'execute')
def test_do_post_inst(self, mock_exec, mock_files, mock_clean, mock_path,
mock_open):
mock_path.join.return_value = 'fake_path'
bu.do_post_inst('chroot')
file_handle_mock = mock_open.return_value.__enter__.return_value
file_handle_mock.write.assert_called_once_with('manual\n')
mock_exec_expected_calls = [
            mock.call('sed', '-i', r's%root:[\*,\!]%root:$6$IInX3Cqo$5xytL1VZb'
'ZTusOewFnG6couuF0Ia61yS3rbC6P5YbZP2TYclwHqMq9e3Tg8rvQx'
'hxSlBXP1DZhdUamxdOBXK0.%', 'fake_path'),
mock.call('chroot', 'chroot', 'update-rc.d', 'puppet', 'disable')]
self.assertEqual(mock_exec_expected_calls, mock_exec.call_args_list)
mock_files.assert_called_once_with('chroot', ['usr/sbin/policy-rc.d'])
mock_clean.assert_called_once_with('chroot')
mock_path_join_expected_calls = [
mock.call('chroot', 'etc/shadow'),
mock.call('chroot', 'etc/init/mcollective.override')]
self.assertEqual(mock_path_join_expected_calls,
mock_path.join.call_args_list)
@mock.patch('fuel_agent.utils.build.open',
create=True, new_callable=mock.mock_open)
@mock.patch('fuel_agent.utils.build.time.sleep')
@mock.patch.object(os, 'kill')
@mock.patch.object(os, 'readlink', return_value='chroot')
@mock.patch.object(utils, 'execute')
def test_stop_chrooted_processes(self, mock_exec, mock_link,
mock_kill, mock_sleep, mock_open):
mock_exec.side_effect = [
('kernel 951 1641 1700 1920 3210 4104', ''),
('kernel 951 1641 1700', ''),
('', '')]
mock_exec_expected_calls = \
[mock.call('fuser', '-v', 'chroot', check_exit_code=False)] * 3
bu.stop_chrooted_processes('chroot', signal=signal.SIGTERM)
self.assertEqual(mock_exec_expected_calls, mock_exec.call_args_list)
expected_mock_link_calls = [
mock.call('/proc/951/root'),
mock.call('/proc/1641/root'),
mock.call('/proc/1700/root'),
mock.call('/proc/1920/root'),
mock.call('/proc/3210/root'),
mock.call('/proc/4104/root'),
mock.call('/proc/951/root'),
mock.call('/proc/1641/root'),
mock.call('/proc/1700/root')]
expected_mock_kill_calls = [
mock.call(951, signal.SIGTERM),
mock.call(1641, signal.SIGTERM),
mock.call(1700, signal.SIGTERM),
mock.call(1920, signal.SIGTERM),
mock.call(3210, signal.SIGTERM),
mock.call(4104, signal.SIGTERM),
mock.call(951, signal.SIGTERM),
mock.call(1641, signal.SIGTERM),
mock.call(1700, signal.SIGTERM)]
self.assertEqual(expected_mock_link_calls, mock_link.call_args_list)
self.assertEqual(expected_mock_kill_calls, mock_kill.call_args_list)
@mock.patch.object(os, 'makedev', return_value='fake_dev')
@mock.patch.object(os, 'mknod')
@mock.patch.object(os, 'path')
@mock.patch.object(utils, 'execute', return_value=('/dev/loop123\n', ''))
def test_get_free_loop_device_ok(self, mock_exec, mock_path, mock_mknod,
mock_mkdev):
mock_path.exists.return_value = False
self.assertEqual('/dev/loop123', bu.get_free_loop_device(1))
mock_exec.assert_called_once_with('losetup', '--find')
mock_path.exists.assert_called_once_with('/dev/loop0')
mock_mknod.assert_called_once_with('/dev/loop0', 25008, 'fake_dev')
mock_mkdev.assert_called_once_with(1, 0)
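
This test (and its not-found companion below) describes get_free_loop_device as: make sure /dev/loop0 exists, creating it as a block special file if necessary (stat.S_IFBLK | 0o660 equals the 25008 asserted above), then ask losetup for the first unused device. A reduced sketch under those assumptions, with the error handling collapsed into a bare RuntimeError:

import os
import stat
import subprocess


def get_free_loop_device(loop_major=7):
    if not os.path.exists('/dev/loop0'):
        # stat.S_IFBLK | 0o660 == 25008, the mode asserted above
        os.mknod('/dev/loop0', stat.S_IFBLK | 0o660,
                 os.makedev(loop_major, 0))
    out = subprocess.check_output(['losetup', '--find']).decode().strip()
    if not out:
        raise RuntimeError('no free loop devices')
    return out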
def test_set_apt_get_env(self):
with mock.patch.dict('os.environ', {}):
bu.set_apt_get_env()
self.assertEqual('noninteractive', os.environ['DEBIAN_FRONTEND'])
self.assertEqual('true', os.environ['DEBCONF_NONINTERACTIVE_SEEN'])
for var in ('LC_ALL', 'LANG', 'LANGUAGE'):
self.assertEqual('C', os.environ[var])
def test_strip_filename(self):
self.assertEqual('safe_Tex.-98',
bu.strip_filename('!@$^^^safe _Tex.?-98;'))
@mock.patch.object(os, 'makedev', return_value='fake_dev')
@mock.patch.object(os, 'mknod')
@mock.patch.object(os, 'path')
@mock.patch.object(utils, 'execute', return_value=('', 'Error!!!'))
def test_get_free_loop_device_not_found(self, mock_exec, mock_path,
mock_mknod, mock_mkdev):
mock_path.exists.return_value = False
self.assertRaises(errors.NoFreeLoopDevices, bu.get_free_loop_device)
@mock.patch('tempfile.NamedTemporaryFile')
@mock.patch.object(utils, 'execute')
def test_create_sparse_tmp_file(self, mock_exec, mock_temp):
tmp_file = mock.Mock()
tmp_file.name = 'fake_name'
mock_temp.return_value = tmp_file
bu.create_sparse_tmp_file('dir', 'suffix', 1)
mock_temp.assert_called_once_with(dir='dir', suffix='suffix',
delete=False)
mock_exec.assert_called_once_with('truncate', '-s', '1M',
tmp_file.name)
@mock.patch.object(utils, 'execute')
def test_attach_file_to_loop(self, mock_exec):
bu.attach_file_to_loop('file', 'loop')
mock_exec.assert_called_once_with('losetup', 'loop', 'file')
@mock.patch.object(utils, 'execute')
def test_deattach_loop(self, mock_exec):
mock_exec.return_value = ('/dev/loop0: [fd03]:130820 (/dev/loop0)', '')
bu.deattach_loop('/dev/loop0', check_exit_code='Fake')
mock_exec_expected_calls = [
mock.call('losetup', '-a'),
mock.call('losetup', '-d', '/dev/loop0', check_exit_code='Fake')
]
self.assertEqual(mock_exec.call_args_list, mock_exec_expected_calls)
@mock.patch.object(hu, 'parse_simple_kv')
@mock.patch.object(utils, 'execute')
def test_shrink_sparse_file(self, mock_exec, mock_parse):
mock_parse.return_value = {'block count': 1, 'block size': 2}
with mock.patch('six.moves.builtins.open', create=True) as mock_open:
file_handle_mock = mock_open.return_value.__enter__.return_value
bu.shrink_sparse_file('file')
mock_open.assert_called_once_with('file', 'rwb+')
file_handle_mock.truncate.assert_called_once_with(1 * 2)
expected_mock_exec_calls = [mock.call('e2fsck', '-y', '-f', 'file'),
mock.call('resize2fs', '-F', '-M', 'file')]
mock_parse.assert_called_once_with('dumpe2fs', 'file')
self.assertEqual(expected_mock_exec_calls, mock_exec.call_args_list)
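
The assertions above fix the shrink sequence: e2fsck, resize2fs -M down to the minimal size, then truncate the backing file to block count times block size as reported by dumpe2fs. The truncation step, isolated as a sketch:

def truncate_to_fs_size(path, block_count, block_size):
    # cut the sparse file down to exactly the shrunken filesystem
    with open(path, 'rb+') as f:
        f.truncate(block_count * block_size)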
@mock.patch.object(os, 'path')
def test_add_apt_source(self, mock_path):
mock_path.return_value = 'fake_path'
with mock.patch('six.moves.builtins.open', create=True) as mock_open:
file_handle_mock = mock_open.return_value.__enter__.return_value
bu.add_apt_source('name1', 'uri1', 'suite1', 'section1', 'chroot')
expected_calls = [mock.call('deb uri1 suite1 section1\n')]
self.assertEqual(expected_calls,
file_handle_mock.write.call_args_list)
expected_mock_path_calls = [
mock.call('chroot', 'etc/apt/sources.list.d',
'fuel-image-name1.list')]
self.assertEqual(expected_mock_path_calls,
mock_path.join.call_args_list)
@mock.patch.object(os, 'path')
def test_add_apt_source_no_section(self, mock_path):
mock_path.return_value = 'fake_path'
with mock.patch('six.moves.builtins.open', create=True) as mock_open:
file_handle_mock = mock_open.return_value.__enter__.return_value
bu.add_apt_source('name2', 'uri2', 'suite2', None, 'chroot')
expected_calls = [mock.call('deb uri2 suite2\n')]
self.assertEqual(expected_calls,
file_handle_mock.write.call_args_list)
expected_mock_path_calls = [
mock.call('chroot', 'etc/apt/sources.list.d',
'fuel-image-name2.list')]
self.assertEqual(expected_mock_path_calls,
mock_path.join.call_args_list)
@mock.patch.object(os, 'path')
@mock.patch('fuel_agent.utils.build.utils.init_http_request',
return_value=mock.Mock(text=_fake_ubuntu_release))
def test_add_apt_preference(self, mock_get, mock_path):
with mock.patch('six.moves.builtins.open', create=True) as mock_open:
file_handle_mock = mock_open.return_value.__enter__.return_value
fake_section = 'section1'
bu.add_apt_preference(
'name1',
123,
'test-archive',
fake_section,
'chroot',
'http://test-uri'
)
calls_args = [
c[0][0] for c in file_handle_mock.write.call_args_list
]
self.assertEqual(len(calls_args), 4)
self.assertEqual(calls_args[0], 'Package: *\n')
self.assertEqual(calls_args[1], 'Pin: release ')
self.assertIn("l=TestLabel", calls_args[2])
self.assertIn("n=testcodename", calls_args[2])
self.assertIn("a=test-archive", calls_args[2])
self.assertIn("o=TestOrigin", calls_args[2])
self.assertIn("c=section1", calls_args[2])
self.assertEqual(calls_args[3], 'Pin-Priority: 123\n')
expected_mock_path_calls = [
mock.call('http://test-uri', 'dists', 'test-archive', 'Release'),
mock.call('chroot', 'etc/apt/preferences.d',
'fuel-image-name1.pref')]
self.assertEqual(expected_mock_path_calls,
mock_path.join.call_args_list)
@mock.patch.object(os, 'path')
@mock.patch('fuel_agent.utils.build.utils.init_http_request',
return_value=mock.Mock(text=_fake_ubuntu_release))
    def test_add_apt_preference_multiple_sections(self, mock_get, mock_path):
with mock.patch('six.moves.builtins.open', create=True) as mock_open:
file_handle_mock = mock_open.return_value.__enter__.return_value
fake_sections = ['section2', 'section3']
bu.add_apt_preference('name3', 234, 'test-archive',
' '.join(fake_sections),
'chroot', 'http://test-uri')
calls_args = [
c[0][0] for c in file_handle_mock.write.call_args_list
]
calls_package = [c for c in calls_args if c == 'Package: *\n']
calls_pin = [c for c in calls_args if c == 'Pin: release ']
calls_pin_p = [c for c in calls_args if c == 'Pin-Priority: 234\n']
first_section = [
c for c in calls_args if 'c={0}'.format(fake_sections[0]) in c
]
second_section = [
c for c in calls_args if 'c={0}'.format(fake_sections[1]) in c
]
self.assertEqual(len(calls_package), len(fake_sections))
self.assertEqual(len(calls_pin), len(fake_sections))
self.assertEqual(len(calls_pin_p), len(fake_sections))
self.assertEqual(len(first_section), 1)
self.assertEqual(len(second_section), 1)
for pin_line in calls_args[2::4]:
self.assertIn("l=TestLabel", pin_line)
self.assertIn("n=testcodename", pin_line)
self.assertIn("a=test-archive", pin_line)
self.assertIn("o=TestOrigin", pin_line)
expected_mock_path_calls = [
mock.call('http://test-uri', 'dists', 'test-archive', 'Release'),
mock.call('chroot', 'etc/apt/preferences.d',
'fuel-image-name3.pref')]
self.assertEqual(expected_mock_path_calls,
mock_path.join.call_args_list)
@mock.patch.object(os, 'path')
@mock.patch('fuel_agent.utils.build.utils.init_http_request',
return_value=mock.Mock(text=_fake_ubuntu_release))
def test_add_apt_preference_no_sections(self, mock_get, mock_path):
with mock.patch('six.moves.builtins.open', create=True) as mock_open:
file_handle_mock = mock_open.return_value.__enter__.return_value
bu.add_apt_preference(
'name1',
123,
'test-archive',
'',
'chroot',
'http://test-uri'
)
calls_args = [
c[0][0] for c in file_handle_mock.write.call_args_list
]
self.assertEqual(len(calls_args), 4)
self.assertEqual(calls_args[0], 'Package: *\n')
self.assertEqual(calls_args[1], 'Pin: release ')
self.assertIn("l=TestLabel", calls_args[2])
self.assertIn("n=testcodename", calls_args[2])
self.assertIn("a=test-archive", calls_args[2])
self.assertIn("o=TestOrigin", calls_args[2])
self.assertNotIn("c=", calls_args[2])
self.assertEqual(calls_args[3], 'Pin-Priority: 123\n')
expected_mock_path_calls = [
mock.call('http://test-uri', 'test-archive', 'Release'),
mock.call('chroot', 'etc/apt/preferences.d',
'fuel-image-name1.pref')]
self.assertEqual(expected_mock_path_calls,
mock_path.join.call_args_list)
@mock.patch.object(bu, 'clean_apt_settings')
@mock.patch.object(os, 'path')
def test_pre_apt_get(self, mock_path, mock_clean):
with mock.patch('six.moves.builtins.open', create=True) as mock_open:
file_handle_mock = mock_open.return_value.__enter__.return_value
bu.pre_apt_get('chroot')
expected_calls = [
mock.call('APT::Get::AllowUnauthenticated 1;\n'),
mock.call('Acquire::ForceIPv4 "true";\n')]
self.assertEqual(expected_calls,
file_handle_mock.write.call_args_list)
mock_clean.assert_called_once_with('chroot')
expected_join_calls = [
mock.call('chroot', 'etc/apt/apt.conf.d',
CONF.allow_unsigned_file),
mock.call('chroot', 'etc/apt/apt.conf.d',
CONF.force_ipv4_file)]
self.assertEqual(expected_join_calls, mock_path.join.call_args_list)
@mock.patch('gzip.open')
@mock.patch.object(os, 'remove')
def test_containerize_gzip(self, mock_remove, mock_gzip):
with mock.patch('six.moves.builtins.open', create=True) as mock_open:
file_handle_mock = mock_open.return_value.__enter__.return_value
file_handle_mock.read.side_effect = ['test data', '']
g = mock.Mock()
mock_gzip.return_value = g
self.assertEqual('file.gz', bu.containerize('file', 'gzip', 1))
g.write.assert_called_once_with('test data')
expected_calls = [mock.call(1), mock.call(1)]
self.assertEqual(expected_calls,
file_handle_mock.read.call_args_list)
mock_remove.assert_called_once_with('file')
def test_containerize_bad_container(self):
self.assertRaises(errors.WrongImageDataError, bu.containerize, 'file',
'fake')
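
The containerize tests fix the observable behaviour: stream the image into <name>.gz chunk by chunk (the test uses a chunk size of 1 byte; the 1 MiB default below is an assumption), delete the source, return the new name, and reject unknown container formats. A compact sketch under those assumptions, with ValueError standing in for fuel_agent's WrongImageDataError:

import gzip
import os


def containerize(filename, container, chunk_size=1048576):
    if container != 'gzip':
        raise ValueError('unsupported container: %s' % container)
    gzipped = filename + '.gz'
    with open(filename, 'rb') as src, gzip.open(gzipped, 'wb') as dst:
        for chunk in iter(lambda: src.read(chunk_size), b''):
            dst.write(chunk)
    os.remove(filename)
    return gzipped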

View File

@ -1,58 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslotest import base as test_base
from fuel_agent import errors
from fuel_agent.objects import configdrive
class TestConfigDriveScheme(test_base.BaseTestCase):
def setUp(self):
super(TestConfigDriveScheme, self).setUp()
self.cd_scheme = configdrive.ConfigDriveScheme()
def test_template_names(self):
self.cd_scheme._profile = 'pro_fi-le'
actual = self.cd_scheme.template_names('what')
expected = [
'what_pro_fi-le.jinja2',
'what_pro.jinja2',
'what_pro_fi.jinja2',
'what.jinja2'
]
self.assertEqual(expected, actual)
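
The expected list encodes the template fallback order: the exact profile first, then underscore-joined prefixes of increasing length taken from the profile split on both '-' and '_', then the bare template. One way to reproduce that list (a sketch, not necessarily the configdrive implementation):

import re


def template_names(what, profile):
    names = ['%s_%s.jinja2' % (what, profile)]
    tokens = re.split('[-_]', profile)
    for k in range(1, len(tokens)):
        names.append('%s_%s.jinja2' % (what, '_'.join(tokens[:k])))
    names.append('%s.jinja2' % what)
    return names


assert template_names('what', 'pro_fi-le') == [
    'what_pro_fi-le.jinja2', 'what_pro.jinja2',
    'what_pro_fi.jinja2', 'what.jinja2']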
def test_template_data_no_common(self):
self.assertRaises(errors.WrongConfigDriveDataError,
self.cd_scheme.template_data)
def test_template_data_ok(self):
cd_common = configdrive.ConfigDriveCommon(
['auth_key1', 'auth_key2'], 'hostname', 'fqdn', 'name_servers',
'search_domain', 'master_ip', 'master_url', 'udevrules',
'admin_mac', 'admin_ip', 'admin_mask', 'admin_iface_name',
'timezone', {'repo1': 'repo1_url', 'repo2': 'repo2_url'}, 'gw')
cd_puppet = configdrive.ConfigDrivePuppet('master', 0)
cd_mcollective = configdrive.ConfigDriveMcollective(
'pskey', 'vhost', 'host', 'user', 'password', 'connector', 1)
self.cd_scheme.common = cd_common
self.cd_scheme.puppet = cd_puppet
self.cd_scheme.mcollective = cd_mcollective
template_data = self.cd_scheme.template_data()
self.assertEqual(cd_common, template_data['common'])
self.assertEqual(cd_puppet, template_data['puppet'])
self.assertEqual(cd_mcollective, template_data['mcollective'])

View File

@ -1,122 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslotest import base as test_base
from fuel_agent import errors
from fuel_agent.utils import fs as fu
from fuel_agent.utils import utils
class TestFSUtils(test_base.BaseTestCase):
@mock.patch.object(utils, 'execute')
def test_make_fs(self, mock_exec):
fu.make_fs('ext4', '-F', '-L fake_label', '/dev/fake')
mock_exec.assert_called_once_with('mkfs.ext4', '-F', '-L',
'fake_label', '/dev/fake')
@mock.patch.object(utils, 'execute')
def test_make_fs_swap(self, mock_exec):
fu.make_fs('swap', '-f', '-L fake_label', '/dev/fake')
mock_exec.assert_called_once_with('mkswap', '-f', '-L', 'fake_label',
'/dev/fake')
@mock.patch.object(utils, 'execute')
def test_extend_fs_ok_ext2(self, mock_exec):
fu.extend_fs('ext2', '/dev/fake')
expected_calls = [
mock.call('e2fsck', '-yf', '/dev/fake', check_exit_code=[0]),
mock.call('resize2fs', '/dev/fake', check_exit_code=[0]),
mock.call('e2fsck', '-pf', '/dev/fake', check_exit_code=[0])
]
self.assertEqual(mock_exec.call_args_list, expected_calls)
@mock.patch.object(utils, 'execute')
def test_extend_fs_ok_ext3(self, mock_exec):
fu.extend_fs('ext3', '/dev/fake')
expected_calls = [
mock.call('e2fsck', '-yf', '/dev/fake', check_exit_code=[0]),
mock.call('resize2fs', '/dev/fake', check_exit_code=[0]),
mock.call('e2fsck', '-pf', '/dev/fake', check_exit_code=[0])
]
self.assertEqual(mock_exec.call_args_list, expected_calls)
@mock.patch.object(utils, 'execute')
def test_extend_fs_ok_ext4(self, mock_exec):
fu.extend_fs('ext4', '/dev/fake')
expected_calls = [
mock.call('e2fsck', '-yf', '/dev/fake', check_exit_code=[0]),
mock.call('resize2fs', '/dev/fake', check_exit_code=[0]),
mock.call('e2fsck', '-pf', '/dev/fake', check_exit_code=[0])
]
self.assertEqual(mock_exec.call_args_list, expected_calls)
@mock.patch.object(utils, 'execute')
def test_extend_fs_ok_xfs(self, mock_exec):
fu.extend_fs('xfs', '/dev/fake')
mock_exec.assert_called_once_with(
'xfs_growfs', '/dev/fake', check_exit_code=[0])
@mock.patch.object(utils, 'execute')
def test_extend_fs_unsupported_fs(self, mock_exec):
self.assertRaises(errors.FsUtilsError, fu.extend_fs,
'unsupported', '/dev/fake')
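
Taken together, the extend_fs tests specify a small dispatch table: ext2/3/4 get an fsck/resize2fs/fsck sequence, xfs gets xfs_growfs, and anything else is an error. A sketch of that dispatch, with subprocess standing in for fuel_agent's utils.execute and ValueError for FsUtilsError:

import subprocess


def extend_fs(fs_type, fs_dev):
    if fs_type in ('ext2', 'ext3', 'ext4'):
        subprocess.check_call(['e2fsck', '-yf', fs_dev])
        subprocess.check_call(['resize2fs', fs_dev])
        subprocess.check_call(['e2fsck', '-pf', fs_dev])
    elif fs_type == 'xfs':
        subprocess.check_call(['xfs_growfs', fs_dev])
    else:
        raise ValueError('unsupported fs: %s' % fs_type)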
@mock.patch.object(utils, 'execute')
def test_mount_fs(self, mock_exec):
fu.mount_fs('ext3', '/dev/fake', '/target')
mock_exec.assert_called_once_with(
'mount', '-t', 'ext3', '/dev/fake', '/target', check_exit_code=[0])
@mock.patch.object(utils, 'execute')
def test_mount_bind_no_path2(self, mock_exec):
fu.mount_bind('/target', '/fake')
mock_exec.assert_called_once_with(
'mount', '--bind', '/fake', '/target/fake', check_exit_code=[0])
@mock.patch.object(utils, 'execute')
def test_mount_bind_path2(self, mock_exec):
fu.mount_bind('/target', '/fake', '/fake2')
mock_exec.assert_called_once_with(
'mount', '--bind', '/fake', '/target/fake2', check_exit_code=[0])
@mock.patch.object(utils, 'execute')
def test_umount_fs_ok(self, mock_exec):
fu.umount_fs('/fake')
expected_calls = [
mock.call('mountpoint', '-q', '/fake', check_exit_code=[0]),
mock.call('umount', '/fake', check_exit_code=[0])
]
self.assertEqual(expected_calls, mock_exec.call_args_list)
@mock.patch.object(utils, 'execute')
def test_umount_fs_not_mounted(self, mock_exec):
mock_exec.side_effect = errors.ProcessExecutionError
fu.umount_fs('/fake')
mock_exec.assert_called_once_with(
'mountpoint', '-q', '/fake', check_exit_code=[0])
@mock.patch.object(utils, 'execute')
def test_umount_fs_error(self, mock_exec):
mock_exec.side_effect = [
None, errors.ProcessExecutionError('message'), ('', '')]
fu.umount_fs('/fake', try_lazy_umount=True)
expected_calls = [
mock.call('mountpoint', '-q', '/fake', check_exit_code=[0]),
mock.call('umount', '/fake', check_exit_code=[0]),
mock.call('umount', '-l', '/fake', check_exit_code=[0])
]
self.assertEqual(expected_calls, mock_exec.call_args_list)

View File

@ -1,420 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os

import mock
from oslotest import base as test_base
import six

if six.PY2:
    OPEN_FUNCTION_NAME = '__builtin__.open'
else:
    OPEN_FUNCTION_NAME = 'builtins.open'
from fuel_agent import errors
from fuel_agent.utils import grub as gu
from fuel_agent.utils import utils
class TestGrubUtils(test_base.BaseTestCase):
@mock.patch.object(os.path, 'isdir')
def test_guess_grub2_conf(self, mock_isdir):
side_effect_values = {
'/target/boot/grub': True,
'/target/boot/grub2': False
}
def side_effect(key):
return side_effect_values[key]
mock_isdir.side_effect = side_effect
self.assertEqual(gu.guess_grub2_conf('/target'),
'/boot/grub/grub.cfg')
side_effect_values = {
'/target/boot/grub': False,
'/target/boot/grub2': True
}
self.assertEqual(gu.guess_grub2_conf('/target'),
'/boot/grub2/grub.cfg')
@mock.patch.object(os.path, 'isfile')
def test_guess_grub2_default(self, mock_isfile):
side_effect_values = {
'/target/etc/default/grub': True,
'/target/etc/sysconfig/grub': False
}
def side_effect(key):
return side_effect_values[key]
mock_isfile.side_effect = side_effect
self.assertEqual(gu.guess_grub2_default('/target'),
'/etc/default/grub')
side_effect_values = {
'/target/etc/default/grub': False,
'/target/etc/sysconfig/grub': True
}
self.assertEqual(gu.guess_grub2_default('/target'),
'/etc/sysconfig/grub')
@mock.patch.object(os.path, 'isfile')
def test_guess_grub2_mkconfig(self, mock_isfile):
side_effect_values = {
'/target/sbin/grub-mkconfig': True,
'/target/sbin/grub2-mkconfig': False,
'/target/usr/sbin/grub-mkconfig': False,
'/target/usr/sbin/grub2-mkconfig': False
}
def side_effect(key):
return side_effect_values[key]
mock_isfile.side_effect = side_effect
self.assertEqual(gu.guess_grub2_mkconfig('/target'),
'/sbin/grub-mkconfig')
side_effect_values = {
'/target/sbin/grub-mkconfig': False,
'/target/sbin/grub2-mkconfig': True,
'/target/usr/sbin/grub-mkconfig': False,
'/target/usr/sbin/grub2-mkconfig': False
}
self.assertEqual(gu.guess_grub2_mkconfig('/target'),
'/sbin/grub2-mkconfig')
side_effect_values = {
'/target/sbin/grub-mkconfig': False,
'/target/sbin/grub2-mkconfig': False,
'/target/usr/sbin/grub-mkconfig': True,
'/target/usr/sbin/grub2-mkconfig': False
}
self.assertEqual(gu.guess_grub2_mkconfig('/target'),
'/usr/sbin/grub-mkconfig')
side_effect_values = {
'/target/sbin/grub-mkconfig': False,
'/target/sbin/grub2-mkconfig': False,
'/target/usr/sbin/grub-mkconfig': False,
'/target/usr/sbin/grub2-mkconfig': True
}
self.assertEqual(gu.guess_grub2_mkconfig('/target'),
'/usr/sbin/grub2-mkconfig')
@mock.patch.object(gu, 'guess_grub_install')
@mock.patch.object(utils, 'execute')
def test_guess_grub_version_1(self, mock_exec, mock_ggi):
mock_ggi.return_value = '/grub_install'
mock_exec.return_value = ('foo 0.97 bar', '')
version = gu.guess_grub_version('/target')
cmd = 'chroot /target /grub_install --version'.split()
mock_exec.assert_called_once_with(*cmd)
self.assertEqual(version, 1)
@mock.patch.object(gu, 'guess_grub_install')
@mock.patch.object(utils, 'execute')
def test_guess_grub_version_2(self, mock_exec, mock_ggi):
mock_ggi.return_value = '/grub_install'
mock_exec.return_value = ('foo bar', '')
version = gu.guess_grub_version('/target')
cmd = 'chroot /target /grub_install --version'.split()
mock_exec.assert_called_once_with(*cmd)
self.assertEqual(version, 2)
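
The pair of guess_grub_version tests reduce version detection to a substring check on the output of grub-install --version: legacy grub reports 0.9x, anything else is treated as grub2. A sketch of that check (shelling out directly instead of going through utils.execute; the default grub_install path is an assumption, and the real code may match more loosely than the literal '0.97'):

import subprocess


def guess_grub_version(chroot, grub_install='/usr/sbin/grub-install'):
    out = subprocess.check_output(
        ['chroot', chroot, grub_install, '--version']).decode()
    return 1 if '0.97' in out else 2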
@mock.patch.object(os.path, 'isfile')
def test_guess_grub(self, mock_isfile):
side_effect_values = {
'/target/sbin/grub': True,
'/target/usr/sbin/grub': False
}
def side_effect(key):
return side_effect_values[key]
mock_isfile.side_effect = side_effect
self.assertEqual(gu.guess_grub('/target'),
'/sbin/grub')
side_effect_values = {
'/target/sbin/grub': False,
'/target/usr/sbin/grub': True
}
self.assertEqual(gu.guess_grub('/target'),
'/usr/sbin/grub')
side_effect_values = {
'/target/sbin/grub': False,
'/target/usr/sbin/grub': False
}
self.assertRaises(errors.GrubUtilsError, gu.guess_grub, '/target')
@mock.patch.object(os.path, 'isfile')
def test_grub_install(self, mock_isfile):
side_effect_values = {
'/target/sbin/grub-install': True,
'/target/sbin/grub2-install': False,
'/target/usr/sbin/grub-install': False,
'/target/usr/sbin/grub2-install': False
}
def side_effect(key):
return side_effect_values[key]
mock_isfile.side_effect = side_effect
self.assertEqual(gu.guess_grub_install('/target'),
'/sbin/grub-install')
side_effect_values = {
'/target/sbin/grub-install': False,
'/target/sbin/grub2-install': True,
'/target/usr/sbin/grub-install': False,
'/target/usr/sbin/grub2-install': False
}
self.assertEqual(gu.guess_grub_install('/target'),
'/sbin/grub2-install')
side_effect_values = {
'/target/sbin/grub-install': False,
'/target/sbin/grub2-install': False,
'/target/usr/sbin/grub-install': True,
'/target/usr/sbin/grub2-install': False
}
self.assertEqual(gu.guess_grub_install('/target'),
'/usr/sbin/grub-install')
side_effect_values = {
'/target/sbin/grub-install': False,
'/target/sbin/grub2-install': False,
'/target/usr/sbin/grub-install': False,
'/target/usr/sbin/grub2-install': True
}
self.assertEqual(gu.guess_grub_install('/target'),
'/usr/sbin/grub2-install')
@mock.patch('fuel_agent.utils.grub.utils.guess_filename')
def test_guess_kernel(self, mock_guess):
mock_guess.return_value = 'vmlinuz-version'
self.assertEqual(gu.guess_kernel('/target'), 'vmlinuz-version')
mock_guess.assert_called_once_with(
path='/target/boot', regexp=r'^vmlinuz.*')
mock_guess.reset_mock()
mock_guess.return_value = 'vmlinuz-version'
self.assertEqual(gu.guess_kernel('/target', r'^vmlinuz-version.*'),
'vmlinuz-version')
mock_guess.assert_called_once_with(
path='/target/boot', regexp=r'^vmlinuz-version.*')
mock_guess.reset_mock()
mock_guess.return_value = None
self.assertRaises(errors.GrubUtilsError, gu.guess_kernel, '/target')
@mock.patch('fuel_agent.utils.grub.utils.guess_filename')
def test_guess_initrd(self, mock_guess):
mock_guess.return_value = 'initrd-version'
self.assertEqual(gu.guess_initrd('/target'), 'initrd-version')
mock_guess.assert_called_once_with(
path='/target/boot', regexp=r'^(initrd|initramfs).*')
mock_guess.reset_mock()
mock_guess.return_value = 'initramfs-version'
self.assertEqual(gu.guess_initrd('/target', r'^initramfs-version.*'),
'initramfs-version')
mock_guess.assert_called_once_with(
path='/target/boot', regexp=r'^initramfs-version.*')
mock_guess.reset_mock()
mock_guess.return_value = None
self.assertRaises(errors.GrubUtilsError, gu.guess_initrd, '/target')
@mock.patch.object(gu, 'grub1_stage1')
@mock.patch.object(gu, 'grub1_mbr')
def test_grub1_install(self, mock_mbr, mock_stage1):
install_devices = ['/dev/foo', '/dev/bar']
expected_calls_mbr = []
for install_device in install_devices:
expected_calls_mbr.append(
mock.call(install_device, '/dev/foo', '0', chroot='/target'))
gu.grub1_install(install_devices, '/dev/foo1', '/target')
self.assertEqual(expected_calls_mbr, mock_mbr.call_args_list)
mock_stage1.assert_called_once_with(chroot='/target')
# should raise exception if boot_device (second argument)
# is not a partition but a whole disk
self.assertRaises(errors.GrubUtilsError, gu.grub1_install,
'/dev/foo', '/dev/foo', chroot='/target')
@mock.patch.object(gu, 'guess_grub')
@mock.patch.object(os, 'chmod')
@mock.patch.object(utils, 'execute')
def test_grub1_mbr_install_differs_boot(self, mock_exec,
mock_chmod, mock_guess):
mock_guess.return_value = '/sbin/grub'
mock_exec.return_value = ('stdout', 'stderr')
# install_device != boot_disk
batch = 'device (hd0) /dev/foo\n'
batch += 'geometry (hd0) 130 255 63\n'
batch += 'device (hd1) /dev/bar\n'
batch += 'geometry (hd1) 130 255 63\n'
batch += 'root (hd1,0)\n'
batch += 'setup (hd0)\n'
batch += 'quit\n'
script = 'cat /tmp/grub.batch | /sbin/grub --no-floppy --batch'
mock_open = mock.mock_open()
with mock.patch(OPEN_FUNCTION_NAME, new=mock_open, create=True):
gu.grub1_mbr('/dev/foo', '/dev/bar', '0', chroot='/target')
self.assertEqual(
mock_open.call_args_list,
[mock.call('/target/tmp/grub.batch', 'wb'),
mock.call('/target/tmp/grub.sh', 'wb')]
)
mock_open_file = mock_open()
self.assertEqual(
mock_open_file.write.call_args_list,
[mock.call(batch), mock.call(script)]
)
mock_chmod.assert_called_once_with('/target/tmp/grub.sh', 0o755)
mock_exec.assert_called_once_with(
'chroot', '/target', '/tmp/grub.sh',
run_as_root=True, check_exit_code=[0])
@mock.patch.object(gu, 'guess_grub')
@mock.patch.object(os, 'chmod')
@mock.patch.object(utils, 'execute')
def test_grub1_mbr_install_same_as_boot(self, mock_exec,
mock_chmod, mock_guess):
mock_guess.return_value = '/sbin/grub'
mock_exec.return_value = ('stdout', 'stderr')
# install_device == boot_disk
batch = 'device (hd0) /dev/foo\n'
batch += 'geometry (hd0) 130 255 63\n'
batch += 'root (hd0,0)\n'
batch += 'setup (hd0)\n'
batch += 'quit\n'
script = 'cat /tmp/grub.batch | /sbin/grub --no-floppy --batch'
mock_open = mock.mock_open()
with mock.patch(OPEN_FUNCTION_NAME, new=mock_open, create=True):
gu.grub1_mbr('/dev/foo', '/dev/foo', '0', chroot='/target')
self.assertEqual(
mock_open.call_args_list,
[mock.call('/target/tmp/grub.batch', 'wb'),
mock.call('/target/tmp/grub.sh', 'wb')]
)
mock_open_file = mock_open()
self.assertEqual(
mock_open_file.write.call_args_list,
[mock.call(batch), mock.call(script)]
)
mock_chmod.assert_called_once_with('/target/tmp/grub.sh', 0o755)
mock_exec.assert_called_once_with(
'chroot', '/target', '/tmp/grub.sh',
run_as_root=True, check_exit_code=[0])
@mock.patch.object(gu, 'guess_kernel')
@mock.patch.object(gu, 'guess_initrd')
def test_grub1_cfg_kernel_initrd_are_not_set(self, mock_initrd,
mock_kernel):
mock_kernel.return_value = 'kernel-version'
mock_initrd.return_value = 'initrd-version'
config = """
default=0
timeout=5
title Default (kernel-version)
kernel /kernel-version kernel-params
initrd /initrd-version
"""
mock_open = mock.mock_open()
with mock.patch(OPEN_FUNCTION_NAME, new=mock_open, create=True):
gu.grub1_cfg(chroot='/target', kernel_params='kernel-params')
mock_open.assert_called_once_with('/target/boot/grub/grub.conf', 'wb')
mock_open_file = mock_open()
mock_open_file.write.assert_called_once_with(config)
def test_grub1_cfg_kernel_initrd_are_set(self):
config = """
default=0
timeout=10
title Default (kernel-version-set)
kernel /kernel-version-set kernel-params
initrd /initrd-version-set
"""
mock_open = mock.mock_open()
with mock.patch(OPEN_FUNCTION_NAME, new=mock_open, create=True):
gu.grub1_cfg(kernel='kernel-version-set',
initrd='initrd-version-set',
chroot='/target', kernel_params='kernel-params',
grub_timeout=10)
mock_open.assert_called_once_with('/target/boot/grub/grub.conf', 'wb')
mock_open_file = mock_open()
mock_open_file.write.assert_called_once_with(config)
@mock.patch.object(utils, 'execute')
@mock.patch.object(gu, 'guess_grub_install')
def test_grub2_install(self, mock_guess_grub, mock_exec):
mock_guess_grub.return_value = '/sbin/grub'
expected_calls = [
mock.call('chroot', '/target', '/sbin/grub', '/dev/foo',
run_as_root=True, check_exit_code=[0]),
mock.call('chroot', '/target', '/sbin/grub', '/dev/bar',
run_as_root=True, check_exit_code=[0])
]
gu.grub2_install(['/dev/foo', '/dev/bar'], chroot='/target')
self.assertEqual(mock_exec.call_args_list, expected_calls)
@mock.patch.object(gu, 'guess_grub2_conf')
@mock.patch.object(gu, 'guess_grub2_mkconfig')
@mock.patch.object(utils, 'execute')
@mock.patch.object(gu, 'guess_grub2_default')
def test_grub2_cfg(self, mock_def, mock_exec, mock_mkconfig, mock_conf):
mock_def.return_value = '/etc/default/grub'
mock_mkconfig.return_value = '/sbin/grub-mkconfig'
mock_conf.return_value = '/boot/grub/grub.cfg'
orig_content = """foo
GRUB_CMDLINE_LINUX="kernel-params-orig"
bar"""
new_content = """foo
GRUB_CMDLINE_LINUX="kernel-params-new"
bar
GRUB_RECORDFAIL_TIMEOUT=10
"""
        with mock.patch(OPEN_FUNCTION_NAME,
                        new=mock.mock_open(read_data=orig_content),
                        create=True) as mock_open:
            mock_open.return_value = mock.MagicMock(spec=io.IOBase)
            handle = mock_open.return_value.__enter__.return_value
            handle.__iter__.return_value = six.StringIO(orig_content)
gu.grub2_cfg(kernel_params='kernel-params-new', chroot='/target',
grub_timeout=10)
self.assertEqual(
mock_open.call_args_list,
[mock.call('/target/etc/default/grub'),
mock.call('/target/etc/default/grub', 'wb')]
)
handle.write.assert_called_once_with(new_content)
mock_exec.assert_called_once_with('chroot', '/target',
'/sbin/grub-mkconfig',
'-o', '/boot/grub/grub.cfg',
run_as_root=True)
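
The grub2_cfg test asserts a line-oriented rewrite of /etc/default/grub: GRUB_CMDLINE_LINUX is replaced wholesale and GRUB_RECORDFAIL_TIMEOUT is appended. A sketch of that transformation on a string, with the file I/O and the grub-mkconfig call omitted:

def rewrite_grub_default(content, kernel_params, grub_timeout):
    lines = []
    for line in content.splitlines():
        if line.startswith('GRUB_CMDLINE_LINUX='):
            line = 'GRUB_CMDLINE_LINUX="%s"' % kernel_params
        lines.append(line)
    lines.append('GRUB_RECORDFAIL_TIMEOUT=%d' % grub_timeout)
    return '\n'.join(lines) + '\n'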

View File

@ -1,474 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslotest import base as test_base
from fuel_agent.utils import hardware as hu
from fuel_agent.utils import utils
class TestHardwareUtils(test_base.BaseTestCase):
@mock.patch.object(utils, 'execute')
def test_parse_dmidecode(self, exec_mock):
exec_mock.return_value = ["""
System Slot Information
Designation: PCIEX16_1
ID: 1
Bus Address: 0000:00:01.0
Characteristics:
3.3 V is provided
PME signal is supported
System Slot Information
Type: 32-bit PCI Express
ID: 3
Characteristics:
Opening is shared
Bus Address: 0000:00:1c.4
"""]
expected = [{"designation": "PCIEX16_1",
"id": "1",
"characteristics": ["3.3 V is provided",
"PME signal is supported"],
"bus address": "0000:00:01.0"},
{"type": "32-bit PCI Express",
"id": "3",
"characteristics": ["Opening is shared"],
"bus address": "0000:00:1c.4"}]
self.assertEqual(expected, hu.parse_dmidecode("fake_type"))
exec_mock.assert_called_once_with("dmidecode", "-q", "--type",
"fake_type")
@mock.patch.object(utils, 'execute')
def test_parse_lspci(self, exec_mock):
exec_mock.return_value = ["""Slot: 07:00.0
Class: PCI bridge
Vendor: ASMedia Technology Inc.
Device: ASM1083/1085 PCIe to PCI Bridge
Rev: 01
ProgIf: 01
Slot: 09:00.0
Class: IDE interface
Vendor: Marvell Technology Group Ltd.
Device: 88SE6121 SATA II / PATA Controller
SVendor: ASUSTeK Computer Inc.
SDevice: Device 82a2
Rev: b2
ProgIf: 8f
"""]
expected = [{'class': 'PCI bridge',
'device': 'ASM1083/1085 PCIe to PCI Bridge',
'progif': '01',
'rev': '01',
'slot': '07:00.0',
'vendor': 'ASMedia Technology Inc.'},
{'class': 'IDE interface',
'device': '88SE6121 SATA II / PATA Controller',
'progif': '8f',
'rev': 'b2',
'sdevice': 'Device 82a2',
'slot': '09:00.0',
'svendor': 'ASUSTeK Computer Inc.',
'vendor': 'Marvell Technology Group Ltd.'}]
self.assertEqual(expected, hu.parse_lspci())
exec_mock.assert_called_once_with('lspci', '-vmm', '-D')
@mock.patch.object(utils, 'execute')
def test_parse_simple_kv(self, exec_mock):
exec_mock.return_value = ["""driver: r8169
version: 2.3LK-NAPI
firmware-version: rtl_nic/rtl8168e-2.fw
bus-info: 0000:06:00.0
supports-statistics: yes
supports-test: no
supports-eeprom-access: no
supports-register-dump: yes
"""]
expected = {'driver': 'r8169',
'version': '2.3LK-NAPI',
'firmware-version': 'rtl_nic/rtl8168e-2.fw',
'bus-info': '0000:06:00.0',
'supports-statistics': 'yes',
'supports-test': 'no',
'supports-eeprom-access': 'no',
'supports-register-dump': 'yes'}
self.assertEqual(expected, hu.parse_simple_kv('fake', 'cmd'))
exec_mock.assert_called_once_with('fake', 'cmd')
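
parse_simple_kv, as specified by the test above, is just "run a command, split each line on the first colon, strip both halves". The parsing half as a sketch (parse_simple_kv_text is an illustrative name; command execution is left out):

def parse_simple_kv_text(text):
    result = {}
    for line in text.splitlines():
        if ':' in line:
            key, value = line.split(':', 1)
            result[key.strip()] = value.strip()
    return result


sample = 'driver: r8169\nversion: 2.3LK-NAPI\n'
assert parse_simple_kv_text(sample) == {
    'driver': 'r8169', 'version': '2.3LK-NAPI'}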
@mock.patch.object(utils, 'execute')
def test_udevreport(self, mock_exec):
# should run udevadm info OS command
# in order to get udev properties for a device
mock_exec.return_value = (
'DEVLINKS=\'/dev/disk/by-id/fakeid1 /dev/disk/by-id/fakeid2\'\n'
'DEVNAME=\'/dev/fake\'\n'
'DEVPATH=\'/devices/fakepath\'\n'
'DEVTYPE=\'disk\'\n'
'MAJOR=\'11\'\n'
'MINOR=\'0\'\n'
'ID_BUS=\'fakebus\'\n'
'ID_MODEL=\'fakemodel\'\n'
'ID_SERIAL_SHORT=\'fakeserial\'\n'
'ID_WWN=\'fakewwn\'\n'
'ID_CDROM=\'1\'\n'
'ANOTHER=\'another\'\n',
''
)
expected = {
'DEVLINKS': ['/dev/disk/by-id/fakeid1', '/dev/disk/by-id/fakeid2'],
'DEVNAME': '/dev/fake',
'DEVPATH': '/devices/fakepath',
'DEVTYPE': 'disk',
'MAJOR': '11',
'MINOR': '0',
'ID_BUS': 'fakebus',
'ID_MODEL': 'fakemodel',
'ID_SERIAL_SHORT': 'fakeserial',
'ID_WWN': 'fakewwn',
'ID_CDROM': '1'
}
self.assertEqual(expected, hu.udevreport('/dev/fake'))
mock_exec.assert_called_once_with('udevadm',
'info',
'--query=property',
'--export',
'--name=/dev/fake',
check_exit_code=[0])
@mock.patch.object(utils, 'execute')
def test_blockdevreport(self, mock_exec):
# should run blockdev OS command
# in order to get block device properties
cmd = ['blockdev', '--getsz', '--getro', '--getss', '--getpbsz',
'--getsize64', '--getiomin', '--getioopt', '--getra',
'--getalignoff', '--getmaxsect', '/dev/fake']
mock_exec.return_value = (
'625142448\n0\n512\n4096\n320072933376\n4096\n0\n256\n0\n1024',
''
)
expected = {
'sz': '625142448',
'ro': '0',
'ss': '512',
'pbsz': '4096',
'size64': '320072933376',
'iomin': '4096',
'ioopt': '0',
'ra': '256',
'alignoff': '0',
'maxsect': '1024'
}
self.assertEqual(expected, hu.blockdevreport('/dev/fake'))
mock_exec.assert_called_once_with(*cmd, check_exit_code=[0])
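
blockdevreport pairs each --get* flag with one line of blockdev output; the test above fixes both the flag order and the resulting keys. A sketch of that zip, covering only the output parsing:

BLOCKDEV_KEYS = ['sz', 'ro', 'ss', 'pbsz', 'size64',
                 'iomin', 'ioopt', 'ra', 'alignoff', 'maxsect']


def parse_blockdev_output(output):
    return dict(zip(BLOCKDEV_KEYS, output.strip().split('\n')))


out = '625142448\n0\n512\n4096\n320072933376\n4096\n0\n256\n0\n1024'
assert parse_blockdev_output(out)['ss'] == '512'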
@mock.patch('six.moves.builtins.open')
def test_extrareport(self, mock_open):
# should read some files from sysfs e.g. /sys/block/fake/removable
# in order to get some device properties
def with_side_effect(arg):
mock_with = mock.MagicMock()
mock_with.__exit__.return_value = None
mock_file = mock.Mock()
if arg == '/sys/block/fake/removable':
mock_file.read.return_value = '0\n'
elif arg == '/sys/block/fake/device/state':
mock_file.read.return_value = 'running\n'
elif arg == '/sys/block/fake/device/timeout':
mock_file.read.return_value = '30\n'
mock_with.__enter__.return_value = mock_file
return mock_with
mock_open.side_effect = with_side_effect
expected = {'removable': '0', 'state': 'running', 'timeout': '30'}
self.assertEqual(expected, hu.extrareport('/dev/fake'))
@mock.patch('six.moves.builtins.open')
def test_extrareport_exceptions(self, mock_open):
mock_open.side_effect = Exception('foo')
expected = {}
self.assertEqual(expected, hu.extrareport('/dev/fake'))
@mock.patch.object(hu, 'blockdevreport')
@mock.patch.object(hu, 'udevreport')
def test_is_disk_uspec_bspec_none(self, mock_ureport, mock_breport):
# should call udevreport if uspec is None
# should call blockdevreport if bspec is None
# should return True if uspec and bspec are empty
mock_ureport.return_value = {}
mock_breport.return_value = {}
self.assertTrue(hu.is_disk('/dev/fake'))
mock_ureport.assert_called_once_with('/dev/fake')
mock_breport.assert_called_once_with('/dev/fake')
@mock.patch.object(hu, 'udevreport')
def test_is_disk_uspec_none(self, mock_ureport):
# should call udevreport if uspec is None but bspec is not None
bspec = {'key': 'value'}
mock_ureport.return_value = {}
hu.is_disk('/dev/fake', bspec=bspec)
mock_ureport.assert_called_once_with('/dev/fake')
@mock.patch.object(hu, 'blockdevreport')
def test_is_disk_bspec_none(self, mock_breport):
# should call blockdevreport if bspec is None but uspec is not None
uspec = {'key': 'value'}
mock_breport.return_value = {}
hu.is_disk('/dev/fake', uspec=uspec)
mock_breport.assert_called_once_with('/dev/fake')
@mock.patch.object(hu, 'blockdevreport')
def test_is_disk_cdrom(self, mock_breport):
# should return False if udev ID_CDROM is set to 1
mock_breport.return_value = {}
uspec = {
'ID_CDROM': '1'
}
self.assertFalse(hu.is_disk('/dev/fake', uspec=uspec))
@mock.patch.object(hu, 'blockdevreport')
def test_is_disk_partition(self, mock_breport):
# should return False if udev DEVTYPE is partition
mock_breport.return_value = {}
uspec = {
'DEVTYPE': 'partition'
}
self.assertFalse(hu.is_disk('/dev/fake', uspec=uspec))
@mock.patch.object(hu, 'blockdevreport')
def test_is_disk_major(self, mock_breport):
# should return False if udev MAJOR is not in a list of
# major numbers which are used for disks
# look at kernel/Documentation/devices.txt
mock_breport.return_value = {}
valid_majors = [3, 8, 65, 66, 67, 68, 69, 70, 71, 104, 105,
106, 107, 108, 109, 110, 111, 202, 252, 253, 259]
for major in (set(range(1, 261)) - set(valid_majors)):
uspec = {
'MAJOR': str(major)
}
self.assertFalse(hu.is_disk('/dev/fake', uspec=uspec))
@mock.patch.object(hu, 'udevreport')
def test_is_disk_readonly(self, mock_ureport):
# should return False if device is read only
mock_ureport.return_value = {}
bspec = {
'ro': '1'
}
self.assertFalse(hu.is_disk('/dev/fake', bspec=bspec))
@mock.patch('fuel_agent.utils.hardware.utils.execute')
def test_get_block_devices_from_udev_db(self, mock_exec):
mock_exec.return_value = ("""P: /devices/virtual/block/loop0
N: loop0
E: DEVNAME=/dev/loop0
E: DEVPATH=/devices/virtual/block/loop0
E: DEVTYPE=disk
E: MAJOR=7
E: SUBSYSTEM=block
P: /devices/pci0000:00/0000:00:1f.2/ata1/host0/target0:0:0/0:0:0:0/block/sda
N: sda
S: disk/by-id/wwn-0x5000c5004008ac0f
S: disk/by-path/pci-0000:00:1f.2-scsi-0:0:0:0
E: DEVNAME=/dev/sda
E: DEVTYPE=disk
E: ID_ATA=1
E: MAJOR=8
E: SUBSYSTEM=block
E: UDEV_LOG=3
P: /devices/pci:00/:00:04.0/misc/nvme0
N: nvme0
E: DEVNAME=/dev/nvme0
E: DEVPATH=/devices/pci:00/:00:04.0/misc/nvme0
E: MAJOR=10
E: MINOR=57
E: SUBSYSTEM=misc
P: /devices/pci:00/:00:04.0/block/nvme0n1
N: nvme0n1
E: DEVNAME=/dev/nvme0n1
E: DEVPATH=/devices/pci:00/:00:04.0/block/nvme0n1
E: DEVTYPE=disk
E: MAJOR=259
E: MINOR=0
E: SUBSYSTEM=block
E: USEC_INITIALIZED=87744
P: /devices/pci0000:00/0000:00:1c.1/target16:0:0/16:0:0:0/block/sr0
E: DEVTYPE=disk
E: DEVNAME=/dev/sr0
E: MAJOR=11
E: MINOR=0
E: SEQNUM=4400
E: SUBSYSTEM=block
P: /devices/pci0000:00/0000:00:1f.2/ata1/host0/target0:0:0/0:0:0:0/block/sda
N: sda
S: disk/by-id/wwn-0x5000c5004008ac0f
S: disk/by-path/pci-0000:00:1f.2-scsi-0:0:0:0
E: DEVNAME=/dev/sda1
E: DEVTYPE=partition
E: ID_ATA=1
E: SUBSYSTEM=block
E: MAJOR=8
E: UDEV_LOG=3""", '')
self.assertEqual(['/dev/sda', '/dev/nvme0n1', '/dev/sda1'],
hu.get_block_devices_from_udev_db())
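
Assuming records are separated by blank lines, as in real `udevadm info --export-db` output, the expected result above implies the filter: keep records whose SUBSYSTEM is block while skipping loop devices and cdroms. A sketch under those assumptions (the MAJOR-based exclusion of 7/loop and 11/sr is inferred from the test data, not confirmed from the source):

def block_devices_from_export(export_text):
    devices = []
    for record in export_text.split('\n\n'):
        props = dict(line[3:].split('=', 1)
                     for line in record.splitlines()
                     if line.startswith('E: '))
        if props.get('SUBSYSTEM') != 'block':
            continue
        if props.get('MAJOR') in ('7', '11'):  # loop devices, cdroms
            continue
        devices.append(props['DEVNAME'])
    return devices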
@mock.patch.object(hu, 'get_block_devices_from_udev_db')
@mock.patch.object(hu, 'is_disk')
@mock.patch.object(hu, 'extrareport')
@mock.patch.object(hu, 'blockdevreport')
@mock.patch.object(hu, 'udevreport')
def test_list_block_devices(self, mock_ureport, mock_breport, mock_ereport,
mock_isdisk, mock_get_devs):
        # should get the list of block devices from the udev db
        # should call report methods to get device info
        # should call is_disk method to filter out
        # those block devices which are not disks
mock_get_devs.return_value = ['/dev/fake', '/dev/fake1', '/dev/sr0']
def isdisk_side_effect(arg, uspec=None, bspec=None):
if arg == '/dev/fake':
return True
elif arg in ('/dev/fake1', '/dev/sr0'):
return False
mock_isdisk.side_effect = isdisk_side_effect
mock_ureport.return_value = {'key0': 'value0'}
mock_breport.return_value = {'key1': 'value1'}
mock_ereport.return_value = {'key2': 'value2'}
expected = [{
'device': '/dev/fake',
'uspec': {'key0': 'value0'},
'bspec': {'key1': 'value1'},
'espec': {'key2': 'value2'}
}]
self.assertEqual(hu.list_block_devices(), expected)
self.assertEqual(mock_ureport.call_args_list, [mock.call('/dev/fake'),
mock.call('/dev/fake1'), mock.call('/dev/sr0')])
self.assertEqual(mock_breport.call_args_list, [mock.call('/dev/fake'),
mock.call('/dev/fake1'), mock.call('/dev/sr0')])
self.assertEqual(mock_ereport.call_args_list, [mock.call('/dev/fake'),
mock.call('/dev/fake1'), mock.call('/dev/sr0')])
@mock.patch.object(hu, 'get_block_devices_from_udev_db')
@mock.patch.object(hu, 'is_disk')
@mock.patch.object(hu, 'extrareport')
@mock.patch.object(hu, 'blockdevreport')
@mock.patch.object(hu, 'udevreport')
def test_list_block_devices_removable_vendors(self, mock_ureport,
mock_breport, mock_ereport,
mock_isdisk, mock_get_devs):
mock_get_devs.return_value = ['/dev/no_vendor_id',
'/dev/wrong_vendor_id',
'/dev/right_vendor_id']
mock_isdisk.return_value = True
mock_ureport.side_effect = [
{},
{'ID_VENDOR': 'Cisco'},
{'ID_VENDOR': 'IBM'},
]
mock_ereport.return_value = {'removable': '1'}
mock_breport.return_value = {'key1': 'value1'}
expected = [{
'device': '/dev/right_vendor_id',
'uspec': {'ID_VENDOR': 'IBM'},
'bspec': {'key1': 'value1'},
'espec': {'removable': '1'}
}]
self.assertEqual(hu.list_block_devices(), expected)
self.assertEqual(
mock_ureport.call_args_list,
[mock.call('/dev/no_vendor_id'),
mock.call('/dev/wrong_vendor_id'),
mock.call('/dev/right_vendor_id')])
mock_breport.assert_called_once_with('/dev/right_vendor_id')
self.assertEqual(
mock_ereport.call_args_list,
[mock.call('/dev/no_vendor_id'),
mock.call('/dev/wrong_vendor_id'),
mock.call('/dev/right_vendor_id')])
def test_match_device_devlinks(self):
# should return true if at least one by-id link from first uspec
# matches by-id link from another uspec
uspec1 = {'DEVLINKS': ['/dev/disk/by-path/fakepath',
'/dev/disk/by-id/fakeid1',
'/dev/disk/by-id/fakeid2']}
uspec2 = {'DEVLINKS': ['/dev/disk/by-id/fakeid2',
'/dev/disk/by-id/fakeid3']}
self.assertTrue(hu.match_device(uspec1, uspec2))
def test_match_device_wwn(self):
# should return true if ID_WWN is given
# and if it is the same in both uspecs
# and if DEVTYPE is given and if DEVTYPE is disk
# or if DEVTYPE is partition and MINOR is the same for both uspecs
uspec1 = uspec2 = {'ID_WWN': 'fakewwn',
'DEVTYPE': 'disk'}
self.assertTrue(hu.match_device(uspec1, uspec2))
uspec1 = uspec2 = {'ID_WWN': 'fakewwn',
'DEVTYPE': 'partition',
'MINOR': '1'}
self.assertTrue(hu.match_device(uspec1, uspec2))
def test_match_device_wwn_false(self):
# should return false if ID_WWN is given
# and does not match each other
uspec1 = {'ID_WWN': 'fakewwn1'}
uspec2 = {'ID_WWN': 'fakewwn2'}
self.assertFalse(hu.match_device(uspec1, uspec2))
def test_match_device_devpath(self):
# should return true if DEVPATH is given
# and if it is the same for both uspecs
uspec1 = uspec2 = {'DEVPATH': '/devices/fake'}
self.assertTrue(hu.match_device(uspec1, uspec2))
def test_match_device_serial(self):
# should return true if ID_SERIAL_SHORT is given
# and if it is the same for both uspecs
# and if DEVTYPE is given and if it is 'disk'
uspec1 = uspec2 = {'ID_SERIAL_SHORT': 'fakeserial',
'DEVTYPE': 'disk'}
self.assertTrue(hu.match_device(uspec1, uspec2))
def test_match_device_serial_false(self):
# should return false if ID_SERIAL_SHORT is given
# and if it does not match each other
uspec1 = {'ID_SERIAL_SHORT': 'fakeserial1'}
uspec2 = {'ID_SERIAL_SHORT': 'fakeserial2'}
self.assertFalse(hu.match_device(uspec1, uspec2))
def test_match_device_false(self):
uspec1 = {'ID_WWN': 'fakewwn1', 'DEVTYPE': 'disk'}
uspec2 = {'ID_WWN': 'fakewwn1', 'DEVTYPE': 'partition'}
self.assertFalse(hu.match_device(uspec1, uspec2))
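
The match_device tests spell out the identity rules: a shared /dev/disk/by-id link, an identical DEVPATH, or an identical ID_WWN / ID_SERIAL_SHORT on two whole disks (or on two partitions with the same MINOR). A sketch of those rules as a predicate, reconstructed from the tests rather than from the source:

def match_device(u1, u2):
    ids1 = set(l for l in u1.get('DEVLINKS', []) if '/by-id/' in l)
    ids2 = set(l for l in u2.get('DEVLINKS', []) if '/by-id/' in l)
    if ids1 & ids2:
        return True
    if u1.get('DEVPATH') and u1.get('DEVPATH') == u2.get('DEVPATH'):
        return True
    for key in ('ID_WWN', 'ID_SERIAL_SHORT'):
        if u1.get(key) and u1.get(key) == u2.get(key):
            if u1.get('DEVTYPE') == u2.get('DEVTYPE') == 'disk':
                return True
            if (u1.get('DEVTYPE') == u2.get('DEVTYPE') == 'partition'
                    and u1.get('MINOR') == u2.get('MINOR')):
                return True
    return False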

View File

@ -1,26 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslotest import base as test_base
from fuel_agent import errors
from fuel_agent.objects import image
class TestImage(test_base.BaseTestCase):
def test_unsupported_container(self):
self.assertRaises(errors.WrongImageDataError, image.Image, 'uri',
'dev', 'format', 'unsupported')

View File

@ -1,204 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from oslotest import base as test_base
from fuel_agent.drivers import ks_spaces_validator as kssv
from fuel_agent import errors
SAMPLE_SCHEME = [
{
"name": "sda",
"extra": [
"disk/by-id/scsi-SATA_VBOX_HARDDISK_VB69050467-b385c7cd",
"disk/by-id/ata-VBOX_HARDDISK_VB69050467-b385c7cd"
],
"free_space": 64907,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"size": 200,
"type": "raid",
"file_system": "ext2",
"name": "Boot"
},
{
"type": "lvm_meta_pool",
"size": 0
},
{
"size": 19438,
"type": "pv",
"lvm_meta_size": 64,
"vg": "os"
},
{
"size": 45597,
"type": "pv",
"lvm_meta_size": 64,
"vg": "image"
}
],
"type": "disk",
"id": "sda",
"size": 65535
},
{
"name": "sdb",
"extra": [
"disk/by-id/scsi-SATA_VBOX_HARDDISK_VBf2923215-708af674",
"disk/by-id/ata-VBOX_HARDDISK_VBf2923215-708af674"
],
"free_space": 64907,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"size": 200,
"type": "raid",
"file_system": "ext2",
"name": "Boot"
},
{
"type": "lvm_meta_pool",
"size": 64
},
{
"size": 0,
"type": "pv",
"lvm_meta_size": 0,
"vg": "os"
},
{
"size": 64971,
"type": "pv",
"lvm_meta_size": 64,
"vg": "image"
}
],
"type": "disk",
"id": "sdb",
"size": 65535
},
{
"name": "sdc",
"extra": [
"disk/by-id/scsi-SATA_VBOX_HARDDISK_VB50ee61eb-84e74fdf",
"disk/by-id/ata-VBOX_HARDDISK_VB50ee61eb-84e74fdf"
],
"free_space": 64907,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"size": 200,
"type": "raid",
"file_system": "ext2",
"name": "Boot"
},
{
"type": "lvm_meta_pool",
"size": 64
},
{
"size": 0,
"type": "pv",
"lvm_meta_size": 0,
"vg": "os"
},
{
"size": 64971,
"type": "pv",
"lvm_meta_size": 64,
"vg": "image"
}
],
"type": "disk",
"id": "disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0",
"size": 65535
},
{
"_allocate_size": "min",
"label": "Base System",
"min_size": 19374,
"volumes": [
{
"mount": "/",
"size": 15360,
"type": "lv",
"name": "root",
"file_system": "ext4"
},
{
"mount": "swap",
"size": 4014,
"type": "lv",
"name": "swap",
"file_system": "swap"
}
],
"type": "vg",
"id": "os"
},
{
"_allocate_size": "all",
"label": "Image Storage",
"min_size": 5120,
"volumes": [
{
"mount": "/var/lib/glance",
"size": 175347,
"type": "lv",
"name": "glance",
"file_system": "xfs"
}
],
"type": "vg",
"id": "image"
}
]
class TestKSSpacesValidator(test_base.BaseTestCase):
def setUp(self):
super(TestKSSpacesValidator, self).setUp()
self.fake_scheme = copy.deepcopy(SAMPLE_SCHEME)
def test_validate_ok(self):
kssv.validate(self.fake_scheme)
def test_validate_jsoschema_fail(self):
self.assertRaises(errors.WrongPartitionSchemeError, kssv.validate,
[{}])
def test_validate_no_disks_fail(self):
self.assertRaises(errors.WrongPartitionSchemeError, kssv.validate,
self.fake_scheme[-2:])
def test_validate_16T_root_volume_fail(self):
self.fake_scheme[3]['volumes'][0]['size'] = 16777216 + 1
self.assertRaises(errors.WrongPartitionSchemeError, kssv.validate,
self.fake_scheme)
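
The last test pins one concrete sanity rule from the validator: a root logical volume may not exceed 16777216 MiB, i.e. 16 TiB, presumably the classical ext4 ceiling with 4 KiB blocks. A standalone sketch of just that check over the sample scheme format, with ValueError standing in for WrongPartitionSchemeError:

def check_root_size(scheme, max_mib=16777216):
    for space in scheme:
        if space.get('type') != 'vg':
            continue
        for vol in space.get('volumes', []):
            if vol.get('mount') == '/' and vol.get('size', 0) > max_mib:
                raise ValueError('root volume larger than 16 TiB '
                                 'is not supported')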

View File

@ -1,400 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslotest import base as test_base
from fuel_agent import errors
from fuel_agent.utils import lvm as lu
from fuel_agent.utils import utils
class TestLvmUtils(test_base.BaseTestCase):
@mock.patch.object(utils, 'execute')
def test_pvdisplay(self, mock_exec):
# should run os command pvdisplay
# in order to get actual pv configuration
mock_exec.return_value = (
'/dev/fake1;vg;892.00m;1024.00m;'
'123456-1234-1234-1234-1234-1234-000000\n'
'/dev/fake2;;1024.00m;1024.00m;'
'123456-1234-1234-1234-1234-1234-111111\n',
''
)
expected = [
{
'uuid': '123456-1234-1234-1234-1234-1234-000000',
'vg': 'vg',
'devsize': 1024,
'psize': 892,
'name': '/dev/fake1',
},
{
'uuid': '123456-1234-1234-1234-1234-1234-111111',
'vg': None,
'devsize': 1024,
'psize': 1024,
'name': '/dev/fake2',
}
]
pvs = lu.pvdisplay()
mock_exec.assert_called_once_with(
'pvdisplay',
'-C',
'--noheading',
'--units', 'm',
'--options', 'pv_name,vg_name,pv_size,dev_size,pv_uuid',
'--separator', ';',
check_exit_code=[0]
)
key = lambda x: x['name']
self.assertEqual(sorted(expected, key=key), sorted(pvs, key=key))
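
pvdisplay parsing, as fixed by the test above: one pv per line, fields in pv_name/vg_name/pv_size/dev_size/pv_uuid order, separated by ';', with sizes reported in megabytes carrying an 'm' suffix. The parsing half as a sketch (command execution omitted):

def parse_pvdisplay(output):
    pvs = []
    for raw in output.strip().splitlines():
        name, vg, psize, devsize, uuid = raw.strip().split(';')
        pvs.append({
            'name': name,
            'vg': vg or None,                    # empty field means no vg
            'psize': int(float(psize[:-1])),     # strip the 'm' suffix
            'devsize': int(float(devsize[:-1])),
            'uuid': uuid,
        })
    return pvs


line = ('/dev/fake1;vg;892.00m;1024.00m;'
        '123456-1234-1234-1234-1234-1234-000000')
assert parse_pvdisplay(line)[0]['psize'] == 892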
@mock.patch.object(lu, 'pvdisplay')
@mock.patch.object(utils, 'execute')
def test_pvcreate_ok(self, mock_exec, mock_pvdisplay):
# should set metadatasize=64 and metadatacopies=2 if they are not set
# should run pvcreate command
mock_pvdisplay.return_value = [{'name': '/dev/another'}]
lu.pvcreate('/dev/fake1', metadatasize=32, metadatacopies=1)
lu.pvcreate('/dev/fake2', metadatacopies=1)
lu.pvcreate('/dev/fake3', metadatasize=32)
lu.pvcreate('/dev/fake4')
expected_calls = [
mock.call('pvcreate',
'--metadatacopies', '1',
'--metadatasize', '32m',
'/dev/fake1',
check_exit_code=[0]),
mock.call('pvcreate',
'--metadatacopies', '1',
'--metadatasize', '64m',
'/dev/fake2',
check_exit_code=[0]),
mock.call('pvcreate',
'--metadatacopies', '2',
'--metadatasize', '32m',
'/dev/fake3',
check_exit_code=[0]),
mock.call('pvcreate',
'--metadatacopies', '2',
'--metadatasize', '64m',
'/dev/fake4',
check_exit_code=[0])
]
self.assertEqual(mock_exec.call_args_list, expected_calls)
@mock.patch.object(lu, 'pvdisplay')
def test_pvcreate_duplicate(self, mock_pvdisplay):
# should check if pv exists
# then raise exception if it exists
mock_pvdisplay.return_value = [{'name': '/dev/fake'}]
self.assertRaises(
errors.PVAlreadyExistsError, lu.pvcreate, '/dev/fake')
@mock.patch.object(lu, 'pvdisplay')
@mock.patch.object(utils, 'execute')
def test_pvremove_ok(self, mock_exec, mock_pvdisplay):
# should check if pv exists and is not attached to some vg
# then should run pvremove command
mock_pvdisplay.return_value = [{'vg': None, 'name': '/dev/fake'}]
lu.pvremove('/dev/fake')
mock_exec.assert_called_once_with('pvremove', '-ff', '-y', '/dev/fake',
check_exit_code=[0])
@mock.patch.object(lu, 'pvdisplay')
def test_pvremove_attached_to_vg(self, mock_pvdisplay):
# should check if pv exists and is not attached to some vg
# then raise exception if it is attached to some vg
mock_pvdisplay.return_value = [{'vg': 'some', 'name': '/dev/fake'}]
self.assertRaises(errors.PVBelongsToVGError, lu.pvremove, '/dev/fake')
@mock.patch.object(lu, 'pvdisplay')
def test_pvremove_notfound(self, mock_pvdisplay):
# should check if pv exists
# then should raise exception if it does not exist
mock_pvdisplay.return_value = [{'name': '/dev/another'}]
self.assertRaises(errors.PVNotFoundError, lu.pvremove, '/dev/fake')
@mock.patch.object(utils, 'execute')
def test_vgdisplay(self, mock_exec):
# should run os command vgdisplay
# in order to get actual vg configuration
mock_exec.return_value = (
'vg1;123456-1234-1234-1234-1234-1234-000000;2040.00m;2040.00m\n'
'vg2;123456-1234-1234-1234-1234-1234-111111;2040.00m;1020.00m\n',
''
)
expected = [
{
'uuid': '123456-1234-1234-1234-1234-1234-000000',
'size': 2040,
'free': 2040,
'name': 'vg1',
},
{
'uuid': '123456-1234-1234-1234-1234-1234-111111',
'size': 2040,
'free': 1020,
'name': 'vg2',
}
]
vg = lu.vgdisplay()
mock_exec.assert_called_once_with(
'vgdisplay',
'-C',
'--noheading',
'--units', 'm',
'--options', 'vg_name,vg_uuid,vg_size,vg_free',
'--separator', ';',
check_exit_code=[0]
)
key = lambda x: x['name']
self.assertEqual(sorted(expected, key=key), sorted(vg, key=key))
@mock.patch.object(lu, 'pvdisplay')
@mock.patch.object(lu, 'vgdisplay')
@mock.patch.object(utils, 'execute')
def test_vgcreate_ok(self, mock_exec, mock_vgdisplay, mock_pvdisplay):
# should check if vg already exists
# should check if all necessary pv exist
# should run vgcreate command
mock_vgdisplay.return_value = [{'name': 'some'}, {'name': 'another'}]
mock_pvdisplay.return_value = [{'vg': None, 'name': '/dev/fake1'},
{'vg': None, 'name': '/dev/fake2'}]
# one pvname
lu.vgcreate('vgname', '/dev/fake1')
# several pvnames
lu.vgcreate('vgname', '/dev/fake1', '/dev/fake2')
expected_calls = [
mock.call('vgcreate', 'vgname', '/dev/fake1',
check_exit_code=[0]),
mock.call('vgcreate', 'vgname', '/dev/fake1', '/dev/fake2',
check_exit_code=[0])
]
self.assertEqual(mock_exec.call_args_list, expected_calls)
@mock.patch.object(lu, 'vgdisplay')
def test_vgcreate_duplicate(self, mock_vgdisplay):
# should check if vg exists
# should raise exception if it exists
mock_vgdisplay.return_value = [{'name': 'vgname'}, {'name': 'some'}]
self.assertRaises(errors.VGAlreadyExistsError,
lu.vgcreate, 'vgname', '/dev/fake')
@mock.patch.object(lu, 'pvdisplay')
@mock.patch.object(lu, 'vgdisplay')
def test_vgcreate_pv_not_found(self, mock_vgdisplay, mock_pvdisplay):
# should check if all necessary pv exist
# should raise error if at least one pv does not
mock_vgdisplay.return_value = []
mock_pvdisplay.return_value = [{'vg': None, 'name': '/dev/fake1'},
{'vg': None, 'name': '/dev/fake2'}]
self.assertRaises(errors.PVNotFoundError,
lu.vgcreate, 'vgname', '/dev/fake', '/dev/fake2')
@mock.patch.object(lu, 'pvdisplay')
@mock.patch.object(lu, 'vgdisplay')
def test_vgcreate_pv_attached(self, mock_vgdisplay, mock_pvdisplay):
# should check if all necessary pv are not attached to some vg
# should raise error if at least one pv is attached
mock_vgdisplay.return_value = []
mock_pvdisplay.return_value = [{'vg': None, 'name': '/dev/fake1'},
{'vg': 'some', 'name': '/dev/fake2'}]
self.assertRaises(errors.PVBelongsToVGError,
lu.vgcreate, 'vgname', '/dev/fake1', '/dev/fake2')
@mock.patch.object(lu, 'pvdisplay')
@mock.patch.object(lu, 'vgdisplay')
@mock.patch.object(utils, 'execute')
def test_vgextend_ok(self, mock_exec, mock_vgdisplay, mock_pvdisplay):
# should check if vg exists
# should check if all necessary pv exist and not attached to any vg
# should run vgextend command
mock_vgdisplay.return_value = [{'name': 'some'}, {'name': 'another'}]
mock_pvdisplay.return_value = [{'vg': None, 'name': '/dev/fake1'},
{'vg': None, 'name': '/dev/fake2'}]
lu.vgextend('some', '/dev/fake1', '/dev/fake2')
mock_exec.assert_called_once_with(
'vgextend', 'some', '/dev/fake1', '/dev/fake2',
check_exit_code=[0])
@mock.patch.object(lu, 'vgdisplay')
@mock.patch.object(utils, 'execute')
def test_vgextend_not_found(self, mock_exec, mock_vgdisplay):
# should check if vg exists
# should raise error if it does not
mock_vgdisplay.return_value = [{'name': 'some'}]
self.assertRaises(errors.VGNotFoundError,
lu.vgextend, 'vgname', '/dev/fake1')
@mock.patch.object(lu, 'pvdisplay')
@mock.patch.object(lu, 'vgdisplay')
def test_vgextend_pv_not_found(self, mock_vgdisplay, mock_pvdisplay):
# should check if all necessary pv exist
# should raise error if at least one pv does not
mock_vgdisplay.return_value = [{'name': 'vgname'}]
mock_pvdisplay.return_value = [{'vg': None, 'name': '/dev/fake1'},
{'vg': None, 'name': '/dev/fake2'}]
self.assertRaises(errors.PVNotFoundError,
lu.vgextend, 'vgname', '/dev/fake', '/dev/fake2')
@mock.patch.object(lu, 'pvdisplay')
@mock.patch.object(lu, 'vgdisplay')
def test_vgextend_pv_attached(self, mock_vgdisplay, mock_pvdisplay):
# should check if all necessary pv are not attached to some vg
# should raise error if at least one pv is attached
mock_vgdisplay.return_value = [{'name': 'vgname'}]
mock_pvdisplay.return_value = [{'vg': None, 'name': '/dev/fake1'},
{'vg': 'some', 'name': '/dev/fake2'}]
self.assertRaises(errors.PVBelongsToVGError,
lu.vgextend, 'vgname', '/dev/fake1', '/dev/fake2')
@mock.patch.object(lu, 'vgdisplay')
@mock.patch.object(utils, 'execute')
def test_vgremove_ok(self, mock_exec, mock_vgdisplay):
# should check if vg exists
# then run vgremove command if it exists
mock_vgdisplay.return_value = [{'name': 'vgname'}, {'name': 'some'}]
lu.vgremove('vgname')
mock_exec.assert_called_once_with('vgremove', '-f', 'vgname',
check_exit_code=[0])
@mock.patch.object(lu, 'vgdisplay')
@mock.patch.object(utils, 'execute')
def test_vgremove_not_found(self, mock_exec, mock_vgdisplay):
# should check if vg exists
# then raise error if it doesn't
mock_vgdisplay.return_value = [{'name': 'some'}]
self.assertRaises(errors.VGNotFoundError, lu.vgremove, 'vgname')
@mock.patch.object(lu, 'lvdisplay')
@mock.patch.object(utils, 'execute')
def test_lvremove_ok(self, mock_exec, mock_lvdisplay):
mock_lvdisplay.return_value = [{'path': '/dev/vg/lv'},
{'path': '/dev/vg2/lv2'}]
lu.lvremove('/dev/vg/lv')
mock_exec.assert_called_once_with('lvremove', '-f', '/dev/vg/lv',
check_exit_code=[0])
@mock.patch.object(lu, 'lvdisplay')
@mock.patch.object(utils, 'execute')
def test_lvremove_not_found(self, mock_exec, mock_lvdisplay):
mock_lvdisplay.return_value = [{'path': '/dev/vg/lv'}]
self.assertRaises(errors.LVNotFoundError, lu.lvremove, '/dev/vg/lv2')
@mock.patch.object(lu, 'vgdisplay')
@mock.patch.object(lu, 'lvdisplay')
@mock.patch.object(utils, 'execute')
def test_lvcreate_ok(self, mock_exec, mock_lvdisplay, mock_vgdisplay):
mock_vgdisplay.return_value = [{'name': 'vgname', 'free': 2000},
{'name': 'some'}]
mock_lvdisplay.return_value = [{'name': 'some'}]
lu.lvcreate('vgname', 'lvname', 1000)
mock_exec.assert_called_once_with('lvcreate', '--yes', '-L', '1000m',
'-n', 'lvname', 'vgname',
check_exit_code=[0])
@mock.patch.object(lu, 'vgdisplay')
@mock.patch.object(utils, 'execute')
def test_lvcreate_not_found(self, mock_exec, mock_vgdisplay):
mock_vgdisplay.return_value = [{'name': 'some'}]
self.assertRaises(errors.VGNotFoundError, lu.lvcreate, 'vgname',
'lvname', 1)
@mock.patch.object(lu, 'vgdisplay')
@mock.patch.object(utils, 'execute')
def test_lvcreate_not_enough_space(self, mock_exec, mock_vgdisplay):
mock_vgdisplay.return_value = [{'name': 'vgname', 'free': 1},
{'name': 'some'}]
self.assertRaises(errors.NotEnoughSpaceError, lu.lvcreate, 'vgname',
'lvname', 2)
@mock.patch.object(lu, 'vgdisplay')
@mock.patch.object(lu, 'lvdisplay')
@mock.patch.object(utils, 'execute')
def test_lvcreate_lv_already_exists(self, mock_exec, mock_lvdisplay,
mock_vgdisplay):
mock_vgdisplay.return_value = [{'name': 'vgname', 'free': 2000},
{'name': 'some'}]
mock_lvdisplay.return_value = [{'name': 'lvname', 'vg': 'vgname'}]
self.assertRaises(errors.LVAlreadyExistsError, lu.lvcreate, 'vgname',
'lvname', 1000)
@mock.patch.object(lu, 'vgdisplay')
@mock.patch.object(lu, 'lvdisplay')
@mock.patch.object(utils, 'execute')
def test_lvcreate_lv_name_collision(self, mock_exec, mock_lvdisplay,
mock_vgdisplay):
# lv lvname already exists in another pv
mock_vgdisplay.return_value = [{'name': 'vgname', 'free': 2000},
{'name': 'some', 'free': 2000}]
mock_lvdisplay.return_value = [{'name': 'lvname', 'vg': 'some'}]
lu.lvcreate('vgname', 'lvname', 1000)
mock_exec.assert_called_once_with('lvcreate', '--yes', '-L', '1000m',
'-n', 'lvname', 'vgname',
check_exit_code=[0])
@mock.patch.object(utils, 'execute')
def test_lvdisplay(self, mock_exec):
mock_exec.return_value = [
' lv_name1;1234.12m;vg_name;lv_uuid1\n'
' lv_name2;5678.79m;vg_name;lv_uuid2\n ']
expected_lvs = [{'name': 'lv_name1', 'size': 1235, 'vg': 'vg_name',
'uuid': 'lv_uuid1', 'path': '/dev/vg_name/lv_name1'},
{'name': 'lv_name2', 'size': 5679, 'vg': 'vg_name',
'uuid': 'lv_uuid2', 'path': '/dev/vg_name/lv_name2'}]
actual_lvs = lu.lvdisplay()
self.assertEqual(expected_lvs, actual_lvs)
mock_exec.assert_called_once_with('lvdisplay', '-C', '--noheading',
'--units', 'm', '--options',
'lv_name,lv_size,vg_name,lv_uuid',
'--separator', ';',
check_exit_code=[0])
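# The expected sizes round 1234.12m up to 1235 and 5678.79m up to 5679,
# so the size parser presumably ceils fractional MiB values. A minimal
# sketch (hypothetical helper):
import math

def _parse_lv_size_sketch(size_field):
    return int(math.ceil(float(size_field.strip()[:-1])))  # '1234.12m' -> 1235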
@mock.patch.object(lu, 'pvdisplay')
@mock.patch.object(lu, 'vgdisplay')
@mock.patch.object(utils, 'execute')
def test_vgreduce_ok(self, mock_exec, mock_vgdisplay, mock_pvdisplay):
mock_vgdisplay.return_value = [{'name': 'vgname'}, {'name': 'some'}]
mock_pvdisplay.return_value = [{'vg': 'vgname', 'name': '/dev/fake1'},
{'vg': 'vgname', 'name': '/dev/fake2'}]
lu.vgreduce('vgname', '/dev/fake1', '/dev/fake2')
mock_exec.assert_called_once_with('vgreduce', '-f', 'vgname',
'/dev/fake1', '/dev/fake2',
check_exit_code=[0])
@mock.patch.object(lu, 'vgdisplay')
def test_vgreduce_vg_not_found(self, mock_vgdisplay):
mock_vgdisplay.return_value = [{'name': 'some'}]
self.assertRaises(errors.VGNotFoundError, lu.vgreduce, 'vgname1',
'/dev/fake1', '/dev/fake2')
@mock.patch.object(lu, 'pvdisplay')
@mock.patch.object(lu, 'vgdisplay')
@mock.patch.object(utils, 'execute')
def test_vgreduce_pv_not_attached(self, mock_exec, mock_vgdisplay,
mock_pvdisplay):
mock_vgdisplay.return_value = [{'name': 'vgname'}, {'name': 'some'}]
mock_pvdisplay.return_value = [{'vg': None, 'name': '/dev/fake1'},
{'vg': None, 'name': '/dev/fake2'}]
self.assertRaises(errors.PVNotFoundError, lu.vgreduce, 'vgname',
'/dev/fake1', '/dev/fake2')
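# Taken together, these tests imply the usual provisioning order for the
# lvm helpers. A hedged usage sketch (values borrowed from the manager
# tests elsewhere in this commit; sizes are in MiB):
#
#     lu.pvcreate('/dev/sda5', metadatasize=28, metadatacopies=2)
#     lu.vgcreate('os', '/dev/sda5')
#     lu.lvcreate('os', 'root', 15360)
#
# each step validates its preconditions via pvdisplay/vgdisplay first and
# raises a fuel_agent.errors subclass instead of shelling out blindly.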

View File

@ -1,532 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import os
import signal
from oslo.config import cfg
from oslotest import base as test_base
from fuel_agent import errors
from fuel_agent import manager
from fuel_agent import objects
from fuel_agent.objects import partition
from fuel_agent.tests import test_nailgun
from fuel_agent.utils import artifact as au
from fuel_agent.utils import fs as fu
from fuel_agent.utils import hardware as hu
from fuel_agent.utils import lvm as lu
from fuel_agent.utils import md as mu
from fuel_agent.utils import partition as pu
from fuel_agent.utils import utils
CONF = cfg.CONF
class TestManager(test_base.BaseTestCase):
@mock.patch('yaml.load')
@mock.patch.object(utils, 'init_http_request')
@mock.patch.object(hu, 'list_block_devices')
def setUp(self, mock_lbd, mock_http, mock_yaml):
super(TestManager, self).setUp()
mock_lbd.return_value = test_nailgun.LIST_BLOCK_DEVICES_SAMPLE
self.mgr = manager.Manager(test_nailgun.PROVISION_SAMPLE_DATA)
@mock.patch('six.moves.builtins.open')
@mock.patch.object(os, 'symlink')
@mock.patch.object(os, 'remove')
@mock.patch.object(os, 'path')
@mock.patch.object(os, 'listdir')
@mock.patch.object(utils, 'execute')
@mock.patch.object(mu, 'mdclean_all')
@mock.patch.object(lu, 'lvremove_all')
@mock.patch.object(lu, 'vgremove_all')
@mock.patch.object(lu, 'pvremove_all')
@mock.patch.object(fu, 'make_fs')
@mock.patch.object(lu, 'lvcreate')
@mock.patch.object(lu, 'vgcreate')
@mock.patch.object(lu, 'pvcreate')
@mock.patch.object(mu, 'mdcreate')
@mock.patch.object(pu, 'set_gpt_type')
@mock.patch.object(pu, 'set_partition_flag')
@mock.patch.object(pu, 'make_partition')
@mock.patch.object(pu, 'make_label')
@mock.patch.object(hu, 'list_block_devices')
def test_do_partitioning(self, mock_hu_lbd, mock_pu_ml, mock_pu_mp,
mock_pu_spf, mock_pu_sgt, mock_mu_m, mock_lu_p,
mock_lu_v, mock_lu_l, mock_fu_mf, mock_pvr,
mock_vgr, mock_lvr, mock_mdr, mock_exec,
mock_os_ld, mock_os_p, mock_os_r, mock_os_s,
mock_open):
mock_os_ld.return_value = ['not_a_rule', 'fake.rules']
mock_os_p.exists.return_value = True
mock_hu_lbd.return_value = test_nailgun.LIST_BLOCK_DEVICES_SAMPLE
self.mgr.do_partitioning()
mock_pu_ml_expected_calls = [mock.call('/dev/sda', 'gpt'),
mock.call('/dev/sdb', 'gpt'),
mock.call('/dev/sdc', 'gpt')]
self.assertEqual(mock_pu_ml_expected_calls, mock_pu_ml.call_args_list)
mock_pu_mp_expected_calls = [
mock.call('/dev/sda', 1, 25, 'primary'),
mock.call('/dev/sda', 25, 225, 'primary'),
mock.call('/dev/sda', 225, 425, 'primary'),
mock.call('/dev/sda', 425, 625, 'primary'),
mock.call('/dev/sda', 625, 20063, 'primary'),
mock.call('/dev/sda', 20063, 65660, 'primary'),
mock.call('/dev/sda', 65660, 65680, 'primary'),
mock.call('/dev/sdb', 1, 25, 'primary'),
mock.call('/dev/sdb', 25, 225, 'primary'),
mock.call('/dev/sdb', 225, 65196, 'primary'),
mock.call('/dev/sdc', 1, 25, 'primary'),
mock.call('/dev/sdc', 25, 225, 'primary'),
mock.call('/dev/sdc', 225, 65196, 'primary')]
self.assertEqual(mock_pu_mp_expected_calls, mock_pu_mp.call_args_list)
mock_pu_spf_expected_calls = [mock.call('/dev/sda', 1, 'bios_grub'),
mock.call('/dev/sdb', 1, 'bios_grub'),
mock.call('/dev/sdc', 1, 'bios_grub')]
self.assertEqual(mock_pu_spf_expected_calls,
mock_pu_spf.call_args_list)
mock_pu_sgt_expected_calls = [mock.call('/dev/sda', 4, 'fake_guid')]
self.assertEqual(mock_pu_sgt_expected_calls,
mock_pu_sgt.call_args_list)
mock_lu_p_expected_calls = [
mock.call('/dev/sda5', metadatasize=28, metadatacopies=2),
mock.call('/dev/sda6', metadatasize=28, metadatacopies=2),
mock.call('/dev/sdb3', metadatasize=28, metadatacopies=2),
mock.call('/dev/sdc3', metadatasize=28, metadatacopies=2)]
self.assertEqual(mock_lu_p_expected_calls, mock_lu_p.call_args_list)
mock_lu_v_expected_calls = [mock.call('os', '/dev/sda5'),
mock.call('image', '/dev/sda6',
'/dev/sdb3', '/dev/sdc3')]
self.assertEqual(mock_lu_v_expected_calls, mock_lu_v.call_args_list)
mock_lu_l_expected_calls = [mock.call('os', 'root', 15360),
mock.call('os', 'swap', 4014),
mock.call('image', 'glance', 175347)]
self.assertEqual(mock_lu_l_expected_calls, mock_lu_l.call_args_list)
mock_fu_mf_expected_calls = [
mock.call('ext2', '', '', '/dev/sda3'),
mock.call('ext2', '', '', '/dev/sda4'),
mock.call('swap', '', '', '/dev/mapper/os-swap'),
mock.call('xfs', '', '', '/dev/mapper/image-glance')]
self.assertEqual(mock_fu_mf_expected_calls, mock_fu_mf.call_args_list)
@mock.patch.object(utils, 'calculate_md5')
@mock.patch('os.path.getsize')
@mock.patch('yaml.load')
@mock.patch.object(utils, 'init_http_request')
@mock.patch.object(utils, 'execute')
@mock.patch.object(utils, 'render_and_save')
@mock.patch.object(hu, 'list_block_devices')
def test_do_configdrive(self, mock_lbd, mock_u_ras, mock_u_e,
mock_http_req, mock_yaml, mock_get_size, mock_md5):
mock_get_size.return_value = 123
mock_md5.return_value = 'fakemd5'
mock_lbd.return_value = test_nailgun.LIST_BLOCK_DEVICES_SAMPLE
self.assertEqual(1, len(self.mgr.driver.image_scheme.images))
self.mgr.do_configdrive()
mock_u_ras_expected_calls = [
mock.call(CONF.nc_template_path,
['cloud_config_pro_fi-le.jinja2',
'cloud_config_pro.jinja2',
'cloud_config_pro_fi.jinja2',
'cloud_config.jinja2'],
mock.ANY, '%s/%s' % (CONF.tmp_path, 'cloud_config.txt')),
mock.call(CONF.nc_template_path,
['boothook_pro_fi-le.jinja2',
'boothook_pro.jinja2',
'boothook_pro_fi.jinja2',
'boothook.jinja2'],
mock.ANY, '%s/%s' % (CONF.tmp_path, 'boothook.txt')),
mock.call(CONF.nc_template_path,
['meta-data_pro_fi-le.jinja2',
'meta-data_pro.jinja2',
'meta-data_pro_fi.jinja2',
'meta-data.jinja2'],
mock.ANY, '%s/%s' % (CONF.tmp_path, 'meta-data'))]
self.assertEqual(mock_u_ras_expected_calls, mock_u_ras.call_args_list)
mock_u_e_expected_calls = [
mock.call('write-mime-multipart',
'--output=%s' % ('%s/%s' % (CONF.tmp_path, 'user-data')),
'%s:text/cloud-boothook' % ('%s/%s' % (CONF.tmp_path,
'boothook.txt')),
'%s:text/cloud-config' % ('%s/%s' % (CONF.tmp_path,
'cloud_config.txt'))
),
mock.call('genisoimage', '-output', CONF.config_drive_path,
'-volid', 'cidata', '-joliet', '-rock',
'%s/%s' % (CONF.tmp_path, 'user-data'),
'%s/%s' % (CONF.tmp_path, 'meta-data'))]
self.assertEqual(mock_u_e_expected_calls, mock_u_e.call_args_list)
self.assertEqual(2, len(self.mgr.driver.image_scheme.images))
cf_drv_img = self.mgr.driver.image_scheme.images[-1]
self.assertEqual('file://%s' % CONF.config_drive_path, cf_drv_img.uri)
self.assertEqual('/dev/sda7',
self.mgr.driver.partition_scheme.configdrive_device())
self.assertEqual('iso9660', cf_drv_img.format)
self.assertEqual('raw', cf_drv_img.container)
self.assertEqual('fakemd5', cf_drv_img.md5)
self.assertEqual(123, cf_drv_img.size)
@mock.patch('yaml.load')
@mock.patch.object(utils, 'init_http_request')
@mock.patch.object(partition.PartitionScheme, 'configdrive_device')
@mock.patch.object(utils, 'execute')
@mock.patch.object(utils, 'render_and_save')
@mock.patch.object(hu, 'list_block_devices')
def test_do_configdrive_no_configdrive_device(self, mock_lbd, mock_u_ras,
mock_u_e, mock_p_ps_cd,
mock_http_req, mock_yaml):
mock_lbd.return_value = test_nailgun.LIST_BLOCK_DEVICES_SAMPLE
mock_p_ps_cd.return_value = None
self.assertRaises(errors.WrongPartitionSchemeError,
self.mgr.do_configdrive)
@mock.patch.object(utils, 'calculate_md5')
@mock.patch('os.path.getsize')
@mock.patch('yaml.load')
@mock.patch.object(utils, 'init_http_request')
@mock.patch.object(fu, 'extend_fs')
@mock.patch.object(au, 'GunzipStream')
@mock.patch.object(au, 'LocalFile')
@mock.patch.object(au, 'HttpUrl')
@mock.patch.object(au, 'Chain')
@mock.patch.object(utils, 'execute')
@mock.patch.object(utils, 'render_and_save')
@mock.patch.object(hu, 'list_block_devices')
def test_do_copyimage(self, mock_lbd, mock_u_ras, mock_u_e, mock_au_c,
mock_au_h, mock_au_l, mock_au_g, mock_fu_ef,
mock_http_req, mock_yaml, mock_get_size, mock_md5):
class FakeChain(object):
processors = []
def append(self, thing):
self.processors.append(thing)
def process(self):
pass
mock_lbd.return_value = test_nailgun.LIST_BLOCK_DEVICES_SAMPLE
mock_au_c.return_value = FakeChain()
self.mgr.do_configdrive()
self.mgr.do_copyimage()
imgs = self.mgr.driver.image_scheme.images
self.assertEqual(2, len(imgs))
expected_processors_list = []
for img in imgs[:-1]:
expected_processors_list += [
img.uri,
au.HttpUrl,
au.GunzipStream,
img.target_device
]
expected_processors_list += [
imgs[-1].uri,
au.LocalFile,
imgs[-1].target_device
]
self.assertEqual(expected_processors_list,
mock_au_c.return_value.processors)
mock_fu_ef_expected_calls = [
mock.call('ext4', '/dev/mapper/os-root')]
self.assertEqual(mock_fu_ef_expected_calls, mock_fu_ef.call_args_list)
@mock.patch.object(utils, 'calculate_md5')
@mock.patch('os.path.getsize')
@mock.patch('yaml.load')
@mock.patch.object(utils, 'init_http_request')
@mock.patch.object(fu, 'extend_fs')
@mock.patch.object(au, 'GunzipStream')
@mock.patch.object(au, 'LocalFile')
@mock.patch.object(au, 'HttpUrl')
@mock.patch.object(au, 'Chain')
@mock.patch.object(utils, 'execute')
@mock.patch.object(utils, 'render_and_save')
@mock.patch.object(hu, 'list_block_devices')
def test_do_copyimage_md5_matches(self, mock_lbd, mock_u_ras, mock_u_e,
mock_au_c, mock_au_h, mock_au_l,
mock_au_g, mock_fu_ef, mock_http_req,
mock_yaml, mock_get_size, mock_md5):
class FakeChain(object):
processors = []
def append(self, thing):
self.processors.append(thing)
def process(self):
pass
mock_get_size.return_value = 123
mock_md5.side_effect = ['fakemd5', 'really_fakemd5', 'fakemd5']
mock_lbd.return_value = test_nailgun.LIST_BLOCK_DEVICES_SAMPLE
mock_au_c.return_value = FakeChain()
self.mgr.driver.image_scheme.images[0].size = 1234
self.mgr.driver.image_scheme.images[0].md5 = 'really_fakemd5'
self.mgr.do_configdrive()
self.assertEqual(2, len(self.mgr.driver.image_scheme.images))
self.mgr.do_copyimage()
expected_md5_calls = [mock.call('/tmp/config-drive.img', 123),
mock.call('/dev/mapper/os-root', 1234),
mock.call('/dev/sda7', 123)]
self.assertEqual(expected_md5_calls, mock_md5.call_args_list)
@mock.patch.object(utils, 'calculate_md5')
@mock.patch('os.path.getsize')
@mock.patch('yaml.load')
@mock.patch.object(utils, 'init_http_request')
@mock.patch.object(fu, 'extend_fs')
@mock.patch.object(au, 'GunzipStream')
@mock.patch.object(au, 'LocalFile')
@mock.patch.object(au, 'HttpUrl')
@mock.patch.object(au, 'Chain')
@mock.patch.object(utils, 'execute')
@mock.patch.object(utils, 'render_and_save')
@mock.patch.object(hu, 'list_block_devices')
def test_do_copyimage_md5_mismatch(self, mock_lbd, mock_u_ras, mock_u_e,
mock_au_c, mock_au_h, mock_au_l,
mock_au_g, mock_fu_ef, mock_http_req,
mock_yaml, mock_get_size, mock_md5):
class FakeChain(object):
processors = []
def append(self, thing):
self.processors.append(thing)
def process(self):
pass
mock_get_size.return_value = 123
mock_md5.side_effect = ['fakemd5', 'really_fakemd5', 'fakemd5']
mock_lbd.return_value = test_nailgun.LIST_BLOCK_DEVICES_SAMPLE
mock_au_c.return_value = FakeChain()
self.mgr.driver.image_scheme.images[0].size = 1234
self.mgr.driver.image_scheme.images[0].md5 = 'fakemd5'
self.mgr.do_configdrive()
self.assertEqual(2, len(self.mgr.driver.image_scheme.images))
self.assertRaises(errors.ImageChecksumMismatchError,
self.mgr.do_copyimage)
@mock.patch('fuel_agent.manager.bu', create=True)
@mock.patch('fuel_agent.manager.fu', create=True)
@mock.patch('fuel_agent.manager.utils', create=True)
@mock.patch('fuel_agent.manager.os', create=True)
@mock.patch('fuel_agent.manager.shutil.move')
@mock.patch('fuel_agent.manager.open',
create=True, new_callable=mock.mock_open)
@mock.patch('fuel_agent.manager.tempfile.mkdtemp')
@mock.patch('fuel_agent.manager.yaml.safe_dump')
@mock.patch.object(manager.Manager, 'mount_target')
@mock.patch.object(manager.Manager, 'umount_target')
def test_do_build_image(self, mock_umount_target, mock_mount_target,
mock_yaml_dump, mock_mkdtemp,
mock_open, mock_shutil_move, mock_os,
mock_utils, mock_fu, mock_bu):
loops = [objects.Loop(), objects.Loop()]
self.mgr.driver.image_scheme = objects.ImageScheme([
objects.Image('file:///fake/img.img.gz', loops[0], 'ext4', 'gzip'),
objects.Image('file:///fake/img-boot.img.gz',
loops[1], 'ext2', 'gzip')])
self.mgr.driver.partition_scheme = objects.PartitionScheme()
self.mgr.driver.partition_scheme.add_fs(
device=loops[0], mount='/', fs_type='ext4')
self.mgr.driver.partition_scheme.add_fs(
device=loops[1], mount='/boot', fs_type='ext2')
self.mgr.driver.metadata_uri = 'file:///fake/img.yaml'
self.mgr.driver.operating_system = objects.Ubuntu(
repos=[
objects.DEBRepo('ubuntu', 'http://fakeubuntu',
'trusty', 'fakesection', priority=900),
objects.DEBRepo('ubuntu_zero', 'http://fakeubuntu_zero',
'trusty', 'fakesection', priority=None),
objects.DEBRepo('mos', 'http://fakemos',
'mosX.Y', 'fakesection', priority=1000)],
packages=['fakepackage1', 'fakepackage2'])
mock_os.path.exists.return_value = False
mock_os.path.join.return_value = '/tmp/imgdir/proc'
mock_os.path.basename.side_effect = ['img.img.gz', 'img-boot.img.gz']
mock_bu.create_sparse_tmp_file.side_effect = \
['/tmp/img', '/tmp/img-boot']
mock_bu.get_free_loop_device.side_effect = ['/dev/loop0', '/dev/loop1']
mock_mkdtemp.return_value = '/tmp/imgdir'
getsize_side = [20, 2, 10, 1]
mock_os.path.getsize.side_effect = getsize_side
md5_side = ['fakemd5_raw', 'fakemd5_gzip',
'fakemd5_raw_boot', 'fakemd5_gzip_boot']
mock_utils.calculate_md5.side_effect = md5_side
mock_bu.containerize.side_effect = ['/tmp/img.gz', '/tmp/img-boot.gz']
mock_bu.stop_chrooted_processes.side_effect = [
False, True, False, True]
self.mgr.do_build_image()
self.assertEqual(
[mock.call('/fake/img.img.gz'),
mock.call('/fake/img-boot.img.gz')],
mock_os.path.exists.call_args_list)
self.assertEqual([mock.call(dir=CONF.image_build_dir,
suffix=CONF.image_build_suffix)] * 2,
mock_bu.create_sparse_tmp_file.call_args_list)
self.assertEqual([mock.call()] * 2,
mock_bu.get_free_loop_device.call_args_list)
self.assertEqual([mock.call('/tmp/img', '/dev/loop0'),
mock.call('/tmp/img-boot', '/dev/loop1')],
mock_bu.attach_file_to_loop.call_args_list)
self.assertEqual([mock.call(fs_type='ext4', fs_options='',
fs_label='', dev='/dev/loop0'),
mock.call(fs_type='ext2', fs_options='',
fs_label='', dev='/dev/loop1')],
mock_fu.make_fs.call_args_list)
mock_mkdtemp.assert_called_once_with(dir=CONF.image_build_dir,
suffix=CONF.image_build_suffix)
mock_mount_target.assert_called_once_with(
'/tmp/imgdir', treat_mtab=False, pseudo=False)
self.assertEqual([mock.call('/tmp/imgdir')] * 2,
mock_bu.suppress_services_start.call_args_list)
mock_bu.run_debootstrap.assert_called_once_with(
uri='http://fakeubuntu', suite='trusty', chroot='/tmp/imgdir')
mock_bu.set_apt_get_env.assert_called_once_with()
mock_bu.pre_apt_get.assert_called_once_with('/tmp/imgdir')
self.assertEqual([
mock.call(name='ubuntu',
uri='http://fakeubuntu',
suite='trusty',
section='fakesection',
chroot='/tmp/imgdir'),
mock.call(name='ubuntu_zero',
uri='http://fakeubuntu_zero',
suite='trusty',
section='fakesection',
chroot='/tmp/imgdir'),
mock.call(name='mos',
uri='http://fakemos',
suite='mosX.Y',
section='fakesection',
chroot='/tmp/imgdir')],
mock_bu.add_apt_source.call_args_list)
# we don't call add_apt_preference for ubuntu_zero
# because it has priority == None
self.assertEqual([
mock.call(name='ubuntu',
priority=900,
suite='trusty',
section='fakesection',
chroot='/tmp/imgdir',
uri='http://fakeubuntu'),
mock.call(name='mos',
priority=1000,
suite='mosX.Y',
section='fakesection',
chroot='/tmp/imgdir',
uri='http://fakemos')],
mock_bu.add_apt_preference.call_args_list)
mock_utils.makedirs_if_not_exists.assert_called_once_with(
'/tmp/imgdir/proc')
self.assertEqual([
mock.call('tune2fs', '-O', '^has_journal', '/dev/loop0'),
mock.call('tune2fs', '-O', 'has_journal', '/dev/loop0')],
mock_utils.execute.call_args_list)
mock_fu.mount_bind.assert_called_once_with('/tmp/imgdir', '/proc')
mock_bu.run_apt_get.assert_called_once_with(
'/tmp/imgdir', packages=['fakepackage1', 'fakepackage2'])
mock_bu.do_post_inst.assert_called_once_with('/tmp/imgdir')
signal_calls = mock_bu.stop_chrooted_processes.call_args_list
self.assertEqual(2 * [mock.call('/tmp/imgdir', signal=signal.SIGTERM),
mock.call('/tmp/imgdir', signal=signal.SIGKILL)],
signal_calls)
self.assertEqual(
[mock.call('/tmp/imgdir/proc')] * 2,
mock_fu.umount_fs.call_args_list)
self.assertEqual(
[mock.call(
'/tmp/imgdir', try_lazy_umount=False, pseudo=False)] * 2,
mock_umount_target.call_args_list)
self.assertEqual(
[mock.call('/dev/loop0'), mock.call('/dev/loop1')] * 2,
mock_bu.deattach_loop.call_args_list)
self.assertEqual([mock.call('/tmp/img'), mock.call('/tmp/img-boot')],
mock_bu.shrink_sparse_file.call_args_list)
self.assertEqual([mock.call('/tmp/img'),
mock.call('/fake/img.img.gz'),
mock.call('/tmp/img-boot'),
mock.call('/fake/img-boot.img.gz')],
mock_os.path.getsize.call_args_list)
self.assertEqual([mock.call('/tmp/img', 20),
mock.call('/fake/img.img.gz', 2),
mock.call('/tmp/img-boot', 10),
mock.call('/fake/img-boot.img.gz', 1)],
mock_utils.calculate_md5.call_args_list)
self.assertEqual([mock.call('/tmp/img', 'gzip'),
mock.call('/tmp/img-boot', 'gzip')],
mock_bu.containerize.call_args_list)
mock_open.assert_called_once_with('/fake/img.yaml', 'w')
self.assertEqual(
[mock.call('/tmp/img.gz', '/fake/img.img.gz'),
mock.call('/tmp/img-boot.gz', '/fake/img-boot.img.gz')],
mock_shutil_move.call_args_list)
metadata = {}
for repo in self.mgr.driver.operating_system.repos:
metadata.setdefault('repos', []).append({
'type': 'deb',
'name': repo.name,
'uri': repo.uri,
'suite': repo.suite,
'section': repo.section,
'priority': repo.priority,
'meta': repo.meta})
metadata['packages'] = self.mgr.driver.operating_system.packages
metadata['images'] = [
{
'raw_md5': md5_side[0],
'raw_size': getsize_side[0],
'raw_name': None,
'container_name':
os.path.basename(
self.mgr.driver.image_scheme.images[0].uri.split(
'file://', 1)[1]),
'container_md5': md5_side[1],
'container_size': getsize_side[1],
'container': self.mgr.driver.image_scheme.images[0].container,
'format': self.mgr.driver.image_scheme.images[0].format
},
{
'raw_md5': md5_side[2],
'raw_size': getsize_side[2],
'raw_name': None,
'container_name':
os.path.basename(
self.mgr.driver.image_scheme.images[1].uri.split(
'file://', 1)[1]),
'container_md5': md5_side[3],
'container_size': getsize_side[3],
'container': self.mgr.driver.image_scheme.images[1].container,
'format': self.mgr.driver.image_scheme.images[1].format
}
]
mock_yaml_dump.assert_called_once_with(metadata, stream=mock_open())
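# The stop_chrooted_processes side effects ([False, True] per image) and
# the asserted SIGTERM/SIGKILL pairs suggest the manager escalates before
# unmounting. A minimal sketch under that assumption (hypothetical helper;
# bu is fuel_agent's build-utils module, mocked above as
# fuel_agent.manager.bu):
def _stop_chroot_sketch(bu, chroot):
    # try a graceful SIGTERM first; fall back to SIGKILL if anything survives
    if not bu.stop_chrooted_processes(chroot, signal=signal.SIGTERM):
        bu.stop_chrooted_processes(chroot, signal=signal.SIGKILL)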

View File

@ -1,277 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslotest import base as test_base
import six
from fuel_agent import errors
from fuel_agent.utils import hardware as hu
from fuel_agent.utils import md as mu
from fuel_agent.utils import utils
if six.PY2:
OPEN_FUNCTION_NAME = '__builtin__.open'
else:
OPEN_FUNCTION_NAME = 'builtins.open'
class TestMdUtils(test_base.BaseTestCase):
@mock.patch('fuel_agent.utils.md.utils.execute')
def test_mddisplay_nostate_detail(self, mock_exec):
mock_exec.return_value = (
"""/dev/md127:
Version : imsm
Raid Level : container
Total Devices : 2
Working Devices : 2
UUID : 46a4fc60:21554de1:1edfad0f:c137ddac
Member Arrays :
Number Major Minor RaidDevice
0 8 0 - /dev/sda
1 8 16 - /dev/sdb""",
''
)
expected = [{
'Raid Level': 'container',
'UUID': '46a4fc60:21554de1:1edfad0f:c137ddac',
'Version': 'imsm',
'devices': ['/dev/sda', '/dev/sdb'],
'name': '/dev/md127',
}]
mds = mu.mddisplay(['/dev/md127'])
mock_exec.assert_called_once_with(
'mdadm', '--detail', '/dev/md127', check_exit_code=[0])
self.assertItemsEqual(expected, mds)
@mock.patch.object(utils, 'execute')
def test_mddisplay(self, mock_exec):
# should read file /proc/mdstat
# should get detailed description for all md devices
# should return list of dicts representing md devices
mock_open_data = """Personalities : [raid1]
md0 : active raid1 loop5[1] loop4[0]
102272 blocks super 1.2 [2/2] [UU]
unused devices: <none>
"""
mock_open = mock.mock_open(read_data=mock_open_data)
patcher = mock.patch(OPEN_FUNCTION_NAME, new=mock_open)
patcher.start()
mock_exec.return_value = (
"""/dev/md0:
Version : 1.2
Creation Time : Wed Jun 18 18:44:57 2014
Raid Level : raid1
Array Size : 102272 (99.89 MiB 104.73 MB)
Used Dev Size : 102272 (99.89 MiB 104.73 MB)
Raid Devices : 2
Total Devices : 2
Persistence : Superblock is persistent
Update Time : Wed Jun 18 18:45:01 2014
State : clean
Active Devices : 2
Working Devices : 2
Failed Devices : 0
Spare Devices : 0
Name : localhost.localdomain:0 (local to host
localhost.localdomain)
UUID : 12dd4cfc:6b2ac9db:94564538:a6ffee82
Events : 17
Number Major Minor RaidDevice State
0 7 4 0 active sync /dev/loop4
1 7 5 1 active sync /dev/loop5""",
''
)
expected = [{
'name': '/dev/md0',
'Version': '1.2',
'Raid Level': 'raid1',
'Raid Devices': '2',
'Active Devices': '2',
'Spare Devices': '0',
'Failed Devices': '0',
'State': 'clean',
'UUID': '12dd4cfc:6b2ac9db:94564538:a6ffee82',
'devices': ['/dev/loop4', '/dev/loop5']
}]
mds = mu.mddisplay()
mock_exec.assert_called_once_with(
'mdadm', '--detail', '/dev/md0', check_exit_code=[0])
key = lambda x: x['name']
self.assertEqual(sorted(expected, key=key), sorted(mds, key=key))
patcher.stop()
@mock.patch.object(mu, 'mdclean')
@mock.patch.object(hu, 'list_block_devices')
@mock.patch.object(mu, 'mddisplay')
@mock.patch.object(utils, 'execute')
def test_mdcreate_ok(self, mock_exec, mock_mddisplay,
mock_bdevs, mock_mdclean):
# should check if md already exists
# should check if md level is valid
# should check if all necessary devices exist
# should check if all devices are not parts of some md
# should clean any md metadata that may be present on the devices
# should run mdadm command to create new md
mock_mddisplay.return_value = \
[{'name': '/dev/md10', 'devices': ['/dev/fake10']},
# should also accept devices with missing 'devices' entry
{'name': '/dev/md11'}]
mock_bdevs.return_value = [{'device': '/dev/fake1'},
{'device': '/dev/fake2'}]
mu.mdcreate('/dev/md0', 'mirror', '/dev/fake1', '/dev/fake2')
mock_mdclean_expected_calls = [mock.call('/dev/fake1'),
mock.call('/dev/fake2')]
self.assertEqual(mock_mdclean_expected_calls,
mock_mdclean.call_args_list)
mock_exec.assert_called_once_with(
'mdadm', '--create', '--force', '/dev/md0', '-e0.90',
'--level=mirror',
'--raid-devices=2', '/dev/fake1', '/dev/fake2',
check_exit_code=[0])
@mock.patch.object(mu, 'mddisplay')
def test_mdcreate_duplicate(self, mock_mddisplay):
# should check if md already exists
# should raise error if it exists
mock_mddisplay.return_value = [{'name': '/dev/md0'}]
self.assertRaises(
errors.MDAlreadyExistsError, mu.mdcreate,
'/dev/md0', 'mirror', '/dev/fake')
@mock.patch.object(mu, 'mddisplay')
def test_mdcreate_unsupported_level(self, mock_mddisplay):
# should check if md level is valid
# should raise error if it is not
mock_mddisplay.return_value = [{'name': '/dev/md10'}]
self.assertRaises(
errors.MDWrongSpecError, mu.mdcreate,
'/dev/md0', 'badlevel', '/dev/fake')
@mock.patch.object(hu, 'list_block_devices')
@mock.patch.object(mu, 'mddisplay')
def test_mdcreate_device_not_found(self, mock_mddisplay, mock_bdevs):
# should check if all devices exist
# should raise error if at least one device does not
mock_mddisplay.return_value = [{'name': '/dev/md10'}]
mock_bdevs.return_value = [{'device': '/dev/fake1'},
{'device': '/dev/fake10'}]
self.assertRaises(
errors.MDNotFoundError, mu.mdcreate,
'/dev/md0', 'mirror', '/dev/fake1', '/dev/fake2')
@mock.patch.object(hu, 'list_block_devices')
@mock.patch.object(mu, 'mddisplay')
def test_mdcreate_device_attached(self, mock_mddisplay, mock_bdevs):
# should check if all necessary devices are not attached to some md
# should raise error if at least one device is attached
mock_mddisplay.return_value = [{'name': '/dev/md10',
'devices': ['/dev/fake2']}]
mock_bdevs.return_value = [{'device': '/dev/fake1'},
{'device': '/dev/fake2'}]
self.assertRaises(
errors.MDDeviceDuplicationError, mu.mdcreate,
'/dev/md0', 'mirror', '/dev/fake1', '/dev/fake2')
@mock.patch.object(utils, 'execute')
@mock.patch.object(mu, 'mdclean')
@mock.patch.object(hu, 'list_block_devices')
@mock.patch.object(mu, 'mddisplay')
def test_mdcreate_device_clean(self, mock_mddisplay,
mock_bdevs, mock_mdclean, mock_exec):
# should clean md metadata on all devices before building a new md
mock_mddisplay.return_value = []
mock_bdevs.return_value = [{'device': '/dev/fake1'},
{'device': '/dev/fake2'}]
mu.mdcreate('/dev/md0', 'mirror', '/dev/fake1', '/dev/fake2')
expected_calls = [mock.call('/dev/fake1'), mock.call('/dev/fake2')]
self.assertEqual(mock_mdclean.call_args_list, expected_calls)
@mock.patch.object(mu, 'mdclean')
@mock.patch.object(mu, 'mdremove')
@mock.patch.object(mu, 'mddisplay')
def test_mdclean_all(self, mock_mddisplay, mock_mdremove, mock_mdclean):
mock_mddisplay.side_effect = [
[{'name': '/dev/md10', 'devices': ['/dev/fake10']},
{'name': '/dev/md11'}],
[{'name': '/dev/md11'}],
[]
]
mu.mdclean_all()
mock_mdremove_expected_calls = [
mock.call('/dev/md10'), mock.call('/dev/md11'),
mock.call('/dev/md11')]
mock_mdclean.assert_called_once_with('/dev/fake10')
self.assertEqual(mock_mdremove.call_args_list,
mock_mdremove_expected_calls)
@mock.patch.object(mu, 'mdclean')
@mock.patch.object(mu, 'mdremove')
@mock.patch.object(mu, 'mddisplay')
def test_mdclean_all_fail(self, mock_mddisplay, mock_mdremove,
mock_mdclean):
mock_mddisplay.return_value = [{'name': '/dev/md11'}]
self.assertRaises(errors.MDRemovingError, mu.mdclean_all)
@mock.patch.object(utils, 'execute')
@mock.patch.object(mu, 'get_mdnames')
def test_mdremove_ok(self, mock_get_mdn, mock_exec):
# should check if md exists
# should run mdadm command to remove md device
mock_get_mdn.return_value = ['/dev/md0']
expected_calls = [
mock.call('udevadm', 'settle', '--quiet', check_exit_code=[0]),
mock.call('mdadm', '--stop', '/dev/md0', check_exit_code=[0]),
mock.call('mdadm', '--remove', '/dev/md0', check_exit_code=[0, 1])
]
mu.mdremove('/dev/md0')
self.assertEqual(mock_exec.call_args_list, expected_calls)
@mock.patch.object(mu, 'get_mdnames')
def test_mdremove_notfound(self, mock_get_mdn):
# should check if md exists
# should raise error if it does not
mock_get_mdn.return_value = ['/dev/md0']
self.assertRaises(
errors.MDNotFoundError, mu.mdremove, '/dev/md1')
@mock.patch.object(utils, 'execute')
def test_mdclean(self, mock_exec):
mu.mdclean('/dev/md0')
mock_exec.assert_called_once_with('mdadm', '--zero-superblock',
'--force', '/dev/md0',
check_exit_code=[0])
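# test_mdclean_all feeds mddisplay three shrinking results, and the _fail
# variant expects MDRemovingError once arrays survive, which implies a
# bounded retry loop. A sketch under those assumptions (the attempt count
# is a guess):
def _mdclean_all_sketch(max_attempts=3):
    for _ in range(max_attempts):
        mds = mu.mddisplay()
        if not mds:
            return
        for md in mds:
            for device in md.get('devices', []):
                mu.mdclean(device)
            mu.mdremove(md['name'])
    raise errors.MDRemovingError('Could not remove some md devices')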

File diff suppressed because it is too large

View File

@ -1,244 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import os
import six
from six.moves.urllib.parse import urlsplit
from oslotest import base as test_base
from fuel_agent.drivers.nailgun import NailgunBuildImage
from fuel_agent import errors
from fuel_agent import objects
DEFAULT_TRUSTY_PACKAGES = [
"acl",
"anacron",
"bash-completion",
"bridge-utils",
"bsdmainutils",
"build-essential",
"cloud-init",
"curl",
"daemonize",
"debconf-utils",
"gdisk",
"grub-pc",
"linux-firmware",
"linux-firmware-nonfree",
"linux-headers-generic-lts-trusty",
"linux-image-generic-lts-trusty",
"lvm2",
"mcollective",
"mdadm",
"nailgun-agent",
"nailgun-mcagents",
"nailgun-net-check",
"ntp",
"openssh-client",
"openssh-server",
"puppet",
"python-amqp",
"ruby-augeas",
"ruby-ipaddress",
"ruby-json",
"ruby-netaddr",
"ruby-openstack",
"ruby-shadow",
"ruby-stomp",
"telnet",
"ubuntu-minimal",
"ubuntu-standard",
"uuid-runtime",
"vim",
"virt-what",
"vlan",
]
REPOS_SAMPLE = [
{
"name": "ubuntu",
"section": "main universe multiverse",
"uri": "http://archive.ubuntu.com/ubuntu/",
"priority": None,
"suite": "trusty",
"type": "deb"
},
{
"name": "mos",
"section": "main restricted",
"uri": "http://10.20.0.2:8080/2014.2-6.1/ubuntu/x86_64",
"priority": 1050,
"suite": "mos6.1",
"type": "deb"
}
]
IMAGE_DATA_SAMPLE = {
"/boot": {
"container": "gzip",
"uri": "http://10.20.0.2:8080/path/to/img-boot.img.gz",
"format": "ext2"
},
"/": {
"container": "gzip",
"uri": "http://10.20.0.2:8080/path/to/img.img.gz",
"format": "ext4"
}
}
class TestNailgunBuildImage(test_base.BaseTestCase):
def test_default_trusty_packages(self):
self.assertEqual(NailgunBuildImage.DEFAULT_TRUSTY_PACKAGES,
DEFAULT_TRUSTY_PACKAGES)
@mock.patch.object(NailgunBuildImage, '__init__')
def test_parse_operating_system_error_bad_codename(self, mock_init):
mock_init.return_value = None
driver = NailgunBuildImage()
driver.data = {'codename': 'not-trusty'}
self.assertRaises(errors.WrongInputDataError,
driver.parse_operating_system)
@mock.patch('fuel_agent.objects.Ubuntu')
@mock.patch.object(NailgunBuildImage, '__init__')
def test_parse_operating_system_packages_given(self, mock_init, mock_ub):
mock_init.return_value = None
data = {
'repos': [],
'codename': 'trusty',
'packages': ['pack']
}
driver = NailgunBuildImage()
driver.data = data
mock_ub_instance = mock_ub.return_value
mock_ub_instance.packages = data['packages']
driver.parse_operating_system()
mock_ub.assert_called_once_with(repos=[], packages=data['packages'])
self.assertEqual(driver.operating_system.packages, data['packages'])
@mock.patch('fuel_agent.objects.Ubuntu')
@mock.patch.object(NailgunBuildImage, '__init__')
def test_parse_operating_system_packages_not_given(
self, mock_init, mock_ub):
mock_init.return_value = None
data = {
'repos': [],
'codename': 'trusty'
}
driver = NailgunBuildImage()
driver.data = data
mock_ub_instance = mock_ub.return_value
mock_ub_instance.packages = NailgunBuildImage.DEFAULT_TRUSTY_PACKAGES
driver.parse_operating_system()
mock_ub.assert_called_once_with(
repos=[], packages=NailgunBuildImage.DEFAULT_TRUSTY_PACKAGES)
self.assertEqual(driver.operating_system.packages,
NailgunBuildImage.DEFAULT_TRUSTY_PACKAGES)
@mock.patch('fuel_agent.objects.DEBRepo')
@mock.patch('fuel_agent.objects.Ubuntu')
@mock.patch.object(NailgunBuildImage, '__init__')
def test_parse_operating_system_repos(self, mock_init, mock_ub, mock_deb):
mock_init.return_value = None
data = {
'repos': REPOS_SAMPLE,
'codename': 'trusty'
}
driver = NailgunBuildImage()
driver.data = data
mock_deb_expected_calls = []
repos = []
for r in REPOS_SAMPLE:
kwargs = {
'name': r['name'],
'uri': r['uri'],
'suite': r['suite'],
'section': r['section'],
'priority': r['priority']
}
mock_deb_expected_calls.append(mock.call(**kwargs))
repos.append(objects.DEBRepo(**kwargs))
driver.parse_operating_system()
mock_ub_instance = mock_ub.return_value
mock_ub_instance.repos = repos
mock_ub.assert_called_once_with(
repos=repos, packages=NailgunBuildImage.DEFAULT_TRUSTY_PACKAGES)
self.assertEqual(mock_deb_expected_calls,
mock_deb.call_args_list[:len(REPOS_SAMPLE)])
self.assertEqual(driver.operating_system.repos, repos)
@mock.patch('fuel_agent.drivers.nailgun.objects.Loop')
@mock.patch('fuel_agent.objects.Image')
@mock.patch('fuel_agent.objects.Fs')
@mock.patch('fuel_agent.objects.PartitionScheme')
@mock.patch('fuel_agent.objects.ImageScheme')
@mock.patch.object(NailgunBuildImage, '__init__')
def test_parse_schemes(
self, mock_init, mock_imgsch, mock_partsch,
mock_fs, mock_img, mock_loop):
mock_init.return_value = None
data = {
'image_data': IMAGE_DATA_SAMPLE,
'output': '/some/local/path',
}
driver = NailgunBuildImage()
driver.data = data
driver.parse_schemes()
mock_fs_expected_calls = []
mock_img_expected_calls = []
images = []
fss = []
data_length = len(data['image_data'].keys())
for mount, image in six.iteritems(data['image_data']):
filename = os.path.basename(urlsplit(image['uri']).path)
img_kwargs = {
'uri': 'file://' + os.path.join(data['output'], filename),
'format': image['format'],
'container': image['container'],
'target_device': None
}
mock_img_expected_calls.append(mock.call(**img_kwargs))
images.append(objects.Image(**img_kwargs))
fs_kwargs = {
'device': None,
'mount': mount,
'fs_type': image['format']
}
mock_fs_expected_calls.append(mock.call(**fs_kwargs))
fss.append(objects.Fs(**fs_kwargs))
if mount == '/':
metadata_filename = filename.split('.', 1)[0] + '.yaml'
mock_imgsch_instance = mock_imgsch.return_value
mock_imgsch_instance.images = images
mock_partsch_instance = mock_partsch.return_value
mock_partsch_instance.fss = fss
self.assertEqual(
driver.metadata_uri, 'file://' + os.path.join(
data['output'], metadata_filename))
self.assertEqual(mock_img_expected_calls,
mock_img.call_args_list[:data_length])
self.assertEqual(mock_fs_expected_calls,
mock_fs.call_args_list[:data_length])
self.assertEqual(driver.image_scheme.images, images)
self.assertEqual(driver.partition_scheme.fss, fss)
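# The URI handling asserted above reduces to deriving a local file name
# from each remote image URI. A minimal sketch (hypothetical helper; os
# and urlsplit are already imported at the top of this file):
def _local_image_uri_sketch(output_dir, remote_uri):
    filename = os.path.basename(urlsplit(remote_uri).path)
    return 'file://' + os.path.join(output_dir, filename)

# _local_image_uri_sketch('/some/local/path',
#                         'http://10.20.0.2:8080/path/to/img.img.gz')
# returns 'file:///some/local/path/img.img.gz'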

View File

@ -1,256 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslotest import base as test_base
from fuel_agent import errors
from fuel_agent.objects import partition
class TestMD(test_base.BaseTestCase):
def setUp(self):
super(TestMD, self).setUp()
self.md = partition.Md('name', 'level')
def test_add_device_ok(self):
self.assertEqual(0, len(self.md.devices))
self.md.add_device('device')
self.assertEqual(1, len(self.md.devices))
self.assertEqual('device', self.md.devices[0])
def test_add_device_in_spares_fail(self):
self.assertEqual(0, len(self.md.devices))
self.assertEqual(0, len(self.md.spares))
self.md.add_spare('device')
self.assertRaises(errors.MDDeviceDuplicationError, self.md.add_device,
'device')
def test_add_device_in_devices_fail(self):
self.assertEqual(0, len(self.md.devices))
self.assertEqual(0, len(self.md.spares))
self.md.add_device('device')
self.assertRaises(errors.MDDeviceDuplicationError, self.md.add_device,
'device')
def test_add_spare_in_spares_fail(self):
self.assertEqual(0, len(self.md.devices))
self.assertEqual(0, len(self.md.spares))
self.md.add_spare('device')
self.assertRaises(errors.MDDeviceDuplicationError, self.md.add_spare,
'device')
def test_add_spare_in_devices_fail(self):
self.assertEqual(0, len(self.md.devices))
self.assertEqual(0, len(self.md.spares))
self.md.add_device('device')
self.assertRaises(errors.MDDeviceDuplicationError, self.md.add_spare,
'device')
class TestPartition(test_base.BaseTestCase):
def setUp(self):
super(TestPartition, self).setUp()
self.pt = partition.Partition('name', 'count', 'device', 'begin',
'end', 'partition_type')
def test_set_flag(self):
self.assertEqual(0, len(self.pt.flags))
self.pt.set_flag('fake_flag')
self.assertEqual(1, len(self.pt.flags))
self.assertIn('fake_flag', self.pt.flags)
class TestPartitionScheme(test_base.BaseTestCase):
def setUp(self):
super(TestPartitionScheme, self).setUp()
self.p_scheme = partition.PartitionScheme()
def test_root_device_not_found(self):
self.assertRaises(errors.WrongPartitionSchemeError,
self.p_scheme.root_device)
def test_fs_by_device(self):
expected_fs = partition.Fs('device')
self.p_scheme.fss.append(expected_fs)
self.p_scheme.fss.append(partition.Fs('wrong_device'))
actual_fs = self.p_scheme.fs_by_device('device')
self.assertEqual(expected_fs, actual_fs)
def test_fs_by_mount(self):
expected_fs = partition.Fs('d', mount='mount')
self.p_scheme.fss.append(expected_fs)
self.p_scheme.fss.append(partition.Fs('w_d', mount='wrong_mount'))
actual_fs = self.p_scheme.fs_by_mount('mount')
self.assertEqual(expected_fs, actual_fs)
def test_pv_by_name(self):
expected_pv = partition.Pv('pv')
self.p_scheme.pvs.append(expected_pv)
self.p_scheme.pvs.append(partition.Pv('wrong_pv'))
actual_pv = self.p_scheme.pv_by_name('pv')
self.assertEqual(expected_pv, actual_pv)
def test_vg_by_name(self):
expected_vg = partition.Vg('vg')
self.p_scheme.vgs.append(expected_vg)
self.p_scheme.vgs.append(partition.Vg('wrong_vg'))
actual_vg = self.p_scheme.vg_by_name('vg')
self.assertEqual(expected_vg, actual_vg)
def test_vg_attach_by_name(self):
self.p_scheme.vg_attach_by_name('pvname', 'vgname')
self.assertEqual(1, len(self.p_scheme.pvs))
self.assertEqual(1, len(self.p_scheme.vgs))
self.assertIn('pvname', self.p_scheme.vgs[0].pvnames)
self.assertIn('vgname', self.p_scheme.vgs[0].name)
def test_md_next_name_ok(self):
expected_name = '/dev/md0'
self.assertEqual(expected_name, self.p_scheme.md_next_name())
def test_md_next_name_fail(self):
self.p_scheme.mds = [
partition.Md('/dev/md%s' % x, 'level') for x in range(0, 128)]
self.assertRaises(errors.MDAlreadyExistsError,
self.p_scheme.md_next_name)
def test_md_by_name(self):
self.assertEqual(0, len(self.p_scheme.mds))
expected_md = partition.Md('name', 'level')
self.p_scheme.mds.append(expected_md)
self.p_scheme.mds.append(partition.Md('wrong_name', 'level'))
self.assertEqual(expected_md, self.p_scheme.md_by_name('name'))
def test_md_by_mount(self):
self.assertEqual(0, len(self.p_scheme.mds))
self.assertEqual(0, len(self.p_scheme.fss))
expected_md = partition.Md('name', 'level')
expected_fs = partition.Fs('name', mount='mount')
self.p_scheme.mds.append(expected_md)
self.p_scheme.fss.append(expected_fs)
self.p_scheme.fss.append(partition.Fs('wrong_name',
mount='wrong_mount'))
self.assertEqual(expected_md, self.p_scheme.md_by_mount('mount'))
def test_md_attach_by_mount_md_exists(self):
self.assertEqual(0, len(self.p_scheme.mds))
self.assertEqual(0, len(self.p_scheme.fss))
expected_md = partition.Md('name', 'level')
expected_fs = partition.Fs('name', mount='mount')
self.p_scheme.mds.append(expected_md)
self.p_scheme.fss.append(expected_fs)
actual_md = self.p_scheme.md_attach_by_mount('device', 'mount')
self.assertIn('device', actual_md.devices)
self.assertEqual(expected_md, actual_md)
def test_md_attach_by_mount_no_md(self):
self.assertEqual(0, len(self.p_scheme.mds))
self.assertEqual(0, len(self.p_scheme.fss))
actual_md = self.p_scheme.md_attach_by_mount(
'device', 'mount', fs_type='fs_type', fs_options='-F',
fs_label='fs_label', name='name', level='level')
self.assertIn('device', actual_md.devices)
self.assertEqual(1, len(self.p_scheme.fss))
self.assertEqual('name', self.p_scheme.fss[0].device)
self.assertEqual('mount', self.p_scheme.fss[0].mount)
self.assertEqual('fs_type', self.p_scheme.fss[0].type)
self.assertEqual('fs_label', self.p_scheme.fss[0].label)
self.assertEqual('-F', self.p_scheme.fss[0].options)
class TestParted(test_base.BaseTestCase):
def setUp(self):
super(TestParted, self).setUp()
self.prtd = partition.Parted('name', 'label')
@mock.patch.object(partition.Parted, 'next_count')
@mock.patch.object(partition.Parted, 'next_type')
def test_next_name_none(self, nt_mock, nc_mock):
nc_mock.return_value = 1
nt_mock.return_value = 'extended'
self.assertEqual(None, self.prtd.next_name())
@mock.patch.object(partition.Parted, 'next_count')
@mock.patch.object(partition.Parted, 'next_type')
def test_next_name_no_separator(self, nt_mock, nc_mock):
nc_mock.return_value = 1
nt_mock.return_value = 'not_extended'
expected_name = '%s%s' % (self.prtd.name, 1)
self.assertEqual(expected_name, self.prtd.next_name())
@mock.patch.object(partition.Parted, 'next_count')
@mock.patch.object(partition.Parted, 'next_type')
def test_next_name_with_separator(self, nt_mock, nc_mock):
nc_mock.return_value = 1
nt_mock.return_value = 'not_extended'
self.prtd.name = '/dev/cciss/c0d0'
expected_name = '%sp%s' % (self.prtd.name, 1)
self.assertEqual(expected_name, self.prtd.next_name())
self.prtd.name = '/dev/loop123'
expected_name = '%sp%s' % (self.prtd.name, 1)
self.assertEqual(expected_name, self.prtd.next_name())
self.prtd.name = '/dev/nvme0n1'
expected_name = '%sp%s' % (self.prtd.name, 1)
self.assertEqual(expected_name, self.prtd.next_name())
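# All three device names above end in a digit, which appears to be what
# triggers the 'p' separator. A hedged sketch (hypothetical helper):
def _next_name_sketch(device_name, count):
    separator = 'p' if device_name[-1].isdigit() else ''
    return '%s%s%s' % (device_name, separator, count)

# _next_name_sketch('/dev/sda', 1) == '/dev/sda1'
# _next_name_sketch('/dev/nvme0n1', 1) == '/dev/nvme0n1p1'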
def test_next_begin_empty_partitions(self):
self.assertEqual(1, self.prtd.next_begin())
def test_next_begin_last_extended_partition(self):
self.prtd.partitions.append(
partition.Partition('name', 'count', 'device', 'begin', 'end',
'extended'))
self.assertEqual('begin', self.prtd.next_begin())
def test_next_begin_no_last_extended_partition(self):
self.prtd.partitions.append(
partition.Partition('name', 'count', 'device', 'begin', 'end',
'primary'))
self.assertEqual('end', self.prtd.next_begin())
def test_next_count_no_logical(self):
self.assertEqual(1, self.prtd.next_count('primary'))
def test_next_count_has_logical(self):
self.prtd.partitions.append(
partition.Partition('name', 'count', 'device', 'begin', 'end',
'logical'))
self.assertEqual(6, self.prtd.next_count('logical'))
def test_next_type_gpt(self):
self.prtd.label = 'gpt'
self.assertEqual('primary', self.prtd.next_type())
def test_next_type_no_extended(self):
self.prtd.label = 'msdos'
self.assertEqual('primary', self.prtd.next_type())
self.prtd.partitions.extend(
3 * [partition.Partition('name', 'count', 'device', 'begin',
'end', 'primary')])
self.assertEqual('extended', self.prtd.next_type())
def test_next_type_has_extended(self):
self.prtd.label = 'msdos'
self.prtd.partitions.append(
partition.Partition('name', 'count', 'device', 'begin', 'end',
'extended'))
self.assertEqual('logical', self.prtd.next_type())
def test_primary(self):
expected_partitions = [partition.Partition('name', 'count', 'device',
'begin', 'end', 'primary')]
self.prtd.partitions.extend(expected_partitions)
self.assertEqual(expected_partitions, self.prtd.primary)
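# The gpt/msdos expectations above imply roughly this next_type logic:
# msdos offers four primary slots and reserves the fourth for an extended
# partition, while gpt has no extended/logical distinction. A sketch
# (attribute names are assumptions):
def _next_type_sketch(label, partitions):
    if label == 'gpt':
        return 'primary'
    if any(p.type == 'extended' for p in partitions):
        return 'logical'
    primaries = sum(1 for p in partitions if p.type == 'primary')
    return 'extended' if primaries >= 3 else 'primary'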

View File

@ -1,289 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslotest import base as test_base
import time
from fuel_agent import errors
from fuel_agent.utils import partition as pu
from fuel_agent.utils import utils
class TestPartitionUtils(test_base.BaseTestCase):
@mock.patch.object(pu, 'make_label')
def test_wipe(self, mock_label):
# should call the make_label method
# in order to create a new empty partition table, which we treat
# as equivalent to wiping the old one
pu.wipe('/dev/fake')
mock_label.assert_called_once_with('/dev/fake')
@mock.patch.object(pu, 'reread_partitions')
@mock.patch.object(utils, 'execute')
def test_make_label(self, mock_exec, mock_rerd):
# should run parted OS command
# in order to create label on a device
mock_exec.return_value = ('out', '')
# gpt by default
pu.make_label('/dev/fake')
mock_exec_expected_calls = [
mock.call('udevadm', 'settle', '--quiet', check_exit_code=[0]),
mock.call('parted', '-s', '/dev/fake', 'mklabel', 'gpt',
check_exit_code=[0, 1])]
self.assertEqual(mock_exec_expected_calls, mock_exec.call_args_list)
mock_rerd.assert_called_once_with('/dev/fake', out='out')
mock_exec.reset_mock()
mock_rerd.reset_mock()
# label is set explicitly
pu.make_label('/dev/fake', label='msdos')
mock_exec_expected_calls = [
mock.call('udevadm', 'settle', '--quiet', check_exit_code=[0]),
mock.call('parted', '-s', '/dev/fake', 'mklabel', 'msdos',
check_exit_code=[0, 1])]
self.assertEqual(mock_exec_expected_calls, mock_exec.call_args_list)
mock_rerd.assert_called_once_with('/dev/fake', out='out')
def test_make_label_wrong_label(self):
# should check if label is valid
# should raise exception if it is not
self.assertRaises(errors.WrongPartitionLabelError,
pu.make_label, '/dev/fake', 'wrong')
@mock.patch.object(pu, 'reread_partitions')
@mock.patch.object(utils, 'execute')
def test_set_partition_flag(self, mock_exec, mock_rerd):
# should run parted OS command
# in order to set flag on a partition
mock_exec.return_value = ('out', '')
# default state is 'on'
pu.set_partition_flag('/dev/fake', 1, 'boot')
mock_exec_expected_calls = [
mock.call('udevadm', 'settle', '--quiet', check_exit_code=[0]),
mock.call('parted', '-s', '/dev/fake', 'set', '1', 'boot', 'on',
check_exit_code=[0, 1])]
self.assertEqual(mock_exec_expected_calls, mock_exec.call_args_list)
mock_rerd.assert_called_once_with('/dev/fake', out='out')
mock_exec.reset_mock()
mock_rerd.reset_mock()
# if state argument is given use it
pu.set_partition_flag('/dev/fake', 1, 'boot', state='off')
mock_exec_expected_calls = [
mock.call('udevadm', 'settle', '--quiet', check_exit_code=[0]),
mock.call('parted', '-s', '/dev/fake', 'set', '1', 'boot', 'off',
check_exit_code=[0, 1])]
self.assertEqual(mock_exec_expected_calls, mock_exec.call_args_list)
mock_rerd.assert_called_once_with('/dev/fake', out='out')
@mock.patch.object(utils, 'execute')
def test_set_partition_flag_wrong_flag(self, mock_exec):
# should check if flag is valid
# should raise exception if it is not
self.assertRaises(errors.WrongPartitionSchemeError,
pu.set_partition_flag,
'/dev/fake', 1, 'wrong')
@mock.patch.object(utils, 'execute')
def test_set_partition_flag_wrong_state(self, mock_exec):
# should check if state is valid
# should raise exception if it is not
self.assertRaises(errors.WrongPartitionSchemeError,
pu.set_partition_flag,
'/dev/fake', 1, 'boot', state='wrong')
@mock.patch.object(pu, 'reread_partitions')
@mock.patch.object(pu, 'info')
@mock.patch.object(utils, 'execute')
def test_make_partition(self, mock_exec, mock_info, mock_rerd):
# should run the parted OS command
# in order to create a new partition
mock_exec.return_value = ('out', '')
mock_info.return_value = {
'parts': [
{'begin': 0, 'end': 1000, 'fstype': 'free'},
]
}
pu.make_partition('/dev/fake', 100, 200, 'primary')
mock_exec_expected_calls = [
mock.call('udevadm', 'settle', '--quiet', check_exit_code=[0]),
mock.call('parted', '-a', 'optimal', '-s', '/dev/fake', 'unit',
'MiB', 'mkpart', 'primary', '100', '200',
check_exit_code=[0, 1])]
self.assertEqual(mock_exec_expected_calls, mock_exec.call_args_list)
mock_rerd.assert_called_once_with('/dev/fake', out='out')
@mock.patch.object(utils, 'execute')
def test_make_partition_wrong_ptype(self, mock_exec):
# should check if partition type is one of
# 'primary' or 'logical'
# should raise exception if it is not
self.assertRaises(errors.WrongPartitionSchemeError, pu.make_partition,
'/dev/fake', 200, 100, 'wrong')
@mock.patch.object(utils, 'execute')
def test_make_partition_begin_overlaps_end(self, mock_exec):
# should check if begin is less than end
# should raise exception if it isn't
self.assertRaises(errors.WrongPartitionSchemeError, pu.make_partition,
'/dev/fake', 200, 100, 'primary')
@mock.patch.object(pu, 'info')
@mock.patch.object(utils, 'execute')
def test_make_partition_overlaps_other_parts(self, mock_exec, mock_info):
# should check if begin or end overlap other partitions
# should raise an exception if they do
mock_info.return_value = {
'parts': [
{'begin': 0, 'end': 100, 'fstype': 'free'},
{'begin': 100, 'end': 200, 'fstype': 'notfree'},
{'begin': 200, 'end': 300, 'fstype': 'free'}
]
}
self.assertRaises(errors.WrongPartitionSchemeError, pu.make_partition,
'/dev/fake', 99, 101, 'primary')
self.assertRaises(errors.WrongPartitionSchemeError, pu.make_partition,
'/dev/fake', 100, 200, 'primary')
self.assertRaises(errors.WrongPartitionSchemeError, pu.make_partition,
'/dev/fake', 200, 301, 'primary')
self.assertEqual(mock_info.call_args_list,
[mock.call('/dev/fake')] * 3)
@mock.patch.object(pu, 'reread_partitions')
@mock.patch.object(pu, 'info')
@mock.patch.object(utils, 'execute')
def test_remove_partition(self, mock_exec, mock_info, mock_rerd):
# should run the parted OS command
# in order to remove a partition
mock_exec.return_value = ('out', '')
mock_info.return_value = {
'parts': [
{
'begin': 1,
'end': 100,
'size': 100,
'num': 1,
'fstype': 'ext2'
},
{
'begin': 100,
'end': 200,
'size': 100,
'num': 2,
'fstype': 'ext2'
}
]
}
pu.remove_partition('/dev/fake', 1)
mock_exec_expected_calls = [
mock.call('udevadm', 'settle', '--quiet', check_exit_code=[0]),
mock.call('parted', '-s', '/dev/fake', 'rm', '1',
check_exit_code=[0, 1])]
self.assertEqual(mock_exec_expected_calls, mock_exec.call_args_list)
mock_rerd.assert_called_once_with('/dev/fake', out='out')
@mock.patch.object(pu, 'info')
@mock.patch.object(utils, 'execute')
def test_remove_partition_notexists(self, mock_exec, mock_info):
# should check if the partition exists
# should raise exception if it doesn't
mock_info.return_value = {
'parts': [
{
'begin': 1,
'end': 100,
'size': 100,
'num': 1,
'fstype': 'ext2'
},
{
'begin': 100,
'end': 200,
'size': 100,
'num': 2,
'fstype': 'ext2'
}
]
}
self.assertRaises(errors.PartitionNotFoundError, pu.remove_partition,
'/dev/fake', 3)
@mock.patch.object(utils, 'execute')
def test_set_gpt_type(self, mock_exec):
pu.set_gpt_type('dev', 'num', 'type')
mock_exec_expected_calls = [
mock.call('udevadm', 'settle', '--quiet', check_exit_code=[0]),
mock.call('sgdisk', '--typecode=%s:%s' % ('num', 'type'), 'dev',
check_exit_code=[0])]
self.assertEqual(mock_exec_expected_calls, mock_exec.call_args_list)
@mock.patch.object(utils, 'execute')
def test_info(self, mock_exec):
mock_exec.return_value = [
'BYT;\n'
'/dev/fake:476940MiB:scsi:512:4096:msdos:ATA 1BD14;\n'
'1:0.03MiB:1.00MiB:0.97MiB:free;\n'
'1:1.00MiB:191MiB:190MiB:ext3::boot;\n'
'2:191MiB:476939MiB:476748MiB:::lvm;\n'
'1:476939MiB:476940MiB:1.02MiB:free;\n'
]
expected = {'generic': {'dev': '/dev/fake',
'logical_block': 512,
'model': 'ATA 1BD14',
'physical_block': 4096,
'size': 476940,
'table': 'msdos'},
'parts': [{'begin': 1, 'end': 1, 'fstype': 'free',
'num': 1, 'size': 1},
{'begin': 1, 'end': 191, 'fstype': 'ext3',
'num': 1, 'size': 190},
{'begin': 191, 'end': 476939, 'fstype': None,
'num': 2, 'size': 476748},
{'begin': 476939, 'end': 476940,
'fstype': 'free', 'num': 1, 'size': 2}]}
actual = pu.info('/dev/fake')
self.assertEqual(expected, actual)
mock_exec_expected_calls = [
mock.call('udevadm', 'settle', '--quiet', check_exit_code=[0]),
mock.call('parted', '-s', '/dev/fake', '-m', 'unit', 'MiB',
'print', 'free', check_exit_code=[0])]
self.assertEqual(mock_exec_expected_calls, mock_exec.call_args_list)
@mock.patch.object(utils, 'execute')
def test_reread_partitions_ok(self, mock_exec):
pu.reread_partitions('/dev/fake', out='')
self.assertEqual(mock_exec.call_args_list, [])
@mock.patch.object(time, 'sleep')
@mock.patch.object(utils, 'execute')
def test_reread_partitions_device_busy(self, mock_exec, mock_sleep):
mock_exec.return_value = ('', '')
pu.reread_partitions('/dev/fake', out='_Device or resource busy_')
mock_exec_expected = [
mock.call('partprobe', '/dev/fake', check_exit_code=[0, 1]),
mock.call('udevadm', 'settle', '--quiet', check_exit_code=[0]),
]
self.assertEqual(mock_exec.call_args_list, mock_exec_expected)
mock_sleep.assert_called_once_with(2)
@mock.patch.object(utils, 'execute')
def test_reread_partitions_timeout(self, mock_exec):
self.assertRaises(errors.BaseError, pu.reread_partitions,
'/dev/fake', out='Device or resource busy',
timeout=-40)


@ -1,214 +0,0 @@
# Copyright 2011 Justin Santa Barbara
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
import mock
from oslo.config import cfg
import requests
import socket
import stevedore
import urllib3
from fuel_agent import errors
from fuel_agent.utils import utils
CONF = cfg.CONF
class ExecuteTestCase(testtools.TestCase):
"""This class is partly based on the same class in openstack/ironic."""
def setUp(self):
super(ExecuteTestCase, self).setUp()
fake_driver = stevedore.extension.Extension('fake_driver', None, None,
mock.MagicMock)
self.drv_manager = stevedore.driver.DriverManager.make_test_instance(
fake_driver)
def test_parse_unit(self):
self.assertEqual(utils.parse_unit('1.00m', 'm', ceil=True), 1)
self.assertEqual(utils.parse_unit('1.00m', 'm', ceil=False), 1)
self.assertEqual(utils.parse_unit('1.49m', 'm', ceil=True), 2)
self.assertEqual(utils.parse_unit('1.49m', 'm', ceil=False), 1)
self.assertEqual(utils.parse_unit('1.51m', 'm', ceil=True), 2)
self.assertEqual(utils.parse_unit('1.51m', 'm', ceil=False), 1)
self.assertRaises(ValueError, utils.parse_unit, '1.00m', 'MiB')
self.assertRaises(ValueError, utils.parse_unit, '', 'MiB')
def test_B2MiB(self):
self.assertEqual(utils.B2MiB(1048575, ceil=False), 0)
self.assertEqual(utils.B2MiB(1048576, ceil=False), 1)
self.assertEqual(utils.B2MiB(1048575, ceil=True), 1)
self.assertEqual(utils.B2MiB(1048576, ceil=True), 1)
self.assertEqual(utils.B2MiB(1048577, ceil=True), 2)
def test_check_exit_code_boolean(self):
utils.execute('/usr/bin/env', 'false', check_exit_code=False)
self.assertRaises(errors.ProcessExecutionError,
utils.execute,
'/usr/bin/env', 'false', check_exit_code=True)
@mock.patch('fuel_agent.utils.utils.time.sleep')
@mock.patch('fuel_agent.utils.utils.subprocess.Popen')
def test_execute_ok_on_third_attempts(self, mock_popen, mock_sleep):
process = mock.Mock()
mock_popen.side_effect = [OSError, ValueError, process]
process.communicate.return_value = (None, None)
process.returncode = 0
utils.execute('/usr/bin/env', 'false', attempts=3)
self.assertEqual(2 * [mock.call(CONF.execute_retry_delay)],
mock_sleep.call_args_list)
@mock.patch('fuel_agent.utils.utils.time.sleep')
@mock.patch('fuel_agent.utils.utils.subprocess.Popen')
def test_execute_failed(self, mock_popen, mock_sleep):
mock_popen.side_effect = OSError
self.assertRaises(errors.ProcessExecutionError, utils.execute,
'/usr/bin/env', 'false', attempts=2)
self.assertEqual(1 * [mock.call(CONF.execute_retry_delay)],
mock_sleep.call_args_list)
@mock.patch('stevedore.driver.DriverManager')
def test_get_driver(self, mock_drv_manager):
mock_drv_manager.return_value = self.drv_manager
self.assertEqual(mock.MagicMock.__name__,
utils.get_driver('fake_driver').__name__)
@mock.patch('jinja2.Environment')
@mock.patch('jinja2.FileSystemLoader')
@mock.patch('six.moves.builtins.open')
def test_render_and_save_fail(self, mock_open, mock_j_lo, mock_j_env):
mock_open.side_effect = Exception('foo')
self.assertRaises(errors.TemplateWriteError, utils.render_and_save,
'fake_dir', 'fake_tmpl_name', 'fake_data',
'fake_file_name')
@mock.patch('jinja2.Environment')
@mock.patch('jinja2.FileSystemLoader')
@mock.patch('six.moves.builtins.open')
def test_render_and_save_ok(self, mock_open, mock_j_lo, mock_j_env):
mock_render = mock.Mock()
mock_render.render.return_value = 'fake_data'
mock_j_env.get_template.return_value = mock_render
utils.render_and_save('fake_dir', 'fake_tmpl_name', 'fake_data',
'fake_file_name')
mock_open.assert_called_once_with('fake_file_name', 'w')
def test_calculate_md5_ok(self):
# calculated by 'printf %10000s | md5sum'
with mock.patch('six.moves.builtins.open',
mock.mock_open(read_data=' ' * 10000), create=True):
self.assertEqual('f38898bb69bb02bccb9594dfe471c5c0',
utils.calculate_md5('fake', 10000))
self.assertEqual('6934d9d33cd2d0c005994e7d96d2e0d9',
utils.calculate_md5('fake', 1000))
self.assertEqual('1e68934346ee57858834a205017af8b7',
utils.calculate_md5('fake', 100))
self.assertEqual('41b394758330c83757856aa482c79977',
utils.calculate_md5('fake', 10))
self.assertEqual('7215ee9c7d9dc229d2921a40e899ec5f',
utils.calculate_md5('fake', 1))
self.assertEqual('d41d8cd98f00b204e9800998ecf8427e',
utils.calculate_md5('fake', 0))
@mock.patch.object(requests, 'get')
def test_init_http_request_ok(self, mock_req):
utils.init_http_request('fake_url')
mock_req.assert_called_once_with(
'fake_url', stream=True, timeout=CONF.http_request_timeout,
headers={'Range': 'bytes=0-'})
@mock.patch('time.sleep')
@mock.patch.object(requests, 'get')
def test_init_http_request_non_critical_errors(self, mock_req, mock_s):
mock_ok = mock.Mock()
mock_req.side_effect = [urllib3.exceptions.DecodeError(),
urllib3.exceptions.ProxyError(),
requests.exceptions.ConnectionError(),
requests.exceptions.Timeout(),
requests.exceptions.TooManyRedirects(),
socket.timeout(),
mock_ok]
req_obj = utils.init_http_request('fake_url')
self.assertEqual(mock_ok, req_obj)
@mock.patch.object(requests, 'get')
def test_init_http_request_wrong_http_status(self, mock_req):
mock_fail = mock.Mock()
mock_fail.raise_for_status.side_effect = KeyError()
mock_req.return_value = mock_fail
self.assertRaises(KeyError, utils.init_http_request, 'fake_url')
@mock.patch('time.sleep')
@mock.patch.object(requests, 'get')
def test_init_http_request_max_retries_exceeded(self, mock_req, mock_s):
mock_req.side_effect = requests.exceptions.ConnectionError()
self.assertRaises(errors.HttpUrlConnectionError,
utils.init_http_request, 'fake_url')
@mock.patch('fuel_agent.utils.utils.os.makedirs')
@mock.patch('fuel_agent.utils.utils.os.path.isdir', return_value=False)
def test_makedirs_if_not_exists(self, mock_isdir, mock_makedirs):
utils.makedirs_if_not_exists('/fake/path')
mock_isdir.assert_called_once_with('/fake/path')
mock_makedirs.assert_called_once_with('/fake/path', mode=0o755)
@mock.patch('fuel_agent.utils.utils.os.makedirs')
@mock.patch('fuel_agent.utils.utils.os.path.isdir', return_value=False)
def test_makedirs_if_not_exists_mode_given(
self, mock_isdir, mock_makedirs):
utils.makedirs_if_not_exists('/fake/path', mode=0o000)
mock_isdir.assert_called_once_with('/fake/path')
mock_makedirs.assert_called_once_with('/fake/path', mode=0o000)
@mock.patch('fuel_agent.utils.utils.os.makedirs')
@mock.patch('fuel_agent.utils.utils.os.path.isdir', return_value=True)
def test_makedirs_if_not_exists_already_exists(
self, mock_isdir, mock_makedirs):
utils.makedirs_if_not_exists('/fake/path')
mock_isdir.assert_called_once_with('/fake/path')
self.assertEqual(mock_makedirs.mock_calls, [])
@mock.patch('fuel_agent.utils.utils.os.listdir')
def test_guess_filename(self, mock_oslistdir):
mock_oslistdir.return_value = ['file1', 'file2', 'file3']
filename = utils.guess_filename('/some/path', '^file2.*')
self.assertEqual(filename, 'file2')
mock_oslistdir.assert_called_once_with('/some/path')
@mock.patch('fuel_agent.utils.utils.os.listdir')
def test_guess_filename_not_found(self, mock_oslistdir):
mock_oslistdir.return_value = ['file1', 'file2', 'file3']
filename = utils.guess_filename('/some/path', '^file4.*')
self.assertIsNone(filename)
mock_oslistdir.assert_called_once_with('/some/path')
@mock.patch('fuel_agent.utils.utils.os.listdir')
def test_guess_filename_not_exact_match(self, mock_oslistdir):
mock_oslistdir.return_value = ['file1', 'file2', 'file3']
filename = utils.guess_filename('/some/path', '^file.*')
# by default files are sorted in backward direction
self.assertEqual(filename, 'file3')
mock_oslistdir.assert_called_once_with('/some/path')
@mock.patch('fuel_agent.utils.utils.os.listdir')
def test_guess_filename_not_exact_match_forward_sort(self, mock_oslistdir):
mock_oslistdir.return_value = ['file1', 'file2', 'file3']
filename = utils.guess_filename('/some/path', '^file.*', reverse=False)
# with reverse=False files are sorted in forward direction
self.assertEqual(filename, 'file1')
mock_oslistdir.assert_called_once_with('/some/path')


@ -1,226 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import os
import tarfile
import tempfile
import zlib
from oslo.config import cfg
import six
from fuel_agent import errors
from fuel_agent.openstack.common import log as logging
from fuel_agent.utils import utils
LOG = logging.getLogger(__name__)
au_opts = [
cfg.IntOpt(
'data_chunk_size',
default=1048576,
help='Size of data chunk to operate with images'
),
]
CONF = cfg.CONF
CONF.register_opts(au_opts)
@six.add_metaclass(abc.ABCMeta)
class Target(object):
def __iter__(self):
return self
def next(self):
raise StopIteration()
def target(self, filename='/dev/null'):
LOG.debug('Opening file: %s for write' % filename)
with open(filename, 'wb') as f:
count = 0
for chunk in self:
LOG.debug('Next chunk: %s' % count)
f.write(chunk)
count += 1
LOG.debug('Flushing file: %s' % filename)
f.flush()
# ensure data to be written to disk
os.fsync(f.fileno())
LOG.debug('File is written: %s' % filename)
class LocalFile(Target):
def __init__(self, filename):
if filename.startswith('file://'):
self.filename = filename[7:]
else:
self.filename = filename
self.fileobj = None
def next(self):
if not self.fileobj:
self.fileobj = open(self.filename, 'rb')
buffer = self.fileobj.read(CONF.data_chunk_size)
if buffer:
return buffer
else:
self.fileobj.close()
raise StopIteration()
class HttpUrl(Target):
def __init__(self, url):
self.url = str(url)
self.response_obj = utils.init_http_request(self.url)
self.processed_bytes = 0
try:
self.length = int(self.response_obj.headers['content-length'])
except (ValueError, KeyError):
raise errors.HttpUrlInvalidContentLength(
'Can not get content length for %s' % self.url)
else:
LOG.debug('Expected content length %s for %s' % (self.length,
self.url))
def next(self):
while self.processed_bytes < self.length:
try:
data = self.response_obj.raw.read(CONF.data_chunk_size)
if not data:
raise errors.HttpUrlConnectionError(
'Could not receive data: URL=%s, range=%s' %
(self.url, self.processed_bytes))
except Exception as exc:
LOG.exception(exc)
self.response_obj = utils.init_http_request(
self.url, self.processed_bytes)
continue
else:
self.processed_bytes += len(data)
return data
raise StopIteration()
class GunzipStream(Target):
def __init__(self, stream):
self.stream = iter(stream)
# NOTE(agordeev): toggle automatic header detection on
self.decompressor = zlib.decompressobj(zlib.MAX_WBITS | 32)
def next(self):
try:
return self.decompressor.decompress(self.stream.next())
except StopIteration:
raise
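# A minimal check of the header autodetection trick above (this sample uses
# zlib-wrapped data; gzip-wrapped data is handled the same way):
#
# >>> import zlib
# >>> d = zlib.decompressobj(zlib.MAX_WBITS | 32)
# >>> d.decompress(zlib.compress('payload'))
# 'payload'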
class ForwardFileStream(Target):
def __init__(self, stream):
self.stream = iter(stream)
self.position = 0
self.chunk = ''
self.closed = False
def next(self):
buffer = self.read()
if buffer:
return buffer
else:
raise StopIteration()
def close(self):
self.closed = True
def tell(self):
if self.closed:
raise ValueError('I/O operation on closed file')
return self.position
def seek(self, position):
if self.closed:
raise ValueError('I/O operation on closed file')
if position < self.position:
raise ValueError('Backward seek operation is impossible')
elif position < self.position + len(self.chunk):
self.chunk = self.chunk[(position - self.position):]
self.position = position
else:
try:
current = self.position + len(self.chunk)
while True:
chunk = self.stream.next()
if current + len(chunk) >= position:
self.chunk = chunk[(position - current):]
self.position = position
break
current += len(chunk)
except StopIteration:
self.chunk = None
self.position = position
def read(self, length=CONF.data_chunk_size):
# NOTE(kozhukalov): default length = 1048576 is not the usual behaviour,
# but that is ok for our use case.
if self.closed:
raise ValueError('I/O operation on closed file')
if self.chunk is None:
return None
try:
while len(self.chunk) < length:
self.chunk += self.stream.next()
result = self.chunk[:length]
self.chunk = self.chunk[length:]
except StopIteration:
result = self.chunk
self.chunk = None
self.position += len(result)
return result
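# A worked example of the forward-only file interface (in-memory chunks,
# purely illustrative):
#
# >>> ffs = ForwardFileStream(['abc', 'def'])
# >>> ffs.read(4)
# 'abcd'
# >>> ffs.tell()
# 4
# >>> ffs.read()
# 'ef'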
class TarStream(Target):
def __init__(self, stream):
self.stream = iter(stream)
self.tarobj = None
def target(self, filename=None):
if not self.tarobj:
self.tarobj = tarfile.open(
fileobj=ForwardFileStream(self.stream), mode='r:')
self.tarobj.extractall(path=(filename or tempfile.gettempdir()))
class Chain(object):
def __init__(self):
self.processors = []
def append(self, processor):
self.processors.append(processor)
def process(self):
def jump(proc, next_proc):
# if next_proc is just a string we assume it is a filename
# and we save stream into a file
if isinstance(next_proc, (str, unicode)):
LOG.debug('Processor target: %s' % next_proc)
proc.target(next_proc)
return LocalFile(next_proc)
# if next_proc is not a string we return new instance
# initialized with the previous one
else:
return next_proc(proc)
return reduce(jump, self.processors)
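# A usage sketch (URL and target path are hypothetical): stream a gzipped
# image over HTTP, decompress it on the fly and dump the result into a file.
# Appending a string instead of a processor class makes the chain flush into
# that file and continue from it as a LocalFile.
#
# chain = Chain()
# chain.append(HttpUrl('http://example.com/image.img.gz'))
# chain.append(GunzipStream)
# chain.append('/tmp/image.img')
# chain.process()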


@ -1,502 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gzip
import os
import re
import shutil
import signal as sig
import stat
import tempfile
import time
import six
import yaml
from oslo.config import cfg
from fuel_agent import errors
from fuel_agent.openstack.common import log as logging
from fuel_agent.utils import hardware as hu
from fuel_agent.utils import utils
LOG = logging.getLogger(__name__)
bu_opts = [
cfg.IntOpt(
'max_loop_devices_count',
default=255,
# NOTE(agordeev): up to 256 loop devices could be allocated up to
# kernel version 2.6.23; from version 2.6.24 onwards the limit is
# theoretically not present anymore.
help='Maximum number of loop devices allowed to be used'
),
cfg.IntOpt(
'sparse_file_size',
# XXX: Apparently Fuel configures the node root filesystem to span
# the whole hard drive. However 2 GB filesystem created with default
# options can grow at most to 2 TB (1024x its initial size). This
# maximal size can be configured by mke2fs -E resize=NNN option,
# however the version of e2fsprogs shipped with CentOS 6.[65] seems
# to silently ignore the `resize' option. Therefore make the initial
# filesystem a bit bigger so it can grow to 8 TB.
default=8192,
help='Size of sparse file in MiBs'
),
cfg.IntOpt(
'loop_device_major_number',
default=7,
help='System-wide major number for loop device'
),
cfg.IntOpt(
'fetch_packages_attempts',
default=10,
help='Maximum number of debootstrap/apt-get attempts to execute'
),
cfg.StrOpt(
'allow_unsigned_file',
default='allow_unsigned_packages',
help='File in which to store the apt setting for unsigned packages'
),
cfg.StrOpt(
'force_ipv4_file',
default='force_ipv4',
help='File in which to store the apt setting forcing IPv4 usage'
),
]
CONF = cfg.CONF
CONF.register_opts(bu_opts)
DEFAULT_APT_PATH = {
'sources_file': 'etc/apt/sources.list',
'sources_dir': 'etc/apt/sources.list.d',
'preferences_file': 'etc/apt/preferences',
'preferences_dir': 'etc/apt/preferences.d',
'conf_dir': 'etc/apt/apt.conf.d',
}
# NOTE(agordeev): hardcoded to r00tme
ROOT_PASSWORD = '$6$IInX3Cqo$5xytL1VZbZTusOewFnG6couuF0Ia61yS3rbC6P5YbZP2TYcl'\
'wHqMq9e3Tg8rvQxhxSlBXP1DZhdUamxdOBXK0.'
def run_debootstrap(uri, suite, chroot, arch='amd64', eatmydata=False,
attempts=CONF.fetch_packages_attempts):
"""Builds initial base system.
debootstrap builds an initial base system which is capable of running
apt-get. It is well known for its glitchy resolving of package
dependencies, so the rest of the packages will be installed later by
run_apt_get.
"""
cmds = ['debootstrap', '--verbose', '--no-check-gpg', '--arch=%s' % arch,
suite, chroot, uri]
if eatmydata:
cmds.insert(4, '--include=eatmydata')
stdout, stderr = utils.execute(*cmds, attempts=attempts)
LOG.debug('Running deboostrap completed.\nstdout: %s\nstderr: %s', stdout,
stderr)
def set_apt_get_env():
# NOTE(agordeev): disable any confirmations/questions from apt-get side
os.environ['DEBIAN_FRONTEND'] = 'noninteractive'
os.environ['DEBCONF_NONINTERACTIVE_SEEN'] = 'true'
os.environ['LC_ALL'] = os.environ['LANG'] = os.environ['LANGUAGE'] = 'C'
def run_apt_get(chroot, packages, eatmydata=False,
attempts=CONF.fetch_packages_attempts):
"""Runs apt-get install <packages>.
Unlike debootstrap, apt-get has a proper package dependency resolver
under the hood.
eatmydata can be used to ignore the storm of sync() calls issued by the
dpkg/apt-get tools. It is dangerous, but can make package installation
several times faster.
"""
cmds = ['chroot', chroot, 'apt-get', '-y', 'update']
stdout, stderr = utils.execute(*cmds, attempts=attempts)
LOG.debug('Running apt-get update completed.\nstdout: %s\nstderr: %s',
stdout, stderr)
cmds = ['chroot', chroot, 'apt-get', '-y', 'install', ' '.join(packages)]
if eatmydata:
cmds.insert(2, 'eatmydata')
stdout, stderr = utils.execute(*cmds, attempts=attempts)
LOG.debug('Running apt-get install completed.\nstdout: %s\nstderr: %s',
stdout, stderr)
def suppress_services_start(chroot):
"""Suppresses services start.
Prevents the start of any service such as udev/ssh/etc. in the chrooted
environment while the image is being built.
"""
path = os.path.join(chroot, 'usr/sbin')
if not os.path.exists(path):
os.makedirs(path)
with open(os.path.join(path, 'policy-rc.d'), 'w') as f:
f.write('#!/bin/sh\n'
'# prevent any service from being started\n'
'exit 101\n')
os.fchmod(f.fileno(), 0o755)
def clean_dirs(chroot, dirs, delete=False):
"""Removes dirs and recreates them
:param chroot: Root directory where to look for subdirectories
:param dirs: List of directories to clean/remove (Relative to chroot)
:param delete: (Boolean) If True, directories will be removed
(Default: False)
"""
for d in dirs:
path = os.path.join(chroot, d)
if os.path.isdir(path):
LOG.debug('Removing dir: %s', path)
shutil.rmtree(path)
if not delete:
LOG.debug('Creating empty dir: %s', path)
os.makedirs(path)
def remove_files(chroot, files):
for f in files:
path = os.path.join(chroot, f)
if os.path.exists(path):
os.remove(path)
LOG.debug('Removed file: %s', path)
def clean_apt_settings(chroot, allow_unsigned_file=CONF.allow_unsigned_file,
force_ipv4_file=CONF.force_ipv4_file):
"""Cleans apt settings such as package sources and repo pinning."""
files = [DEFAULT_APT_PATH['sources_file'],
DEFAULT_APT_PATH['preferences_file'],
os.path.join(DEFAULT_APT_PATH['conf_dir'], force_ipv4_file),
os.path.join(DEFAULT_APT_PATH['conf_dir'], allow_unsigned_file)]
remove_files(chroot, files)
dirs = [DEFAULT_APT_PATH['preferences_dir'],
DEFAULT_APT_PATH['sources_dir']]
clean_dirs(chroot, dirs)
def do_post_inst(chroot):
# NOTE(agordeev): set up password for root
utils.execute('sed', '-i',
's%root:[\*,\!]%root:' + ROOT_PASSWORD + '%',
os.path.join(chroot, 'etc/shadow'))
# NOTE(agordeev): backport from bash-script:
# in order to prevent a later puppet workflow outage, the puppet service
# should be disabled on node startup.
# When it is left enabled, the puppet service sometimes hangs
# and the deployment is recognized as failed.
# TODO(agordeev): take care of puppet service for other distros, once
# fuel-agent will be capable of building images for them too.
utils.execute('chroot', chroot, 'update-rc.d', 'puppet', 'disable')
# NOTE(agordeev): prevent mcollective from being automatically started on boot
# to prevent confusing messages in its log (regarding connection errors).
with open(os.path.join(chroot, 'etc/init/mcollective.override'), 'w') as f:
f.write("manual\n")
# NOTE(agordeev): remove custom policy-rc.d which is needed to disable
# execution of post/pre-install package hooks and start of services
remove_files(chroot, ['usr/sbin/policy-rc.d'])
clean_apt_settings(chroot)
def stop_chrooted_processes(chroot, signal=sig.SIGTERM,
attempts=10, attempts_delay=2):
"""Sends signal to all processes, which are running inside chroot.
It tries several times until all processes die. If at some point there
are no running processes found, it returns True.
:param chroot: Process root directory.
:param signal: Which signal to send to processes. It must be either
SIGTERM or SIGKILL. (Default: SIGTERM)
:param attempts: Number of attempts (Default: 10)
:param attempts_delay: Delay between attempts (Default: 2)
"""
if signal not in (sig.SIGTERM, sig.SIGKILL):
raise ValueError('Signal must be either SIGTERM or SIGKILL')
def get_running_processes():
return utils.execute(
'fuser', '-v', chroot, check_exit_code=False)[0].split()
for i in six.moves.range(attempts):
running_processes = get_running_processes()
if not running_processes:
LOG.debug('There are no running processes in %s ', chroot)
return True
for p in running_processes:
try:
pid = int(p)
if os.readlink('/proc/%s/root' % pid) == chroot:
LOG.debug('Sending %s to chrooted process %s', signal, pid)
os.kill(pid, signal)
except (OSError, ValueError) as e:
cmdline = ''
pid = p
try:
with open('/proc/%s/cmdline' % pid) as f:
cmdline = f.read()
except Exception:
LOG.debug('Can not read cmdline for pid=%s', pid)
LOG.warning('Exception while sending signal: '
'pid: %s cmdline: %s message: %s. Skipping it.',
pid, cmdline, e)
# First of all, signal delivery is asynchronous.
# Just because the signal has been sent doesn't
# mean the kernel will deliver it instantly
# (the target process might be uninterruptible at the moment).
# Secondly, exiting might take a while (the process might have
# some data to fsync, etc)
LOG.debug('Attempt %s. Waiting for %s seconds', i + 1, attempts_delay)
time.sleep(attempts_delay)
running_processes = get_running_processes()
if running_processes:
for pid in running_processes:
cmdline = ''
try:
with open('/proc/%s/cmdline' % pid) as f:
cmdline = f.read()
except Exception:
LOG.debug('Can not read cmdline for pid=%s', pid)
LOG.warning('Process is still running: pid=%s cmdline: %s',
pid, cmdline)
return False
return True
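# A typical calling pattern (a sketch, not part of the original module):
# try a polite SIGTERM first and escalate to SIGKILL only if processes
# survive.
#
# if not stop_chrooted_processes(chroot, signal=sig.SIGTERM):
#     stop_chrooted_processes(chroot, signal=sig.SIGKILL)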
def get_free_loop_device(
loop_device_major_number=CONF.loop_device_major_number,
max_loop_devices_count=CONF.max_loop_devices_count):
"""Returns the name of free loop device.
It should return the name of free loop device or raise an exception.
Unfortunately, free loop device couldn't be reversed for the later usage,
so we must start to use it as fast as we can.
If there's no free loop it will try to create new one and ask a system for
free loop again.
"""
for minor in range(0, max_loop_devices_count):
cur_loop = "/dev/loop%s" % minor
if not os.path.exists(cur_loop):
os.mknod(cur_loop, 0o660 | stat.S_IFBLK,
os.makedev(loop_device_major_number, minor))
try:
return utils.execute('losetup', '--find')[0].split()[0]
except (IndexError, errors.ProcessExecutionError):
LOG.debug("Couldn't find free loop device, trying again")
raise errors.NoFreeLoopDevices('Free loop device not found')
def create_sparse_tmp_file(dir, suffix, size=CONF.sparse_file_size):
"""Creates sparse file.
Creates a file which consumes disk space more efficiently when the file
itself is mostly empty.
"""
tf = tempfile.NamedTemporaryFile(dir=dir, suffix=suffix, delete=False)
utils.execute('truncate', '-s', '%sM' % size, tf.name)
return tf.name
def attach_file_to_loop(filename, loop):
utils.execute('losetup', loop, filename)
def deattach_loop(loop, check_exit_code=[0]):
LOG.debug('Trying to figure out if loop device %s is attached', loop)
output = utils.execute('losetup', '-a')[0]
for line in output.split('\n'):
# output lines are assumed to have the following format
# /dev/loop0: [fd03]:130820 (/dev/loop0)
if loop == line.split(':')[0]:
LOG.debug('Loop device %s seems to be attached. '
'Trying to detach.', loop)
utils.execute('losetup', '-d', loop,
check_exit_code=check_exit_code)
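# Lifecycle sketch for the loop device helpers above (requires root; the
# file name and device are illustrative):
#
# name = create_sparse_tmp_file('/tmp', 'img')  # sparse backing file
# loop = get_free_loop_device()                 # e.g. '/dev/loop0'
# attach_file_to_loop(name, loop)               # losetup <loop> <file>
# ... build a filesystem / write an image via the loop device ...
# deattach_loop(loop)                           # losetup -d <loop>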
def shrink_sparse_file(filename):
"""Shrinks file to its size of actual data. Only ext fs are supported."""
utils.execute('e2fsck', '-y', '-f', filename)
utils.execute('resize2fs', '-F', '-M', filename)
data = hu.parse_simple_kv('dumpe2fs', filename)
block_count = int(data['block count'])
block_size = int(data['block size'])
# NOTE: 'rb+' opens the file for update without truncating it
with open(filename, 'rb+') as f:
f.truncate(block_count * block_size)
def strip_filename(name):
"""Strips filename for apt settings.
The resulting name may only contain alphanumeric characters,
hyphen (-), underscore (_) and period (.).
"""
return re.sub(r"[^a-zA-Z0-9-_.]*", "", name)
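# For example, spaces and punctuation are dropped (sample name illustrative):
#
# >>> strip_filename('Ubuntu Mirror #1!')
# 'UbuntuMirror1'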
def get_release_file(uri, suite, section):
"""Download and parse repo's Release file
It and returns an apt preferences line for specified repo.
:param repo: a repo as dict
:returns: a string with apt preferences rules
"""
if section:
# We can't use urljoin here because it works pretty badly in
# cases when 'uri' doesn't have a trailing slash.
download_uri = os.path.join(uri, 'dists', suite, 'Release')
else:
# Well, we have a flat repo case, so we should download Release
# file from a different place. Please note, we have to strip
# a leading slash from suite because otherwise the download
# link will be wrong.
download_uri = os.path.join(uri, suite.lstrip('/'), 'Release')
return utils.init_http_request(download_uri).text
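# The two URL layouts boil down to os.path.join (mirror name hypothetical):
#
# >>> import os
# >>> os.path.join('http://mirror/ubuntu', 'dists', 'trusty', 'Release')
# 'http://mirror/ubuntu/dists/trusty/Release'
# >>> os.path.join('http://mirror/ubuntu', '/flat/path'.lstrip('/'), 'Release')
# 'http://mirror/ubuntu/flat/path/Release'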
def parse_release_file(content):
"""Parse Debian repo's Release file content.
:param content: a Debian's Release file content
:returns: a dict with repo's attributes
"""
_multivalued_fields = {
'SHA1': ['sha1', 'size', 'name'],
'SHA256': ['sha256', 'size', 'name'],
'SHA512': ['sha512', 'size', 'name'],
'MD5Sum': ['md5sum', 'size', 'name'],
}
# the debian data format is very similar to yaml, except for
# multivalued fields, so we can parse it just like yaml
# and then perform an additional transformation for those
# fields (we know which ones are multivalued).
data = yaml.load(content)
for attr, columns in six.iteritems(_multivalued_fields):
if attr not in data:
continue
values = data[attr].split()
data[attr] = []
for group in utils.grouper(values, len(columns)):
data[attr].append(dict(zip(columns, group)))
return data
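# A worked example with a trimmed, illustrative Release file: plain fields
# parse as yaml scalars, while multivalued fields become lists of dicts.
#
# >>> content = ('Origin: Ubuntu\n'
# ...            'Suite: trusty\n'
# ...            'MD5Sum:\n'
# ...            ' d41d8cd98f00b204e9800998ecf8427e 0 main/Release\n')
# >>> parsed = parse_release_file(content)
# >>> parsed['Origin']
# 'Ubuntu'
# >>> parsed['MD5Sum'][0]['name']
# 'main/Release'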
def add_apt_source(name, uri, suite, section, chroot):
# NOTE(agordeev): The files have either no or "list" as filename extension
filename = 'fuel-image-{name}.list'.format(name=strip_filename(name))
if section:
entry = 'deb {uri} {suite} {section}\n'.format(uri=uri, suite=suite,
section=section)
else:
entry = 'deb {uri} {suite}\n'.format(uri=uri, suite=suite)
with open(os.path.join(chroot, DEFAULT_APT_PATH['sources_dir'], filename),
'w') as f:
f.write(entry)
def add_apt_preference(name, priority, suite, section, chroot, uri):
# NOTE(agordeev): The files have either no or "pref" as filename extension
filename = 'fuel-image-{name}.pref'.format(name=strip_filename(name))
# NOTE(agordeev): priority=None means that there is no specific pinning for
# the particular repo and nothing to process.
# Default system-wide preferences (priority=500) will be used instead.
_transformations = {
'Archive': 'a',
'Suite': 'a', # suite is a synonym for archive
'Codename': 'n',
'Version': 'v',
'Origin': 'o',
'Label': 'l',
}
try:
deb_release = parse_release_file(
get_release_file(uri, suite, section)
)
except ValueError as exc:
LOG.error(
"[Attention] Failed to fetch Release file "
"for repo '{0}': {1} - skipping. "
"This may lead both to trouble with packages "
"and broken OS".format(name, six.text_type(exc))
)
return
conditions = set()
for field, condition in six.iteritems(_transformations):
if field in deb_release:
conditions.add(
'{0}={1}'.format(condition, deb_release[field])
)
with open(os.path.join(chroot, DEFAULT_APT_PATH['preferences_dir'],
filename), 'w') as f:
sections = section.split()
if sections:
for s in sections:
f.write('Package: *\n')
f.write('Pin: release ')
f.write(', '.join(conditions) + ", c={0}\n".format(s))
f.write('Pin-Priority: {priority}\n'.format(priority=priority))
else:
f.write('Package: *\n')
f.write('Pin: release ')
f.write(', '.join(conditions) + "\n")
f.write('Pin-Priority: {priority}\n'.format(priority=priority))
def pre_apt_get(chroot, allow_unsigned_file=CONF.allow_unsigned_file,
force_ipv4_file=CONF.force_ipv4_file):
"""It must be called prior run_apt_get."""
clean_apt_settings(chroot)
# NOTE(agordeev): allow to install packages without gpg digest
with open(os.path.join(chroot, DEFAULT_APT_PATH['conf_dir'],
allow_unsigned_file), 'w') as f:
f.write('APT::Get::AllowUnauthenticated 1;\n')
with open(os.path.join(chroot, DEFAULT_APT_PATH['conf_dir'],
force_ipv4_file), 'w') as f:
f.write('Acquire::ForceIPv4 "true";\n')
def containerize(filename, container, chunk_size=CONF.data_chunk_size):
if container == 'gzip':
output_file = filename + '.gz'
with open(filename, 'rb') as f:
# NOTE(agordeev): gzip in python2.6 doesn't have context manager
# support
g = gzip.open(output_file, 'wb')
for chunk in iter(lambda: f.read(chunk_size), ''):
g.write(chunk)
g.close()
os.remove(filename)
return output_file
raise errors.WrongImageDataError(
'Error while image initialization: '
'unsupported image container: {container}'.format(container=container))
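# The gzip loop relies on iter()'s sentinel form: keep calling
# read(chunk_size) until it returns '' at EOF. A self-contained illustration:
#
# >>> import functools, io
# >>> f = io.BytesIO('abcdef')
# >>> list(iter(functools.partial(f.read, 4), ''))
# ['abcd', 'ef']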


@ -1,78 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fuel_agent import errors
from fuel_agent.openstack.common import log as logging
from fuel_agent.utils import utils
LOG = logging.getLogger(__name__)
def make_fs(fs_type, fs_options, fs_label, dev):
# NOTE(agordeev): notice the different flag to force the fs creating
# ext* uses -F flag, xfs/mkswap uses -f flag.
cmd_line = []
cmd_name = 'mkswap'
if fs_type != 'swap':
cmd_name = 'mkfs.%s' % fs_type
cmd_line.append(cmd_name)
for opt in (fs_options, fs_label):
cmd_line.extend([s for s in opt.split(' ') if s])
cmd_line.append(dev)
utils.execute(*cmd_line)
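# Worked examples of the assembled command lines (device names are
# illustrative):
#
#   make_fs('ext4', '-F', '-L root', '/dev/sda1')
#       -> mkfs.ext4 -F -L root /dev/sda1
#   make_fs('swap', '-f', '', '/dev/sda2')
#       -> mkswap -f /dev/sda2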
def extend_fs(fs_type, fs_dev):
if fs_type in ('ext2', 'ext3', 'ext4'):
# an ext2/3/4 file system may have been mounted, so it
# must be checked with 'e2fsck -f' before resize2fs is run
utils.execute('e2fsck', '-yf', fs_dev, check_exit_code=[0])
utils.execute('resize2fs', fs_dev, check_exit_code=[0])
utils.execute('e2fsck', '-pf', fs_dev, check_exit_code=[0])
elif fs_type == 'xfs':
# xfs file system must be mounted
utils.execute('xfs_growfs', fs_dev, check_exit_code=[0])
else:
raise errors.FsUtilsError('Unsupported file system type')
def mount_fs(fs_type, fs_dev, fs_mount):
utils.execute('mount', '-t', fs_type, fs_dev, fs_mount,
check_exit_code=[0])
def mount_bind(chroot, path, path2=None):
if not path2:
path2 = path
utils.execute('mount', '--bind', path, chroot + path2,
check_exit_code=[0])
def umount_fs(fs_mount, try_lazy_umount=False):
try:
utils.execute('mountpoint', '-q', fs_mount, check_exit_code=[0])
except errors.ProcessExecutionError:
LOG.warning('%s is not a mountpoint, skipping umount', fs_mount)
else:
LOG.debug('Trying to umount {0}'.format(fs_mount))
try:
utils.execute('umount', fs_mount, check_exit_code=[0])
except errors.ProcessExecutionError as e:
if try_lazy_umount:
LOG.warning('Error while umounting {0} '
'exc={1}'.format(fs_mount, e.message))
LOG.debug('Trying lazy umounting {0}'.format(fs_mount))
utils.execute('umount', '-l', fs_mount, check_exit_code=[0])
else:
raise


@ -1,257 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import shutil
from oslo.config import cfg
from fuel_agent import errors
from fuel_agent.openstack.common import log as logging
from fuel_agent.utils import utils
LOG = logging.getLogger(__name__)
gu_opts = [
cfg.IntOpt(
'grub_timeout',
default=5,
help='Timeout in secs for GRUB'
),
]
CONF = cfg.CONF
CONF.register_opts(gu_opts)
def guess_grub2_conf(chroot=''):
for filename in ('/boot/grub/grub.cfg', '/boot/grub2/grub.cfg'):
if os.path.isdir(os.path.dirname(chroot + filename)):
return filename
def guess_grub2_default(chroot=''):
for filename in ('/etc/default/grub', '/etc/sysconfig/grub'):
if os.path.isfile(chroot + filename):
return filename
def guess_grub2_mkconfig(chroot=''):
for grub_mkconfig in \
('/sbin/grub-mkconfig', '/sbin/grub2-mkconfig',
'/usr/sbin/grub-mkconfig', '/usr/sbin/grub2-mkconfig'):
if os.path.isfile(chroot + grub_mkconfig):
return grub_mkconfig
def guess_grub_version(chroot=''):
grub_install = guess_grub_install(chroot=chroot)
LOG.debug('Trying to run %s --version' % grub_install)
cmd = [grub_install, '--version']
if chroot:
cmd[:0] = ['chroot', chroot]
result = utils.execute(*cmd)
version = 1 if '0.97' in result[0] else 2
LOG.debug('Looks like grub version is %s' % version)
return version
def guess_grub(chroot=''):
for grub in ('/sbin/grub', '/usr/sbin/grub'):
LOG.debug('Looking for grub: trying %s' % grub)
if os.path.isfile(chroot + grub):
LOG.debug('grub found: %s' % grub)
return grub
raise errors.GrubUtilsError('grub not found')
def guess_grub_install(chroot=''):
for grub_install in ('/sbin/grub-install', '/sbin/grub2-install',
'/usr/sbin/grub-install', '/usr/sbin/grub2-install'):
LOG.debug('Looking for grub-install: trying %s' % grub_install)
if os.path.isfile(chroot + grub_install):
LOG.debug('grub-install found: %s' % grub_install)
return grub_install
raise errors.GrubUtilsError('grub-install not found')
def guess_grub1_datadir(chroot='', arch='x86_64'):
LOG.debug('Looking for grub data directory')
for d in os.listdir(chroot + '/usr/share/grub'):
if arch in d:
LOG.debug('Looks like grub data directory '
'is /usr/share/grub/%s' % d)
return '/usr/share/grub/' + d
def guess_kernel(chroot='', regexp=None):
"""Tries to guess kernel by regexp
:param chroot: Path to chroot
:param regexp: (String) Regular expression (must have python syntax).
Default is r'^vmlinuz.*'
"""
kernel = utils.guess_filename(
path=os.path.join(chroot, 'boot'),
regexp=(regexp or r'^vmlinuz.*'))
if kernel:
return kernel
raise errors.GrubUtilsError('Error while trying to find kernel: '
'regexp=%s' % regexp)
def guess_initrd(chroot='', regexp=None):
"""Tries to guess initrd by regexp
:param chroot: Path to chroot
:param regexp: (String) Regular expression (must have python syntax).
Default is r'^(initrd|initramfs).*'
"""
initrd = utils.guess_filename(
path=os.path.join(chroot, 'boot'),
regexp=(regexp or r'^(initrd|initramfs).*'))
if initrd:
return initrd
raise errors.GrubUtilsError('Error while trying to find initrd: '
'regexp=%s' % regexp)
def grub1_install(install_devices, boot_device, chroot=''):
match = re.search(r'(.+?)(p?)(\d*)$', boot_device)
# Checking whether boot device is a partition
# !!! It must be a partition not a whole disk. !!!
if not match.group(3):
raise errors.GrubUtilsError(
'Error while installing legacy grub: '
'boot device must be a partition')
boot_disk = match.group(1)
boot_part = str(int(match.group(3)) - 1)
grub1_stage1(chroot=chroot)
for install_device in install_devices:
grub1_mbr(install_device, boot_disk, boot_part, chroot=chroot)
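# A worked example of the boot device split (device name illustrative):
#
# >>> import re
# >>> m = re.search(r'(.+?)(p?)(\d*)$', '/dev/sda3')
# >>> m.group(1), m.group(3)
# ('/dev/sda', '3')
#
# grub counts partitions from zero, so /dev/sda3 maps to root (hdX,2).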
def grub1_mbr(install_device, boot_disk, boot_part, chroot=''):
# The device on which we are going to install
# stage1 needs to be mapped as hd0, otherwise the system won't be able to boot.
batch = 'device (hd0) {0}\n'.format(install_device)
# It would be much easier to use grub-install, but unfortunately
# it is not able to install a bootloader on huge disks.
# Instead we set the drive geometry manually to avoid grub register
# overlapping. We set it so as to make grub
# think that the disk size is equal to 1G.
# 130 cylinders * (16065 * 512 = 8225280 bytes) = 1G
# We also assume that boot partition is in the beginning
# of disk between 0 and 1G.
batch += 'geometry (hd0) 130 255 63\n'
if boot_disk != install_device:
batch += 'device (hd1) {0}\n'.format(boot_disk)
batch += 'geometry (hd1) 130 255 63\n'
batch += 'root (hd1,{0})\n'.format(boot_part)
else:
batch += 'root (hd0,{0})\n'.format(boot_part)
batch += 'setup (hd0)\n'
batch += 'quit\n'
with open(chroot + '/tmp/grub.batch', 'wb') as f:
LOG.debug('Grub batch content: \n%s' % batch)
f.write(batch)
script = 'cat /tmp/grub.batch | {0} --no-floppy --batch'.format(
guess_grub(chroot=chroot))
with open(chroot + '/tmp/grub.sh', 'wb') as f:
LOG.debug('Grub script content: \n%s' % script)
f.write(script)
os.chmod(chroot + '/tmp/grub.sh', 0o755)
cmd = ['/tmp/grub.sh']
if chroot:
cmd[:0] = ['chroot', chroot]
stdout, stderr = utils.execute(*cmd, run_as_root=True, check_exit_code=[0])
LOG.debug('Grub script stdout: \n%s' % stdout)
LOG.debug('Grub script stderr: \n%s' % stderr)
def grub1_stage1(chroot=''):
LOG.debug('Installing grub stage1 files')
for f in os.listdir(chroot + '/boot/grub'):
if f in ('stage1', 'stage2') or 'stage1_5' in f:
LOG.debug('Removing: %s' % chroot + os.path.join('/boot/grub', f))
os.remove(chroot + os.path.join('/boot/grub', f))
grub1_datadir = guess_grub1_datadir(chroot=chroot)
for f in os.listdir(chroot + grub1_datadir):
if f in ('stage1', 'stage2') or 'stage1_5' in f:
LOG.debug('Copying %s from %s to /boot/grub' % (f, grub1_datadir))
shutil.copy(chroot + os.path.join(grub1_datadir, f),
chroot + os.path.join('/boot/grub', f))
def grub1_cfg(kernel=None, initrd=None,
kernel_params='', chroot='', grub_timeout=CONF.grub_timeout):
if not kernel:
kernel = guess_kernel(chroot=chroot)
if not initrd:
initrd = guess_initrd(chroot=chroot)
config = """
default=0
timeout={grub_timeout}
title Default ({kernel})
kernel /{kernel} {kernel_params}
initrd /{initrd}
""".format(kernel=kernel, initrd=initrd,
kernel_params=kernel_params,
grub_timeout=grub_timeout)
with open(chroot + '/boot/grub/grub.conf', 'wb') as f:
f.write(config)
def grub2_install(install_devices, chroot=''):
grub_install = guess_grub_install(chroot=chroot)
for install_device in install_devices:
cmd = [grub_install, install_device]
if chroot:
cmd[:0] = ['chroot', chroot]
utils.execute(*cmd, run_as_root=True, check_exit_code=[0])
def grub2_cfg(kernel_params='', chroot='', grub_timeout=CONF.grub_timeout):
grub_defaults = chroot + guess_grub2_default(chroot=chroot)
rekerparams = re.compile(r'^.*GRUB_CMDLINE_LINUX=.*')
retimeout = re.compile(r'^.*GRUB_HIDDEN_TIMEOUT=.*')
new_content = ''
with open(grub_defaults) as f:
for line in f:
line = rekerparams.sub(
'GRUB_CMDLINE_LINUX="{kernel_params}"'.
format(kernel_params=kernel_params), line)
line = retimeout.sub('GRUB_HIDDEN_TIMEOUT={grub_timeout}'.
format(grub_timeout=grub_timeout), line)
new_content += line
# NOTE(agordeev): explicitly add the recordfail timeout, in order to
# prevent a user confirmation prompt from appearing if an unexpected
# reboot occurred.
new_content += '\nGRUB_RECORDFAIL_TIMEOUT={grub_timeout}\n'.\
format(grub_timeout=grub_timeout)
with open(grub_defaults, 'wb') as f:
f.write(new_content)
cmd = [guess_grub2_mkconfig(chroot), '-o', guess_grub2_conf(chroot)]
if chroot:
cmd[:0] = ['chroot', chroot]
utils.execute(*cmd, run_as_root=True)


@ -1,373 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from fuel_agent.utils import utils
# Please take a look at the linux kernel documentation
# https://github.com/torvalds/linux/blob/master/Documentation/devices.txt.
# KVM virtio volumes have major number 252 in CentOS, but 253 in Ubuntu.
# NOTE(agordeev): nvme devices also have a major number of 259
# (only in 2.6 kernels)
VALID_MAJORS = (3, 8, 65, 66, 67, 68, 69, 70, 71, 104, 105, 106, 107, 108, 109,
110, 111, 202, 252, 253, 259)
# We are only interested in getting these
# properties from udevadm report
# MAJOR major device number
# MINOR minor device number
# DEVNAME e.g. /dev/sda
# DEVTYPE e.g. disk or partition for block devices
# DEVPATH path to a device directory relative to /sys
# ID_BUS e.g. ata, scsi
# ID_MODEL e.g. MATSHITADVD-RAM_UJ890
# ID_SERIAL_SHORT e.g. UH00_296679
# ID_WWN e.g. 0x50000392e9804d4b (optional)
# ID_CDROM e.g. 1 for cdrom device (optional)
UDEV_PROPERTIES = set(['MAJOR', 'MINOR', 'DEVNAME', 'DEVTYPE', 'DEVPATH',
'ID_BUS', 'ID_MODEL', 'ID_SERIAL_SHORT', 'ID_WWN',
'ID_CDROM', 'ID_VENDOR'])
REMOVABLE_VENDORS = [
"Adaptec", "IBM", "ServeRA",
]
# more details about types you can find in dmidecode's manual
SMBIOS_TYPES = {'bios': '0',
'base_board': '2',
'processor': '4',
'memory_array': '16',
'memory_device': '17'}
def parse_dmidecode(type):
"""Parses `dmidecode` output.
:param type: A string with type of entity to display.
:returns: A list with dictionaries of entities for specified type.
"""
output = utils.execute('dmidecode', '-q', '--type', type)
lines = output[0].split('\n')
info = []
multiline_values = None
section = 0
for line in lines:
if len(line) != 0 and len(line.strip()) == len(line):
info.append({})
section = len(info) - 1
try:
k, v = (l.strip() for l in line.split(':', 1))
except ValueError:
k = line.strip()
if not k:
multiline_values = None
if multiline_values:
info[section][multiline_values].append(k)
else:
if not v:
multiline_values = k.lower()
info[section][multiline_values] = []
else:
info[section][k.lower()] = v
return info
def parse_lspci():
"""Parses `lspci` output.
:returns: A list of dicts containing PCI devices information
"""
output = utils.execute('lspci', '-vmm', '-D')
lines = output[0].split('\n')
info = [{}]
section = 0
for line in lines[:-2]:
try:
k, v = (l.strip() for l in line.split(':', 1))
except ValueError:
info.append({})
section += 1
else:
info[section][k.lower()] = v
return info
def parse_simple_kv(*command):
"""Parses simple key:value output from specified command.
:param command: A command to execute
:returns: A dict of parsed key-value data
"""
output = utils.execute(*command)
lines = output[0].split('\n')
info = {}
for line in lines[:-1]:
try:
k, v = (l.strip() for l in line.split(':', 1))
except ValueError:
break
else:
info[k.lower()] = v
return info
def is_disk(dev, bspec=None, uspec=None):
"""Checks if given device is a disk.
:param dev: A device file, e.g. /dev/sda.
:param bspec: A dict of properties which we get from blockdev.
:param uspec: A dict of properties which we get from udevadm.
:returns: True if device is disk else False.
"""
# Filtering by udevspec
if uspec is None:
uspec = udevreport(dev)
if uspec.get('ID_CDROM') == '1':
return False
if uspec.get('DEVTYPE') == 'partition':
return False
if 'MAJOR' in uspec and int(uspec['MAJOR']) not in VALID_MAJORS:
return False
# Filtering by blockdev spec
if bspec is None:
bspec = blockdevreport(dev)
if bspec.get('ro') == '1':
return False
return True
def udevreport(dev):
"""Builds device udevadm report.
:param dev: A device file, e.g. /dev/sda.
:returns: A dict of udev device properties.
"""
report = utils.execute('udevadm',
'info',
'--query=property',
'--export',
'--name={0}'.format(dev),
check_exit_code=[0])[0]
spec = {}
for line in [l for l in report.splitlines() if l]:
key, value = line.split('=', 1)
value = value.strip('\'')
# This is a list of symbolic links which were created for this
# block device (e.g. /dev/disk/by-id/foobar)
if key == 'DEVLINKS':
spec['DEVLINKS'] = value.split()
if key in UDEV_PROPERTIES:
spec[key] = value
return spec
def blockdevreport(blockdev):
"""Builds device blockdev report.
:param blockdev: A block device file, e.g. /dev/sda.
:returns: A dict of blockdev properties.
"""
cmd = [
'blockdev',
'--getsz', # get size in 512-byte sectors
'--getro', # get read-only
'--getss', # get logical block (sector) size
'--getpbsz', # get physical block (sector) size
'--getsize64', # get size in bytes
'--getiomin', # get minimum I/O size
'--getioopt', # get optimal I/O size
'--getra', # get readahead
'--getalignoff', # get alignment offset in bytes
'--getmaxsect', # get max sectors per request
blockdev
]
opts = [o[5:] for o in cmd if o.startswith('--get')]
report = utils.execute(*cmd, check_exit_code=[0])[0]
return dict(zip(opts, report.splitlines()))
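# The report is just the --get* flag names zipped with the corresponding
# output lines, e.g. (values illustrative):
#
# >>> zip(['sz', 'ro'], '1953525168\n0'.splitlines())
# [('sz', '1953525168'), ('ro', '0')]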
def extrareport(dev):
"""Builds device report using some additional sources.
:param dev: A device file, e.g. /dev/sda.
:returns: A dict of properties.
"""
spec = {}
name = os.path.basename(dev)
# Finding out if block device is removable or not
# actually, some disks are marked as removable
# while they are actually not e.g. Adaptec RAID volumes
try:
with open('/sys/block/{0}/removable'.format(name)) as file:
spec['removable'] = file.read().strip()
except Exception:
pass
for key in ('state', 'timeout'):
try:
with open('/sys/block/{0}/device/{1}'.format(name, key)) as file:
spec[key] = file.read().strip()
except Exception:
pass
return spec
def get_block_devices_from_udev_db():
devs = []
output = utils.execute('udevadm', 'info', '--export-db')[0]
for device in output.split('\n\n'):
# NOTE(agordeev): add only disks or their partitions
if 'SUBSYSTEM=block' in device and ('DEVTYPE=disk' in device or
'DEVTYPE=partition' in device):
# NOTE(agordeev): it has to be sorted in order
# to find the MAJOR property prior to the DEVNAME property.
for line in sorted(device.split('\n'), reverse=True):
if line.startswith('E: MAJOR='):
major = int(line.split()[1].split('=')[1])
if major not in VALID_MAJORS:
# NOTE(agordeev): filter out cd/dvd drives and other
# block devices in which fuel-agent isn't interested
break
if line.startswith('E: DEVNAME='):
d = line.split()[1].split('=')[1]
if not any(os.path.basename(d).startswith(n)
for n in ('nbd', 'ram', 'loop')):
devs.append(line.split()[1].split('=')[1])
break
return devs
def list_block_devices(disks=True):
"""Gets list of block devices
Tries to guess which of them are disks
and returns list of dicts representing those disks.
:returns: A list of dict representing disks available on a node.
"""
bdevs = []
# NOTE(agordeev): blockdev from util-linux contains a bug
# which causes 'blockdev --report' to fail when reporting
# nvme devices.
# The actual fix is included in util-linux-2.24.1:
# - don't use HDIO_GETGEO [Phillip Susi]
# Since the bug only affects '--report' it is safe to use
# 'blockdevreport'.
# fuel-agent has to be switched to use udev database in order to
# find all block devices recognized by kernel.
devs = get_block_devices_from_udev_db()
for device in devs:
uspec = udevreport(device)
espec = extrareport(device)
# NOTE(agordeev): blockdevreport will fail if there's no medium
# inserted into removable device.
# Accept only devices from REMOVABLE_VENDORS list
if (espec.get('removable') == '1' and
uspec.get('ID_VENDOR') not in REMOVABLE_VENDORS):
continue
bspec = blockdevreport(device)
# if the device is not a disk, skip it
if disks and not is_disk(device, bspec=bspec, uspec=uspec):
continue
bdev = {
'device': device,
# NOTE(agordeev): blockdev gets 'startsec' from sysfs,
# 'size' is determined by ioctl call.
# This data was not actually used by fuel-agent,
# so it can be removed without side effects.
'uspec': uspec,
'bspec': bspec,
'espec': espec
}
bdevs.append(bdev)
return bdevs
def match_device(uspec1, uspec2):
"""Tries to find out if uspec1 and uspec2 are uspecs from the same device
It compares only some fields in uspecs (not all of them) which, we believe,
is enough to say exactly whether uspecs belong to the same device or not.
:param uspec1: A dict of properties which we get from udevadm.
:param uspec2: A dict of properties which we get from udevadm.
:returns: True if uspecs match each other else False.
"""
# False if ID_WWN is given and does not match each other
if ('ID_WWN' in uspec1 and 'ID_WWN' in uspec2
and uspec1['ID_WWN'] != uspec2['ID_WWN']):
return False
# False if ID_SERIAL_SHORT is given and does not match each other
if ('ID_SERIAL_SHORT' in uspec1 and 'ID_SERIAL_SHORT' in uspec2
and uspec1['ID_SERIAL_SHORT'] != uspec2['ID_SERIAL_SHORT']):
return False
# True if at least one by-id link is the same for both uspecs
if ('DEVLINKS' in uspec1 and 'DEVLINKS' in uspec2
and any(x.startswith('/dev/disk/by-id') for x in
set(uspec1['DEVLINKS']) & set(uspec2['DEVLINKS']))):
return True
# True if ID_WWN is given and matches each other
# and DEVTYPE is given and is 'disk'
if (uspec1.get('ID_WWN') == uspec2.get('ID_WWN') is not None
and uspec1.get('DEVTYPE') == uspec2.get('DEVTYPE') == 'disk'):
return True
# True if ID_WWN is given and matches each other
# and DEVTYPE is given and is 'partition'
# and MINOR is given and matches each other
if (uspec1.get('ID_WWN') == uspec2.get('ID_WWN') is not None
and uspec1.get('DEVTYPE') == uspec2.get('DEVTYPE') == 'partition'
and uspec1.get('MINOR') == uspec2.get('MINOR') is not None):
return True
    # True if ID_SERIAL_SHORT is given in both uspecs and matches
    # and DEVTYPE is given and is 'disk'
if (uspec1.get('ID_SERIAL_SHORT') == uspec2.get('ID_SERIAL_SHORT')
is not None
and uspec1.get('DEVTYPE') == uspec2.get('DEVTYPE') == 'disk'):
return True
    # True if DEVPATH is given in both uspecs and matches
if uspec1.get('DEVPATH') == uspec2.get('DEVPATH') is not None:
return True
return False
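# NOTE(editor): an illustrative sketch, not part of the original module.
# Two uspecs taken from different udev snapshots of the same disk are
# expected to match on ID_WWN + DEVTYPE even if DEVNAME has changed;
# the WWN below is hypothetical.
def _example_match_device():
    uspec_before = {'ID_WWN': '0x5000c50063041234', 'DEVTYPE': 'disk'}
    uspec_after = {'ID_WWN': '0x5000c50063041234', 'DEVTYPE': 'disk'}
    assert match_device(uspec_before, uspec_after)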


@ -1,243 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fuel_agent import errors
from fuel_agent.openstack.common import log as logging
from fuel_agent.utils import utils
LOG = logging.getLogger(__name__)
def pvdisplay():
# unit m means MiB (power of 2)
output = utils.execute(
'pvdisplay',
'-C',
'--noheading',
'--units', 'm',
'--options', 'pv_name,vg_name,pv_size,dev_size,pv_uuid',
'--separator', ';',
check_exit_code=[0])[0]
return pvdisplay_parse(output)
def pvdisplay_parse(output):
pvs = []
for line in output.split('\n'):
line = line.strip()
if not line:
continue
pv_params = line.split(';')
pvs.append({
'name': pv_params[0],
'vg': pv_params[1] or None,
'psize': utils.parse_unit(pv_params[2], 'm'),
'devsize': utils.parse_unit(pv_params[3], 'm'),
'uuid': pv_params[4]
})
LOG.debug('Found physical volumes: {0}'.format(pvs))
return pvs
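# NOTE(editor): an illustrative sketch, not part of the original module.
# The input line below is a hypothetical pvdisplay row in the format
# requested above: 'pv_name;vg_name;pv_size;dev_size;pv_uuid', units in m.
def _example_pvdisplay_parse():
    sample = '  /dev/sda5;os;10236.00m;10240.00m;JoOV2v-aaaa-bbbb\n'
    pv = pvdisplay_parse(sample)[0]
    assert pv['name'] == '/dev/sda5' and pv['vg'] == 'os'
    assert pv['psize'] == 10236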
def pvcreate(pvname, metadatasize=64, metadatacopies=2):
# check if pv already exists
if filter(lambda x: x['name'] == pvname, pvdisplay()):
raise errors.PVAlreadyExistsError(
'Error while creating pv: pv %s already exists' % pvname)
utils.execute('pvcreate',
'--metadatacopies', str(metadatacopies),
'--metadatasize', str(metadatasize) + 'm',
pvname, check_exit_code=[0])
def pvremove(pvname):
pv = filter(lambda x: x['name'] == pvname, pvdisplay())
# check if pv exists
if not pv:
raise errors.PVNotFoundError(
'Error while removing pv: pv %s not found' % pvname)
# check if pv is attached to some vg
if pv[0]['vg'] is not None:
raise errors.PVBelongsToVGError('Error while removing pv: '
'pv belongs to vg %s' % pv[0]['vg'])
utils.execute('pvremove', '-ff', '-y', pvname, check_exit_code=[0])
def vgdisplay():
output = utils.execute(
'vgdisplay',
'-C',
'--noheading',
'--units', 'm',
'--options', 'vg_name,vg_uuid,vg_size,vg_free',
'--separator', ';',
check_exit_code=[0])[0]
return vgdisplay_parse(output)
def vgdisplay_parse(output):
vgs = []
for line in output.split('\n'):
line = line.strip()
if not line:
continue
vg_params = line.split(';')
vgs.append({
'name': vg_params[0],
'uuid': vg_params[1],
'size': utils.parse_unit(vg_params[2], 'm'),
'free': utils.parse_unit(vg_params[3], 'm', ceil=False)
})
LOG.debug('Found volume groups: {0}'.format(vgs))
return vgs
def _vg_attach_validate(pvnames):
pvs = pvdisplay()
# check if all necessary pv exist
if not set(pvnames).issubset(set([pv['name'] for pv in pvs])):
raise errors.PVNotFoundError(
'Error while creating vg: at least one of pv is not found')
# check if all necessary pv are not already attached to some vg
if not set(pvnames).issubset(
set([pv['name'] for pv in pvs if pv['vg'] is None])):
raise errors.PVBelongsToVGError(
'Error while creating vg: at least one of pvs is '
'already attached to some vg')
def vgcreate(vgname, pvname, *args):
# check if vg already exists
if filter(lambda x: x['name'] == vgname, vgdisplay()):
raise errors.VGAlreadyExistsError(
'Error while creating vg: vg %s already exists' % vgname)
pvnames = [pvname] + list(args)
_vg_attach_validate(pvnames)
utils.execute('vgcreate', vgname, *pvnames, check_exit_code=[0])
def vgextend(vgname, pvname, *args):
# check if vg exists
if not filter(lambda x: x['name'] == vgname, vgdisplay()):
raise errors.VGNotFoundError(
'Error while extending vg: vg %s not found' % vgname)
pvnames = [pvname] + list(args)
_vg_attach_validate(pvnames)
utils.execute('vgextend', vgname, *pvnames, check_exit_code=[0])
def vgreduce(vgname, pvname, *args):
# check if vg exists
if not filter(lambda x: x['name'] == vgname, vgdisplay()):
raise errors.VGNotFoundError(
'Error while reducing vg: vg %s not found' % vgname)
pvnames = [pvname] + list(args)
# check if all necessary pv are attached to vg
if not set(pvnames).issubset(
set([pv['name'] for pv in pvdisplay() if pv['vg'] == vgname])):
raise errors.PVNotFoundError(
'Error while reducing vg: at least one of pv is '
'not attached to vg')
utils.execute('vgreduce', '-f', vgname, *pvnames, check_exit_code=[0])
def vgremove(vgname):
# check if vg exists
if not filter(lambda x: x['name'] == vgname, vgdisplay()):
raise errors.VGNotFoundError(
'Error while removing vg: vg %s not found' % vgname)
utils.execute('vgremove', '-f', vgname, check_exit_code=[0])
def lvdisplay():
output = utils.execute(
'lvdisplay',
'-C',
'--noheading',
'--units', 'm',
        # NOTE(agordeev): lv_path had been removed from the options
        # since versions of lvdisplay prior to 2.02.68 don't have it.
'--options', 'lv_name,lv_size,vg_name,lv_uuid',
'--separator', ';',
check_exit_code=[0])[0]
return lvdisplay_parse(output)
def lvdisplay_parse(output):
lvs = []
for line in output.split('\n'):
line = line.strip()
if not line:
continue
lv_params = line.split(';')
lvs.append({
'name': lv_params[0],
'size': utils.parse_unit(lv_params[1], 'm'),
'vg': lv_params[2],
'uuid': lv_params[3],
# NOTE(agordeev): simulate lv_path with '/dev/$vg_name/$lv_name'
'path': '/dev/%s/%s' % (lv_params[2], lv_params[0])
})
LOG.debug('Found logical volumes: {0}'.format(lvs))
return lvs
def lvcreate(vgname, lvname, size):
vg = filter(lambda x: x['name'] == vgname, vgdisplay())
# check if vg exists
if not vg:
raise errors.VGNotFoundError(
'Error while creating vg: vg %s not found' % vgname)
# check if enough space is available
if vg[0]['free'] < size:
raise errors.NotEnoughSpaceError(
'Error while creating lv: vg %s has only %s m of free space, '
'but at least %s m is needed' % (vgname, vg[0]['free'], size))
# check if lv already exists
if filter(lambda x: x['name'] == lvname and x['vg'] == vgname,
lvdisplay()):
raise errors.LVAlreadyExistsError(
'Error while creating lv: lv %s already exists' % lvname)
# NOTE(agordeev): by default, lvcreate is configured to wipe signature
# on allocated volume. '--yes' should be passed to avoid waiting for
# user's confirmation:
# "WARNING: <signature> signature detected on <device>. Wipe it? [y/n]"
utils.execute('lvcreate', '--yes', '-L', '%sm' % size, '-n', lvname,
vgname, check_exit_code=[0])
def lvremove(lvpath):
# check if lv exists
if not filter(lambda x: x['path'] == lvpath, lvdisplay()):
raise errors.LVNotFoundError(
'Error while removing lv: lv %s not found' % lvpath)
utils.execute('lvremove', '-f', lvpath, check_exit_code=[0])
def lvremove_all():
for lv in lvdisplay():
lvremove(lv['path'])
def vgremove_all():
for vg in vgdisplay():
vgremove(vg['name'])
def pvremove_all():
for pv in pvdisplay():
pvremove(pv['name'])
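# NOTE(editor): an illustrative end-to-end sketch, not part of the
# original module. The device and names are hypothetical; every helper
# above validates its own preconditions and raises a specific error
# from fuel_agent.errors otherwise.
def _example_lvm_flow():
    pvcreate('/dev/sda5')           # turn the partition into a pv
    vgcreate('os', '/dev/sda5')     # create a vg on top of it
    lvcreate('os', 'root', 4096)    # carve out a 4096 MiB lv /dev/os/root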


@ -1,170 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from fuel_agent import errors
from fuel_agent.openstack.common import log as logging
from fuel_agent.utils import hardware as hu
from fuel_agent.utils import utils
LOG = logging.getLogger(__name__)
def mddetail_parse(output):
md = {}
    # NOTE(agordeev): Sometimes the 'State' column is missing
    h, _, v = re.split(r"Number\s+Major\s+Minor\s+RaidDevice\s+(State\s+)?",
                       output)
for line in h.split('\n'):
line = line.strip()
if not line:
continue
for pattern in ('Version', 'Raid Level', 'Raid Devices',
'Active Devices', 'Spare Devices',
'Failed Devices', 'State', 'UUID'):
if line.startswith(pattern):
md[pattern] = line.split()[-1]
md['devices'] = []
for line in v.split('\n'):
line = line.strip()
if not line:
continue
md['devices'].append(line.split()[-1])
return md
def get_mdnames(output=None):
mdnames = []
if not output:
with open('/proc/mdstat') as f:
output = f.read()
for line in output.split('\n'):
if line.startswith('md'):
mdnames.append('/dev/%s' % line.split()[0])
return mdnames
def mddisplay(names=None):
mdnames = names or get_mdnames()
mds = []
for mdname in mdnames:
md = {'name': mdname}
try:
output = utils.execute('mdadm', '--detail', mdname,
check_exit_code=[0])[0]
LOG.debug('mdadm --detail %s output:\n%s', mdname, output)
md.update(mddetail_parse(output))
except errors.ProcessExecutionError as exc:
LOG.debug(exc)
continue
finally:
mds.append(md)
LOG.debug('Found md devices: {0}'.format(mds))
return mds
def mdcreate(mdname, level, device, *args):
mds = mddisplay()
# check if md device already exists
if filter(lambda x: x['name'] == mdname, mds):
raise errors.MDAlreadyExistsError(
'Error while creating md: md %s already exists' % mdname)
# check if level argument is valid
supported_levels = ('0', '1', 'raid0', 'raid1', 'stripe', 'mirror')
if level not in supported_levels:
raise errors.MDWrongSpecError(
'Error while creating md device: '
'level must be one of: %s' % ', '.join(supported_levels))
devices = [device] + list(args)
# check if all necessary devices exist
if not set(devices).issubset(
set([bd['device'] for bd in hu.list_block_devices(disks=False)])):
raise errors.MDNotFoundError(
'Error while creating md: at least one of devices is not found')
# check if devices are not parts of some md array
if set(devices) & \
set(reduce(lambda x, y: x + y,
[md.get('devices', []) for md in mds], [])):
raise errors.MDDeviceDuplicationError(
'Error while creating md: at least one of devices is '
            'already belongs to some md')
    # FIXME: mdadm will ask the user to continue creating if any device
    # appears to be a part of a raid array. Superblock zeroing helps to
    # avoid that.
map(mdclean, devices)
utils.execute('mdadm', '--create', '--force', mdname, '-e0.90',
'--level=%s' % level,
'--raid-devices=%s' % len(devices), *devices,
check_exit_code=[0])
def mdremove(mdname):
# check if md exists
if mdname not in get_mdnames():
raise errors.MDNotFoundError(
'Error while removing md: md %s not found' % mdname)
    # FIXME: The issue faced was quite hard to reproduce and to figure out
    #        the root cause. For an unknown reason an already removed md
    #        device unexpectedly comes back after a while from time to time,
    #        making new md device creation fail.
    #        The actual reason of the failure is still unknown, but a web
    #        search turned up a mention of a race in udev
    #        http://dev.bizo.com/2012/07/mdadm-device-or-resource-busy.html
    #        The article recommends disabling udev's queue entirely during
    #        md device manipulation, which sounds rather inappropriate for
    #        our case. The link to the original post on the mailing list
    #        suggests executing `udevadm settle` before removing the md
    #        device, here -> http://permalink.gmane.org/gmane.linux.raid/34027
    #        So, here is what was done: `udevadm settle` calls were placed
    #        just before any of the `mdadm` calls and log analysis was
    #        started. According to the manual, `settle` "watches the udev
    #        event queue, and exits if all current events are handled",
    #        i.e. it waits for udev to finish processing its events.
    #        The logs showed a noticeable delay, about 150-200ms or even
    #        bigger, between `udevadm settle` and the next `mdadm` call,
    #        right before the `mdadm --stop` call. That just means that udev
    #        was too busy with events when we started to modify md devices
    #        hard. Thus `udevadm settle` helps to avoid the later failure
    #        and prevents the strange behaviour of the md device.
utils.execute('udevadm', 'settle', '--quiet', check_exit_code=[0])
utils.execute('mdadm', '--stop', mdname, check_exit_code=[0])
utils.execute('mdadm', '--remove', mdname, check_exit_code=[0, 1])
def mdclean(device):
# we don't care if device actually exists or not
utils.execute('mdadm', '--zero-superblock', '--force', device,
check_exit_code=[0])
def mdclean_all():
LOG.debug('Trying to wipe out all md devices')
for md in mddisplay():
mdremove(md['name'])
for dev in md.get('devices', []):
mdclean(dev)
# second attempt, remove stale inactive devices
for md in mddisplay():
mdremove(md['name'])
mds = mddisplay()
if len(mds) > 0:
raise errors.MDRemovingError(
            'Error while removing mds: some devices are still '
            'present: %s' % mds)
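# NOTE(editor): an illustrative sketch, not part of the original module.
# It builds a RAID1 array out of two hypothetical partitions; mdcreate()
# zeroes stale superblocks itself before calling mdadm.
def _example_mdcreate_mirror():
    mdcreate('/dev/md0', 'mirror', '/dev/sda2', '/dev/sdb2')
    assert '/dev/md0' in get_mdnames()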


@ -1,194 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from fuel_agent import errors
from fuel_agent.openstack.common import log as logging
from fuel_agent.utils import utils
LOG = logging.getLogger(__name__)
def parse_partition_info(output):
lines = output.split('\n')
generic_params = lines[1].rstrip(';').split(':')
generic = {
'dev': generic_params[0],
'size': utils.parse_unit(generic_params[1], 'MiB'),
'logical_block': int(generic_params[3]),
'physical_block': int(generic_params[4]),
'table': generic_params[5],
'model': generic_params[6]
}
parts = []
for line in lines[2:]:
line = line.strip().rstrip(';')
if not line:
continue
part_params = line.split(':')
parts.append({
'num': int(part_params[0]),
'begin': utils.parse_unit(part_params[1], 'MiB'),
'end': utils.parse_unit(part_params[2], 'MiB'),
'size': utils.parse_unit(part_params[3], 'MiB'),
'fstype': part_params[4] or None
})
return {'generic': generic, 'parts': parts}
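# NOTE(editor): an illustrative sketch, not part of the original module.
# The sample is a hypothetical 'parted -s <dev> -m unit MiB print free'
# output: a unit header, a ':'-separated device summary, then one
# ';'-terminated line per partition (or free gap).
def _example_parse_partition_info():
    sample = ('BYT;\n'
              '/dev/sda:10240MiB:scsi:512:512:gpt:HYPOTHETICAL DISK;\n'
              '1:1.00MiB:201MiB:200MiB:ext2::;\n')
    result = parse_partition_info(sample)
    assert result['generic']['table'] == 'gpt'
    assert result['parts'][0]['size'] == 200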
def info(dev):
utils.execute('udevadm', 'settle', '--quiet', check_exit_code=[0])
output = utils.execute('parted', '-s', dev, '-m',
'unit', 'MiB',
'print', 'free',
check_exit_code=[0])[0]
LOG.debug('Info output: \n%s' % output)
result = parse_partition_info(output)
LOG.debug('Info result: %s' % result)
return result
def wipe(dev):
# making an empty new table is equivalent to wiping the old one
    LOG.debug('Wiping partition table on %s (we assume it is equivalent '
              'to creating a new one)' % dev)
make_label(dev)
def make_label(dev, label='gpt'):
"""Creates partition label on a device.
:param dev: A device file, e.g. /dev/sda.
:param label: Partition label type 'gpt' or 'msdos'. Optional.
:returns: None
"""
LOG.debug('Trying to create %s partition table on device %s' %
(label, dev))
if label not in ('gpt', 'msdos'):
raise errors.WrongPartitionLabelError(
'Wrong partition label type: %s' % label)
utils.execute('udevadm', 'settle', '--quiet', check_exit_code=[0])
out, err = utils.execute('parted', '-s', dev, 'mklabel', label,
check_exit_code=[0, 1])
LOG.debug('Parted output: \n%s' % out)
reread_partitions(dev, out=out)
def set_partition_flag(dev, num, flag, state='on'):
"""Sets flag on a partition
:param dev: A device file, e.g. /dev/sda.
:param num: Partition number
:param flag: Flag name. Must be one of 'bios_grub', 'legacy_boot',
'boot', 'raid', 'lvm'
    :param state: Desired flag state: 'on' or 'off'. Default is 'on'.
:returns: None
"""
LOG.debug('Trying to set partition flag: dev=%s num=%s flag=%s state=%s' %
(dev, num, flag, state))
# parted supports more flags but we are interested in
# setting only this subset of them.
# not all of these flags are compatible with one another.
if flag not in ('bios_grub', 'legacy_boot', 'boot', 'raid', 'lvm'):
raise errors.WrongPartitionSchemeError(
'Unsupported partition flag: %s' % flag)
if state not in ('on', 'off'):
raise errors.WrongPartitionSchemeError(
'Wrong partition flag state: %s' % state)
utils.execute('udevadm', 'settle', '--quiet', check_exit_code=[0])
out, err = utils.execute('parted', '-s', dev, 'set', str(num),
flag, state, check_exit_code=[0, 1])
LOG.debug('Parted output: \n%s' % out)
reread_partitions(dev, out=out)
def set_gpt_type(dev, num, type_guid):
"""Sets guid on a partition.
:param dev: A device file, e.g. /dev/sda.
:param num: Partition number
:param type_guid: Partition type guid. Must be one of those listed
on this page http://en.wikipedia.org/wiki/GUID_Partition_Table.
This method does not check whether type_guid is valid or not.
:returns: None
"""
# TODO(kozhukalov): check whether type_guid is valid
LOG.debug('Setting partition GUID: dev=%s num=%s guid=%s' %
(dev, num, type_guid))
utils.execute('udevadm', 'settle', '--quiet', check_exit_code=[0])
utils.execute('sgdisk', '--typecode=%s:%s' % (num, type_guid),
dev, check_exit_code=[0])
def make_partition(dev, begin, end, ptype):
LOG.debug('Trying to create a partition: dev=%s begin=%s end=%s' %
(dev, begin, end))
if ptype not in ('primary', 'logical'):
raise errors.WrongPartitionSchemeError(
'Wrong partition type: %s' % ptype)
# check begin >= end
if begin >= end:
raise errors.WrongPartitionSchemeError(
'Wrong boundaries: begin >= end')
# check if begin and end are inside one of free spaces available
if not any(x['fstype'] == 'free' and begin >= x['begin'] and
end <= x['end'] for x in info(dev)['parts']):
raise errors.WrongPartitionSchemeError(
'Invalid boundaries: begin and end '
'are not inside available free space')
utils.execute('udevadm', 'settle', '--quiet', check_exit_code=[0])
out, err = utils.execute(
'parted', '-a', 'optimal', '-s', dev, 'unit', 'MiB',
'mkpart', ptype, str(begin), str(end), check_exit_code=[0, 1])
LOG.debug('Parted output: \n%s' % out)
reread_partitions(dev, out=out)
def remove_partition(dev, num):
LOG.debug('Trying to remove partition: dev=%s num=%s' % (dev, num))
if not any(x['fstype'] != 'free' and x['num'] == num
for x in info(dev)['parts']):
raise errors.PartitionNotFoundError('Partition %s not found' % num)
utils.execute('udevadm', 'settle', '--quiet', check_exit_code=[0])
out, err = utils.execute('parted', '-s', dev, 'rm',
str(num), check_exit_code=[0, 1])
reread_partitions(dev, out=out)
def reread_partitions(dev, out='Device or resource busy', timeout=60):
    # The reason for this method to exist is that old versions of parted
    # use ioctl(fd, BLKRRPART, NULL) to tell Linux to re-read partitions.
    # This system call sometimes does not work, so we try to re-read the
    # partition table several times. Besides, partprobe uses BLKPG instead,
    # which is better than BLKRRPART for this case: BLKRRPART tells Linux to
    # re-read partitions while BLKPG tells Linux which partitions are
    # available. BLKPG is usually used as a fallback system call.
begin = time.time()
while 'Device or resource busy' in out:
if time.time() > begin + timeout:
            raise errors.BaseError('Unable to re-read partition table on '
                                   'device %s' % dev)
LOG.debug('Last time output contained "Device or resource busy". '
'Trying to re-read partition table on device %s' % dev)
time.sleep(2)
out, err = utils.execute('partprobe', dev, check_exit_code=[0, 1])
LOG.debug('Partprobe output: \n%s' % out)
utils.execute('udevadm', 'settle', '--quiet', check_exit_code=[0])
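# NOTE(editor): an illustrative sketch, not part of the original module.
# A typical call sequence on a hypothetical disk: a fresh GPT label,
# one 200 MiB partition and the 'boot' flag set on it.
def _example_partitioning():
    make_label('/dev/sda', label='gpt')
    make_partition('/dev/sda', 1, 201, 'primary')
    set_partition_flag('/dev/sda', 1, 'boot')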


@ -1,267 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import locale
import math
import os
import re
import shlex
import socket
import subprocess
import time
import jinja2
from oslo.config import cfg
import requests
import stevedore.driver
import urllib3
from six.moves import zip_longest
from fuel_agent import errors
from fuel_agent.openstack.common import log as logging
LOG = logging.getLogger(__name__)
u_opts = [
cfg.IntOpt(
'http_max_retries',
default=30,
help='Maximum retries count for http requests. 0 means infinite',
),
cfg.FloatOpt(
'http_request_timeout',
        # Setting it to 10 secs will allow fuel-agent to overcome momentary
        # peak loads when network bandwidth becomes as low as 0.1MiB/s, thus
        # preventing wasting too many retries on such false positives.
default=10.0,
help='Http request timeout in seconds',
),
cfg.FloatOpt(
'http_retry_delay',
default=2.0,
help='Delay in seconds before the next http request retry',
),
cfg.IntOpt(
'read_chunk_size',
default=1048576,
help='Block size of data to read for calculating checksum',
),
cfg.FloatOpt(
'execute_retry_delay',
default=2.0,
        help='Delay in seconds before the next execution retry',
),
]
CONF = cfg.CONF
CONF.register_opts(u_opts)
# NOTE(agordeev): signature compatible with execute from oslo
def execute(*cmd, **kwargs):
command = ' '.join(cmd)
LOG.debug('Trying to execute command: %s', command)
commands = [c.strip() for c in re.split(ur'\|', command)]
env = os.environ
env['PATH'] = '/bin:/usr/bin:/sbin:/usr/sbin'
env['LC_ALL'] = env['LANG'] = env['LANGUAGE'] = kwargs.pop('language', 'C')
attempts = kwargs.pop('attempts', 1)
check_exit_code = kwargs.pop('check_exit_code', [0])
ignore_exit_code = False
to_filename = kwargs.get('to_filename')
cwd = kwargs.get('cwd')
if isinstance(check_exit_code, bool):
ignore_exit_code = not check_exit_code
check_exit_code = [0]
elif isinstance(check_exit_code, int):
check_exit_code = [check_exit_code]
to_file = None
if to_filename:
to_file = open(to_filename, 'wb')
for attempt in reversed(xrange(attempts)):
try:
process = []
for c in commands:
try:
# NOTE(eli): Python's shlex implementation doesn't like
# unicode. We have to convert to ascii before shlex'ing
# the command. http://bugs.python.org/issue6988
encoded_command = c.encode('ascii')
process.append(subprocess.Popen(
shlex.split(encoded_command),
env=env,
stdin=(process[-1].stdout if process else None),
stdout=(to_file
if ((len(process) == len(commands) - 1) and
to_file)
else subprocess.PIPE),
stderr=(subprocess.PIPE),
cwd=cwd
))
except (OSError, ValueError) as e:
raise errors.ProcessExecutionError(exit_code=1, stdout='',
stderr=e, cmd=command)
if len(process) >= 2:
process[-2].stdout.close()
stdout, stderr = process[-1].communicate()
if (not ignore_exit_code and
process[-1].returncode not in check_exit_code):
raise errors.ProcessExecutionError(
exit_code=process[-1].returncode, stdout=stdout,
stderr=stderr, cmd=command)
return (stdout, stderr)
except errors.ProcessExecutionError as e:
LOG.warning('Failed to execute command: %s', e)
if not attempt:
raise
else:
time.sleep(CONF.execute_retry_delay)
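# NOTE(editor): an illustrative sketch, not part of the original module.
# execute() splits its command on '|' into a subprocess pipeline and
# retries the whole pipeline up to 'attempts' times on failure.
def _example_execute():
    stdout, stderr = execute('ls -1 /dev | grep sd', attempts=2)
    return stdout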
def parse_unit(s, unit, ceil=True):
"""Converts '123.1unit' string into ints
If ceil is True it will be rounded up (124)
and and down (123) if ceil is False.
"""
flt = locale.atof(s.split(unit)[0])
if ceil:
return int(math.ceil(flt))
return int(math.floor(flt))
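# NOTE(editor): an illustrative sketch, not part of the original module.
def _example_parse_unit():
    assert parse_unit('123.1MiB', 'MiB') == 124              # rounded up
    assert parse_unit('123.1MiB', 'MiB', ceil=False) == 123  # rounded down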
def B2MiB(b, ceil=True):
if ceil:
return int(math.ceil(float(b) / 1024 / 1024))
return int(math.floor(float(b) / 1024 / 1024))
def get_driver(name):
LOG.debug('Trying to get driver: fuel_agent.drivers.%s', name)
driver = stevedore.driver.DriverManager(
namespace='fuel_agent.drivers', name=name).driver
LOG.debug('Found driver: %s', driver.__name__)
return driver
def render_and_save(tmpl_dir, tmpl_names, tmpl_data, file_name):
env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_dir))
template = env.get_or_select_template(tmpl_names)
output = template.render(tmpl_data)
try:
with open(file_name, 'w') as f:
f.write(output)
except Exception:
raise errors.TemplateWriteError(
            'Something went wrong while trying to save '
            'templated data to {0}'.format(file_name))
def calculate_md5(filename, size):
hash = hashlib.md5()
processed = 0
with open(filename, "rb") as f:
while processed < size:
block = f.read(CONF.read_chunk_size)
if block:
block_len = len(block)
if processed + block_len < size:
hash.update(block)
processed += block_len
else:
hash.update(block[:size - processed])
break
else:
break
return hash.hexdigest()
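# NOTE(editor): an illustrative sketch, not part of the original module.
# Only the first 'size' bytes are hashed, which allows verifying just the
# meaningful head of an image file; the path below is hypothetical.
def _example_calculate_md5():
    return calculate_md5('/tmp/image.img', 1048576)  # md5 of the first MiB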
def init_http_request(url, byte_range=0):
LOG.debug('Trying to initialize http request object %s, byte range: %s'
% (url, byte_range))
retry = 0
while True:
if (CONF.http_max_retries == 0) or retry <= CONF.http_max_retries:
try:
response_obj = requests.get(
url, stream=True,
timeout=CONF.http_request_timeout,
headers={'Range': 'bytes=%s-' % byte_range})
except (socket.timeout,
urllib3.exceptions.DecodeError,
urllib3.exceptions.ProxyError,
requests.exceptions.ConnectionError,
requests.exceptions.Timeout,
requests.exceptions.TooManyRedirects) as e:
                LOG.debug('Got non-critical error when accessing %s '
                          'on attempt %s: %s' % (url, retry + 1, e))
else:
                LOG.debug('Successful http request to %s on retry %s' %
                          (url, retry + 1))
break
retry += 1
time.sleep(CONF.http_retry_delay)
else:
raise errors.HttpUrlConnectionError(
'Exceeded maximum http request retries for %s' % url)
response_obj.raise_for_status()
return response_obj
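# NOTE(editor): an illustrative sketch, not part of the original module.
# Resumes a download from byte 1048576 of a hypothetical URL; transient
# errors are retried up to CONF.http_max_retries times.
def _example_resume_download():
    return init_http_request('http://10.20.0.2:8080/image.img',
                             byte_range=1048576)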
def makedirs_if_not_exists(path, mode=0o755):
"""Create directory if it does not exist
:param path: Directory path
:param mode: Directory mode (Default: 0o755)
"""
if not os.path.isdir(path):
os.makedirs(path, mode=mode)
def grouper(iterable, n, fillvalue=None):
"""Collect data into fixed-length chunks or blocks"""
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
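# NOTE(editor): an illustrative sketch, not part of the original module:
# grouper('ABCDE', 2, fillvalue='x') yields ('A', 'B'), ('C', 'D'), ('E', 'x').
def _example_grouper():
    return list(grouper('ABCDE', 2, fillvalue='x'))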
def guess_filename(path, regexp, sort=True, reverse=True):
"""Tries to find a file by regexp in a given path.
This method is supposed to be mostly used for looking up
for available kernel files which are usually 'vmlinuz-X.Y.Z-foo'.
In order to find the newest one we can sort files in backward
direction (by default).
:param path: Directory where to look for a file
:param regexp: (String) Regular expression (must have python syntax)
:param sort: (Bool) If True (by default), sort files before looking up.
It can be necessary when regexp does not unambiguously correspond to file.
:param reverse: (Bool) If True (by default), sort files
in backward direction.
"""
filenames = os.listdir(path)
if sort:
filenames = sorted(filenames, reverse=reverse)
for filename in filenames:
if re.search(regexp, filename):
return filename
return None
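# NOTE(editor): an illustrative sketch, not part of the original module.
# With the default reverse sort, the lexicographically newest kernel wins
# when several files match the pattern.
def _example_guess_kernel():
    return guess_filename('/boot', r'^vmlinuz-.*')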


@ -1,17 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pbr.version
version_info = pbr.version.VersionInfo('fuel-agent')


@ -1,10 +0,0 @@
[DEFAULT]
# The list of modules to copy from oslo-incubator
module=config.generator
module=gettextutils
module=log
module=processutils
# The base module to hold the copy of openstack.common
base=fuel_agent


@ -1,13 +0,0 @@
Babel>=1.3
eventlet>=0.13.0
iso8601>=0.1.9
jsonschema>=2.3.0
oslo.config>=1.2.0
oslo.serialization>=1.4.0
six>=1.5.2
pbr>=0.7.0
Jinja2
stevedore>=0.15
requests>=1.2.3
urllib3>=1.7
PyYAML==3.10


@ -1,51 +0,0 @@
[metadata]
name = fuel-agent
version = 7.0.0
author = Mirantis
author-email = fuel-dev@lists.launchpad.net
summary = Fuel agent
classifier =
Development Status :: 4 - Beta
Programming Language :: Python
[files]
packages =
fuel_agent
[entry_points]
console_scripts =
# TODO(kozhukalov): rename entry point
provision = fuel_agent.cmd.agent:provision
fa_partition = fuel_agent.cmd.agent:partition
fa_configdrive = fuel_agent.cmd.agent:configdrive
fa_copyimage = fuel_agent.cmd.agent:copyimage
fa_bootloader = fuel_agent.cmd.agent:bootloader
fa_build_image = fuel_agent.cmd.agent:build_image
fuel_agent.drivers =
nailgun = fuel_agent.drivers.nailgun:Nailgun
nailgun_build_image = fuel_agent.drivers.nailgun:NailgunBuildImage
[pbr]
autodoc_index_modules = True
# this variable is needed to avoid including files
# from other subprojects in this repository
skip_git_sdist = True
[global]
setup-hooks =
pbr.hooks.setup_hook
fuel_agent.hooks.setup_hook
[build_sphinx]
all_files = 1
build-dir = doc/build
source-dir = doc/source
[egg_info]
tag_build =
tag_date = 0
tag_svn_revision = 0
[wheel]
universal = 1


@ -1,20 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import setuptools
setuptools.setup(
setup_requires=['pbr'],
pbr=True)


@ -1,6 +0,0 @@
hacking>=0.8.0,<0.9
mock==1.0.1
oslotest==1.0
testtools>=0.9.34
pytest>=2.7.2
pytest-cov>=1.8.1


@ -1,25 +0,0 @@
#!/usr/bin/env bash
PROJECT_NAME=${PROJECT_NAME:-fuel_agent}
CFGFILE_NAME=${PROJECT_NAME}.conf.sample
if [ -e etc/${PROJECT_NAME}/${CFGFILE_NAME} ]; then
CFGFILE=etc/${PROJECT_NAME}/${CFGFILE_NAME}
elif [ -e etc/${CFGFILE_NAME} ]; then
CFGFILE=etc/${CFGFILE_NAME}
else
echo "${0##*/}: can not find config file"
exit 1
fi
TEMPDIR=`mktemp -d /tmp/${PROJECT_NAME}.XXXXXX`
trap "rm -rf $TEMPDIR" EXIT
tools/config/generate_sample.sh -b ./ -p ${PROJECT_NAME} -o ${TEMPDIR}
if ! diff -u ${TEMPDIR}/${CFGFILE_NAME} ${CFGFILE}
then
echo "${0##*/}: ${PROJECT_NAME}.conf.sample is not up to date."
echo "${0##*/}: Please run ${0%%${0##*/}}generate_sample.sh."
exit 1
fi


@ -1,126 +0,0 @@
#!/usr/bin/env bash
print_hint() {
echo "Try \`${0##*/} --help' for more information." >&2
}
PARSED_OPTIONS=$(getopt -n "${0##*/}" -o hb:p:m:l:o:f: \
--long help,base-dir:,package-name:,output-dir:,output-file:,module:,library: -- "$@")
if [ $? != 0 ] ; then print_hint ; exit 1 ; fi
eval set -- "$PARSED_OPTIONS"
while true; do
case "$1" in
-h|--help)
echo "${0##*/} [options]"
echo ""
echo "options:"
echo "-h, --help show brief help"
echo "-b, --base-dir=DIR project base directory"
echo "-p, --package-name=NAME project package name"
echo "-o, --output-dir=DIR file output directory"
echo "-f, --output-file=FILE file output directory"
echo "-m, --module=MOD extra python module to interrogate for options"
echo "-l, --library=LIB extra library that registers options for discovery"
exit 0
;;
-b|--base-dir)
shift
BASEDIR=`echo $1 | sed -e 's/\/*$//g'`
shift
;;
-p|--package-name)
shift
PACKAGENAME=`echo $1`
shift
;;
-o|--output-dir)
shift
OUTPUTDIR=`echo $1 | sed -e 's/\/*$//g'`
shift
;;
-f|--output-file)
shift
OUTPUTFILE=`echo $1 | sed -e 's/\/*$//g'`
shift
;;
-m|--module)
shift
MODULES="$MODULES -m $1"
shift
;;
-l|--library)
shift
LIBRARIES="$LIBRARIES -l $1"
shift
;;
--)
break
;;
esac
done
BASEDIR=${BASEDIR:-`pwd`}
if ! [ -d $BASEDIR ]
then
echo "${0##*/}: missing project base directory" >&2 ; print_hint ; exit 1
elif [[ $BASEDIR != /* ]]
then
BASEDIR=$(cd "$BASEDIR" && pwd)
fi
PACKAGENAME=${PACKAGENAME:-${BASEDIR##*/}}
PACKAGENAME=`echo $PACKAGENAME | tr - _`
TARGETDIR=$BASEDIR/$PACKAGENAME
if ! [ -d $TARGETDIR ]
then
echo "${0##*/}: invalid project package name" >&2 ; print_hint ; exit 1
fi
OUTPUTDIR=${OUTPUTDIR:-$BASEDIR/etc}
# NOTE(bnemec): Some projects put their sample config in etc/,
# some in etc/$PACKAGENAME/
if [ -d $OUTPUTDIR/$PACKAGENAME ]
then
OUTPUTDIR=$OUTPUTDIR/$PACKAGENAME
elif ! [ -d $OUTPUTDIR ]
then
echo "${0##*/}: cannot access \`$OUTPUTDIR': No such file or directory" >&2
exit 1
fi
BASEDIRESC=`echo $BASEDIR | sed -e 's/\//\\\\\//g'`
find $TARGETDIR -type f -name "*.pyc" -delete
FILES=$(find $TARGETDIR -type f -name "*.py" ! -path "*/tests/*" ! -path "*/nova/*" \
-exec grep -l "Opt(" {} + | sed -e "s/^$BASEDIRESC\///g" | sort -u)
RC_FILE="`dirname $0`/oslo.config.generator.rc"
if test -r "$RC_FILE"
then
source "$RC_FILE"
fi
for mod in ${FUEL_AGENT_CONFIG_GENERATOR_EXTRA_MODULES}; do
MODULES="$MODULES -m $mod"
done
for lib in ${FUEL_AGENT_CONFIG_GENERATOR_EXTRA_LIBRARIES}; do
LIBRARIES="$LIBRARIES -l $lib"
done
export EVENTLET_NO_GREENDNS=yes
OS_VARS=$(set | sed -n '/^OS_/s/=[^=]*$//gp' | xargs)
[ "$OS_VARS" ] && eval "unset \$OS_VARS"
DEFAULT_MODULEPATH=fuel_agent.openstack.common.config.generator
MODULEPATH=${MODULEPATH:-$DEFAULT_MODULEPATH}
OUTPUTFILE=${OUTPUTFILE:-$OUTPUTDIR/$PACKAGENAME.conf.sample}
python -m $MODULEPATH $MODULES $LIBRARIES $FILES > $OUTPUTFILE
# Hook to allow projects to append custom config file snippets
CONCAT_FILES=$(ls $BASEDIR/tools/config/*.conf.sample 2>/dev/null)
for CONCAT_FILE in $CONCAT_FILES; do
cat $CONCAT_FILE >> $OUTPUTFILE
done


@ -1,7 +0,0 @@
#!/bin/bash
tools_path=${tools_path:-$(dirname $0)}
venv_path=${venv_path:-${tools_path}}
venv_dir=${venv_name:-/../.venv}
TOOLS=${tools_path}
VENV=${venv:-${venv_path}/${venv_dir}}
source ${VENV}/bin/activate && "$@"


@ -1,42 +0,0 @@
[tox]
minversion = 1.6
skipsdist = True
envlist = py26,py27,pep8
[testenv]
usedevelop = True
install_command = pip install --allow-external -U {opts} {packages}
setenv = VIRTUAL_ENV={envdir}
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
py.test -vv {posargs:fuel_agent/tests}
[tox:jenkins]
downloadcache = ~/cache/pip
[testenv:pep8]
deps = hacking==0.10.2
commands =
flake8 {posargs:fuel_agent}
[testenv:cover]
setenv = VIRTUAL_ENV={envdir}
commands =
py.test --cov fuel_agent {posargs:fuel_agent/tests}
[testenv:venv]
commands = {posargs:}
[testenv:devenv]
envdir = devenv
usedevelop = True
[flake8]
exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools,docs
show-pep8 = True
show-source = True
count = True
[hacking]
import_exceptions = fuel_agent.openstack.common.gettextutils._,testtools.matchers


@ -1,5 +0,0 @@
[DEFAULT]
test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_TEST_TIMEOUT=60 ${PYTHON:-python} -m subunit.run discover -s fuel_agent_ci/tests -p "*.py" $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list
test_run_concurrency=echo 1


@ -1,13 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


@ -1,13 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


@ -1,114 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import signal
import sys
import yaml
from fuel_agent_ci import manager as ci_manager
logging.basicConfig(level=logging.DEBUG)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'-f', '--file', dest='env_file', action='store',
type=str, help='Environment data file', required=True
)
subparsers = parser.add_subparsers(dest='action')
env_parser = subparsers.add_parser('env')
env_parser.add_argument(
'-a', '--action', dest='env_action', action='store',
type=str, help='Env action', required=True
)
env_parser.add_argument(
'-k', '--kwargs', dest='env_kwargs', action='store',
type=str, required=False,
help='Env action kwargs, must be valid json or yaml',
)
env_parser.add_argument(
'-K', '--kwargs_file', dest='env_kwargs_file', action='store',
type=str, required=False,
help='Env action kwargs file, content must be valid json or yaml',
)
item_parser = subparsers.add_parser('item')
item_parser.add_argument(
'-t', '--type', dest='item_type', action='store',
type=str, help='Item type', required=True
)
item_parser.add_argument(
'-a', '--action', dest='item_action', action='store',
type=str, help='Item action', required=True
)
item_parser.add_argument(
'-n', '--name', dest='item_name', action='store',
type=str, help='Item name', required=False
)
item_parser.add_argument(
'-k', '--kwargs', dest='item_kwargs', action='store',
type=str, required=False,
help='Item action kwargs, must be valid json or yaml',
)
item_parser.add_argument(
'-K', '--kwargs_file', dest='item_kwargs_file', action='store',
type=str, required=False,
help='Item action kwargs file, content must be valid json or yaml',
)
return parser
def main():
def term_handler(signum=None, sigframe=None):
sys.exit()
signal.signal(signal.SIGTERM, term_handler)
signal.signal(signal.SIGINT, term_handler)
parser = parse_args()
params, other_params = parser.parse_known_args()
with open(params.env_file) as f:
env_data = yaml.load(f.read())
manager = ci_manager.Manager(env_data)
# print 'params: %s' % params
# print 'other_params: %s' % other_params
if params.action == 'env':
kwargs = {}
if params.env_kwargs:
kwargs.update(yaml.load(params.env_kwargs))
elif params.env_kwargs_file:
with open(params.env_kwargs_file) as f:
kwargs.update(yaml.load(f.read()))
manager.do_env(params.env_action, **kwargs)
elif params.action == 'item':
kwargs = {}
if params.item_kwargs:
kwargs.update(yaml.load(params.item_kwargs))
elif params.item_kwargs_file:
with open(params.item_kwargs_file) as f:
kwargs.update(yaml.load(f.read()))
manager.do_item(params.item_type, params.item_action,
params.item_name, **kwargs)
if __name__ == '__main__':
main()


@ -1,66 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fuel_agent_ci.drivers import common_driver
from fuel_agent_ci.drivers import fabric_driver
from fuel_agent_ci.drivers import libvirt_driver
from fuel_agent_ci.drivers import pygit2_driver
from fuel_agent_ci.drivers import simple_http_driver
class Driver(object):
default_hierarchy = {
# these methods are from common_driver
'artifact_get': common_driver,
'artifact_clean': common_driver,
'artifact_status': common_driver,
# these methods are from fabric_driver
'ssh_status': fabric_driver,
'ssh_put_content': fabric_driver,
'ssh_put_file': fabric_driver,
'ssh_get_file': fabric_driver,
'ssh_run': fabric_driver,
# these methods are from libvirt_driver
'net_start': libvirt_driver,
'net_stop': libvirt_driver,
'net_status': libvirt_driver,
'vm_start': libvirt_driver,
'vm_stop': libvirt_driver,
'vm_status': libvirt_driver,
'dhcp_start': libvirt_driver,
'dhcp_stop': libvirt_driver,
'dhcp_status': libvirt_driver,
'tftp_start': libvirt_driver,
'tftp_stop': libvirt_driver,
'tftp_status': libvirt_driver,
# these methods are from pygit2_driver
'repo_clone': pygit2_driver,
'repo_clean': pygit2_driver,
'repo_status': pygit2_driver,
# these methods are from simple_http_driver
'http_start': simple_http_driver,
'http_stop': simple_http_driver,
'http_status': simple_http_driver,
}
def __init__(self, hierarchy=None):
self.hierarchy = self.default_hierarchy
self.hierarchy.update(hierarchy or {})
def __getattr__(self, item):
return getattr(self.hierarchy[item], item)
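# NOTE(editor): an illustrative sketch, not part of the original module.
# Driver dispatches each method name to the module (or object) mapped in
# 'hierarchy', so a test can swap in a fake backend; _FakeBackend below
# is hypothetical.
class _FakeBackend(object):
    @staticmethod
    def vm_start(vm):
        print('would start %s' % vm)

def _example_dispatch():
    drv = Driver(hierarchy={'vm_start': _FakeBackend})
    drv.vm_start('node-1')  # resolved via __getattr__ to _FakeBackend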


@ -1,127 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import requests
from fuel_agent_ci import utils
def artifact_get(artifact):
with open(os.path.join(artifact.env.envdir, artifact.path), 'wb') as f:
for chunk in requests.get(
artifact.url, stream=True).iter_content(1048576):
f.write(chunk)
f.flush()
utils.execute(artifact.unpack, cwd=artifact.env.envdir)
def artifact_clean(artifact):
utils.execute(artifact.clean, cwd=artifact.env.envdir)
def artifact_status(artifact):
return os.path.isfile(os.path.join(artifact.env.envdir, artifact.path))
def dhcp_start(*args, **kwargs):
raise NotImplementedError
def dhcp_stop(*args, **kwargs):
raise NotImplementedError
def dhcp_status(*args, **kwargs):
raise NotImplementedError
def http_start(*args, **kwargs):
raise NotImplementedError
def http_stop(*args, **kwargs):
raise NotImplementedError
def http_status(*args, **kwargs):
raise NotImplementedError
def net_start(*args, **kwargs):
raise NotImplementedError
def net_stop(*args, **kwargs):
raise NotImplementedError
def net_status(*args, **kwargs):
raise NotImplementedError
def repo_clone(*args, **kwargs):
raise NotImplementedError
def repo_clean(*args, **kwargs):
raise NotImplementedError
def repo_status(*args, **kwargs):
raise NotImplementedError
def ssh_status(*args, **kwargs):
raise NotImplementedError
def ssh_put_content(*args, **kwargs):
raise NotImplementedError
def ssh_put_file(*args, **kwargs):
raise NotImplementedError
def ssh_get_file(*args, **kwargs):
raise NotImplementedError
def ssh_run(*args, **kwargs):
raise NotImplementedError
def tftp_start(*args, **kwargs):
raise NotImplementedError
def tftp_stop(*args, **kwargs):
raise NotImplementedError
def tftp_status(*args, **kwargs):
raise NotImplementedError
def vm_start(*args, **kwargs):
raise NotImplementedError
def vm_stop(*args, **kwargs):
raise NotImplementedError
def vm_status(*args, **kwargs):
raise NotImplementedError


@ -1,116 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import tempfile
from fabric import api as fab
LOG = logging.getLogger(__name__)
def ssh_status(ssh):
LOG.debug('Trying to get ssh status')
with fab.settings(
host_string=ssh.host,
user=ssh.user,
key_filename=os.path.join(ssh.env.envdir, ssh.key_filename),
timeout=ssh.connection_timeout):
try:
with fab.hide('running', 'stdout', 'stderr'):
fab.run('echo')
LOG.debug('Ssh connection is available')
return True
except SystemExit:
sys.exit()
except Exception:
LOG.debug('Ssh connection is not available')
return False
def ssh_put_content(ssh, file_content, remote_filename):
LOG.debug('Trying to put content into remote file: %s' % remote_filename)
with fab.settings(
host_string=ssh.host,
user=ssh.user,
key_filename=os.path.join(ssh.env.envdir, ssh.key_filename),
timeout=ssh.connection_timeout):
with tempfile.NamedTemporaryFile() as f:
f.write(file_content)
try:
fab.put(f.file, remote_filename)
except SystemExit:
sys.exit()
except Exception:
LOG.error('Error while putting content into '
'remote file: %s' % remote_filename)
raise
def ssh_put_file(ssh, filename, remote_filename):
LOG.debug('Trying to put file on remote host: '
'local=%s remote=%s' % (filename, remote_filename))
with fab.settings(
host_string=ssh.host,
user=ssh.user,
key_filename=os.path.join(ssh.env.envdir, ssh.key_filename),
timeout=ssh.connection_timeout):
try:
fab.put(filename, remote_filename)
except SystemExit:
sys.exit()
except Exception:
LOG.error('Error while putting file on remote host: '
'local=%s remote=%s' % (filename, remote_filename))
raise
def ssh_get_file(ssh, remote_filename, filename):
LOG.debug('Trying to get file from remote host: '
'local=%s remote=%s' % (filename, remote_filename))
with fab.settings(
host_string=ssh.host,
user=ssh.user,
key_filename=os.path.join(ssh.env.envdir, ssh.key_filename),
timeout=ssh.connection_timeout):
try:
fab.get(remote_filename, filename)
except SystemExit:
sys.exit()
except Exception:
LOG.error('Error while getting file from remote host: '
'local=%s remote=%s' % (filename, remote_filename))
raise
def ssh_run(ssh, command, command_timeout=10):
LOG.debug('Trying to run command on remote host: %s' % command)
with fab.settings(
host_string=ssh.host,
user=ssh.user,
key_filename=os.path.join(ssh.env.envdir, ssh.key_filename),
timeout=ssh.connection_timeout,
command_timeout=command_timeout,
warn_only=True):
try:
with fab.hide('running', 'stdout', 'stderr'):
return fab.run(command, pty=True)
except SystemExit:
sys.exit()
except Exception:
            LOG.error('Error while running command on remote host: '
                      '%s' % command)
raise


@ -1,507 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
import subprocess
import libvirt
import xmlbuilder
LOG = logging.getLogger(__name__)
def get_file_size(path):
with open(path, 'rb') as file:
current = file.tell()
try:
file.seek(0, 2)
size = file.tell()
finally:
file.seek(current)
return size
def get_qcow_size(path):
p = subprocess.Popen(['qemu-img', 'info', path], stdout=subprocess.PIPE)
output = p.communicate()[0]
m = re.search(ur'.*?virtual size:.*?\((\d+) bytes\).*', output)
return m.group(1)
class LibvirtDriver(object):
def __init__(self, conn_str=None):
self.conn = libvirt.open(conn_str or "qemu:///system")
def net_define(self, name, uuid=None, bridge_name=None,
forward_mode=None, virtualport_type=None,
ip_address=None, ip_netmask=None,
dhcp=None, tftp_root=None):
xml = xmlbuilder.XMLBuilder('network')
xml.name(name)
if uuid:
xml.uuid(uuid)
if bridge_name:
xml.bridge(name=bridge_name)
if forward_mode:
xml.forward(mode=forward_mode)
if virtualport_type:
xml.virtualport(type=virtualport_type)
if ip_address:
with xml.ip(address=ip_address,
netmask=(ip_netmask or '255.255.255.0')):
if tftp_root:
xml.tftp(root=tftp_root)
if dhcp:
with xml.dhcp:
xml.range(start=dhcp['start'], end=dhcp['end'])
if dhcp.get('hosts'):
for host in dhcp['hosts']:
kwargs = {'mac': host['mac'], 'ip': host['ip']}
if host.get('name'):
kwargs.update({'name': host['name']})
xml.host(**kwargs)
if dhcp.get('bootp'):
if dhcp['bootp'].get('server'):
xml.bootp(
file=dhcp['bootp']['file'],
server=dhcp['bootp']['server']
)
else:
xml.bootp(file=dhcp['bootp']['file'])
net = self.conn.networkDefineXML(str(xml))
return net.UUIDString()
def net_start(self, uuid):
net = self.conn.networkLookupByUUIDString(uuid)
net.create()
def net_destroy(self, uuid):
net = self.conn.networkLookupByUUIDString(uuid)
net.destroy()
def net_undefine(self, uuid):
net = self.conn.networkLookupByUUIDString(uuid)
net.undefine()
def net_uuid_by_name(self, name):
net = self.conn.networkLookupByName(name)
return net.UUIDString()
def net_list(self):
return self.conn.listDefinedNetworks() + self.conn.listNetworks()
def net_list_active(self):
return self.conn.listNetworks()
def net_list_notactive(self):
return self.conn.listDefinedNetworks()
def net_status(self, uuid):
return {
0: 'notactive',
1: 'running'
}[self.conn.networkLookupByUUIDString(uuid).isActive()]
def _add_disk(self, xml, disk):
with xml.disk(type='file', device='disk', cache='writeback'):
xml.driver(name='qemu', type='qcow2')
xml.source(file=disk['source_file'])
xml.target(
dev=disk['target_dev'], bus=disk.get('target_bus', 'scsi'))
def _add_interface(self, xml, interface):
itype = interface.get('type', 'network')
with xml.interface(type=itype):
if itype == 'bridge':
xml.source(bridge=interface['source_bridge'])
elif itype == 'network':
xml.source(network=interface['source_network'])
xml.model(type=interface.get('model_type', 'e1000'))
if interface.get('mac_address'):
xml.mac(address=interface['mac_address'])
if interface.get('virtualport_type'):
xml.virtualport(type=interface['virtualport_type'])
def define(self, name, uuid=None, type='kvm', memory='2048', vcpu='1',
arch='x86_64', boot=None, disks=None, interfaces=None):
xml = xmlbuilder.XMLBuilder('domain', type=type)
xml.name(name)
if uuid:
xml.uuid(uuid)
xml.memory(memory, unit='MiB')
xml.vcpu(vcpu)
with xml.os:
xml.type('hvm', arch=arch, machine='pc-1.0')
if boot:
if isinstance(boot, (list, tuple)):
for dev in boot:
xml.boot(dev=dev)
elif isinstance(boot, (str, unicode)):
xml.boot(dev=boot)
xml.bootmenu(enable='no')
with xml.features:
xml.acpi
xml.apic
xml.pae
xml.clock(offset='utc')
xml.on_poweroff('destroy')
xml.on_reboot('restart')
xml.on_crash('restart')
with xml.devices:
if os.path.exists('/usr/bin/kvm'): # Debian
xml.emulator('/usr/bin/kvm')
elif os.path.exists('/usr/bin/qemu-kvm'): # Redhat
xml.emulator('/usr/bin/qemu-kvm')
xml.input(type='mouse', bus='ps2')
xml.graphics(type='vnc', port='-1', autoport='yes')
with xml.video:
xml.model(type='cirrus', vram='9216', heads='1')
xml.address(type='pci', domain='0x0000',
bus='0x00', slot='0x02', function='0x0')
with xml.memballoon(model='virtio'):
xml.address(type='pci', domain='0x0000',
bus='0x00', slot='0x07', function='0x0')
if disks:
if isinstance(disks, (list,)):
for disk in disks:
self._add_disk(xml, disk)
else:
self._add_disk(xml, disks)
if interfaces:
if isinstance(interfaces, (list,)):
for interface in interfaces:
self._add_interface(xml, interface)
else:
self._add_interface(xml, interfaces)
dom = self.conn.defineXML(str(xml))
return dom.UUIDString()
def destroy(self, uuid):
dom = self.conn.lookupByUUIDString(uuid)
dom.destroy()
def start(self, uuid):
dom = self.conn.lookupByUUIDString(uuid)
dom.create()
def undefine(self, uuid):
dom = self.conn.lookupByUUIDString(uuid)
dom.undefine()
def list(self):
return (
self.conn.listDefinedDomains() +
[self.conn.lookupByID(dom).name()
for dom in self.conn.listDomainsID()]
)
def list_active(self):
return [self.conn.lookupByID(dom).name()
for dom in self.conn.listDomainsID()]
def list_notactive(self):
return self.conn.listDefinedDomains()
def uuid_by_name(self, name):
dom = self.conn.lookupByName(name)
return dom.UUIDString()
def status(self, uuid):
states = {
libvirt.VIR_DOMAIN_NOSTATE: 'nostate',
libvirt.VIR_DOMAIN_RUNNING: 'running',
libvirt.VIR_DOMAIN_BLOCKED: 'blocked',
libvirt.VIR_DOMAIN_PAUSED: 'paused',
libvirt.VIR_DOMAIN_SHUTDOWN: 'shutdown',
libvirt.VIR_DOMAIN_SHUTOFF: 'shutoff',
libvirt.VIR_DOMAIN_CRASHED: 'crashed',
libvirt.VIR_DOMAIN_PMSUSPENDED: 'suspended',
}
dom = self.conn.lookupByUUIDString(uuid)
return states.get(dom.state()[0], 'unknown')
def pool_define(self, name, path):
xml = xmlbuilder.XMLBuilder('pool', type='dir')
xml.name(name)
with xml.target:
xml.path(path)
if not os.path.isdir(path):
os.makedirs(path, 0o755)
return self.conn.storagePoolCreateXML(str(xml)).UUIDString()
def pool_list(self):
return (self.conn.listDefinedStoragePools() +
self.conn.listStoragePools())
def pool_list_active(self):
return self.conn.listStoragePools()
def pool_list_notactive(self):
return self.conn.listDefinedStoragePools()
def pool_destroy(self, uuid):
pool = self.conn.storagePoolLookupByUUIDString(uuid)
pool.destroy()
def pool_start(self, uuid):
pool = self.conn.storagePoolLookupByUUIDString(uuid)
pool.create()
def pool_undefine(self, uuid):
pool = self.conn.storagePoolLookupByUUIDString(uuid)
pool.undefine()
def pool_uuid_by_name(self, name):
pool = self.conn.storagePoolLookupByName(name)
return pool.UUIDString()
def vol_create(self, name, capacity=None,
base=None, pool_name='default',
backing_store=False, base_plus=0):
xml = xmlbuilder.XMLBuilder('volume')
xml.name(name)
xml.allocation('0', unit='MiB')
if base:
xml.capacity(str(int(get_qcow_size(base)) +
int(base_plus) * 1048576))
else:
xml.capacity(capacity, unit='MiB')
with xml.target:
xml.format(type='qcow2')
pool = self.conn.storagePoolLookupByName(pool_name)
if base and backing_store:
with xml.backingStore:
xml.path(base)
xml.format(type='qcow2')
vol = pool.createXML(str(xml), flags=0)
if base and not backing_store:
self.volume_upload(vol.key(), base)
return vol.key()
def vol_list(self, pool_name='default'):
pool = self.conn.storagePoolLookupByName(pool_name)
return pool.listVolumes()
def vol_path(self, name, pool_name='default'):
pool = self.conn.storagePoolLookupByName(pool_name)
vol = pool.storageVolLookupByName(name)
return vol.path()
def vol_delete(self, name, pool_name='default'):
pool = self.conn.storagePoolLookupByName(pool_name)
vol = pool.storageVolLookupByName(name)
vol.delete(flags=0)
def chunk_render(self, stream, size, fd):
return fd.read(size)
def volume_upload(self, name, path):
size = get_file_size(path)
with open(path, 'rb') as fd:
stream = self.conn.newStream(0)
self.conn.storageVolLookupByKey(name).upload(
stream=stream, offset=0,
length=size, flags=0)
stream.sendAll(self.chunk_render, fd)
stream.finish()
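# NOTE(editor): an illustrative sketch, not part of the original module.
# Defines and starts a hypothetical NAT network with a DHCP range using
# the wrapper above.
def _example_nat_network():
    drv = LibvirtDriver()
    uuid = drv.net_define('ci_net', bridge_name='cibr0',
                          forward_mode='nat', ip_address='10.20.0.1',
                          dhcp={'start': '10.20.0.100',
                                'end': '10.20.0.200'})
    drv.net_start(uuid)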
def net_start(net, drv=None):
if drv is None:
drv = LibvirtDriver()
LOG.debug('Starting network: %s' % net.name)
netname = net.env.name + '_' + net.name
net_kwargs = {
'bridge_name': net.bridge,
'forward_mode': 'nat',
'ip_address': net.ip,
}
tftp = net.env.tftp_by_network(net.name)
if tftp:
net_kwargs['tftp_root'] = os.path.join(
net.env.envdir, tftp.tftp_root)
dhcp = net.env.dhcp_by_network(net.name)
if dhcp:
net_kwargs['dhcp'] = {
'start': dhcp.begin,
'end': dhcp.end,
}
if dhcp.bootp:
net_kwargs['dhcp']['bootp'] = dhcp.bootp
if dhcp.hosts:
net_kwargs['dhcp']['hosts'] = dhcp.hosts
drv.net_define(netname, **net_kwargs)
drv.net_start(drv.net_uuid_by_name(netname))
def net_stop(net, drv=None):
if drv is None:
drv = LibvirtDriver()
LOG.debug('Stopping net: %s' % net.name)
netname = net.env.name + '_' + net.name
if netname in drv.net_list():
uuid = drv.net_uuid_by_name(netname)
if netname in drv.net_list_active():
drv.net_destroy(uuid)
drv.net_undefine(uuid)
def net_status(net, drv=None):
if drv is None:
drv = LibvirtDriver()
return (net.env.name + '_' + net.name in drv.net_list_active())
def vm_start(vm, drv=None):
if drv is None:
drv = LibvirtDriver()
LOG.debug('Starting vm: %s' % vm.name)
vmname = vm.env.name + '_' + vm.name
if vm.env.name not in drv.pool_list():
LOG.debug('Defining volume pool %s' % vm.env.name)
drv.pool_define(vm.env.name, os.path.join(vm.env.envdir, 'volumepool'))
if vm.env.name not in drv.pool_list_active():
LOG.debug('Starting volume pool %s' % vm.env.name)
drv.pool_start(drv.pool_uuid_by_name(vm.env.name))
disks = []
for num, disk in enumerate(vm.disks):
disk_name = vmname + '_%s' % num
order = 'abcdefghijklmnopqrstuvwxyz'
if disk_name not in drv.vol_list(pool_name=vm.env.name):
if disk.base:
LOG.debug('Creating vm disk: pool=%s vol=%s base=%s' %
(vm.env.name, disk_name, disk.base))
drv.vol_create(disk_name, base=disk.base,
pool_name=vm.env.name)
else:
LOG.debug('Creating empty vm disk: pool=%s vol=%s '
'capacity=%s' % (vm.env.name, disk_name, disk.size))
drv.vol_create(disk_name, capacity=disk.size,
pool_name=vm.env.name)
disks.append({
'source_file': drv.vol_path(disk_name, pool_name=vm.env.name),
'target_dev': 'sd%s' % order[num],
'target_bus': 'scsi',
})
interfaces = []
for interface in vm.interfaces:
LOG.debug('Creating vm interface: net=%s mac=%s' %
(vm.env.name + '_' + interface.network, interface.mac))
interfaces.append({
'type': 'network',
'source_network': vm.env.name + '_' + interface.network,
'mac_address': interface.mac,
'model_type': interface.model_type
})
LOG.debug('Defining vm %s' % vm.name)
drv.define(vmname, boot=vm.boot, disks=disks, interfaces=interfaces)
LOG.debug('Starting vm %s' % vm.name)
drv.start(drv.uuid_by_name(vmname))
def vm_stop(vm, drv=None):
if drv is None:
drv = LibvirtDriver()
LOG.debug('Stopping vm: %s' % vm.name)
vmname = vm.env.name + '_' + vm.name
if vmname in drv.list():
uuid = drv.uuid_by_name(vmname)
if vmname in drv.list_active():
LOG.debug('Destroying vm: %s' % vm.name)
drv.destroy(uuid)
LOG.debug('Undefining vm: %s' % vm.name)
drv.undefine(uuid)
for volname in [v for v in drv.vol_list(pool_name=vm.env.name)
if v.startswith(vmname)]:
LOG.debug('Deleting vm disk: pool=%s vol=%s' % (vm.env.name, volname))
drv.vol_delete(volname, pool_name=vm.env.name)
if not drv.vol_list(pool_name=vm.env.name):
LOG.debug('Deleting volume pool: %s' % vm.env.name)
if vm.env.name in drv.pool_list():
uuid = drv.pool_uuid_by_name(vm.env.name)
if vm.env.name in drv.pool_list_active():
LOG.debug('Destroying pool: %s' % vm.env.name)
drv.pool_destroy(uuid)
if vm.env.name in drv.pool_list():
LOG.debug('Undefining pool: %s' % vm.env.name)
drv.pool_undefine(uuid)
def vm_status(vm, drv=None):
if drv is None:
drv = LibvirtDriver()
return (vm.env.name + '_' + vm.name in drv.list_active())
def dhcp_start(dhcp):
"""This feature is implemented in net_start
"""
pass
def dhcp_stop(dhcp):
"""This feature is implemented is net_stop
"""
pass
def dhcp_status(dhcp):
return dhcp.env.net_by_name(dhcp.network).status()
def tftp_start(tftp):
"""This feature is implemented is net_start
"""
pass
def tftp_stop(tftp):
"""This feature is implemented is net_stop
"""
pass
def tftp_status(tftp):
return tftp.env.net_by_name(tftp.network).status()
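A minimal usage sketch of the driver primitives above (the pool name,
path and sizes are illustrative, not taken from the CI configuration):

drv = LibvirtDriver()
drv.pool_define('demo', '/var/lib/demo_pool')
drv.vol_create('demo_disk', capacity=1024, pool_name='demo')  # 1024 MiB
print(drv.vol_path('demo_disk', pool_name='demo'))
drv.vol_delete('demo_disk', pool_name='demo')
drv.pool_destroy(drv.pool_uuid_by_name('demo'))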

View File

@ -1,37 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pygit2
from fuel_agent_ci import utils
def repo_clone(repo):
return pygit2.clone_repository(
repo.url, os.path.join(repo.env.envdir, repo.path),
checkout_branch=repo.branch)
def repo_clean(repo):
utils.execute('rm -rf %s' % os.path.join(repo.env.envdir, repo.path))
def repo_status(repo):
try:
pygit2.discover_repository(os.path.join(repo.env.envdir, repo.path))
except KeyError:
return False
return True

View File

@ -1,235 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import BaseHTTPServer
import errno
import logging
import multiprocessing
import os
import signal
import SimpleHTTPServer
import sys
import time
import requests
LOG = logging.getLogger(__name__)
class Cwd(object):
def __init__(self, path):
self.path = path
self.orig_path = os.getcwd()
def __enter__(self):
os.chdir(self.path)
def __exit__(self, exc_type, exc_val, exc_tb):
os.chdir(self.orig_path)
class CustomHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
if self.path == self.server.parent.shutdown_url:
LOG.info('Shutdown request has been received: %s' % (self.path))
self.send_response(200)
self.end_headers()
self.server.parent.stop_self()
elif self.path == self.server.parent.status_url:
LOG.info('Status request has been received: %s' % (self.path))
self.send_response(200)
self.end_headers()
else:
with Cwd(self.server.parent.rootpath):
SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
def do_HEAD(self):
with Cwd(self.server.parent.rootpath):
SimpleHTTPServer.SimpleHTTPRequestHandler.do_HEAD(self)
class CustomHTTPServer(object):
def __init__(self, host, port, rootpath,
shutdown_url='/shutdown',
status_url='/status',
piddir='/var/run',
pidfile='custom_httpd.pid',
stdin=None, stdout=None, stderr=None):
self.host = str(host)
self.port = int(port)
self.rootpath = rootpath
self.shutdown_url = shutdown_url
self.status_url = status_url
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = os.path.join(piddir, pidfile)
# We cannot just inherit BaseHTTPServer.HTTPServer because
# it tries to bind the socket during initialization, while we
# need that to happen at actual launch time.
self.server = None
def stop_self(self):
if self.server:
# We cannot use server.shutdown() here because it sets
# _BaseServer__shutdown_request to True and then waits for the
# _BaseServer__is_shut_down event to be set, which would lock
# this thread forever. The shutdown() method is only safe to
# call from outside this thread.
self.server._BaseServer__shutdown_request = True
def daemonize(self):
# In order to avoid the http process becoming
# a zombie, we need to fork twice.
try:
pid = os.fork()
if pid > 0:
sys.exit(0)
except OSError as e:
sys.stderr.write('Error while fork#1 HTTP server: '
'%d (%s)' % (e.errno, e.strerror))
sys.exit(1)
os.chdir('/')
os.setsid()
os.umask(0)
try:
pid = os.fork()
if pid > 0:
sys.exit(0)
except OSError as e:
sys.stderr.write('Error while fork#2 HTTP server: '
'%d (%s)' % (e.errno, e.strerror))
sys.exit(1)
if self.stdin:
si = file(self.stdin, 'r')
os.dup2(si.fileno(), sys.stdin.fileno())
if self.stdout:
sys.stdout.flush()
so = file(self.stdout, 'a+')
os.dup2(so.fileno(), sys.stdout.fileno())
if self.stderr:
sys.stderr.flush()
se = file(self.stderr, 'a+', 0)
os.dup2(se.fileno(), sys.stderr.fileno())
atexit.register(self.delpid)
pid = str(os.getpid())
with open(self.pidfile, 'w+') as f:
f.write('%s\n' % pid)
f.flush()
def delpid(self):
os.remove(self.pidfile)
def run(self):
self.server = BaseHTTPServer.HTTPServer(
(self.host, self.port), CustomHTTPRequestHandler)
self.server.parent = self
self.server.serve_forever()
def check_pid(self, pid):
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
# ESRCH == No such process
return False
elif err.errno == errno.EPERM:
# EPERM clearly means there's a process to deny access to
return True
else:
# According to "man 2 kill" possible error values are
# (EINVAL, EPERM, ESRCH)
return True
# os.kill(pid, 0) raised no exception, so a process with
# this pid exists and we own it
return True
def start(self):
try:
with open(self.pidfile) as f:
pid = int(f.read().strip())
except (IOError, ValueError):
pid = None
if pid:
if self.check_pid(pid):
message = 'pidfile %s already exists. Daemon already running?\n'
sys.stderr.write(message % self.pidfile)
sys.exit(1)
else:
message = "pidfile %s already exists. Daemon isn't running. Removing the pidfile\n"
sys.stderr.write(message % self.pidfile)
self.delpid()
self.daemonize()
self.run()
def stop(self):
try:
with open(self.pidfile) as f:
pid = int(f.read().strip())
except (IOError, ValueError):
pid = None
if not pid:
message = 'pidfile %s does not exist. Daemon not running?\n'
sys.stderr.write(message % self.pidfile)
return
try:
while True:
os.kill(pid, signal.SIGTERM)
time.sleep(1)
except OSError as err:
err = str(err)
if err.find('No such process') > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
sys.stdout.write(str(err))
sys.exit(1)
def http_start(http):
def start():
server = CustomHTTPServer(
http.env.net_by_name(http.network).ip, http.port,
os.path.join(http.env.envdir, http.http_root),
status_url=http.status_url, shutdown_url=http.shutdown_url,
pidfile=os.path.join(http.env.envdir,
http.env.name + '_custom_httpd.pid'))
server.start()
multiprocessing.Process(target=start).start()
def http_stop(http):
if http_status(http):
requests.get(
'http://%s:%s%s' % (http.env.net_by_name(http.network).ip,
http.port, http.shutdown_url))
def http_status(http):
try:
status = requests.get(
'http://%s:%s%s' % (http.env.net_by_name(http.network).ip,
http.port, http.status_url),
timeout=1
)
if status.status_code == 200:
return True
except Exception:
pass
return False
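A hedged sketch of driving CustomHTTPServer directly (host, port and
paths are placeholders). Note that start() daemonizes and therefore
exits the calling process, which is why http_start above wraps it in
multiprocessing.Process:

server = CustomHTTPServer('127.0.0.1', 8080, '/tmp/www', piddir='/tmp', pidfile='demo_httpd.pid')
server.start()  # forks twice, writes /tmp/demo_httpd.pid, serves /tmp/www
# GET /status answers 200; GET /shutdown stops the serve_forever() loop.
# From another process the same object can stop it via the pidfile:
server.stop()   # sends SIGTERM to the recorded pid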

View File

@ -1,31 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from fuel_agent_ci.objects.environment import Environment
LOG = logging.getLogger(__name__)
class Manager(object):
def __init__(self, data):
self.env = Environment.new(**data)
def do_item(self, item_type, item_action, item_name=None, **kwargs):
return getattr(
self.env, '%s_%s' % (item_type, item_action))(item_name, **kwargs)
def do_env(self, env_action, **kwargs):
return getattr(self.env, env_action)(**kwargs)
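A short dispatch sketch (the data dict would normally come from a YAML
environment definition such as samples/ci_environment.yaml):

manager = Manager(data)
manager.do_env('start')                    # -> env.start()
manager.do_item('net', 'status', 'net1')   # -> env.net_status('net1')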

View File

@ -1,34 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This mapping is supposed to be dynamically filled with
# names of objects and their types
OBJECT_TYPES = {}
class MetaObject(type):
def __init__(self, name, bases, dct):
if '__typename__' in dct:
OBJECT_TYPES[dct['__typename__']] = self
return super(MetaObject, self).__init__(name, bases, dct)
class Object(object):
__metaclass__ = MetaObject
__typename__ = 'object'
@property
def typename(self):
return self.__typename__
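Declaring a subclass with its own __typename__ is all that is needed to
register it; a tiny sketch (the Demo class is illustrative):

class Demo(Object):
    __typename__ = 'demo'

assert OBJECT_TYPES['demo'] is Demo  # registered at class-creation time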

View File

@ -1,46 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from fuel_agent_ci.objects import Object
LOG = logging.getLogger(__name__)
class Artifact(Object):
__typename__ = 'artifact'
def __init__(self, env, name, url, path, unpack=None, clean=None):
self.env = env
self.name = name
self.url = url
self.path = path
self.unpack = unpack
# NOTE: stored under a distinct name; assigning to self.clean here
# would shadow the clean() method defined below
self.clean_opts = clean
def get(self):
if not self.status():
LOG.debug('Getting artifact %s' % self.name)
self.env.driver.artifact_get(self)
def clean(self):
if self.status():
LOG.debug('Cleaning artifact %s' % self.name)
self.env.driver.artifact_clean(self)
def status(self):
status = self.env.driver.artifact_status(self)
LOG.debug('Artifact %s status %s' % (self.name, status))
return status

View File

@ -1,56 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from fuel_agent_ci.objects import Object
LOG = logging.getLogger(__name__)
class Dhcp(Object):
__typename__ = 'dhcp'
def __init__(self, env, name, begin, end, network):
self.name = name
self.env = env
self.begin = begin
self.end = end
self.network = network
self.hosts = []
self.bootp = None
def add_host(self, mac, ip, name=None):
host = {'mac': mac, 'ip': ip}
if name is not None:
host['name'] = name
self.hosts.append(host)
def set_bootp(self, file):
self.bootp = {'file': file}
def start(self):
if not self.status():
LOG.debug('Starting DHCP')
self.env.driver.dhcp_start(self)
def stop(self):
if self.status():
LOG.debug('Stopping DHCP')
self.env.driver.dhcp_stop(self)
def status(self):
status = self.env.driver.dhcp_status(self)
LOG.debug('DHCP status %s' % status)
return status

View File

@ -1,181 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import os
import tempfile
from fuel_agent_ci import drivers
from fuel_agent_ci.objects.artifact import Artifact
from fuel_agent_ci.objects.dhcp import Dhcp
from fuel_agent_ci.objects.http import Http
from fuel_agent_ci.objects.net import Net
from fuel_agent_ci.objects import OBJECT_TYPES
from fuel_agent_ci.objects.repo import Repo
from fuel_agent_ci.objects.ssh import Ssh
from fuel_agent_ci.objects.tftp import Tftp
from fuel_agent_ci.objects.vm import Vm
LOG = logging.getLogger(__name__)
class Environment(object):
def __init__(self, name, envdir, driver=None):
self.name = name
self.envdir = envdir
self.driver = driver or drivers.Driver()
self.items = []
@classmethod
def new(cls, **kwargs):
LOG.debug('Creating environment: %s' % kwargs['name'])
envdir = kwargs.get('envdir') or os.path.join(
tempfile.gettempdir(), kwargs['name'])
if not os.path.exists(envdir):
LOG.debug('Envdir %s does not exist. Creating envdir.' % envdir)
os.makedirs(envdir)
env = cls(kwargs['name'], envdir)
for item_type in OBJECT_TYPES.keys():
for item_kwargs in kwargs.get(item_type, []):
LOG.debug('Creating %s: %s' % (item_type, item_kwargs))
getattr(env, '%s_add' % item_type)(**item_kwargs)
return env
def __getattr__(self, attr_name):
"""This method maps item_add, item_by_name, item_action attributes into
attributes for particular types like artifact_add or dhcp_by_name.
:param attr_name: Attribute name to map (e.g. net_add, repo_clone)
:returns: Lambda which implements a particular attribute.
"""
try:
item_type, item_action = attr_name.split('_', 1)
except Exception:
raise AttributeError('Attribute %s not found' % attr_name)
else:
if item_action == 'add':
return functools.partial(self.item_add, item_type)
elif item_action == 'by_name':
return functools.partial(self.item_by_name, item_type)
else:
return functools.partial(self.item_action,
item_type, item_action)
def item_add(self, item_type, **kwargs):
if self.item_by_name(item_type, kwargs.get('name')):
raise Exception('Error while adding item: %s %s already exists' %
(item_type, kwargs.get('name')))
item = OBJECT_TYPES[item_type](env=self, **kwargs)
self.items.append(item)
return item
def vm_add(self, **kwargs):
if self.item_by_name('vm', kwargs.get('name')):
raise Exception('Error while adding vm: vm %s already exists' %
kwargs.get('name'))
disks = kwargs.pop('disks', [])
interfaces = kwargs.pop('interfaces', [])
vm = Vm(env=self, **kwargs)
for disk_kwargs in disks:
vm.add_disk(**disk_kwargs)
for interface_kwargs in interfaces:
vm.add_interface(**interface_kwargs)
self.items.append(vm)
return vm
def dhcp_add(self, **kwargs):
if self.item_by_name('dhcp', kwargs.get('name')):
raise Exception('Error while adding dhcp: dhcp %s already exists' %
kwargs.get('name'))
hosts = kwargs.pop('hosts', [])
bootp_kwargs = kwargs.pop('bootp', None)
dhcp = Dhcp(env=self, **kwargs)
for host_kwargs in hosts:
dhcp.add_host(**host_kwargs)
if bootp_kwargs is not None:
dhcp.set_bootp(**bootp_kwargs)
self.items.append(dhcp)
return dhcp
def item_by_name(self, item_type, item_name):
found = filter(
lambda x: x.typename == item_type and x.name == item_name,
self.items
)
if not found or len(found) > 1:
LOG.debug('Item %s %s not found or not unique' % (item_type, item_name))
return None
return found[0]
def item_action(self, item_type, item_action, item_name=None, **kwargs):
if item_name:
item = self.item_by_name(item_type, item_name)
return {item_name: getattr(item, item_action)(**kwargs)}
else:
result = {}
for item in [i for i in self.items if i.typename == item_type]:
LOG.debug('Trying to do action on item: '
'type=%s name=%s action=%s' %
(item_type, item.name, item_action))
result[item.name] = getattr(item, item_action)(**kwargs)
return result
# TODO(kozhukalov): implement this method as classmethod in tftp object
def tftp_by_network(self, network):
found = filter(
lambda x: x.typename == 'tftp' and x.network == network,
self.items
)
if not found or len(found) > 1:
LOG.debug('Tftp not found or not unique')
return None
return found[0]
# TODO(kozhukalov): implement this method as classmethod in dhcp object
def dhcp_by_network(self, network):
found = filter(
lambda x: x.typename == 'dhcp' and x.network == network,
self.items
)
if not found or len(found) > 1:
LOG.debug('Dhcp not found or not unique')
return None
return found[0]
def start(self):
LOG.debug('Starting environment')
self.artifact_get()
self.repo_clone()
self.net_start()
self.tftp_start()
self.dhcp_start()
self.http_start()
self.vm_start()
def stop(self, artifact_clean=False, repo_clean=False):
LOG.debug('Stopping environment')
self.vm_stop()
self.tftp_stop()
self.dhcp_stop()
self.http_stop()
self.net_stop()
if artifact_clean:
self.artifact_clean()
if repo_clean:
self.repo_clean()
def status(self):
return all((item.status() for item in self.items))
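Thanks to __getattr__, the type-specific helpers used throughout this
file need no explicit definitions; a hedged sketch (names and addresses
are illustrative):

env = Environment.new(name='demo')
net = env.net_add(name='net1', bridge='br1', ip='10.0.0.1', forward='nat')
assert env.net_by_name('net1') is net  # item_by_name('net', 'net1')
env.net_status()                       # item_action('net', 'status')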

View File

@ -1,48 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from fuel_agent_ci.objects import Object
LOG = logging.getLogger(__name__)
class Http(Object):
__typename__ = 'http'
def __init__(self, env, name, http_root, port, network,
status_url='/status', shutdown_url='/shutdown'):
self.name = name
self.env = env
self.http_root = http_root
self.port = port
self.network = network
self.status_url = status_url
self.shutdown_url = shutdown_url
def start(self):
if not self.status():
LOG.debug('Starting HTTP server')
self.env.driver.http_start(self)
def stop(self):
if self.status():
LOG.debug('Stopping HTTP server')
self.env.driver.http_stop(self)
def status(self):
status = self.env.driver.http_status(self)
LOG.debug('HTTP status %s' % status)
return status

View File

@ -1,45 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from fuel_agent_ci.objects import Object
LOG = logging.getLogger(__name__)
class Net(Object):
__typename__ = 'net'
def __init__(self, env, name, bridge, ip, forward):
self.env = env
self.name = name
self.bridge = bridge
self.ip = ip
self.forward = forward
def start(self):
if not self.status():
LOG.debug('Starting network %s' % self.name)
self.env.driver.net_start(self)
def stop(self):
if self.status():
LOG.debug('Stopping network %s' % self.name)
self.env.driver.net_stop(self)
def status(self):
status = self.env.driver.net_status(self)
LOG.debug('Network %s status %s' % (self.name, status))
return status

View File

@ -1,45 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from fuel_agent_ci.objects import Object
LOG = logging.getLogger(__name__)
class Repo(Object):
__typename__ = 'repo'
def __init__(self, env, name, url, path, branch='master'):
self.env = env
self.name = name
self.url = url
self.path = path
self.branch = branch
def clone(self):
if not self.status():
LOG.debug('Cloning repo %s' % self.name)
self.env.driver.repo_clone(self)
def clean(self):
if self.status():
LOG.debug('Cleaning repo %s' % self.name)
self.env.driver.repo_clean(self)
def status(self):
status = self.env.driver.repo_status(self)
LOG.debug('Repo %s status %s' % (self.name, status))
return status

View File

@ -1,79 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from fuel_agent_ci.objects import Object
LOG = logging.getLogger(__name__)
class Ssh(Object):
__typename__ = 'ssh'
def __init__(self, env, name, host, key_filename, user='root',
connection_timeout=5, command_timeout=10):
self.env = env
self.name = name
self.host = host
self.user = user
self.key_filename = key_filename
self.connection_timeout = int(connection_timeout)
self.command_timeout = int(command_timeout)
def status(self):
status = self.env.driver.ssh_status(self)
LOG.debug('SSH %s status %s' % (self.name, status))
return status
def put_content(self, content, remote_filename):
if self.status():
LOG.debug('Putting content %s' % self.name)
self.env.driver.ssh_put_content(self, content, remote_filename)
else:
raise Exception('Wrong ssh status: %s' % self.name)
def get_file(self, remote_filename, filename):
if self.status():
LOG.debug('Getting file %s' % self.name)
self.env.driver.ssh_get_file(self, remote_filename, filename)
else:
raise Exception('Wrong ssh status: %s' % self.name)
def put_file(self, filename, remote_filename):
if self.status():
LOG.debug('Putting file %s' % self.name)
self.env.driver.ssh_put_file(self, filename, remote_filename)
else:
raise Exception('Wrong ssh status: %s' % self.name)
def run(self, command, command_timeout=None):
if self.status():
LOG.debug('Running command %s' % self.name)
return self.env.driver.ssh_run(
self, command, command_timeout or self.command_timeout)
raise Exception('Wrong ssh status: %s' % self.name)
def wait(self, timeout=200):
begin_time = time.time()
# This loop has no sleep statement because it relies on
# self.connection_timeout, which defaults to 5 seconds.
while time.time() - begin_time < timeout:
if self.status():
return True
LOG.debug('Waiting for ssh connection to be '
'available: %s' % self.name)
return False
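A typical (hedged) call sequence, mirroring how the CI tests use this
object; the host and key path are placeholders:

ssh = Ssh(env, 'vm', host='10.20.0.2', key_filename='/root/.ssh/id_rsa')
if ssh.wait(timeout=300):
    ssh.run('uname -a')
else:
    raise Exception('Node never became reachable over ssh')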

View File

@ -1,44 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from fuel_agent_ci.objects import Object
LOG = logging.getLogger(__name__)
class Tftp(Object):
__typename__ = 'tftp'
def __init__(self, env, name, tftp_root, network):
self.name = name
self.env = env
self.tftp_root = tftp_root
self.network = network
def start(self):
if not self.status():
LOG.debug('Starting TFTP')
self.env.driver.tftp_start(self)
def stop(self):
if self.status():
LOG.debug('Stopping TFTP')
self.env.driver.tftp_stop(self)
def status(self):
status = self.env.driver.tftp_status(self)
LOG.debug('TFTP status %s' % status)
return status

View File

@ -1,74 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from fuel_agent_ci.objects import Object
LOG = logging.getLogger(__name__)
class Vm(Object):
__typename__ = 'vm'
def __init__(self, env, name, boot=None):
self.env = env
self.name = name
self.interfaces = []
self.disks = []
self.boot = boot or 'hd'
def add_interface(self, **kwargs):
if 'interface' in kwargs:
interface = kwargs['interface']
else:
interface = Interface(**kwargs)
self.interfaces.append(interface)
return interface
def add_disk(self, **kwargs):
if 'disk' in kwargs:
disk = kwargs['disk']
else:
disk = Disk(**kwargs)
self.disks.append(disk)
return disk
def start(self):
if not self.status():
LOG.debug('Starting virtual machine %s' % self.name)
self.env.driver.vm_start(self)
def stop(self):
if self.status():
LOG.debug('Stopping virtual machine %s' % self.name)
self.env.driver.vm_stop(self)
def status(self):
status = self.env.driver.vm_status(self)
LOG.debug('Virtual machine %s status %s' % (self.name, status))
return status
class Interface(object):
def __init__(self, mac, network, model_type='e1000'):
self.mac = mac
self.network = network
self.model_type = model_type
class Disk(object):
def __init__(self, size=None, base=None):
self.size = size
self.base = base
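Assembling a vm object by hand, as Environment.vm_add does from the YAML
definition (all values below are illustrative):

vm = Vm(env, 'node1')
vm.add_disk(size=10240)  # empty disk; capacity is in MiB per vol_create
vm.add_interface(mac='52:54:00:00:00:01', network='net1')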

View File

@ -1,13 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -1,105 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jinja2
import json
import os
import sys
import time
try:
from unittest.case import TestCase
except ImportError:
# Running unit tests in a production environment
from unittest2.case import TestCase
import yaml
from fuel_agent_ci.objects import environment
from fuel_agent_ci import utils
# FIXME(kozhukalov) it would be better to set this as a command line arg
ENV_FILE = os.path.join(os.path.dirname(__file__),
'../../samples/ci_environment.yaml')
class BaseFuelAgentCITest(TestCase):
FUEL_AGENT_REPO_NAME = 'fuel_agent'
FUEL_AGENT_HTTP_NAME = 'http'
FUEL_AGENT_NET_NAME = 'net'
FUEL_AGENT_DHCP_NAME = 'dhcp'
FUEL_AGENT_SSH_NAME = 'vm'
FUEL_AGENT_TEMPLATE_PATH = '/usr/share/fuel-agent/cloud-init-templates'
def setUp(self):
super(BaseFuelAgentCITest, self).setUp()
# Starting environment
with open(ENV_FILE) as f:
ENV_DATA = yaml.load(f.read())
self.env = environment.Environment.new(**ENV_DATA)
self.env.start()
self.repo = self.env.repo_by_name(self.FUEL_AGENT_REPO_NAME)
self.ssh = self.env.ssh_by_name(self.FUEL_AGENT_SSH_NAME)
self.http = self.env.http_by_name(self.FUEL_AGENT_HTTP_NAME)
self.dhcp_hosts = self.env.dhcp_by_name(self.FUEL_AGENT_DHCP_NAME).hosts
self.net = self.env.net_by_name(self.FUEL_AGENT_NET_NAME)
self.ssh.wait()
self._upgrade_fuel_agent()
def _upgrade_fuel_agent(self):
"""This method is to be deprecated when artifact
based build system is ready.
"""
src_dir = os.path.join(self.env.envdir, self.repo.path, 'fuel_agent')
package_name = 'fuel-agent-0.1.0.tar.gz'
# Building fuel-agent pip package
utils.execute('python setup.py sdist', cwd=src_dir)
# Putting fuel-agent pip package on a node
self.ssh.put_file(
os.path.join(src_dir, 'dist', package_name),
os.path.join('/tmp', package_name))
# Installing fuel_agent pip package
self.ssh.run('pip install --upgrade %s' %
os.path.join('/tmp', package_name))
# Copying fuel_agent templates
self.ssh.run('mkdir -p %s' % self.FUEL_AGENT_TEMPLATE_PATH)
for f in os.listdir(
os.path.join(src_dir, 'cloud-init-templates')):
if f.endswith('.jinja2'):
self.ssh.put_file(
os.path.join(src_dir, 'cloud-init-templates', f),
os.path.join(self.FUEL_AGENT_TEMPLATE_PATH, f))
self.ssh.put_file(
os.path.join(src_dir, 'etc/fuel-agent/fuel-agent.conf.sample'),
'/etc/fuel-agent/fuel-agent.conf')
def tearDown(self):
super(BaseFuelAgentCITest, self).tearDown()
self.env.stop()
def render_template(self,
template_name,
template_dir=os.path.join(os.path.dirname(__file__),
'templates'),
template_data=None):
env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir))
template = env.get_template(template_name)
return template.render(**(template_data or {}))
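render_template is a thin jinja2 wrapper; a hedged example (the template
name and variables are placeholders matching the cloud-init templates
below):

userdata = self.render_template(
    'boothook.jinja2',
    template_data={'MASTER_IP': '10.20.0.2'})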

View File

@ -1,100 +0,0 @@
#cloud-boothook
#!/bin/bash
function add_str_to_file_if_not_exists {
file=$1
str=$2
val=$3
if ! grep -q "^ *${str}" $file; then
echo $val >> $file
fi
}
cloud-init-per instance disable_selinux_on_the_fly setenforce 0
cloud-init-per instance disable_selinux sed -i 's/^SELINUX=.*/SELINUX=disabled/g' /etc/sysconfig/selinux
# configure udev rules
# udev persistent net
cloud-init-per instance udev_persistent_net1 service network stop
DEFAULT_GW={{ MASTER_IP }}
ADMIN_MAC={{ ADMIN_MAC }}
ADMIN_IF=$(echo {{ UDEVRULES }} | sed 's/[,=]/\n/g' | grep "$ADMIN_MAC" | cut -d_ -f2 | head -1)
INSTALL_IF=$(ifconfig | grep "$ADMIN_MAC" | head -1 | cut -d' ' -f1)
NETADDR=( $(ifconfig $INSTALL_IF | grep -oP "[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}") )
if [ ! -z "$(grep $ADMIN_IF /etc/sysconfig/network-scripts/ifcfg-$ADMIN_IF | grep dhcp)" ] ; then
echo -e "# FROM COBBLER SNIPPET\nDEVICE=$ADMIN_IF\nIPADDR=${NETADDR[0]}\nNETMASK=${NETADDR[2]}\nBOOTPROTO=none\nONBOOT=yes\nUSERCTL=no\n" > /etc/sysconfig/network-scripts/ifcfg-"$ADMIN_IF"
fi
cloud-init-per instance set_gateway echo GATEWAY="$DEFAULT_GW" | tee -a /etc/sysconfig/network
#Add static udev rules
cloud-init-per instance udev_persistent_net2 echo {{ UDEVRULES }} | tr ' ' '\n' | grep udevrules | tr '[:upper:]' '[:lower:]' | sed -e 's/udevrules=//g' -e 's/,/\n/g' | sed -e "s/^/SUBSYSTEM==\"net\",\ ACTION==\"add\",\ DRIVERS==\"?*\",\ ATTR{address}==\"/g" -e "s/_/\",\ ATTR{type}==\"1\",\ KERNEL==\"eth*\",\ NAME=\"/g" -e "s/$/\"/g" | tee /etc/udev/rules.d/70-persistent-net.rules
cloud-init-per instance udev_persistent_net3 udevadm control --reload-rules
cloud-init-per instance udev_persistent_net4 udevadm trigger --attr-match=subsystem=net
cloud-init-per instance udev_persistent_net5 service network start
# end of udev
# configure black module lists
# virt-what should be installed
if [ ! -f /etc/modprobe.d/blacklist-i2c_piix4.conf ]; then
([[ $(virt-what) = "virtualbox" ]] && echo "blacklist i2c_piix4" >> /etc/modprobe.d/blacklist-i2c_piix4.conf || :)
modprobe -r i2c_piix4
fi
cloud-init-per instance conntrack_ipv4 echo nf_conntrack_ipv4 | tee -a /etc/rc.modules
cloud-init-per instance conntrack_ipv6 echo nf_conntrack_ipv6 | tee -a /etc/rc.modules
cloud-init-per instance chmod_rc_modules chmod +x /etc/rc.modules
cloud-init-per instance conntrack_max echo "net.nf_conntrack_max=1048576" | tee -a /etc/sysctl.conf
cloud-init-per instance conntrack_ipv4_load modprobe nf_conntrack_ipv4
cloud-init-per instance conntrack_ipv6_load modprobe nf_conntrack_ipv6
cloud-init-per instance conntrack_max_set sysctl -w "net.nf_conntrack_max=1048576"
cloud-init-per instance mkdir_coredump mkdir -p /var/log/coredump
cloud-init-per instance set_coredump echo -e "kernel.core_pattern=/var/log/coredump/core.%e.%p.%h.%t" | tee -a /etc/sysctl.conf
cloud-init-per instance set_chmod chmod 777 /var/log/coredump
cloud-init-per instance set_limits echo -e "* soft core unlimited\n* hard core unlimited" | tee -a /etc/security/limits.conf
#NOTE: disabled for centos?
#cloud-init-per instance dhclient echo 'supersede routers 0;' | tee /etc/dhcp/dhclient.conf
# ntp sync
cloud-init-per instance ntp_stop service ntp stop | tee /dev/null
cloud-init-per instance sync_date ntpdate -t 4 -b {{ MASTER_IP }}
cloud-init-per instance sync_hwclock hwclock --systohc
cloud-init-per instance edit_ntp_conf1 sed -i '/^\s*tinker panic/ d' /etc/ntp.conf
cloud-init-per instance edit_ntp_conf2 sed -i '1 i tinker panic 0' /etc/ntp.conf
cloud-init-per instance edit_ntp_conf3 echo 0 > /var/lib/ntp/drift
cloud-init-per instance edit_ntp_conf_0 chown ntp: /var/lib/ntp/drift
cloud-init-per instance edit_ntp_conf5 sed -i '/^\s*server/ d' /etc/ntp.conf
cloud-init-per instance edit_ntp_conf4 echo "server {{ MASTER_IP }} burst iburst" | tee -a /etc/ntp.conf
# Point installed ntpd to Master node
cloud-init-per instance set_ntpdate sed -i 's/SYNC_HWCLOCK\s*=\s*no/SYNC_HWCLOCK=yes/' /etc/sysconfig/ntpdate
cloud-init-per instance set_ntpd_0 chkconfig ntpd on
cloud-init-per instance set_ntpd_1 chkconfig ntpdate on
cloud-init-per instance removeUseDNS sed -i --follow-symlinks -e '/UseDNS/d' /etc/ssh/sshd_config
add_str_to_file_if_not_exists /etc/ssh/sshd_config 'UseDNS' 'UseDNS no'
cloud-init-per instance gssapi_disable sed -i -e "/^\s*GSSAPICleanupCredentials yes/d" -e "/^\s*GSSAPIAuthentication yes/d" /etc/ssh/sshd_config
cloud-init-per instance nailgun_agent echo 'flock -w 0 -o /var/lock/agent.lock -c "/opt/nailgun/bin/agent >> /var/log/nailgun-agent.log 2>&1"' | tee /etc/rc.local
# Copying default bash settings to the root directory
cloud-init-per instance skel_bash cp -f /etc/skel/.bash* /root/
cloud-init-per instance hiera_puppet mkdir -p /etc/puppet /var/lib/hiera
cloud-init-per instance touch_puppet touch /var/lib/hiera/common.yaml /etc/puppet/hiera.yaml
cloud-init-per instance clean_repos find /etc/yum.repos.d/. -name '*.repo' -delete

View File

@ -1,74 +0,0 @@
#cloud-boothook
#!/bin/bash
function add_str_to_file_if_not_exists {
file=$1
str=$2
val=$3
if ! grep -q "^ *${str}" $file; then
echo $val >> $file
fi
}
# configure udev rules
# udev persistent net
cloud-init-per instance udev_persistent_net1 /etc/init.d/networking stop
ADMIN_MAC={{ ADMIN_MAC }}
ADMIN_IF=$(echo {{ UDEVRULES }} | sed 's/[,=]/\n/g' | grep "$ADMIN_MAC" | cut -d_ -f2 | head -1)
INSTALL_IF=$(ifconfig | grep "$ADMIN_MAC" | head -1 | cut -d' ' -f1)
# Check if we do not already have static config (or interface seems unconfigured)
NETADDR=( $(ifconfig $INSTALL_IF | grep -oP "[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}") )
if [ ! -z "$(grep $ADMIN_IF /etc/network/interfaces.d/ifcfg-$ADMIN_IF | grep dhcp)" ] ; then
echo -e "auto $ADMIN_IF\niface $ADMIN_IF inet static\n\taddress ${NETADDR[0]}\n\tnetmask ${NETADDR[2]}\n\tbroadcast ${NETADDR[1]}" > /etc/network/interfaces.d/ifcfg-"$ADMIN_IF"
fi
#Add static udev rules
cloud-init-per instance udev_persistent_net2 echo {{ UDEVRULES }} | tr ' ' '\n' | grep udevrules | tr '[:upper:]' '[:lower:]' | sed -e 's/udevrules=//g' -e 's/,/\n/g' | sed -e "s/^/SUBSYSTEM==\"net\",\ ACTION==\"add\",\ DRIVERS==\"?*\",\ ATTR{address}==\"/g" -e "s/_/\",\ ATTR{type}==\"1\",\ KERNEL==\"eth*\",\ NAME=\"/g" -e "s/$/\"/g" | tee /etc/udev/rules.d/70-persistent-net.rules
cloud-init-per instance udev_persistent_net3 udevadm control --reload-rules
cloud-init-per instance udev_persistent_net4 udevadm trigger --attr-match=subsystem=net
cloud-init-per instance udev_persistent_net5 /etc/init.d/networking start
# end of udev
# configure black module lists
# virt-what should be installed
if [ ! -f /etc/modprobe.d/blacklist-i2c_piix4.conf ]; then
([[ $(virt-what) = "virtualbox" ]] && echo "blacklist i2c_piix4" >> /etc/modprobe.d/blacklist-i2c_piix4.conf || :) && update-initramfs -u -k all
modprobe -r i2c_piix4
fi
cloud-init-per instance conntrack_ipv4 echo nf_conntrack_ipv4 | tee -a /etc/modules
cloud-init-per instance conntrack_ipv6 echo nf_conntrack_ipv6 | tee -a /etc/modules
cloud-init-per instance conntrack_max echo "net.nf_conntrack_max=1048576" | tee -a /etc/sysctl.conf
cloud-init-per instance conntrack_ipv4_load modprobe nf_conntrack_ipv4
cloud-init-per instance conntrack_ipv6_load modprobe nf_conntrack_ipv6
cloud-init-per instance conntrack_max_set sysctl -w "net.nf_conntrack_max=1048576"
cloud-init-per instance dhclient echo 'supersede routers 0;' | tee /etc/dhcp/dhclient.conf
# ntp sync
cloud-init-per instance ntp_stop service ntp stop | tee /dev/null
cloud-init-per instance sync_date ntpdate -t 4 -b {{ MASTER_IP }}
cloud-init-per instance sync_hwclock hwclock --systohc
cloud-init-per instance edit_ntp_conf1 sed -i '/^\s*tinker panic/ d' /etc/ntp.conf
cloud-init-per instance edit_ntp_conf2 sed -i '1 i tinker panic 0' /etc/ntp.conf
cloud-init-per instance edit_ntp_conf3 echo 0 > /var/lib/ntp/drift
cloud-init-per instance edit_ntp_conf5 sed -i '/^\s*server/ d' /etc/ntp.conf
cloud-init-per instance edit_ntp_conf4 echo "server {{ MASTER_IP }} burst iburst" | tee -a /etc/ntp.conf
cloud-init-per instance removeUseDNS sed -i --follow-symlinks -e '/UseDNS/d' /etc/ssh/sshd_config
add_str_to_file_if_not_exists /etc/ssh/sshd_config 'UseDNS' 'UseDNS no'
cloud-init-per instance gssapi_disable sed -i -e "/^\s*GSSAPICleanupCredentials yes/d" -e "/^\s*GSSAPIAuthentication yes/d" /etc/ssh/sshd_config
cloud-init-per instance nailgun_agent echo 'flock -w 0 -o /var/lock/agent.lock -c "/opt/nailgun/bin/agent >> /var/log/nailgun-agent.log 2>&1"' | tee /etc/rc.local
# Copying default bash settings to the root directory
cloud-init-per instance skel_bash cp -f /etc/skel/.bash* /root/
cloud-init-per instance hiera_puppet mkdir -p /etc/puppet /var/lib/hiera
cloud-init-per instance touch_puppet touch /var/lib/hiera/common.yaml /etc/puppet/hiera.yaml

Some files were not shown because too many files have changed in this diff