diff --git a/backend/proton/.gitignore b/backend/proton/.gitignore
deleted file mode 100644
index f604d92f..00000000
--- a/backend/proton/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-# Ignore everything in this directory
-*
-# Except this file !.gitignore
diff --git a/backend/tecs/HA.conf b/backend/tecs/HA.conf
deleted file mode 100755
index e64b5ea8..00000000
--- a/backend/tecs/HA.conf
+++ /dev/null
@@ -1,166 +0,0 @@
-## HA configuration (two-node)
-# Each HA system gets its own configuration file. The naming convention is: a single HA system uses HA_1.conf; two HA systems use HA_2_1.conf and HA_2_2.conf, and so on.
-# It is recommended to copy and rename this template before editing. If you edit it with vi, run export LC_ALL="zh_CN.GB2312" first to avoid garbled characters, and run unset LC_ALL afterwards.
-
-[DEFAULT]
-# OpenCOS components installed under HA. Any combination of loadbalance,database,amqp,keystone,neutron,glance,cinder,nova,horizon,heat,ceilometer,ironic (matching the keys of the component service lists below),
-# separated by commas; all is shorthand for every component; order does not matter; haproxy stands for configuring LB.
-# Note that HA is installed via the conf method, which cannot install ironic. If ironic is configured here, install it separately by hand via the custom method before starting the overall installation flow.
-# This option is mandatory.
-components=database,amqp,keystone,neutron,glance,cinder,nova,horizon,heat,ceilometer
-
-# Component services managed by HA (may be trimmed); separate multiple services with commas.
-# If no services are added or removed, the options below need no change; unused components need not be commented out, because whether a component is enabled is decided by "components".
-loadbalance = haproxy
-
-database=mariadb
-
-amqp=rabbitmq-server
-
-keystone=openstack-keystone
-
-#neutron-metadata-agent,neutron-lbaas-agent are not used by default
-neutron=neutron-server,neutron-l3-agent,neutron-dhcp-agent
-
-#openstack-glance-scrubber is not used by default
-glance=openstack-glance-api,openstack-glance-registry
-
-#openstack-cinder-backup is not used by default
-cinder=openstack-cinder-api,openstack-cinder-scheduler,openstack-cinder-volume
-
-nova=openstack-nova-api,openstack-nova-conductor,openstack-nova-scheduler,openstack-nova-cert,openstack-nova-consoleauth,openstack-nova-novncproxy
-
-horizon=httpd,opencos-alarmmanager,opencos-alarmagent
-
-heat=openstack-heat-api,openstack-heat-engine,openstack-heat-api-cfn,openstack-heat-api-cloudwatch
-
-ceilometer=openstack-ceilometer-api,openstack-ceilometer-central,openstack-ceilometer-alarm-evaluator,openstack-ceilometer-alarm-notifier,openstack-ceilometer-notification,openstack-ceilometer-collector
-
-ironic=openstack-ironic-api,openstack-ironic-conductor
-
-# Add clone service resources (run on every node) as the deployment requires. Give the service name without the .service suffix; separate multiple services with commas. Optional.
-#clone_service=
-
-# Name of the guard service
-guard=tfg-guard
-
-# HA cluster heartbeat links: at least one, three recommended; each is a pair of IPs separated by a comma.
-# If LB and HA run on the same servers, the heartbeat links need not be filled in here again.
-# First heartbeat link (the example uses public IPs). Mandatory.
-heartbeat_link1=10.43.179.221,10.43.179.222
-# Second heartbeat link; must not share any IP with the other links. Optional.
-heartbeat_link2=
-# Third heartbeat link; must not share any IP with the other links. Optional.
-heartbeat_link3=
-
-# The node that runs the HA script is the local node; the others are remote nodes. This is the root password used to ssh into the remote nodes. Mandatory.
-remote_node_password=ossdbg1
-
-
-# haproxy floating IP address. Mandatory when LB is configured.
-#loadbalance_fip=192.160.0.226
-#loadbalance_nic=ens33
-#loadbalance_netmask=23
-#############DB################
-# Database floating IP; may be the same as the LB floating IP. Mandatory.
-# Floating IP address
-#database_fip=192.160.0.225
-# NIC that carries the floating IP
-#database_nic=baseleft
-# Netmask, in CIDR format
-#database_netmask=23
-
-# Full path of the database shared disk. Mandatory if the component is present.
-# Disk name; an LV is recommended, and when using an LV be sure to give the logical volume name.
-#database_device=/dev/mapper/vg_mysql-lv_mysql
-# Filesystem type
-#database_fs_type=ext4
-
-# Full path of the database backup shared disk; must differ from the other shared disks (feature not supported yet). Optional.
-#backup_database_device=/dev/mapper/vg_mysqlbackup-lv_mysqlbackup
-#backup_database_fs_type=ext4
-
-##############AMQP################
-# AMQP floating IP; may be the same as the LB floating IP. Mandatory.
-#amqp_fip=192.160.0.225
-#amqp_nic=baseleft
-#amqp_netmask=23
-
-##############keystone################
-# keystone floating IP; not needed when configured as LB, otherwise mandatory if the component is present.
-#keystone_fip=192.160.0.225
-#keystone_nic=baseleft
-#keystone_netmask=23
-
-##############neutron################
-# neutron floating IP; not needed when configured as LB, otherwise mandatory if the component is present.
-#neutron_fip=192.160.0.225
-#neutron_nic=baseleft
-#neutron_netmask=23
-
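HA.conf, continued below, is a plain key=value template under a single [DEFAULT] section, and its comments state two hard rules: components and heartbeat_link1 are mandatory, and no IP address may appear in more than one heartbeat link. As a minimal illustration only (not part of the deleted tree; the helper name check_ha_conf and the Python approach are assumptions), those rules could be checked like this:

# Sketch only: sanity-check an HA_N.conf file against the rules stated
# in the template's comments.  Assumes the [DEFAULT] section and key
# names used above; check_ha_conf() is a hypothetical helper.
from configparser import ConfigParser


def check_ha_conf(path):
    cp = ConfigParser(interpolation=None)
    cp.read(path)
    cfg = cp["DEFAULT"]
    if not cfg.get("components"):
        raise ValueError("'components' is mandatory")
    links = [cfg.get("heartbeat_link%d" % i, "") for i in (1, 2, 3)]
    if not links[0]:
        raise ValueError("heartbeat_link1 is mandatory")
    ips = [ip for link in links if link for ip in link.split(",")]
    if len(ips) != len(set(ips)):
        raise ValueError("heartbeat links must not share an IP address")
    return dict(cfg)

A call such as check_ha_conf("HA_1.conf") would return the parsed settings or raise on the first violated rule. The listing of HA.conf resumes below.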
-##############glance################
-# glance floating IP; not needed when configured as LB, otherwise mandatory if the component is present.
-#glance_fip=192.160.0.225
-#glance_nic=baseleft
-#glance_netmask=23
-
-# Image shared disk settings; must differ from the other shared disks. Mandatory if the component is present.
-# glance_device_type can be drbd or iscsi
-
-#glance_device_type=drbd
-#glance_device=/dev/mapper/vg_glance-lv_glance
-#glance_fs_type=ext4
-
-##############cinder################
-# cinder floating IP; not needed when configured as LB, otherwise mandatory if the component is present.
-#cinder_fip=192.160.0.225
-#cinder_nic=baseleft
-#cinder_netmask=23
-
-# Management IPs of the disk array used by VM block devices; separate multiple IPs with spaces. Optional.
-#cinder_ping_ip=192.160.0.7
-
-##############nova################
-# nova floating IP; not needed when configured as LB, otherwise mandatory if the component is present.
-#nova_fip=192.160.0.225
-#nova_nic=baseleft
-#nova_netmask=23
-
-##############horizon################
-# Floating IP used to log in to the TECS dashboard; not needed when configured as LB, otherwise mandatory if the component is present.
-# Components with different floating IPs may run on different nodes; if horizon must also
-# run on the same node as some other component, configure location_constraint.
-#horizon_fip=10.43.179.230
-#horizon_nic=kmportv1
-#horizon_netmask=23
-
-##############ironic################
-# ironic floating IP; not needed when configured as LB, otherwise mandatory if the component is present.
-#ironic_fip=192.160.0.225
-#ironic_nic=baseleft
-#ironic_netmask=23
-
-##############heat################
-# heat floating IP; not needed when configured as LB, otherwise mandatory if the component is present.
-#heat_fip=192.160.0.225
-#heat_nic=baseleft
-#heat_netmask=23
-
-##############ceilometer################
-# ceilometer floating IP; not needed when configured as LB, otherwise mandatory if the component is present.
-#ceilometer_fip=192.160.0.225
-#ceilometer_nic=baseleft
-#ceilometer_netmask=23
-
-# Full path of the mongod database shared disk. Recommended.
-#mongod_device=/dev/mapper/vg_mongodb-lv_mongodb
-# Filesystem type
-#mongod_fs_type=ext4
-
-# Set to local if the mongod database uses a local disk, otherwise leave empty.
mongod_local=local
-
-# The following setting describes the disk array that holds the shared disks; for now all shared disks used in this file must sit on a single disk array. Optional.
-# Format: (primary controller service IP, primary controller IQN),(standby controller service IP, standby controller IQN)
-# If both controllers use the same IQN, this may be shortened to (primary controller service IP, primary controller IQN)
-#iscsi_storage=(172.32.1.1,iqn.2099-01.cn.com.zte:usp.spr11-4c:09:b4:b0:56:8b),(172.32.1.2,iqn.2099-01.cn.com.zte:usp.spr11-4c:09:b4:b0:56:8c)
diff --git a/backend/tecs/getnodeinfo.sh b/backend/tecs/getnodeinfo.sh
deleted file mode 100755
index 214fcc9a..00000000
--- a/backend/tecs/getnodeinfo.sh
+++ /dev/null
@@ -1,159 +0,0 @@
-#!/bin/bash
-
-dhcp_ip="127.0.0.1"
-DISCOVERD_URL="http://$dhcp_ip:5050/v1/continue"
-
-function update() {
-    jq "$1" data.json > temp.json || echo "Error: update $1 to json failed"
-    mv temp.json data.json
-}
-
-function get_system_info(){
-    PRODUCT=$(dmidecode -s system-product-name)
-    FAMILY=$(dmidecode -t system|grep "Family"|cut -d ":" -f2)
-    VERSION=$(dmidecode -s system-version)
-    SERIAL=$(dmidecode -s system-serial-number)
-    MANUFACTURER=$(dmidecode -s system-manufacturer)
-    UUID=$(dmidecode -s system-uuid)
-    FQDN=$(hostname -f)
-    echo '{"system":{}}' > data.json
-    update ".system[\"product\"] = \"$PRODUCT\""
-    update ".system[\"family\"] = \"$FAMILY\""
-    update ".system[\"fqdn\"] = \"$FQDN\""
-    update ".system[\"version\"] = \"$VERSION\""
-    update ".system[\"serial\"] = \"$SERIAL\""
-    update ".system[\"manufacturer\"] = \"$MANUFACTURER\""
-    update ".system[\"uuid\"] = \"$UUID\""
-}
-
-function get_cpu_info(){
-    REAL=$(cat /proc/cpuinfo |grep "physical id"|sort |uniq|wc -l)
-    TOTAL=$(cat /proc/cpuinfo |grep "processor"|wc -l)
-    update ".cpu[\"real\"] = $REAL"
-    update ".cpu[\"total\"] = $TOTAL"
-
-    for i in $(seq $TOTAL)
-    do
-        if [ !
-z "$i" ]; then - SPEC_MODEL=$(cat /proc/cpuinfo | grep name | cut -f2 -d:|sed -n $i"p") - SPEC_FRE=$(cat /proc/cpuinfo | grep MHz | cut -f2 -d:|sed -n $i"p") - update ".cpu[\"spec_$i\"] = {model:\"$SPEC_MODEL\", frequency:$SPEC_FRE}" - fi - done -} - -function get_memory_info(){ - PHY_NUM=$(dmidecode -t memory|grep "Physical Memory Array"|wc -l) - TOTAL_MEM=$(cat /proc/meminfo |grep MemTotal |cut -d ":" -f2) - update ".memory[\"total\"] = \"$TOTAL_MEM\"" - for num in $(seq $PHY_NUM) - do - SLOTS=$(dmidecode -t memory |grep "Number Of Devices" |cut -d ":" -f2|sed -n $num"p") - MAX_CAP=$(dmidecode -t memory |grep "Maximum Capacity" |cut -d ":" -f2|sed -n $num"p") - update ".memory[\"phy_memory_$num\"] = {slots:\"$SLOTS\", maximum_capacity:\"$MAX_CAP\"}" - - for i in $(seq $SLOTS) - do - if [ ! -z "$i" ]; then - DEVICE_FRE=$(dmidecode -t memory |grep "Speed" |cut -d ":" -f2|sed -n $i"p") - DEVICE_TYPE=$(dmidecode -t memory |grep 'Type:' |grep -v "Error Correction Type"|cut -d ":" -f2|sed -n $i"p") - DEVICE_SIZE=$(dmidecode -t memory |grep Size |cut -d ":" -f2|sed -n $i"p") - update ".memory[\"phy_memory_$num\"][\"devices_$i\"] = {frequency:\"$DEVICE_FRE\", type:\"$DEVICE_TYPE\", size:\"$DEVICE_SIZE\"}" - fi - done - done -} - -function get_net_info(){ - physical_networks=`ls -l /sys/class/net/ | grep -v lo |grep "pci"|awk -F 'net/' '{print $2}'` - if [ -f "/sys/class/net/bonding_masters" ]; then - bond_network=$(cat /sys/class/net/bonding_masters) - if [ ! -z "$bond_network" ];then - physical_networks+=" $bond_network" - fi - fi - for iface in $physical_networks - do - NAME=$iface - MAC=$(ip link show $iface | awk '/ether/ {print $2}') - IP=$(ip addr show $iface | awk '/inet / { sub(/\/.*/, "", $2); print $2 }') - NETMASK=$(ifconfig $iface | grep netmask | awk '{print $4}') - STATE=$(ip link show $iface | awk '/mtu/ {print $3}') - PCI=$(ethtool -i $iface|grep "bus-info"|cut -d " " -f2) - CURRENT_SPEED=$(ethtool $iface |grep Speed |awk -F " " '{print $2}') - LINE=$(ethtool $iface|grep -n "Supported pause frame use"|awk -F ":" '{print $1}') - LINE=$[ LINE - 1 ] - LINE_SPEED=$(ethtool $iface|grep -n "Supported link modes"|awk -F ":" '{print $1}') - BOND=$(ifconfig $iface | grep MASTER) - if [ $LINE -eq $LINE_SPEED ]; then - MAX_SPEED=$(ethtool $iface|grep "Supported link modes"|cut -d ":" -f2) - else - MAX_SPEED=$(ethtool $iface |sed -n $LINE"p"|awk -F " " '{print $1}') - fi - - UP="UP" - if [[ "$STATE" =~ "$UP" ]]; then - STATE="up" - else - STATE="down" - fi - if [ -z "$BOND" ]; then - TYPE="ether" - else - TYPE="bond" - SLAVES=$(find /etc/sysconfig/network-scripts/ -name "ifcfg-*" |xargs grep "MASTER=$iface"|awk -F 'ifcfg-' '{print $2}'|awk -F ':' '{print $1}') - fi - if [ ! 
-z "$MAC" ]; then - update ".interfaces[\"$iface\"] = {mac: \"$MAC\", ip: \"$IP\", netmask: \"$NETMASK\", name: \"$iface\", max_speed: \"$MAX_SPEED\", state: \"$STATE\", pci: \"$PCI\", current_speed: \"$CURRENT_SPEED\", type: \"$TYPE\", slaves:\"$SLAVES\"}" - fi - done -} - -function get_disk_info(){ - for disk in $(fdisk -l|grep Disk|grep "/dev" |cut -d ":" -f1|awk -F "/" '{print $NF}') - do - DISK_NAME=$disk - DISK_SIZE=$(fdisk -l|grep Disk|grep "/dev" |grep -w $disk|cut -d "," -f2) - DISK_DISK=$(ls -l /dev/disk/by-path/|grep $disk"$"|awk '{print $9}') - DISK_EXTRA_1=$(ls -l /dev/disk/by-id/|grep $disk"$"|awk '{print $9}'|sed -n 1p) - DISK_EXTRA_2=$(ls -l /dev/disk/by-id/|grep $disk"$"|awk '{print $9}'|sed -n 2p) - MODEL=$(hdparm -I /dev/sda |grep Model | cut -d ":" -f2) - REMOVABLE=$(hdparm -I /dev/sda |grep removable|awk '{print $4}') - update ".disk[\"$disk\"] = {name: \"$DISK_NAME\", size: \"$DISK_SIZE\", disk: \"$DISK_DISK\", model: \"$MODEL\", removable: \"$REMOVABLE\",extra: [\"$DISK_EXTRA_1\", \"$DISK_EXTRA_2\"]}" - done -} - -function main(){ - get_system_info - get_cpu_info - get_memory_info - get_net_info - get_disk_info -} -main - -BMC_ADDRESS=$(ipmitool lan print | grep -e "IP Address [^S]" | awk '{ print $4 }') -if [ -z "$BMC_ADDRESS" ]; then - BMC_ADDRESS=$(ipmitool lan print 3| grep -e "IP Address [^S]" | awk '{ print $4 }') -fi -update ".ipmi_address = \"$BMC_ADDRESS\"" - -update ".data_name = \"baremetal_source\"" - -update ".os_status = \"active\"" - -echo Collected: -cat data.json - -RESULT=$(eval curl -i -X POST \ - "-H 'Accept: application/json'" \ - "-H 'Content-Type: application/json'" \ - "-d @data.json" \ - "$DISCOVERD_URL") - -if echo $RESULT | grep "HTTP/1.0 4"; then - echo "Ironic API returned error: $RESULT" -fi - -echo "Node is now discovered! Halting..." 
-sleep 5 diff --git a/backend/tecs/jq-1.3-2.el7.x86_64.rpm b/backend/tecs/jq-1.3-2.el7.x86_64.rpm deleted file mode 100755 index 61008c9c..00000000 Binary files a/backend/tecs/jq-1.3-2.el7.x86_64.rpm and /dev/null differ diff --git a/backend/tecs/network-configuration-1.1.1-25.x86_64.rpm b/backend/tecs/network-configuration-1.1.1-25.x86_64.rpm deleted file mode 100755 index e45fde00..00000000 Binary files a/backend/tecs/network-configuration-1.1.1-25.x86_64.rpm and /dev/null differ diff --git a/backend/tecs/storage_auto_config/base/cinder.json.sample b/backend/tecs/storage_auto_config/base/cinder.json.sample deleted file mode 100755 index c8e5c33d..00000000 --- a/backend/tecs/storage_auto_config/base/cinder.json.sample +++ /dev/null @@ -1,39 +0,0 @@ -[ - { - "protocol_type": "ISCSI", - "service": "glance", - "lun": "0", - "data_ips": [ - "10.43.177.159" - ], - "lvm_config": { - "size": 100, - "vg_name": "VolGroupHAImage", - "lv_name": "lvHAImage", - "fs_type": "ext4" - } - }, - { - "protocol_type": "ISCSI", - "service": "db", - "lun": "1", - "data_ips": [ - "162.1.1.101" - ], - "lvm_config": { - "size": 100, - "vg_name": "VolGroupHAMysql", - "lv_name": "lvHAMysql", - "fs_type": "ext4" - } - }, - { - "protocol_type": "CEPH", - "rbd_config": { - "size": 100, - "rbd_pool": "mysql", - "rbd_volume": "mysql", - "fs_type": "ext4" # can be none - } - } -] \ No newline at end of file diff --git a/backend/tecs/storage_auto_config/base/control.json.sample b/backend/tecs/storage_auto_config/base/control.json.sample deleted file mode 100755 index c8e5c33d..00000000 --- a/backend/tecs/storage_auto_config/base/control.json.sample +++ /dev/null @@ -1,39 +0,0 @@ -[ - { - "protocol_type": "ISCSI", - "service": "glance", - "lun": "0", - "data_ips": [ - "10.43.177.159" - ], - "lvm_config": { - "size": 100, - "vg_name": "VolGroupHAImage", - "lv_name": "lvHAImage", - "fs_type": "ext4" - } - }, - { - "protocol_type": "ISCSI", - "service": "db", - "lun": "1", - "data_ips": [ - "162.1.1.101" - ], - "lvm_config": { - "size": 100, - "vg_name": "VolGroupHAMysql", - "lv_name": "lvHAMysql", - "fs_type": "ext4" - } - }, - { - "protocol_type": "CEPH", - "rbd_config": { - "size": 100, - "rbd_pool": "mysql", - "rbd_volume": "mysql", - "fs_type": "ext4" # can be none - } - } -] \ No newline at end of file diff --git a/backend/tecs/storage_auto_config/base/multipath.conf b/backend/tecs/storage_auto_config/base/multipath.conf deleted file mode 100755 index 31d49e8b..00000000 --- a/backend/tecs/storage_auto_config/base/multipath.conf +++ /dev/null @@ -1,144 +0,0 @@ -# This is a basic configuration file with some examples, for device mapper -# mulead of using WWIDs as names. -defaults { - user_friendly_names yes - queue_without_daemon no -# find_multipaths yes -} -## -## Here is an example of how to configure some standard options. -## -# -#defaults { -# udev_dir /dev -# polling_interval 10 -# selector "round-robin 0" -# path_grouping_policy multibus -# getuid_callout "/lib/udev/scsi_id --whitelisted --device=/dev/%n" -# prio alua -# path_checker readsector0 -# rr_min_io 100 -# max_fds 8192 -# rr_weight priorities -# failback immediate -# no_path_retry fail -# user_friendly_names yes -#} -## -## The wwid line in the following blacklist section is shown as an example -## of how to blacklist devices by wwid. The 2 devnode lines are the -## compiled in default blacklist. If you want to blacklist entire types -## of devices, such as all scsi devices, you should use a devnode line. 
-## However, if you want to blacklist specific devices, you should use -## a wwid line. Since there is no guarantee that a specific device will -## not change names on reboot (from /dev/sda to /dev/sdb for example) -## devnode lines are not recommended for blacklisting specific devices. -## -#blacklist { -# wwid 26353900f02796769 -# devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" -# devnode "^hd[a-z]" -#} -#multipaths { -# multipath { -# wwid 3600508b4000156d700012000000b0000 -# alias yellow -# path_grouping_policy multibus -# path_checker readsector0 -# path_selector "round-robin 0" -# failback manual -# rr_weight priorities -# no_path_retry 5 -# } -# multipath { -# wwid 1DEC_____321816758474 -# alias red -# } -#} -#devices { -# device { -# vendor "COMPAQ " -# product "HSV110 (C)COMPAQ" -# path_grouping_policy multibus -# getuid_callout "/lib/udev/scsi_id --whitelisted --device=/dev/%n" -# path_checker readsector0 -# path_selector "round-robin 0" -# hardware_handler "0" -# failback 15 -# rr_weight priorities -# no_path_retry queue -# } -# device { -# vendor "COMPAQ " -# product "MSA1000 " -# path_grouping_policy multibus -# } -#} -devices { - device { - vendor "FUJITSU" - product "ETERNUS_DXL" - prio alua - path_grouping_policy group_by_prio - path_selector "round-robin 0" - failback immediate - no_path_retry 0 (*1) - path_checker tur - dev_loss_tmo 2097151 (*2) - fast_io_fail_tmo 1 - } - device { - vendor "FUJITSU" - product "ETERNUS_DXM" - prio alua - path_grouping_policy group_by_prio - path_selector "round-robin 0" - failback immediate - no_path_retry 0 (*1) - path_checker tur - dev_loss_tmo 2097151 (*2) - fast_io_fail_tmo 1 - } - device { - vendor "FUJITSU" - product "ETERNUS_DX400" - prio alua - path_grouping_policy group_by_prio - path_selector "round-robin 0" - failback immediate - no_path_retry 0 (*1) - path_checker tur - dev_loss_tmo 2097151 (*2) - fast_io_fail_tmo 1 - } - device { - vendor "FUJITSU" - product "ETERNUS_DX8000" - prio alua - path_grouping_policy group_by_prio - path_selector "round-robin 0" - failback immediate - no_path_retry 0 (*1) - path_checker tur - dev_loss_tmo 2097151 (*2) - fast_io_fail_tmo 1 - } - device { - vendor "ZTE" - product "ZXUSP" - path_grouping_policy group_by_prio - path_checker tur - prio alua - path_selector "round-robin 0" - hardware_handler "1 alua" - failback immediate - rr_weight priorities - no_path_retry 0 (*1) - rr_min_io_rq 1 - flush_on_last_del yes - } -} - - -blacklist { -} diff --git a/backend/tecs/storage_auto_config/common/cinder_conf.py b/backend/tecs/storage_auto_config/common/cinder_conf.py deleted file mode 100755 index c0932653..00000000 --- a/backend/tecs/storage_auto_config/common/cinder_conf.py +++ /dev/null @@ -1,281 +0,0 @@ - -import uuid -from utils import * -from xml.etree.ElementTree import ElementTree, Element - - -class BaseConfig(): - _CINDER_CONF_PATH = "/etc/cinder/cinder.conf" - SET_CONFIG = \ - "openstack-config --set {config_file} {section} {key} {value}" - GET_CONFIG = \ - "openstack-config --get {config_file} {section} {key}" - instance = None - - def __init__(self): - self._BACKEND_MAPPING = { - 'KS3200_IPSAN': ZTEBackendConfig, - 'KS3200_FCSAN': ZTEBackendConfig, - 'FUJISTU_ETERNUS': FUJISTUBackendConfig, - 'LVM': None, - 'CEPH': CEPHBackendConfig, - } - self.instance_mapping = {} - - def __get_backend_instance(self, backend_type): - if not backend_type or \ - backend_type not in self._BACKEND_MAPPING.keys(): - print_or_raise("Volume driver type '%s' is not valid." 
% - backend_type, - ScriptInnerError) - - backend_instance = self.instance_mapping.get(backend_type, BaseConfig) - if isinstance(backend_instance, self._BACKEND_MAPPING[backend_type]): - return backend_instance - else: - self.instance_mapping.update( - {backend_type: self._BACKEND_MAPPING[backend_type]()}) - return self.instance_mapping[backend_type] - - @classmethod - def single_instance(cls): - if not BaseConfig.instance: - BaseConfig.instance = BaseConfig() - return BaseConfig.instance - - def _construct_particular_cinder_data(self, backend, backend_data): - print_or_raise("Backend _construct_particular_cinder_data method no " - "implement!", ScriptInnerError) - - def _write_xml(self, fp_xml, **backend_device_args): - self.backend_instance._write_xml(fp_xml, **backend_device_args) - - def _construct_commonality_cinder_data(self, backend, backend_data): - backend_pools, xml_path = \ - self.backend_instance._construct_particular_cinder_data( - backend, backend_data) - - backend_data['volume_backend_name'] = \ - backend_data.pop('volume_type') - - set_backend = lambda x, y: self.SET_CONFIG.format( - config_file=self._CINDER_CONF_PATH, - section=backend, - key=x, value=y) - - backend_config_list = list() - backend_config_list += map( - set_backend, backend_data.keys(), backend_data.values()) - - get_bakcends = \ - self.GET_CONFIG.format(config_file=self._CINDER_CONF_PATH, - section="DEFAULT", - key="enabled_backends") - out, err = execute(get_bakcends, check_exit_code=[0, 1]) - exist_backends = out.split("\n")[0] if out else "" - enabled_backends = \ - exist_backends if backend in exist_backends else \ - "%s" % backend if not out else "%s,%s" % \ - (exist_backends, backend) - set_bakcends = \ - self.SET_CONFIG.format(config_file=self._CINDER_CONF_PATH, - section="DEFAULT", - key="enabled_backends", - value=enabled_backends) - - # write to cinder.conf - config_set_all = set_bakcends + ";" + ";".join(backend_config_list) - execute(config_set_all) - - return backend_pools, xml_path - - def is_needed_generate_backend_xml(self, backend_driver): - if backend_driver in ['KS3200_IPSAN', 'KS3200_FCSAN', - 'FUJISTU_ETERNUS']: - return True - else: - return False - - def config_backend(self, backend_cinder_args, **backend_device_args): - """ - Config outer interface,for public flow. 
- :param backend_device_args: device config - :param backend_cinder_args: backend config - :return: - """ - backend_data = backend_cinder_args[1] - backend_driver = backend_data.get('volume_driver', None) - self.backend_instance = self.__get_backend_instance(backend_driver) - - # config cinder.conf - backend_pools, xml_path = \ - self._construct_commonality_cinder_data(backend_cinder_args[0], - backend_data) - - # config xml - if self.is_needed_generate_backend_xml(backend_driver): - backend_device_args.update({'pools': backend_pools}) - with open(xml_path, "w+") as fp_xml: - self._write_xml(fp_xml, **backend_device_args) - execute("chown cinder:cinder %s" % xml_path) - - def update_xml_node(self, element_obj, node_path, content): - node_list = element_obj.findall(node_path) - if node_list: - node_list[0].text = content - else: - new_element = Element(node_path.split('/')[-1]) - new_element.text = content - parent_node = element_obj.findall(node_path.split('/')[0]) - parent_node[0].append(new_element) - - -class ZTEBackendConfig(BaseConfig): - _DEFAULT_USERNAME = "admin" - _DEFAULT_USERPWD = "admin" - _DEFAULT_XML_FILE_PREFIX = "cinder_zte_conf_file" - _DEFAULT_XML_TEMPLATE_PATH = "/etc/cinder/cinder_zte_conf.xml" - _ISCSI_DRIVER = 'cinder.volume.drivers.zte.zte_ks.ZteISCSIDriver' - _FC_DRIVER = 'cinder.volume.drivers.zte.zte_ks.ZteFCDriver' - - def _construct_particular_cinder_data(self, backend, backend_data): - # construct commonality data in cinder.conf - backend_data['volume_driver'] = \ - self._ISCSI_DRIVER \ - if "KS3200_IPSAN" == backend_data['volume_driver'] \ - else self._FC_DRIVER - backend_data[self._DEFAULT_XML_FILE_PREFIX] = \ - backend_data.pop('backend_config_file') \ - if backend_data.get('backend_config_file', None) \ - else "/etc/cinder/%s_%s.xml" % (self._DEFAULT_XML_FILE_PREFIX, - backend) - backend_data['use_multipath_for_image_xfer'] = \ - backend_data.get('multipath_tool', True) - backend_pools = backend_data.pop('pools') - - return backend_pools, backend_data[self._DEFAULT_XML_FILE_PREFIX] - - def _write_xml(self, fp, **backend_device_args): - if not os.path.exists(self._DEFAULT_XML_TEMPLATE_PATH): - print_or_raise("XML file template %s not exists,can't load defult " - "params." 
% self._DEFAULT_XML_TEMPLATE_PATH, - ScriptInnerError) - - mgnt_ips = backend_device_args['management_ips'] - user_name = backend_device_args['user_name'] - user_pwd = backend_device_args['user_pwd'] - cinder_host_ip = backend_device_args['cinder_host_ip'] - pools = backend_device_args['pools'] - xml_fp = fp - - tree = ElementTree() - elements = tree.parse(self._DEFAULT_XML_TEMPLATE_PATH) - for index in range(len(mgnt_ips)): - self.update_xml_node( - elements, - "Storage/ControllerIP" + str(index), mgnt_ips[index]) - - if cinder_host_ip: - self.update_xml_node(elements, "Storage/LocalIP", cinder_host_ip) - self.update_xml_node(elements, "Storage/UserName", user_name) - self.update_xml_node(elements, "Storage/UserPassword", user_pwd) - - # del all StoragePool and StorageVd node - pool_parent_node = elements.findall("LUN") - pool_child_nodes = elements.findall("LUN/StoragePool") - vd_child_nodes = elements.findall("LUN/StorageVd") - map(pool_parent_node[0].remove, pool_child_nodes + vd_child_nodes) - - # add StoragePool node base on pools - for pool in pools: - element = Element("StoragePool") - element.text = pool - element.tail = "\n\t" - pool_parent_node[0].insert(0, element) - - tree.write(xml_fp, encoding="utf-8", xml_declaration=True) - - -class FUJISTUBackendConfig(BaseConfig): - _DEFAULT_USERNAME = "root" - _DEFAULT_USERPWD = "root" - _DEFAULT_XML_FILE_PREFIX = "cinder_eternus_config_file" - _DEFAULT_XML_TEMPLATE_PATH = \ - "/etc/cinder/cinder_fujitsu_eternus_dx.xml" - FUJISTU_DRIVER = \ - "cinder.volume.drivers.fujitsu.eternus_dx_iscsi.FJDXISCSIDriver" - - def _construct_particular_cinder_data(self, backend, backend_data): - # construct commonality data in cinder.conf - backend_data['volume_driver'] = self.FUJISTU_DRIVER - backend_data[self._DEFAULT_XML_FILE_PREFIX] = \ - backend_data.pop('backend_config_file') \ - if backend_data.get('backend_config_file', None) \ - else "/etc/cinder/%s_%s.xml" % (self._DEFAULT_XML_FILE_PREFIX, - backend) - backend_data['use_multipath_for_image_xfer'] = \ - backend_data.get('multipath_tool', True) - backend_data['use_fujitsu_image_volume'] = \ - backend_data.get('use_fujitsu_image_volume', True) - backend_data['fujitsu_min_image_volume_per_storage'] = \ - backend_data.get('fujitsu_min_image_volume_per_storage', 1) - backend_data['fujitsu_image_management_dir'] = \ - backend_data.get('fujitsu_image_management_dir', - '/var/lib/glance/conversion') - backend_pools = backend_data.pop('pools') - - return backend_pools, backend_data[self._DEFAULT_XML_FILE_PREFIX] - - def _write_xml(self, fp, **backend_device_args): - if not os.path.exists(self._DEFAULT_XML_TEMPLATE_PATH): - print_or_raise("XML file template %s not exists,can't load defult " - "params." 
% self._DEFAULT_XML_TEMPLATE_PATH, - ScriptInnerError) - - mgnt_ip = backend_device_args['management_ips'][0] - data_ips = backend_device_args['data_ips'] - user_name = backend_device_args['user_name'] - user_pwd = backend_device_args['user_pwd'] - pool = backend_device_args['pools'][0] - xml_fp = fp - - tree = ElementTree() - elements = tree.parse(self._DEFAULT_XML_TEMPLATE_PATH) - self.update_xml_node(elements, "EternusIP", mgnt_ip) - self.update_xml_node(elements, "EternusUser", user_name) - self.update_xml_node(elements, "EternusPassword", user_pwd) - self.update_xml_node(elements, "EternusPool", pool) - self.update_xml_node(elements, "EternusSnapPool", pool) - - root = tree.getroot() - map(root.remove, root.findall("EternusISCSIIP")) - for ip in data_ips: - element = Element("EternusISCSIIP") - element.text = ip - element.tail = "\n" - root.insert(4, element) - # root.append(element) - - tree.write(xml_fp, encoding="utf-8", xml_declaration=True) - - -class CEPHBackendConfig(BaseConfig): - NOVA_CONF_FILE = "/etc/nova/nova.conf" - GLANCE_API_CONF_FILE = "/etc/glance/glance-api.conf" - _RBD_STORE_USER = "cinder" - _RBD_POOL = "volumes" - _RBD_MAX_CLONE_DEPTH = 5 - _RBD_FLATTEN_VOLUME_FROM_SNAPSHOT = "False" - _RBD_CEPH_CONF = "/etc/ceph/ceph.conf" - _RBD_DRIVER = 'cinder.volume.drivers.rbd.RBDDriver' - - def _construct_particular_cinder_data(self, backend, backend_data): - backend_data['volume_driver'] = self._RBD_DRIVER - backend_data['rbd_pool'] = self._RBD_POOL - backend_data['rbd_max_clone_depth'] = self._RBD_MAX_CLONE_DEPTH - backend_data['rbd_flatten_volume_from_snapshot'] = \ - self._RBD_FLATTEN_VOLUME_FROM_SNAPSHOT - backend_data['rbd_ceph_conf'] = self._RBD_CEPH_CONF - uuid_instance = uuid.uuid3(uuid.NAMESPACE_DNS, "zte.com.cn") - backend_data['rbd_secret_uuid'] = uuid_instance.urn.split(":")[2] - return [], [] diff --git a/backend/tecs/storage_auto_config/common/share_disk.py b/backend/tecs/storage_auto_config/common/share_disk.py deleted file mode 100755 index 0b682c52..00000000 --- a/backend/tecs/storage_auto_config/common/share_disk.py +++ /dev/null @@ -1,312 +0,0 @@ - -from utils import * - - -class BaseShareDisk(): - instance = None - - def __init__(self): - self._PROTOCOL_MAPPING = { - 'ISCSI': ISCSIShareDisk, - 'CEPH': CEPHShareDisk - } - self.instance_mapping = {} - - def __get_protocol_instance(self, protocol_type): - if not protocol_type or \ - protocol_type not in self._PROTOCOL_MAPPING.keys(): - print_or_raise("Protocol type '%s' is not valid." 
% protocol_type, - ScriptInnerError) - - protocol_instance = self.instance_mapping.get(protocol_type, - BaseShareDisk) - if isinstance(protocol_instance, - self._PROTOCOL_MAPPING[protocol_type]): - return protocol_instance - else: - self.instance_mapping.update( - {protocol_type: self._PROTOCOL_MAPPING[protocol_type]()}) - return self.instance_mapping[protocol_type] - - @classmethod - def single_instance(cls): - if not BaseShareDisk.instance: - BaseShareDisk.instance = BaseShareDisk() - return BaseShareDisk.instance - - def deploy_share_disk(self, item, host_name): - protocol_instance = self.__get_protocol_instance( - item.get('protocol_type', 'ISCSI')) - protocol_instance.deploy_share_disk(item, host_name) - - -class ISCSIShareDisk(BaseShareDisk): - _LV_DEFAULT_NAME = { - 'glance': ("VolGroupHAImage", "lvHAImage", 254), - 'db': ("VolGroupHAMysql", "lvHAMysql", 253), - 'db_backup': ("VolGroupHABakMysql", "lvHABakMysql", 252), - 'mongodb': ("VolGroupHAMongodb", "lvHAMongodb", 251), - } - - def _get_iscsi_configs(self, record_list): - raid_config = {} - for record in record_list: - discovery_media_ip = record.split(" ")[0].split(":")[0] - discovery_media_iqn = record.split(" ")[1] - try: - execute("ping -c 1 -W 2 %s" % discovery_media_ip) - except ProcessExecutionError: - execute("iscsiadm -m node -T %s -p %s -o delete" % - (discovery_media_iqn, discovery_media_ip), - check_exit_code=[0, 1]) - continue - - if discovery_media_ip in raid_config.get(discovery_media_iqn, []): - execute("iscsiadm -m node -T %s -p %s -R" % - (discovery_media_iqn, discovery_media_ip), - check_exit_code=[0, 1]) - - elif discovery_media_iqn in raid_config.keys(): - raid_config[discovery_media_iqn] += [discovery_media_ip] - else: - raid_config[discovery_media_iqn] = [discovery_media_ip] - - print_or_raise("Raid config is:\n%s" % str(raid_config)) - return raid_config - - def _lv_reentrant_check( - self, vg_name, lv_name, iscsi_session_setup, lun=None, - data_ips=[]): - """ - Check if share disk operation is reentrant. - :return:True,continue follow action; False, do nothing. - """ - lv_device_path = "/dev/%s/%s" % (vg_name, lv_name) - if not os.path.exists(lv_device_path): - return True - - if not iscsi_session_setup: - exist_volumes = \ - [sd for sd in self._ls_sd_path() if "-lun-" + lun in sd - for ip in data_ips if "ip-" + ip in sd] - if not exist_volumes: - print_or_raise("Lvm %s is exist, but no sd device match!" % - lv_device_path, ScriptInnerError) - - return False - - def _lv_rollback(self, lv, vg, block_device): - try: - execute("lvremove -y -ff /dev/%s/%s" % (lv, vg), - check_exit_code=[0, 1, 5]) - execute("vgremove -y -ff %s" % vg, check_exit_code=[0, 1, 5]) - execute("pvremove -y -ff %s" % block_device, - check_exit_code=[0, 1, 5]) - except Exception as e: - print_or_raise("Rollback lvm resource failed!", e) - - def _establish_iscsi_session(self, available_data_ips): - # discovery - discovery_ret = "" - for ip in available_data_ips: - out, err = execute( - "iscsiadm -m discovery -t st -p %s:3260" % ip) - discovery_ret += out - # if('0' != err) or ('0\n' != err ) or err: - # print_or_raise("Discovery ip:%s failed,continue.." % ip) - - if not discovery_ret: - print_or_raise("No discovery record!", ScriptInnerError) - - record_list = list(set(discovery_ret.split('\n')[:-1])) - print_or_raise( - "Discovery successful! 
Record:\n%s" % "\n".join(record_list)) - - # get iqn and ip like {iqn1: ip1, iqn2:ip2} - raid_config = self._get_iscsi_configs(record_list) - - # auto config & login - login_cmd = \ - lambda x, y: "iscsiadm -m node -T %s -p %s:3260 -l" % (x, y) - auto_cmd = \ - lambda x, y: "iscsiadm -m node -T %s -p %s -o update -n " \ - "node.startup -v automatic" % (x, y) - login = [] - auto_config = [] - for index in range(len(raid_config.keys())): - k = raid_config.keys()[index] - v = raid_config[k] - login += map(login_cmd, [k] * len(v), v) - auto_config += map(auto_cmd, [k] * len(v), v) - execute(";".join(login)) - execute(";".join(auto_config)) - print_or_raise("Login successful!") - return raid_config - - def _modify_host_iqn(self, host_name): - # modify host IQN - host_iqn, err = execute("cat /etc/iscsi/initiatorname.iscsi") - md5_str, err = execute("echo -n %s | openssl md5" % host_name) - host_iqn = host_iqn.split("=")[1].strip() - wish_iqn = "iqn.opencos.rh:" + md5_str.split("=")[1].strip() - if wish_iqn != host_iqn: - print_or_raise( - "The host iqn is:%s, but wish iqn is %s, it will be modified." - % (host_iqn, wish_iqn)) - with open("/etc/iscsi/initiatorname.iscsi", "w") as fp: - fp.write("InitiatorName=" + wish_iqn + "\n") - execute("systemctl restart iscsid.service") - - def _ls_sd_path(self): - out, err = execute("ls /dev/disk/by-path") - return out.split("\n")[:-1] - - def _find_multipath_by_sd(self, iqns, lun_id): - sd_path = [] - attemps = 0 - while not sd_path: - sd_path = \ - [sd for sd in self._ls_sd_path() - if filter(lambda complex_sd_path: complex_sd_path in sd, - [iqn + "-lun-" + str(lun_id) for iqn in iqns])] - attemps += 1 - - if attemps == 5: - execute("iscsiadm -m node -R") - elif attemps > 10: - print_or_raise( - "After login successful," - "there is no local sd device match with block device.", - ScriptInnerError) - - time.sleep(2) - - sd_path = "/dev/disk/by-path/" + sd_path[0] - sd_real_path = os.path.realpath(sd_path) - - attemps = 0 - multipath_path = "" - while not os.path.exists(multipath_path): - multipath_device, err = execute("multipath -l %s" % sd_real_path) - # if not multipath_device or ('0' != err) or ('0\n' != err) or err: - # continue - - multipath_path = "/dev/mapper/" + \ - multipath_device.split("\n")[0].split(" ")[0] - attemps += 1 - - if attemps > 5: - print_or_raise( - "No multipath match with local sd device:%s." % - sd_real_path, - ScriptInnerError) - time.sleep(2) - - return multipath_path - - def _create_lv_by_multipath_device( - self, multipath, vg_name, lv_name, size, fs_type): - try: - # create lvm base on block device - execute("pvcreate -y -ff %s" % multipath, - check_exit_code=[0, 1, 5]) - execute("vgcreate -y -ff %s %s" % (vg_name, multipath), - check_exit_code=[0, 1, 5]) - - if size == -1: - lvcreate = "lvcreate -W y -l 100%%FREE -n %s %s" % \ - (lv_name, vg_name) - else: - lvcreate = "lvcreate -W y -L %sG -n %s %s" % \ - (round(size * 0.95, 2), lv_name, vg_name) - execute(lvcreate, check_exit_code=[0, 1, 5]) - execute("pvscan --cache --activate ay") - - # make filesystem - execute("mkfs.%s /dev/%s/%s" % (fs_type, vg_name, lv_name)) - except Exception as e: - self._lv_rollback(lv_name, vg_name, multipath) - print_or_raise("LVM create failed, resource has been rollbacked.", - e) - - def deploy_share_disk(self, item, host_name): - config_computer() - self._modify_host_iqn(host_name) - service = item['service'] - if service not in ['glance', 'db', 'db_backup', 'mongodb']: - print_or_raise("Service name '%s' is not valid." 
% service) - - # check ip - available_data_ips, invalid_ips = \ - get_available_data_ip(item['data_ips']) - if not available_data_ips: - print_or_raise("No valid data ips,please check.", ScriptInnerError) - - raid_config = self._establish_iscsi_session(available_data_ips) - - lv_config = item.get('lvm_config', None) - vg_name = lv_config.get('vg_name', self._LV_DEFAULT_NAME[service][0]) - lv_name = lv_config.get('lv_name', self._LV_DEFAULT_NAME[service][1]) - if not self._lv_reentrant_check(vg_name, lv_name, True): - return - - multipath = self._find_multipath_by_sd( - raid_config.keys(), - item.get('lun', self._LV_DEFAULT_NAME[service][2])) - - self._create_lv_by_multipath_device(multipath, - vg_name, - lv_name, - lv_config.get('size', -1), - lv_config.get('fs_type', 'ext4')) - - -class CEPHShareDisk(BaseShareDisk): - def __init__(self): - self.monitor_ip = '' - self.monitor_passwd = '' - - def deploy_share_disk(self, item, host_name): - self.monitor_ip = item.get('monitor_ip', '') - self.monitor_passwd = item.get('monitor_passwd', '') - rbd_pool = item['rbd_config']['rbd_pool'] - rbd_img = item['rbd_config']['rbd_volume'] - img_size = int(item['rbd_config']['size'])*1024 - fs_type = item['rbd_config'].get('fs_type', 'ext4') - cmd_create = 'sshpass -p %s ssh %s rbd create -p %s --size %s %s ' % \ - (self.monitor_passwd, - self.monitor_ip, - rbd_pool, - img_size, - rbd_img) - cmd_query = 'sshpass -p %s ssh %s rbd ls -l %s' % ( - self.monitor_passwd, self.monitor_ip, rbd_pool) - image_in_monitor = [] - print_or_raise("Create image %s in pool %s at monitor %s." % - (rbd_img, rbd_pool, self.monitor_ip)) - try: - out, err = execute(cmd_query) - if out: - for line in out.splitlines(): - image_in_monitor.append(line.split()[0]) - if rbd_img not in image_in_monitor: - execute(cmd_create) - except Exception as e: - print_or_raise("Query pool %s in monitor error or create image %s " - "in pool %s." 
% (rbd_pool, rbd_img, rbd_pool), e) - execute("systemctl stop rbdmap") - rbd_map = '%s/%s id=admin,' \ - 'keyring=/etc/ceph/ceph.client.admin.keyring' % (rbd_pool, - rbd_img) - rbd_map_need_to_write = True - print_or_raise("Write rbdmap.") - with open("/etc/ceph/rbdmap", "a+") as fp: - for line in fp: - if line == rbd_map + "\n": - rbd_map_need_to_write = False - if rbd_map_need_to_write is True: - fp.write(rbd_map + "\n") - execute("chmod 777 /etc/ceph/rbdmap") - execute("systemctl enable rbdmap") - execute("systemctl start rbdmap") - execute("mkfs.%s /dev/rbd/%s/%s" % (fs_type, rbd_pool, rbd_img)) diff --git a/backend/tecs/storage_auto_config/common/utils.py b/backend/tecs/storage_auto_config/common/utils.py deleted file mode 100755 index db810791..00000000 --- a/backend/tecs/storage_auto_config/common/utils.py +++ /dev/null @@ -1,231 +0,0 @@ -import subprocess -import random -import shlex -import signal -import time -import os -import logging - - -LOG = logging.getLogger() -formatter = "%(asctime)s %(name)s %(levelname)s %(message)s" -logging.basicConfig(format=formatter, - filename="storage_auto_config.log", - filemode="a", - level=logging.DEBUG) -stream_log = logging.StreamHandler() -stream_log.setLevel(logging.DEBUG) -stream_log.setFormatter(logging.Formatter(formatter)) -LOG.addHandler(stream_log) - - -def print_or_raise(msg, exc=None): - if not exc: - LOG.debug(msg) - else: - if isinstance(exc, Exception): - LOG.error(msg) - raise exc - elif issubclass(exc, Exception): - raise exc(msg) - - -class ScriptInnerError(Exception): - def __init__(self, message=None): - super(ScriptInnerError, self).__init__(message) - - -class UnknownArgumentError(Exception): - def __init__(self, message=None): - super(UnknownArgumentError, self).__init__(message) - - -class NoRootWrapSpecified(Exception): - def __init__(self, message=None): - super(NoRootWrapSpecified, self).__init__(message) - - -class ProcessExecutionError(Exception): - def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, - description=None): - self.exit_code = exit_code - self.stderr = stderr - self.stdout = stdout - self.cmd = cmd - self.description = description - - if description is None: - description = "Unexpected error while running command." - if exit_code is None: - exit_code = '-' - message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r" - % (description, cmd, exit_code, stdout, stderr)) - super(ProcessExecutionError, self).__init__(message) - - -def execute(cmd, **kwargs): - """Helper method to shell out and execute a command through subprocess. - - Allows optional retry.s - - :param cmd: Passed to subprocess.Popen. - :type cmd: string - TODO:param process_input: Send to opened process. - :type proces_input: string - TODO:param check_exit_code: Single bool, int, or list of allowed exit - codes. Defaults to [0]. Raise - :class:`ProcessExecutionError` unless - program exits with one of these code. - :type check_exit_code: boolean, int, or [int] - :param delay_on_retry: True | False. Defaults to True. If set to True, - wait a short amount of time before retrying. - :type delay_on_retry: boolean - :param attempts: How many times to retry cmd. - :type attempts: int - TODO:param run_as_root: True | False. Defaults to False. If set to True, - the command is prefixed by the command specified - in the root_helper kwarg. 
- :type run_as_root: boolean - :param root_helper: command to prefix to commands called with - run_as_root=True - :type root_helper: string - TODO:param shell: whether or not there should be a shell used to - execute this command. Defaults to false. - :type shell: boolean - :param loglevel: log level for execute commands. - :type loglevel: int. (Should be logging.DEBUG or logging.INFO) - :returns: (stdout, stderr) from process execution - :raises: :class:`UnknownArgumentError` on - receiving unknown arguments - :raises: :class:`ProcessExecutionError` - """ - def _subprocess_setup(): - # Python installs a SIGPIPE handler by default. - # This is usually not what non-Python subprocesses expect. - signal.signal(signal.SIGPIPE, signal.SIG_DFL) - - # stdin - process_input = kwargs.pop('process_input', None) - check_exit_code = kwargs.pop('check_exit_code', [0]) - ignore_exit_code = False - delay_on_retry = kwargs.pop('delay_on_retry', True) - attempts = kwargs.pop('attempts', 1) - run_as_root = kwargs.pop('run_as_root', False) - root_helper = kwargs.pop('root_helper', '') - shell = kwargs.pop('shell', True) - silent = kwargs.pop('silent', False) - # loglevel = kwargs.pop('loglevel', logging.DEBUG) - - if isinstance(check_exit_code, bool): - ignore_exit_code = not check_exit_code - check_exit_code = [0] - elif isinstance(check_exit_code, int): - check_exit_code = [check_exit_code] - - if kwargs: - raise UnknownArgumentError( - 'Got unknown keyword args to utils.execute: %r' % kwargs) - - if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0: - if not root_helper: - raise NoRootWrapSpecified( - message=('Command requested root, but did not specify a root ' - 'helper.')) - cmd = shlex.split(root_helper) + list(cmd) - - while attempts > 0: - attempts -= 1 - try: - if not silent: - print_or_raise('Running cmd (subprocess): %s' % cmd) - - # windows - if os.name == 'nt': - preexec_fn = None - close_fds = False - else: - preexec_fn = _subprocess_setup - close_fds = True - - obj = subprocess.Popen(cmd, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - close_fds=close_fds, - preexec_fn=preexec_fn, - shell=shell) - - if process_input is not None: - result = obj.communicate(process_input) - else: - result = obj.communicate() - obj.stdin.close() - _returncode = obj.returncode - if not silent: - print_or_raise('Result was %s' % _returncode) - if not ignore_exit_code and _returncode not in check_exit_code: - (stdout, stderr) = result - raise ProcessExecutionError(exit_code=_returncode, - stdout=stdout, - stderr=stderr, - cmd=cmd) - # cmd=sanitized_cmd) - return result - except ProcessExecutionError: - if not attempts: - raise - else: - if not silent: - print_or_raise('%r failed. Retrying.' 
% cmd) - if delay_on_retry: - time.sleep(random.randint(20, 200) / 100.0) - finally: - time.sleep(0) - - -def get_available_data_ip(media_ips): - unavailable_ip = [] - for media_ip in media_ips: - try: - execute("ping -c 1 -W 2 %s" % media_ip) - except ProcessExecutionError: - unavailable_ip.append(media_ip) - continue - return list(set(media_ips) - set(unavailable_ip)), unavailable_ip - - -def clear_host_iscsi_resource(): - out, err = execute("iscsiadm -m node", check_exit_code=[0, 21]) - if not out: - return - - sd_ips_list = map(lambda x: x.split(":3260")[0], out.split("\n")[:-1]) - if not sd_ips_list: - return - - valid_ips, invalid_ips = get_available_data_ip(sd_ips_list) - clear_resource = "" - for ip in invalid_ips: - logout_session = "iscsiadm -m node -p %s -u;" % ip - del_node = "iscsiadm -m node -p %s -o delete;" % ip - # manual_startup = "iscsiadm -m node -p %s -o update -n node.startup " - # "-v manual;" % ip - clear_resource += (logout_session + del_node) - execute(clear_resource, check_exit_code=[0, 21], silent=True) - # _execute("multipath -F") - - -def config_computer(): - # remove exist iscsi resource - clear_host_iscsi_resource() - config_multipath() - - -def config_multipath(): - if os.path.exists("/etc/multipath.conf"): - execute("echo y|mv /etc/multipath.conf /etc/multipath.conf.bak", - check_exit_code=[0, 1]) - - execute("cp -p base/multipath.conf /etc/") - execute("systemctl enable multipathd.service;" - "systemctl restart multipathd.service") diff --git a/backend/tecs/storage_auto_config/storage_auto_config.py b/backend/tecs/storage_auto_config/storage_auto_config.py deleted file mode 100755 index afe45281..00000000 --- a/backend/tecs/storage_auto_config/storage_auto_config.py +++ /dev/null @@ -1,168 +0,0 @@ -############################################################################### -# Author: CG -# Description: -# 1.The script should be copied to the host, before running. -# 2.The script is not thread safe. -# 3.Example for script call: -# [config share disk]: -# python storage_auto_config share_disk , -# we use host_pxe_mac to generate host IQN by md5 and write it to -# '/etc/iscsi/initiatorname.iscsi' -# [config cinder]: python storage_auto_config cinder_conf 10.43.177.129, -# the second parameter for cinder_config is cinder . -# If the backend is CEPH,you should call the following command: -# python storage_auto_config glance_rbd_conf at glance node & -# python storage_auto_config nova_rbd_conf at nova node. -# [config multipath]:python storage_auto_config check_multipath. -# 4.Before run script,the cinder.json and control.json file -# must be must be config. 
-############################################################################### -import sys -import uuid -import traceback -from common.utils import * -from common.cinder_conf import BaseConfig, CEPHBackendConfig -from common.share_disk import BaseShareDisk - -try: - import simplejson as json -except ImportError: - import json - - -def _set_config_file(file, section, key, value): - set_config = BaseConfig.SET_CONFIG.format( - config_file=file, - section=section, - key=key, - value=value) - execute(set_config) - - -def config_share_disk(config, host_name): - # deploy share_disk - for item in config: - BaseShareDisk.single_instance().deploy_share_disk(item, host_name) - - -def config_cinder(config, cinder_host_ip=""): - # config xml and cinder.conf - for config in config['disk_array']: - # load disk array global config - backends = config['backend'] - for item in backends.items(): - BaseConfig.single_instance().config_backend( - item, - management_ips=config.get('management_ips', []), - data_ips=config.get('data_ips', []), - user_name=config.get('user_name', []), - user_pwd=config.get('user_pwd', []), - cinder_host_ip=cinder_host_ip) - - # config multipath - config_computer() - - # enable config - execute("systemctl restart openstack-cinder-volume.service") - - -def config_nova_with_rbd(config): - # config xml and cinder.conf - for config in config['disk_array']: - # load disk array global config - backends = config['backend'] - for key, value in backends.items(): - if value.get('volume_driver') == 'CEPH': - uuid_instance = uuid.uuid3(uuid.NAMESPACE_DNS, "zte.com.cn") - uuid_str = uuid_instance.urn.split(":")[2] - _set_config_file(CEPHBackendConfig.NOVA_CONF_FILE, - 'libvirt', - 'images_type', - 'rbd') - _set_config_file(CEPHBackendConfig.NOVA_CONF_FILE, - 'libvirt', - 'rbd_secret_uuid', - uuid_str) - return - - # enable config - execute("systemctl restart openstack-nova-compute.service") - - -def config_glance_with_rbd(config): - # config xml and cinder.conf - for config in config['disk_array']: - # load disk array global config - backends = config['backend'] - for key, value in backends.items(): - if value.get('volume_driver') == 'CEPH': - _set_config_file(CEPHBackendConfig.GLANCE_API_CONF_FILE, - 'DEFAULT', - 'show_image_direct_url', - 'True') - _set_config_file(CEPHBackendConfig.GLANCE_API_CONF_FILE, - 'glance_store', - 'default_store', - 'rbd') - return - - # enable config - execute("systemctl restart openstack-glance-api.service") - - -def _launch_script(): - def subcommand_launcher(args, valid_args_len, json_path, oper_type): - if len(args) < valid_args_len: - print_or_raise("Too few parameter is given,please check.", - ScriptInnerError) - - with open(json_path, "r") as fp_json: - params = json.load(fp_json) - - print_or_raise("-----Begin config %s, params is %s.-----" % - (oper_type, params)) - return params - - oper_type = sys.argv[1] if len(sys.argv) > 1 else "" - try: - if oper_type == "share_disk": - share_disk_config = \ - subcommand_launcher(sys.argv, 3, "base/control.json", - oper_type) - config_share_disk(share_disk_config, sys.argv[2]) - elif oper_type == "cinder_conf": - cinder_backend_config = subcommand_launcher(sys.argv, 3, - "base/cinder.json", - oper_type) - config_cinder(cinder_backend_config, sys.argv[2]) - elif oper_type == "nova_rbd_conf": - nova_rbd_config = subcommand_launcher(sys.argv, 1, - "base/cinder.json", - oper_type) - config_nova_with_rbd(nova_rbd_config) - elif oper_type == "glance_rbd_conf": - glance_rbd_config = subcommand_launcher(sys.argv, 1, - 
"base/cinder.json", - oper_type) - config_glance_with_rbd(glance_rbd_config) - elif oper_type == "check_multipath": - print_or_raise("-----Begin config %s.-----") - config_computer() - elif oper_type == "debug": - pass - else: - print_or_raise("Script operation is not given,such as:share_disk," - "cinder_conf,nova_rbd_conf,glance_rbd_conf," - "check_multipath.", ScriptInnerError) - except Exception as e: - print_or_raise("----------Operation %s is Failed.----------\n" - "Exception call chain as follow,%s" % - (oper_type, traceback.format_exc())) - raise e - else: - print_or_raise("----------Operation %s is done!----------" % - oper_type) - - -if __name__ == "__main__": - _launch_script() \ No newline at end of file diff --git a/backend/tecs/tecs.conf b/backend/tecs/tecs.conf deleted file mode 100755 index 681012d8..00000000 --- a/backend/tecs/tecs.conf +++ /dev/null @@ -1,1447 +0,0 @@ -[general] - -# Cluster ID for daisy -CLUSTER_ID= - -# Path to a public key to install on servers. If a usable key has not -# been installed on the remote servers, the user is prompted for a -# password and this key is installed so the password will not be -# required again. -CONFIG_SSH_KEY=/root/.ssh/id_rsa.pub - -# Default password to be used ssh login operation system -CONFIG_OS_PASSWORD=ossdbg1 - -# Default password to be used everywhere (overridden by passwords set -# for individual services or users). -CONFIG_DEFAULT_PASSWORD= - -# Specify 'y' to install MariaDB. ['y', 'n'] -CONFIG_MARIADB_INSTALL=y - -# Specify 'y' to install OpenStack Image Service (glance). ['y', 'n'] -CONFIG_GLANCE_INSTALL=y - -# Specify 'y' to install OpenStack Block Storage (cinder). ['y', 'n'] -CONFIG_CINDER_INSTALL=y - -# Specify 'y' to install OpenStack Shared File System (manila). ['y', -# 'n'] -CONFIG_MANILA_INSTALL=n - -# Specify 'y' to install OpenStack Compute (nova). ['y', 'n'] -CONFIG_NOVA_INSTALL=y - -# Specify 'y' to install OpenStack Networking (neutron); otherwise, -# Compute Networking (nova) will be used. ['y', 'n'] -CONFIG_NEUTRON_INSTALL=y - -# Specify 'y' to install OpenStack Dashboard (horizon). ['y', 'n'] -CONFIG_HORIZON_INSTALL=y - -# Specify 'y' to install OpenStack Object Storage (swift). ['y', 'n'] -CONFIG_SWIFT_INSTALL=n - -# Specify 'y' to install OpenStack Metering (ceilometer). ['y', 'n'] -CONFIG_CEILOMETER_INSTALL=y - -# Specify 'y' to install OpenStack Orchestration (heat). ['y', 'n'] -CONFIG_HEAT_INSTALL=y - -# Specify 'y' to install OpenStack Data Processing (sahara). ['y', -# 'n'] -CONFIG_SAHARA_INSTALL=n - -# Specify 'y' to install OpenStack Database (trove) ['y', 'n'] -CONFIG_TROVE_INSTALL=n - -# Specify 'y' to install OpenStack Bare Metal Provisioning (ironic). -# ['y', 'n'] -CONFIG_IRONIC_INSTALL=n - -# Set to 'y' if you would like Packstack to install ha -CONFIG_HA_INSTALL=n - -# Set to 'y' if you would like Packstack to install LB -CONFIG_LB_INSTALL=n - -#IP address of the servers on which to config HA,including HA master host -CONFIG_HA_HOST= - -# IP address of the servers on which to install ha software -CONFIG_HA_HOSTS= - -#Float IP of LB, only one LB system is support now. -CONFIG_LB_HOST= - -# IP address of LB front-end servers on which to install haproxy -CONFIG_LB_FRONTEND_HOSTS= - -# IP address of LB front-end servers on which to install LB services -CONFIG_LB_BACKEND_HOSTS= - -# Specify 'y' to install the OpenStack Client packages (command-line -# tools). An admin "rc" file will also be installed. ['y', 'n'] -CONFIG_CLIENT_INSTALL=y - -# Comma-separated list of NTP servers. 
Leave plain if Packstack -# should not install ntpd on instances. -# Please give float ip if NTP server is HA node. -CONFIG_NTP_SERVERS= - -# Specify 'y' to install Nagios to monitor OpenStack hosts. Nagios -# provides additional tools for monitoring the OpenStack environment. -# ['y', 'n'] -CONFIG_NAGIOS_INSTALL=n - -# Comma-separated list of servers to be excluded from the -# installation. This is helpful if you are running Packstack a second -# time with the same answer file and do not want Packstack to -# overwrite these server's configurations. Leave empty if you do not -# need to exclude any servers. -EXCLUDE_SERVERS= - -# Specify 'y' if you want to run OpenStack services in debug mode; -# otherwise, specify 'n'. ['y', 'n'] -CONFIG_DEBUG_MODE=n - -# IP address of the server on which to install OpenStack services -# specific to the controller role (for example, API servers or -# dashboard). -CONFIG_CONTROLLER_HOST= - -# List of IP addresses of the servers on which to install the Compute -# service. -CONFIG_COMPUTE_HOSTS= - -# public IP to form public url of services -CONFIG_PUBLIC_IP= - -# admin IP to form admin url of services -CONFIG_ADMIN_IP= - -# internal IP to form internal url of services -CONFIG_INTERNAL_IP= - -# List of IP addresses of the servers on which to install the Api cell -# and child cell service. -CONFIG_NOVA_CELLS_HOST= -CONFIG_NOVA_CELLS_HOSTS= -CONFIG_CHILD_CELL_DICT= - -# Specify 'y' if you want to use VMware vCenter as hypervisor and -# storage; otherwise, specify 'n'. ['y', 'n'] -CONFIG_VMWARE_BACKEND=n - -# Specify 'y' if you want to use unsupported parameters. This should -# be used only if you know what you are doing. Issues caused by using -# unsupported options will not be fixed before the next major release. -# ['y', 'n'] -CONFIG_UNSUPPORTED=y - -# IP address of the VMware vCenter server. -CONFIG_VCENTER_HOST= - -# User name for VMware vCenter server authentication. -CONFIG_VCENTER_USER= - -# Password for VMware vCenter server authentication. -CONFIG_VCENTER_PASSWORD= - -# Name of the VMware vCenter cluster. -CONFIG_VCENTER_CLUSTER_NAME= - -# (Unsupported!) IP address of the server on which to install -# OpenStack services specific to storage servers such as Image or -# Block Storage services. -CONFIG_STORAGE_HOST= - -# (Unsupported!) IP address of the server on which to install -# OpenStack services specific to OpenStack Data Processing (sahara). -CONFIG_SAHARA_HOST= - -# Specify 'y' to enable the EPEL repository (Extra Packages for -# Enterprise Linux). ['y', 'n'] -CONFIG_USE_EPEL=n - -# Comma-separated list of URLs for any additional yum repositories, -# to use for installation. -#for example:http://127.0.0.1/tecs_install/ -CONFIG_REPO= - -# To subscribe each server with Red Hat Subscription Manager, include -# this with CONFIG_RH_PW. -CONFIG_RH_USER= - -# To subscribe each server to receive updates from a Satellite -# server, provide the URL of the Satellite server. You must also -# provide a user name (CONFIG_SATELLITE_USERNAME) and password -# (CONFIG_SATELLITE_PASSWORD) or an access key (CONFIG_SATELLITE_AKEY) -# for authentication. -CONFIG_SATELLITE_URL= - -# To subscribe each server with Red Hat Subscription Manager, include -# this with CONFIG_RH_USER. -CONFIG_RH_PW= - -# Specify 'y' to enable RHEL optional repositories. ['y', 'n'] -CONFIG_RH_OPTIONAL=n - -# HTTP proxy to use with Red Hat Subscription Manager. -CONFIG_RH_PROXY= - -# Port to use for Red Hat Subscription Manager's HTTP proxy. 
-CONFIG_RH_PROXY_PORT= - -# User name to use for Red Hat Subscription Manager's HTTP proxy. -CONFIG_RH_PROXY_USER= - -# Password to use for Red Hat Subscription Manager's HTTP proxy. -CONFIG_RH_PROXY_PW= - -# User name to authenticate with the RHN Satellite server; if you -# intend to use an access key for Satellite authentication, leave this -# blank. -CONFIG_SATELLITE_USER= - -# Password to authenticate with the RHN Satellite server; if you -# intend to use an access key for Satellite authentication, leave this -# blank. -CONFIG_SATELLITE_PW= - -# Access key for the Satellite server; if you intend to use a user -# name and password for Satellite authentication, leave this blank. -CONFIG_SATELLITE_AKEY= - -# Certificate path or URL of the certificate authority to verify that -# the connection with the Satellite server is secure. If you are not -# using Satellite in your deployment, leave this blank. -CONFIG_SATELLITE_CACERT= - -# Profile name that should be used as an identifier for the system in -# RHN Satellite (if required). -CONFIG_SATELLITE_PROFILE= - -# Comma-separated list of flags passed to the rhnreg_ks command. -# Valid flags are: novirtinfo, norhnsd, nopackages ['novirtinfo', -# 'norhnsd', 'nopackages'] -CONFIG_SATELLITE_FLAGS= - -# HTTP proxy to use when connecting to the RHN Satellite server (if -# required). -CONFIG_SATELLITE_PROXY= - -# User name to authenticate with the Satellite-server HTTP proxy. -CONFIG_SATELLITE_PROXY_USER= - -# User password to authenticate with the Satellite-server HTTP proxy. -CONFIG_SATELLITE_PROXY_PW= - -# Service to be used as the AMQP broker. Allowed values are: qpid, -# rabbitmq ['qpid', 'rabbitmq'] -CONFIG_AMQP_BACKEND=rabbitmq - -# support LB, HA or None -CONFIG_AMQP_INSTALL_MODE=None - -# IF CONFIG_AMQP_INSTALL_MODE is LB, please set cluster master Node IP -CONFIG_AMQP_CLUSTER_MASTER_NODE_IP= - -# IF CONFIG_AMQP_INSTALL_MODE is LB, please set cluster master Node Name -CONFIG_AMQP_CLUSTER_MASTER_NODE_HOSTNAME= - -# float IP address of the server to use the AMQP service. -CONFIG_AMQP_HOST= - -# IP address of the server on which to install the AMQP service. -CONFIG_AMQP_HOSTS= - -# dict of install amqp.such -# as,{'10.43.179.1':'10.43.179.2,10.43.179.3'} -CONFIG_AMQP_DICT= - -# Specify 'y' to enable SSL for the AMQP service. ['y', 'n'] -CONFIG_AMQP_ENABLE_SSL=n - -# Specify 'y' to enable authentication for the AMQP service. ['y', -# 'n'] -CONFIG_AMQP_ENABLE_AUTH=n - -# Password for the NSS certificate database of the AMQP service. -CONFIG_AMQP_NSS_CERTDB_PW=amqp - -# Port on which the AMQP service listens for SSL connections. -CONFIG_AMQP_SSL_PORT=5671 - -# File name of the CAcertificate that the AMQP service will use for -# verification. -CONFIG_AMQP_SSL_CACERT_FILE=/etc/pki/tls/certs/amqp_selfcert.pem - -# File name of the certificate that the AMQP service will use for -# verification. -CONFIG_AMQP_SSL_CERT_FILE=/etc/pki/tls/certs/amqp_selfcert.pem - -# File name of the private key that the AMQP service will use for -# verification. -CONFIG_AMQP_SSL_KEY_FILE=/etc/pki/tls/private/amqp_selfkey.pem - -# Specify 'y' to automatically generate a self-signed SSL certificate -# and key. ['y', 'n'] -CONFIG_AMQP_SSL_SELF_SIGNED=y - -# User for amqp authentication -CONFIG_AMQP_AUTH_USER=guest - -# Password for user authentication -CONFIG_AMQP_AUTH_PASSWORD=guest - -# IP address of the server on which to use MariaDB. 
If a MariaDB -# installation was not specified in CONFIG_MARIADB_INSTALL, specify -# the IP address of an existing database server (a MariaDB cluster can -# also be specified). -CONFIG_MARIADB_HOST= - -# The IP address of the servers on which to install Mariadb -CONFIG_MARIADB_HOSTS= - -# IP address of the servers on which to assign Mariadb.such -# as{'10.43.179.1':'10.43.179.2,10.43.179.3'} -CONFIG_MARIADB_DICT= - -# User name for the MariaDB administrative user. -CONFIG_MARIADB_USER=root - -# Password for the MariaDB administrative user. -CONFIG_MARIADB_PW=root - -# support LB, HA or None -CONFIG_KEYSTONE_INSTALL_MODE=None - -# The float IP address of the server on which to install Keystone -CONFIG_KEYSTONE_HOST= - -# The IP address of the server on which to install Keystone -CONFIG_KEYSTONE_HOSTS= - -# Password to use for the Identity service (keystone) to access the -# database. -CONFIG_KEYSTONE_DB_PW=keystone - -# Default region name to use when creating tenants in the Identity -# service. -CONFIG_KEYSTONE_REGION=RegionOne - -# Token to use for the Identity service API. -CONFIG_KEYSTONE_ADMIN_TOKEN=e93e9abf42f84be48e0996e5bd44f096 - -# Email address for the Identity service 'admin' user. Defaults to -CONFIG_KEYSTONE_ADMIN_EMAIL=root@localhost - -# User name for the Identity service 'admin' user. Defaults to -# 'admin'. -CONFIG_KEYSTONE_ADMIN_USERNAME=admin - -# Password to use for the Identity service 'admin' user. -CONFIG_KEYSTONE_ADMIN_PW=keystone - -# Password to use for the Identity service 'demo' user. -CONFIG_KEYSTONE_DEMO_PW=keystone - -# Identity service API version string. ['v2.0', 'v3'] -CONFIG_KEYSTONE_API_VERSION=v2.0 - -# Identity service token format (UUID or PKI). The recommended format -# for new deployments is UUID. ['UUID', 'PKI'] -CONFIG_KEYSTONE_TOKEN_FORMAT=UUID - -# Name of service to use to run the Identity service (keystone or -# httpd). ['keystone', 'httpd'] -CONFIG_KEYSTONE_SERVICE_NAME=keystone - -# Type of Identity service backend (sql or ldap). ['sql', 'ldap'] -CONFIG_KEYSTONE_IDENTITY_BACKEND=sql - -# URL for the Identity service LDAP backend. -CONFIG_KEYSTONE_LDAP_URL=ldap://127.0.0.1 - -# User DN for the Identity service LDAP backend. Used to bind to the -# LDAP server if the LDAP server does not allow anonymous -# authentication. -CONFIG_KEYSTONE_LDAP_USER_DN= - -# User DN password for the Identity service LDAP backend. -CONFIG_KEYSTONE_LDAP_USER_PASSWORD= - -# Base suffix for the Identity service LDAP backend. -CONFIG_KEYSTONE_LDAP_SUFFIX= - -# Query scope for the Identity service LDAP backend (base, one, sub). -# ['base', 'one', 'sub'] -CONFIG_KEYSTONE_LDAP_QUERY_SCOPE=one - -# Query page size for the Identity service LDAP backend. -CONFIG_KEYSTONE_LDAP_PAGE_SIZE=-1 - -# User subtree for the Identity service LDAP backend. -CONFIG_KEYSTONE_LDAP_USER_SUBTREE= - -# User query filter for the Identity service LDAP backend. -CONFIG_KEYSTONE_LDAP_USER_FILTER= - -# User object class for the Identity service LDAP backend. -CONFIG_KEYSTONE_LDAP_USER_OBJECTCLASS= - -# User ID attribute for the Identity service LDAP backend. -CONFIG_KEYSTONE_LDAP_USER_ID_ATTRIBUTE= - -# User name attribute for the Identity service LDAP backend. -CONFIG_KEYSTONE_LDAP_USER_NAME_ATTRIBUTE= - -# User email address attribute for the Identity service LDAP backend. -CONFIG_KEYSTONE_LDAP_USER_MAIL_ATTRIBUTE= - -# User-enabled attribute for the Identity service LDAP backend. 
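The Keystone block above fixes the API version (v2.0 here) but not the ports. A small sketch of how the service URLs would be assembled, assuming the conventional Keystone ports (5000 public, 35357 admin), which this answer file does not itself state.

# Minimal sketch: build Keystone endpoint URLs from a host and API version.
def keystone_urls(host, api_version='v2.0'):
    return {
        'public': 'http://%s:5000/%s' % (host, api_version),
        'admin': 'http://%s:35357/%s' % (host, api_version),
    }

print(keystone_urls('192.160.0.225'))   # illustrative address only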
-CONFIG_KEYSTONE_LDAP_USER_ENABLED_ATTRIBUTE= - -# Bit mask applied to user-enabled attribute for the Identity service -# LDAP backend. -CONFIG_KEYSTONE_LDAP_USER_ENABLED_MASK=-1 - -# Value of enabled attribute which indicates user is enabled for the -# Identity service LDAP backend. -CONFIG_KEYSTONE_LDAP_USER_ENABLED_DEFAULT=TRUE - -# Specify 'y' if users are disabled (not enabled) in the Identity -# service LDAP backend. ['n', 'y'] -CONFIG_KEYSTONE_LDAP_USER_ENABLED_INVERT=n - -# Comma-separated list of attributes stripped from LDAP user entry -# upon update. -CONFIG_KEYSTONE_LDAP_USER_ATTRIBUTE_IGNORE= - -# Identity service LDAP attribute mapped to default_project_id for -# users. -CONFIG_KEYSTONE_LDAP_USER_DEFAULT_PROJECT_ID_ATTRIBUTE= - -# Specify 'y' if you want to be able to create Identity service users -# through the Identity service interface; specify 'n' if you will -# create directly in the LDAP backend. ['n', 'y'] -CONFIG_KEYSTONE_LDAP_USER_ALLOW_CREATE=n - -# Specify 'y' if you want to be able to update Identity service users -# through the Identity service interface; specify 'n' if you will -# update directly in the LDAP backend. ['n', 'y'] -CONFIG_KEYSTONE_LDAP_USER_ALLOW_UPDATE=n - -# Specify 'y' if you want to be able to delete Identity service users -# through the Identity service interface; specify 'n' if you will -# delete directly in the LDAP backend. ['n', 'y'] -CONFIG_KEYSTONE_LDAP_USER_ALLOW_DELETE=n - -# Identity service LDAP attribute mapped to password. -CONFIG_KEYSTONE_LDAP_USER_PASS_ATTRIBUTE= - -# DN of the group entry to hold enabled LDAP users when using enabled -# emulation. -CONFIG_KEYSTONE_LDAP_USER_ENABLED_EMULATION_DN= - -# List of additional LDAP attributes for mapping additional attribute -# mappings for users. The attribute-mapping format is -# :, where ldap_attr is the attribute in the -# LDAP entry and user_attr is the Identity API attribute. -CONFIG_KEYSTONE_LDAP_USER_ADDITIONAL_ATTRIBUTE_MAPPING= - -# Group subtree for the Identity service LDAP backend. -CONFIG_KEYSTONE_LDAP_GROUP_SUBTREE= - -# Group query filter for the Identity service LDAP backend. -CONFIG_KEYSTONE_LDAP_GROUP_FILTER= - -# Group object class for the Identity service LDAP backend. -CONFIG_KEYSTONE_LDAP_GROUP_OBJECTCLASS= - -# Group ID attribute for the Identity service LDAP backend. -CONFIG_KEYSTONE_LDAP_GROUP_ID_ATTRIBUTE= - -# Group name attribute for the Identity service LDAP backend. -CONFIG_KEYSTONE_LDAP_GROUP_NAME_ATTRIBUTE= - -# Group member attribute for the Identity service LDAP backend. -CONFIG_KEYSTONE_LDAP_GROUP_MEMBER_ATTRIBUTE= - -# Group description attribute for the Identity service LDAP backend. -CONFIG_KEYSTONE_LDAP_GROUP_DESC_ATTRIBUTE= - -# Comma-separated list of attributes stripped from LDAP group entry -# upon update. -CONFIG_KEYSTONE_LDAP_GROUP_ATTRIBUTE_IGNORE= - -# Specify 'y' if you want to be able to create Identity service -# groups through the Identity service interface; specify 'n' if you -# will create directly in the LDAP backend. ['n', 'y'] -CONFIG_KEYSTONE_LDAP_GROUP_ALLOW_CREATE=n - -# Specify 'y' if you want to be able to update Identity service -# groups through the Identity service interface; specify 'n' if you -# will update directly in the LDAP backend. ['n', 'y'] -CONFIG_KEYSTONE_LDAP_GROUP_ALLOW_UPDATE=n - -# Specify 'y' if you want to be able to delete Identity service -# groups through the Identity service interface; specify 'n' if you -# will delete directly in the LDAP backend. 
['n', 'y'] -CONFIG_KEYSTONE_LDAP_GROUP_ALLOW_DELETE=n - -# List of additional LDAP attributes used for mapping additional -# attribute mappings for groups. The attribute=mapping format is -# :, where ldap_attr is the attribute in the -# LDAP entry and group_attr is the Identity API attribute. -CONFIG_KEYSTONE_LDAP_GROUP_ADDITIONAL_ATTRIBUTE_MAPPING= - -# Specify 'y' if the Identity service LDAP backend should use TLS. -# ['n', 'y'] -CONFIG_KEYSTONE_LDAP_USE_TLS=n - -# CA certificate directory for Identity service LDAP backend (if TLS -# is used). -CONFIG_KEYSTONE_LDAP_TLS_CACERTDIR= - -# CA certificate file for Identity service LDAP backend (if TLS is -# used). -CONFIG_KEYSTONE_LDAP_TLS_CACERTFILE= - -# Certificate-checking strictness level for Identity service LDAP -# backend; valid options are: never, allow, demand. ['never', 'allow', -# 'demand'] -CONFIG_KEYSTONE_LDAP_TLS_REQ_CERT=demand - -# support LB, HA or None -CONFIG_GLANCE_API_INSTALL_MODE=None - -# support LB, HA or None -CONFIG_GLANCE_REGISTRY_INSTALL_MODE=None - -# The float IP address of the server on which to install Glance -CONFIG_GLANCE_HOST= - -# The IP address of the server on which to install Glance -CONFIG_GLANCE_HOSTS= - -# Password to use for the Image service (glance) to access the -# database. -CONFIG_GLANCE_DB_PW=glance - -# Password to use for the Image service to authenticate with the -# Identity service. -CONFIG_GLANCE_KS_PW=glance - -# Storage backend for the Image service (controls how the Image -# service stores disk images). Valid options are: file or swift -# (Object Storage). The Object Storage service must be enabled to use -# it as a working backend; otherwise, Packstack falls back to 'file'. -# ['file', 'swift'] -CONFIG_GLANCE_BACKEND=file - -# support LB, HA or None -CONFIG_CINDER_API_INSTALL_MODE=None - -# The float IP address of the server on which to install Cinder Api -CONFIG_CINDER_API_HOST= - -# IP address of the server on which to install Cinder Api -CONFIG_CINDER_API_HOSTS= - -# The float IP address of the server on which to install Cinder Scheduler -CONFIG_CINDER_SCHEDULER_HOST= - -# IP address of the server on which to install Cinder Scheduler -CONFIG_CINDER_SCHEDULER_HOSTS= - -# The float IP address of the server on which to install Cinder Volume -CONFIG_CINDER_VOLUME_HOST= - -# IP address of the server on which to install Cinder Volume -CONFIG_CINDER_VOLUME_HOSTS= - -# The float IP address of the server on which to install Cinder Backup -CONFIG_CINDER_BACKUP_HOST= - -# IP address of the server on which to install Cinder Backup -CONFIG_CINDER_BACKUP_HOSTS= - -# Password to use for the Block Storage service (cinder) to access -# the database. -CONFIG_CINDER_DB_PW=cinder - -# Password to use for the Block Storage service to authenticate with -# the Identity service. -CONFIG_CINDER_KS_PW=cinder - -# Storage backend to use for the Block Storage service; valid options -# are: lvm, gluster, nfs, vmdk, netapp. ['lvm', 'gluster', 'nfs', -# 'vmdk', 'netapp'] -CONFIG_CINDER_BACKEND=lvm - -# Specify 'y' to create the Block Storage volumes group. That is, -# Packstack creates a raw disk image in /var/lib/cinder, and mounts it -# using a loopback device. This should only be used for testing on a -# proof-of-concept installation of the Block Storage service (a file- -# backed volume group is not suitable for production usage). ['y', -# 'n'] -CONFIG_CINDER_VOLUMES_CREATE=y - -# Size of Block Storage volumes group. Actual volume size will be -# extended with 3% more space for VG metadata. 
Remember that the size -# of the volume group will restrict the amount of disk space that you -# can expose to Compute instances, and that the specified amount must -# be available on the device used for /var/lib/cinder. -CONFIG_CINDER_VOLUMES_SIZE=20G - -# A single or comma-separated list of Red Hat Storage (gluster) -# volume shares to mount. Example: 'ip-address:/vol-name', 'domain -# :/vol-name' -CONFIG_CINDER_GLUSTER_MOUNTS= - -# A single or comma-separated list of NFS exports to mount. Example: -# 'ip-address:/export-name' -CONFIG_CINDER_NFS_MOUNTS= - -# Administrative user account name used to access the NetApp storage -# system or proxy server. -CONFIG_CINDER_NETAPP_LOGIN= - -# Password for the NetApp administrative user account specified in -# the CONFIG_CINDER_NETAPP_LOGIN parameter. -CONFIG_CINDER_NETAPP_PASSWORD= - -# Hostname (or IP address) for the NetApp storage system or proxy -# server. -CONFIG_CINDER_NETAPP_HOSTNAME= - -# The TCP port to use for communication with the storage system or -# proxy. If not specified, Data ONTAP drivers will use 80 for HTTP and -# 443 for HTTPS; E-Series will use 8080 for HTTP and 8443 for HTTPS. -# Defaults to 80. -CONFIG_CINDER_NETAPP_SERVER_PORT=80 - -# Storage family type used on the NetApp storage system; valid -# options are ontap_7mode for using Data ONTAP operating in 7-Mode, -# ontap_cluster for using clustered Data ONTAP, or E-Series for NetApp -# E-Series. Defaults to ontap_cluster. ['ontap_7mode', -# 'ontap_cluster', 'eseries'] -CONFIG_CINDER_NETAPP_STORAGE_FAMILY=ontap_cluster - -# The transport protocol used when communicating with the NetApp -# storage system or proxy server. Valid values are http or https. -# Defaults to 'http'. ['http', 'https'] -CONFIG_CINDER_NETAPP_TRANSPORT_TYPE=http - -# Storage protocol to be used on the data path with the NetApp -# storage system; valid options are iscsi, fc, nfs. Defaults to nfs. -# ['iscsi', 'fc', 'nfs'] -CONFIG_CINDER_NETAPP_STORAGE_PROTOCOL=nfs - -# Quantity to be multiplied by the requested volume size to ensure -# enough space is available on the virtual storage server (Vserver) to -# fulfill the volume creation request. Defaults to 1.0. -CONFIG_CINDER_NETAPP_SIZE_MULTIPLIER=1.0 - -# Time period (in minutes) that is allowed to elapse after the image -# is last accessed, before it is deleted from the NFS image cache. -# When a cache-cleaning cycle begins, images in the cache that have -# not been accessed in the last M minutes, where M is the value of -# this parameter, are deleted from the cache to create free space on -# the NFS share. Defaults to 720. -CONFIG_CINDER_NETAPP_EXPIRY_THRES_MINUTES=720 - -# If the percentage of available space for an NFS share has dropped -# below the value specified by this parameter, the NFS image cache is -# cleaned. Defaults to 20. -CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_START=20 - -# When the percentage of available space on an NFS share has reached -# the percentage specified by this parameter, the driver stops -# clearing files from the NFS image cache that have not been accessed -# in the last M minutes, where M is the value of the -# CONFIG_CINDER_NETAPP_EXPIRY_THRES_MINUTES parameter. Defaults to 60. -CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_STOP=60 - -# Single or comma-separated list of NetApp NFS shares for Block -# Storage to use. Format: ip-address:/export-name. Defaults to ''. -CONFIG_CINDER_NETAPP_NFS_SHARES= - -# File with the list of available NFS shares. Defaults to -# '/etc/cinder/shares.conf'. 
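The three NetApp cache options above describe a simple rule: cleaning starts when free space falls below the start threshold, removes images not accessed within the expiry window, and stops once free space reaches the stop threshold. A standalone sketch of that rule; the function names are illustrative, not the driver's own code.

# Minimal sketch of the NFS image-cache cleaning rule described above.
def should_start_cleaning(free_percent, start_threshold=20):
    return free_percent < start_threshold

def should_stop_cleaning(free_percent, stop_threshold=60):
    return free_percent >= stop_threshold

def is_expired(minutes_since_last_access, expiry_minutes=720):
    return minutes_since_last_access > expiry_minutes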
-CONFIG_CINDER_NETAPP_NFS_SHARES_CONFIG=/etc/cinder/shares.conf - -# This parameter is only utilized when the storage protocol is -# configured to use iSCSI or FC. This parameter is used to restrict -# provisioning to the specified controller volumes. Specify the value -# of this parameter to be a comma separated list of NetApp controller -# volume names to be used for provisioning. Defaults to ''. -CONFIG_CINDER_NETAPP_VOLUME_LIST= - -# The vFiler unit on which provisioning of block storage volumes will -# be done. This parameter is only used by the driver when connecting -# to an instance with a storage family of Data ONTAP operating in -# 7-Mode Only use this parameter when utilizing the MultiStore feature -# on the NetApp storage system. Defaults to ''. -CONFIG_CINDER_NETAPP_VFILER= - -# The name of the config.conf stanza for a Data ONTAP (7-mode) HA -# partner. This option is only used by the driver when connecting to -# an instance with a storage family of Data ONTAP operating in 7-Mode, -# and it is required if the storage protocol selected is FC. Defaults -# to ''. -CONFIG_CINDER_NETAPP_PARTNER_BACKEND_NAME= - -# This option specifies the virtual storage server (Vserver) name on -# the storage cluster on which provisioning of block storage volumes -# should occur. Defaults to ''. -CONFIG_CINDER_NETAPP_VSERVER= - -# Restricts provisioning to the specified controllers. Value must be -# a comma-separated list of controller hostnames or IP addresses to be -# used for provisioning. This option is only utilized when the storage -# family is configured to use E-Series. Defaults to ''. -CONFIG_CINDER_NETAPP_CONTROLLER_IPS= - -# Password for the NetApp E-Series storage array. Defaults to ''. -CONFIG_CINDER_NETAPP_SA_PASSWORD= - -# This option is used to define how the controllers in the E-Series -# storage array will work with the particular operating system on the -# hosts that are connected to it. Defaults to 'linux_dm_mp' -CONFIG_CINDER_NETAPP_ESERIES_HOST_TYPE=linux_dm_mp - -# Path to the NetApp E-Series proxy application on a proxy server. -# The value is combined with the value of the -# CONFIG_CINDER_NETAPP_TRANSPORT_TYPE, CONFIG_CINDER_NETAPP_HOSTNAME, -# and CONFIG_CINDER_NETAPP_HOSTNAME options to create the URL used by -# the driver to connect to the proxy application. Defaults to -# '/devmgr/v2'. -CONFIG_CINDER_NETAPP_WEBSERVICE_PATH=/devmgr/v2 - -# Restricts provisioning to the specified storage pools. Only dynamic -# disk pools are currently supported. The value must be a comma- -# separated list of disk pool names to be used for provisioning. -# Defaults to ''. -CONFIG_CINDER_NETAPP_STORAGE_POOLS= - -# Password to use for the OpenStack File Share service (manila) to -# access the database. -CONFIG_MANILA_DB_PW=manila - -# Password to use for the OpenStack File Share service (manila) to -# authenticate with the Identity service. -CONFIG_MANILA_KS_PW=manila - -# Backend for the OpenStack File Share service (manila); valid -# options are: generic or netapp. ['generic', 'netapp'] -CONFIG_MANILA_BACKEND=generic - -# Denotes whether the driver should handle the responsibility of -# managing share servers. This must be set to false if the driver is -# to operate without managing share servers. Defaults to 'false' -# ['true', 'false'] -CONFIG_MANILA_NETAPP_DRV_HANDLES_SHARE_SERVERS=false - -# The transport protocol used when communicating with the storage -# system or proxy server. Valid values are 'http' and 'https'. -# Defaults to 'https'. 
['https', 'http'] -CONFIG_MANILA_NETAPP_TRANSPORT_TYPE=https - -# Administrative user account name used to access the NetApp storage -# system. Defaults to ''. -CONFIG_MANILA_NETAPP_LOGIN=admin - -# Password for the NetApp administrative user account specified in -# the CONFIG_MANILA_NETAPP_LOGIN parameter. Defaults to ''. -CONFIG_MANILA_NETAPP_PASSWORD= - -# Hostname (or IP address) for the NetApp storage system or proxy -# server. Defaults to ''. -CONFIG_MANILA_NETAPP_SERVER_HOSTNAME= - -# The storage family type used on the storage system; valid values -# are ontap_cluster for clustered Data ONTAP. Defaults to -# 'ontap_cluster'. ['ontap_cluster'] -CONFIG_MANILA_NETAPP_STORAGE_FAMILY=ontap_cluster - -# The TCP port to use for communication with the storage system or -# proxy server. If not specified, Data ONTAP drivers will use 80 for -# HTTP and 443 for HTTPS. Defaults to '443'. -CONFIG_MANILA_NETAPP_SERVER_PORT=443 - -# Pattern for searching available aggregates for NetApp provisioning. -# Defaults to '(.*)'. -CONFIG_MANILA_NETAPP_AGGREGATE_NAME_SEARCH_PATTERN=(.*) - -# Name of aggregate on which to create the NetApp root volume. This -# option only applies when the option -# CONFIG_MANILA_NETAPP_DRV_HANDLES_SHARE_SERVERS is set to True. -CONFIG_MANILA_NETAPP_ROOT_VOLUME_AGGREGATE= - -# NetApp root volume name. Defaults to 'root'. -CONFIG_MANILA_NETAPP_ROOT_VOLUME_NAME=root - -# This option specifies the storage virtual machine (previously -# called a Vserver) name on the storage cluster on which provisioning -# of shared file systems should occur. This option only applies when -# the option driver_handles_share_servers is set to False. Defaults to -# ''. -CONFIG_MANILA_NETAPP_VSERVER= - -# Denotes whether the driver should handle the responsibility of -# managing share servers. This must be set to false if the driver is -# to operate without managing share servers. Defaults to 'true'. -# ['true', 'false'] -CONFIG_MANILA_GENERIC_DRV_HANDLES_SHARE_SERVERS=true - -# Volume name template for Manila service. Defaults to 'manila- -# share-%s'. -CONFIG_MANILA_GENERIC_VOLUME_NAME_TEMPLATE=manila-share-%s - -# Share mount path for Manila service. Defaults to '/shares'. -CONFIG_MANILA_GENERIC_SHARE_MOUNT_PATH=/shares - -# Location of disk image for Manila service instance. Defaults to ' -CONFIG_MANILA_SERVICE_IMAGE_LOCATION=https://www.dropbox.com/s/vi5oeh10q1qkckh/ubuntu_1204_nfs_cifs.qcow2 - -# User in Manila service instance. -CONFIG_MANILA_SERVICE_INSTANCE_USER=ubuntu - -# Password to service instance user. -CONFIG_MANILA_SERVICE_INSTANCE_PASSWORD=ubuntu - -# Type of networking that the backend will use. A more detailed -# description of each option is available in the Manila docs. Defaults -# to 'neutron'. ['neutron', 'nova-network', 'standalone'] -CONFIG_MANILA_NETWORK_TYPE=neutron - -# Gateway IPv4 address that should be used. Required. Defaults to ''. -CONFIG_MANILA_NETWORK_STANDALONE_GATEWAY= - -# Network mask that will be used. Can be either decimal like '24' or -# binary like '255.255.255.0'. Required. Defaults to ''. -CONFIG_MANILA_NETWORK_STANDALONE_NETMASK= - -# Set it if network has segmentation (VLAN, VXLAN, etc). It will be -# assigned to share-network and share drivers will be able to use this -# for network interfaces within provisioned share servers. Optional. -# Example: 1001. Defaults to ''. -CONFIG_MANILA_NETWORK_STANDALONE_SEG_ID= - -# Can be IP address, range of IP addresses or list of addresses or -# ranges. 
Contains addresses from IP network that are allowed to be -# used. If empty, then will be assumed that all host addresses from -# network can be used. Optional. Examples: 10.0.0.10 or -# 10.0.0.10-10.0.0.20 or -# 10.0.0.10-10.0.0.20,10.0.0.30-10.0.0.40,10.0.0.50. Defaults to ''. -CONFIG_MANILA_NETWORK_STANDALONE_IP_RANGE= - -# IP version of network. Optional. Defaults to '4'. ['4', '6'] -CONFIG_MANILA_NETWORK_STANDALONE_IP_VERSION=4 - -# support LB, HA or None -CONFIG_IRONIC_API_INSTALL_MODE=None - -# The float IP address of the server on which to install the Ironic -# service -CONFIG_IRONIC_HOST= - -# The IP address of the servers on which to install the Ironic -# service -CONFIG_IRONIC_HOSTS= - -# Password to use for OpenStack Bare Metal Provisioning (ironic) to -# access the database. -CONFIG_IRONIC_DB_PW=ironic - -# Password to use for OpenStack Bare Metal Provisioning to -# authenticate with the Identity service. -CONFIG_IRONIC_KS_PW=ironic - -# support LB, HA or None -CONFIG_NOVA_API_INSTALL_MODE=None - -# The float IP address of the server on which to install the Nova API -# service -CONFIG_NOVA_API_HOST= - -# The IP address of the servers on which to install the Nova API -# service -CONFIG_NOVA_API_HOSTS= - -# The float IP address of the server on which to install the Nova -# Cert service -CONFIG_NOVA_CERT_HOST= - -# The IP address of the servers on which to install the Nova Cert -# service -CONFIG_NOVA_CERT_HOSTS= - -# support LB, HA or None -CONFIG_NOVA_VNCPROXY_INSTALL_MODE=None - -# The float IP address of the server on which to install the Nova VNC -# proxy -CONFIG_NOVA_VNCPROXY_HOST= - -# The IP address of the servers on which to install the Nova VNC -# proxy -CONFIG_NOVA_VNCPROXY_HOSTS= - -# The IP address of the server on which to install the Nova Conductor -# service -CONFIG_NOVA_CONDUCTOR_HOST= - -# The IP address of the server on which to install the Nova Conductor -# service -CONFIG_NOVA_CONDUCTOR_HOSTS= - -# The IP address of the server on which to install the Nova Scheduler -# service -CONFIG_NOVA_SCHED_HOST= - -# The IP address of the servers on which to install the Nova -# Scheduler service -CONFIG_NOVA_SCHED_HOSTS= - -# Password to use for the Compute service (nova) to access the -# database. -CONFIG_NOVA_DB_PW=nova - -# Password to use for the Compute service to authenticate with the -# Identity service. -CONFIG_NOVA_KS_PW=nova - -# Overcommitment ratio for virtual to physical CPUs. Specify 1.0 to -# disable CPU overcommitment. -CONFIG_NOVA_SCHED_CPU_ALLOC_RATIO=1.0 - -# Overcommitment ratio for virtual to physical RAM. Specify 1.0 to -# disable RAM overcommitment. -CONFIG_NOVA_SCHED_RAM_ALLOC_RATIO=1.5 - -# Protocol used for instance migration. Valid options are: tcp and -# ssh. Note that by default, the Compute user is created with the -# /sbin/nologin shell so that the SSH protocol will not work. To make -# the SSH protocol work, you must configure the Compute user on -# compute hosts manually. ['tcp', 'ssh'] -CONFIG_NOVA_COMPUTE_MIGRATE_PROTOCOL=tcp - -# Manager that runs the Compute service. -CONFIG_NOVA_COMPUTE_MANAGER=nova.compute.manager.ComputeManager - -# Private interface for flat DHCP on the Compute servers. -CONFIG_NOVA_COMPUTE_PRIVIF=eth1 - -# The list of IP addresses of the server on which to install the Nova -# Network service -CONFIG_NOVA_NETWORK_HOSTS= - -# Compute Network Manager. ['^nova\.network\.manager\.\w+Manager$'] -CONFIG_NOVA_NETWORK_MANAGER=nova.network.manager.FlatDHCPManager - -# Public interface on the Compute network server. 
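The two Nova scheduler ratios above control overcommit: the capacity the scheduler is willing to hand out is the physical capacity multiplied by the ratio. A tiny sketch of that arithmetic with illustrative host sizes.

# Minimal sketch of what the allocation ratios mean for schedulable capacity.
def schedulable_capacity(physical, ratio):
    return int(physical * ratio)

vcpus = schedulable_capacity(32, 1.0)      # CONFIG_NOVA_SCHED_CPU_ALLOC_RATIO=1.0 -> 32
ram_mb = schedulable_capacity(65536, 1.5)  # CONFIG_NOVA_SCHED_RAM_ALLOC_RATIO=1.5 -> 98304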
-CONFIG_NOVA_NETWORK_PUBIF=eth0 - -# Private interface for flat DHCP on the Compute network server. -CONFIG_NOVA_NETWORK_PRIVIF=eth1 - -# IP Range for flat DHCP. ['^[\:\.\da-fA-f]+(\/\d+){0,1}$'] -CONFIG_NOVA_NETWORK_FIXEDRANGE=192.168.32.0/22 - -# IP Range for floating IP addresses. ['^[\:\.\da- -# fA-f]+(\/\d+){0,1}$'] -CONFIG_NOVA_NETWORK_FLOATRANGE=10.3.4.0/22 - -# Specify 'y' to automatically assign a floating IP to new instances. -# ['y', 'n'] -CONFIG_NOVA_NETWORK_AUTOASSIGNFLOATINGIP=n - -# First VLAN for private networks (Compute networking). -CONFIG_NOVA_NETWORK_VLAN_START=100 - -# Number of networks to support (Compute networking). -CONFIG_NOVA_NETWORK_NUMBER=1 - -# Number of addresses in each private subnet (Compute networking). -CONFIG_NOVA_NETWORK_SIZE=255 - -# support LB, HA or None -CONFIG_NEUTRON_SERVER_INSTALL_MODE=None - -# The float IP addresses of the server on which to install the -# Neutron server -CONFIG_NEUTRON_SERVER_HOST= - -# IP addresses of the servers on which to install the Neutron server -CONFIG_NEUTRON_SERVER_HOSTS= - -# Password to use for OpenStack Networking (neutron) to authenticate -# with the Identity service. -CONFIG_NEUTRON_KS_PW=neutron - -# The password to use for OpenStack Networking to access the -# database. -CONFIG_NEUTRON_DB_PW=neutron - -# A comma separated list of IP addresses on which to install Neutron -# L3 agent -CONFIG_NEUTRON_L3_HOSTS= - -# The name of the Open vSwitch bridge (or empty for linuxbridge) for -# the OpenStack Networking L3 agent to use for external traffic. -# Specify 'provider' if you intend to use a provider network to handle -# external traffic. -CONFIG_NEUTRON_L3_EXT_BRIDGE=br-ex - -# A comma separated list of IP addresses on which to install Neutron -# DHCP agent -CONFIG_NEUTRON_DHCP_HOSTS= - -# A comma separated list of IP addresses on which to install Neutron -# LBaaS agent -CONFIG_NEUTRON_LBAAS_HOSTS= - -# A comma separated list of IP addresses on which to install Neutron -# metadata agent -CONFIG_NEUTRON_METADATA_HOSTS= - -# Password for the OpenStack Networking metadata agent. -CONFIG_NEUTRON_METADATA_PW=neutron - -# Specify 'y' to install OpenStack Networking's Load-Balancing- -# as-a-Service (LBaaS). ['y', 'n'], discard -#CONFIG_LBAAS_INSTALL=n - -# Specify 'y' to install OpenStack Networking's L3 Metering agent -# ['y', 'n'] -CONFIG_NEUTRON_METERING_AGENT_INSTALL=n - -# Specify 'y' to configure OpenStack Networking's Firewall- -# as-a-Service (FWaaS). ['y', 'n'] -CONFIG_NEUTRON_FWAAS=n - -#The MAC address pattern to use. -CONFIG_NEUTRON_BASE_MAC=fa:16:3e:00:00:00 - -# Comma-separated list of network-type driver entry points to be -# loaded from the neutron.ml2.type_drivers namespace. ['local', -# 'flat', 'vlan', 'gre', 'vxlan'] -CONFIG_NEUTRON_ML2_TYPE_DRIVERS=vlan,vxlan - -# Comma-separated, ordered list of network types to allocate as -# tenant networks. The 'local' value is only useful for single-box -# testing and provides no connectivity between hosts. ['local', -# 'vlan', 'gre', 'vxlan'] -CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES=vlan,vxlan - -# Comma-separated ordered list of networking mechanism driver entry -# points to be loaded from the neutron.ml2.mechanism_drivers -# namespace. ['logger', 'test', 'linuxbridge', 'openvswitch', -# 'hyperv', 'ncs', 'arista', 'cisco_nexus', 'mlnx', 'l2population' -# 'sriovnicswitch', 'proxydriver'] -CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS=openvswitch,sriovnicswitch - -# Comma-separated list of physical_network names with which flat -# networks can be created. 
Use * to allow flat networks with arbitrary -# physical_network names. -CONFIG_NEUTRON_ML2_FLAT_NETWORKS=* - -# Comma-separated list of :: or -# specifying physical_network names usable for VLAN -# provider and tenant networks, as well as ranges of VLAN tags on each -# available for allocation to tenant networks. -CONFIG_NEUTRON_ML2_VLAN_RANGES=physnet1:2:2999,physnet2:2:2999 - -# Comma-separated list of : tuples enumerating -# ranges of GRE tunnel IDs that are available for tenant-network -# allocation. A tuple must be an array with tun_max +1 - tun_min > -# 1000000. -CONFIG_NEUTRON_ML2_TUNNEL_ID_RANGES= - -# Comma-separated list of addresses for VXLAN multicast group. If -# left empty, disables VXLAN from sending allocate broadcast traffic -# (disables multicast VXLAN mode). Should be a Multicast IP (v4 or v6) -# address. -CONFIG_NEUTRON_ML2_VXLAN_GROUP= - -# Comma-separated list of : tuples enumerating -# ranges of VXLAN VNI IDs that are available for tenant network -# allocation. Minimum value is 0 and maximum value is 16777215. -CONFIG_NEUTRON_ML2_VNI_RANGES=10:100 - -# Comma-separated list of [['192.168.0.2','192.168.0.200'],["192.168.2.2","192.168.2.200"]] -# list enumerating ranges of VXLAN local ip that available for -# neutron-openvswitch-agent or neutron-ovdk-agent. -CONFIG_NEUTRON_ML2_VTEP_IP_RANGES=[['172.43.166.2','172.43.166.20']] - -# The IP address and port of zenic northbound interface.(eg. 1.1.1.1:8181 ) -CONFIG_ZENIC_API_NODE= - -# The user name and password of zenic northbound interface. -CONFIG_ZENIC_USER_AND_PW=restconf:LkfhRDGIPyGzbWGM2uAaNQ== - -#Custom l2 json files include sriov agent and ovs agent ether configuration -#configuration just for daisy -CONFIG_NEUTRON_ML2_JSON_PATH= - -# Name of the L2 agent to be used with OpenStack Networking. -# ['linuxbridge', 'openvswitch'] -CONFIG_NEUTRON_L2_AGENT=openvswitch - -# Comma-separated list of interface mappings for the OpenStack -# Networking linuxbridge plugin. Each tuple in the list must be in the -# format :. Example: -# physnet1:eth1,physnet2:eth2,physnet3:eth3. -CONFIG_NEUTRON_LB_INTERFACE_MAPPINGS= - -# Comma-separated list of bridge mappings for the OpenStack -# Networking Open vSwitch plugin. Each tuple in the list must be in -# the format :. Example: physnet1:br- -# eth1,physnet2:br-eth2,physnet3:br-eth3 -CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS=physnet1:br-data1 - -# A comma separated list of colon-separated OVS physnet:interface -# pairs. The interface will be added to the associated physnet. -CONFIG_NEUTRON_OVS_PHYSNET_IFACES= - -# Comma-separated list of colon-separated Open vSwitch -# : pairs. The interface will be added to the -# associated bridge. -CONFIG_NEUTRON_OVS_BRIDGE_IFACES=br-data1:eth0 - -#config compute hosts for the Neutron sriov agent type -CONFIG_NEUTRON_SRIOV_AGENT_TYPE= - -#Enter a comma separated list of bridge mappings for -#the Neutron sriov plugin -CONFIG_NEUTRON_SRIOV_BRIDGE_MAPPINGS= - -#A comma separated list of colon-separated SRIOV -#physnet:interface pairs. The interface will be added to the associated physnet. -CONFIG_NEUTRON_SRIOV_PHYSNET_IFACES= - -# Interface for the Open vSwitch tunnel. Packstack overrides the IP -# address used for tunnels on this hypervisor to the IP found on the -# specified interface (for example, eth1). -CONFIG_NEUTRON_OVS_TUNNEL_IF= - -# VXLAN UDP port. -CONFIG_NEUTRON_OVS_VXLAN_UDP_PORT=4789 - -# support LB, HA or None. when select LB, TECS BIN must be run in another node -# which doesn't install any openstack service. 
-# HORIZON LB is not support now for some technical matter. -CONFIG_HORIZON_INSTALL_MODE=None - -# The float IP address of the server on which to install Horizon -CONFIG_HORIZON_HOST= - -# IP address of the servers on which to install Horizon -CONFIG_HORIZON_HOSTS= - -# Specify 'y' to set up Horizon communication over https. ['y', 'n'] -CONFIG_HORIZON_SSL=n - -# PEM-encoded certificate to be used for SSL connections on the https -# server (the certificate should not require a passphrase). To -# generate a certificate, leave blank. -CONFIG_SSL_CERT= - -# SSL keyfile corresponding to the certificate if one was specified. -CONFIG_SSL_KEY= - -# PEM-encoded CA certificates from which the certificate chain of the -# server certificate can be assembled. -CONFIG_SSL_CACHAIN= - -# Password to use for the Object Storage service to authenticate with -# the Identity service. -CONFIG_SWIFT_KS_PW=swift - -# Comma-separated list of devices to use as storage device for Object -# Storage. Each entry must take the format /path/to/dev (for example, -# specifying /dev/vdb installs /dev/vdb as the Object Storage storage -# device; Packstack does not create the filesystem, you must do this -# first). If left empty, Packstack creates a loopback device for test -# setup. -CONFIG_SWIFT_STORAGES= - -# Number of Object Storage storage zones; this number MUST be no -# larger than the number of configured storage devices. -CONFIG_SWIFT_STORAGE_ZONES=1 - -# Number of Object Storage storage replicas; this number MUST be no -# larger than the number of configured storage zones. -CONFIG_SWIFT_STORAGE_REPLICAS=1 - -# File system type for storage nodes. ['xfs', 'ext4'] -CONFIG_SWIFT_STORAGE_FSTYPE=ext4 - -# Custom seed number to use for swift_hash_path_suffix in -# /etc/swift/swift.conf. If you do not provide a value, a seed number -# is automatically generated. -CONFIG_SWIFT_HASH=4348fdf97ba34767 - -# Size of the Object Storage loopback file storage device. -CONFIG_SWIFT_STORAGE_SIZE=2G - -# support LB, HA or None -CONFIG_HEAT_API_INSTALL_MODE=None - -# support LB, HA or None -CONFIG_HEAT_API_CFN_INSTALL_MODE=None - -# The float IP address of the server on which to install Heat service -CONFIG_HEAT_HOST= - -# IP address of the servers on which to install Heat service -CONFIG_HEAT_HOSTS= -# The float IP address of the server on which to install heat-api service -CONFIG_HEAT_API_HOST= - -# IP address of the servers on which to install heat-api service -CONFIG_HEAT_API_HOSTS= - -# The float IP address of the server on which to install heat-api-cfn service -CONFIG_HEAT_API_CFN_HOST= - -# IP address of the servers on which to install heat-api-cfn service -CONFIG_HEAT_API_CFN_HOSTS= - -# The float IP address of the server on which to install heat-api-cloudwatch service -CONFIG_HEAT_API_CLOUDWATCH_HOST= - -# IP address of the servers on which to install heat-api-cloudwatch service -CONFIG_HEAT_API_CLOUDWATCH_HOSTS= - -# The float IP address of the server on which to install heat-engine service -CONFIG_HEAT_ENGINE_HOST= - -# IP address of the servers on which to install heat-engine service -CONFIG_HEAT_ENGINE_HOSTS= - -# Password used by Orchestration service user to authenticate against -# the database. -CONFIG_HEAT_DB_PW=heat - -# Encryption key to use for authentication in the Orchestration -# database (16, 24, or 32 chars). -CONFIG_HEAT_AUTH_ENC_KEY=d344d3167eb34b07 - -# Password to use for the Orchestration service to authenticate with -# the Identity service. 
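The Object Storage comments above state two hard constraints: zones may not outnumber storage devices, and replicas may not outnumber zones. A minimal validation sketch of exactly those two checks; names are illustrative.

# Minimal sketch of the CONFIG_SWIFT_* layout constraints stated above.
def check_swift_layout(devices, zones, replicas):
    # an empty device list means Packstack creates a single loopback test device
    device_count = max(len(devices), 1)
    if zones > device_count:
        raise ValueError('CONFIG_SWIFT_STORAGE_ZONES exceeds the device count')
    if replicas > zones:
        raise ValueError('CONFIG_SWIFT_STORAGE_REPLICAS exceeds the zone count')

check_swift_layout(devices=['/dev/vdb'], zones=1, replicas=1)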
-CONFIG_HEAT_KS_PW=heat - -# Specify 'y' to install the Orchestration CloudWatch API. ['y', 'n'] -CONFIG_HEAT_CLOUDWATCH_INSTALL=n - -# Specify 'y' to install the Orchestration CloudFormation API. ['y', -# 'n'] -CONFIG_HEAT_CFN_INSTALL=y - -# Name of the Identity domain for Orchestration. -CONFIG_HEAT_DOMAIN=heat - -# Name of the Identity domain administrative user for Orchestration. -CONFIG_HEAT_DOMAIN_ADMIN=heat_admin - -# Password for the Identity domain administrative user for Orchestration. -CONFIG_HEAT_DOMAIN_PASSWORD=heat - -# Specify 'y' to provision for demo usage and testing. ['y', 'n'] -CONFIG_PROVISION_DEMO=n - -# Specify 'y' to configure the OpenStack Integration Test Suite -# (tempest) for testing. The test suite requires OpenStack Networking -# to be installed. ['y', 'n'] -CONFIG_PROVISION_TEMPEST=n - -# CIDR network address for the floating IP subnet. -CONFIG_PROVISION_DEMO_FLOATRANGE=172.24.4.224/28 - -# The name to be assigned to the demo image in Glance (default -# "cirros"). -CONFIG_PROVISION_IMAGE_NAME=cirros - -# A URL or local file location for an image to download and provision -# in Glance (defaults to a URL for a recent "cirros" image). -CONFIG_PROVISION_IMAGE_URL=http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img - -# Format for the demo image (default "qcow2"). -CONFIG_PROVISION_IMAGE_FORMAT=qcow2 - -# User to use when connecting to instances booted from the demo -# image. -CONFIG_PROVISION_IMAGE_SSH_USER=cirros - -# Name of the Integration Test Suite provisioning user. If you do not -# provide a user name, Tempest is configured in a standalone mode. -CONFIG_PROVISION_TEMPEST_USER= - -# Password to use for the Integration Test Suite provisioning user. -CONFIG_PROVISION_TEMPEST_USER_PW=tempest - -# CIDR network address for the floating IP subnet. -CONFIG_PROVISION_TEMPEST_FLOATRANGE=172.24.4.224/28 - -# URI of the Integration Test Suite git repository. -CONFIG_PROVISION_TEMPEST_REPO_URI=https://github.com/openstack/tempest.git - -# Revision (branch) of the Integration Test Suite git repository. -CONFIG_PROVISION_TEMPEST_REPO_REVISION=master - -# Specify 'y' to configure the Open vSwitch external bridge for an -# all-in-one deployment (the L3 external bridge acts as the gateway -# for virtual machines). ['y', 'n'] -CONFIG_PROVISION_ALL_IN_ONE_OVS_BRIDGE=n - -# Password to use for OpenStack Data Processing (sahara) to access -# the database. -CONFIG_SAHARA_DB_PW=sahara - -# Password to use for OpenStack Data Processing to authenticate with -# the Identity service. -CONFIG_SAHARA_KS_PW=sahara - -# Secret key for signing Telemetry service (ceilometer) messages. -CONFIG_CEILOMETER_SECRET=d8c381820a444a6e - -# Password to use for Telemetry to authenticate with the Identity -# service. -CONFIG_CEILOMETER_KS_PW=ceilometer - -# Backend driver for Telemetry's group membership coordination. 
-# ['redis', 'none'] -CONFIG_CEILOMETER_COORDINATION_BACKEND=none - -# support LB, HA or None -CONFIG_CEILOMETER_API_INSTALL_MODE=None - -#float ip address of ceilometer-api -CONFIG_CEILOMETER_API_HOST= - -#IP address of the server on which to install ceilometer-api -CONFIG_CEILOMETER_API_HOSTS= - -#float ip address of ceilometer-collector -CONFIG_CEILOMETER_COLLECTOR_HOST= - -#IP address of the server on which to install ceilometer-collector -CONFIG_CEILOMETER_COLLECTOR_HOSTS= - -#float ip address of ceilometer-notification -CONFIG_CEILOMETER_NOTIFICATION_HOST= - -#IP address of the server on which to install ceilometer-notification -CONFIG_CEILOMETER_NOTIFICATION_HOSTS= - -#float ip address of ceilometer-central -CONFIG_CEILOMETER_CENTRAL_HOST= - -#IP address of the server on which to install ceilometer-central -CONFIG_CEILOMETER_CENTRAL_HOSTS= - -#float ip address of ceilometer-alarm-evaluator & ceilometer-alarm-notifier. -CONFIG_CEILOMETER_ALARM_HOST= - -#IP address of the server on which to install ceilometer-alarm-evaluator & ceilometer-alarm-notifier. -CONFIG_CEILOMETER_ALARM_HOSTS= - -# float IP address of the MongoDB. -CONFIG_MONGODB_HOST= - -#IP address of the server on which to install MongoDB. -CONFIG_MONGODB_HOSTS= - -# IP address of the server on which to install the Redis master -# server. -CONFIG_REDIS_MASTER_HOST= - -# Port on which the Redis server(s) listens. -CONFIG_REDIS_PORT=6379 - -# Specify 'y' to have Redis try to use HA. ['y', 'n'] -CONFIG_REDIS_HA=n - -# Hosts on which to install Redis slaves. -CONFIG_REDIS_SLAVE_HOSTS= - -# Hosts on which to install Redis sentinel servers. -CONFIG_REDIS_SENTINEL_HOSTS= - -# Host to configure as the Redis coordination sentinel. -CONFIG_REDIS_SENTINEL_CONTACT_HOST= - -# Port on which Redis sentinel servers listen. -CONFIG_REDIS_SENTINEL_PORT=26379 - -# Quorum value for Redis sentinel servers. -CONFIG_REDIS_SENTINEL_QUORUM=2 - -# Name of the master server watched by the Redis sentinel. ['[a-z]+'] -CONFIG_REDIS_MASTER_NAME=mymaster - -# Password to use for OpenStack Database-as-a-Service (trove) to -# access the database. -CONFIG_TROVE_DB_PW=trove - -# Password to use for OpenStack Database-as-a-Service to authenticate -# with the Identity service. -CONFIG_TROVE_KS_PW=trove - -# User name to use when OpenStack Database-as-a-Service connects to -# the Compute service. -CONFIG_TROVE_NOVA_USER=admin - -# Tenant to use when OpenStack Database-as-a-Service connects to the -# Compute service. -CONFIG_TROVE_NOVA_TENANT=services - -# Password to use when OpenStack Database-as-a-Service connects to -# the Compute service. -CONFIG_TROVE_NOVA_PW=trove - -# Password of the nagiosadmin user on the Nagios server. -CONFIG_NAGIOS_PW=nagios - -# This option decides whether install ovdk and ovdk agents or -# ovs agent patch which can support sdn vxlan, the format such as -# {'ovdk':['10.43.211.2','10.43.211.12','10.43.211.15'], -# 'ovs_agent_patch':['10.43.211.105','10.43.211.106']}", -CONFIG_DVS_TYPE= - -# Comma-separated list of physical nics used by ovdk -# Example: eth0,eth1 -CONFIG_DVS_PHYSICAL_NICS= - -# Set dvs vxlan info, when use "vxlan bond" mode, the format -# as:bondname(bond mode; lacp mode; bond nics) -# Example: bond1(active-backup;off;eth0-eth1) or eth0 -CONFIG_DVS_VXLAN_INFO= - -# This option decide whether let ovdk agents to enable dvs support outside vtep endpoint. 
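The Redis options above (master name, sentinel port, quorum) map onto the standard Redis Sentinel `sentinel monitor` directive. A small sketch that renders that line; the master address shown is illustrative, not taken from this file.

# Minimal sketch: the sentinel.conf monitor line implied by the Redis options above.
def sentinel_monitor_line(master_name, master_host, master_port, quorum):
    return 'sentinel monitor %s %s %d %d' % (master_name, master_host, master_port, quorum)

print(sentinel_monitor_line('mymaster', '192.160.0.2', 6379, 2))
# -> sentinel monitor mymaster 192.160.0.2 6379 2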
-CONFIG_DVS_VTEP=n - -# The ID of DCI domain, to let agents between different DCI center donot create vxlan endpoint - -# Comma-separated dict of {0:['10.43.211.2','10.43.211.12','10.43.211.15'],1:['10.43.211.105','10.43.211.105']} -# list enumerating the ID of DCI domain to let agents -# between different DCI center donot create vxlan endpoint neutron-ovdk-agent. -CONFIG_DVS_NODE_DOMAIN_ID=0 - -# Type of network to allocate for tenant networks (eg. vlan, local, -# gre, vxlan) -CONFIG_NEUTRON_OVS_TENANT_NETWORK_TYPE=vxlan - -#if install log -CONFIG_LOG_INSTALL=n - -#hosts install log server -CONFIG_LOG_SERVER_HOSTS= - -# Set to 'y' if you would like Packstack to install ha -CONFIG_HA_INSTALL_MONGODB_LOCAL=n \ No newline at end of file diff --git a/backend/tecs/tfg_upgrade.sh b/backend/tecs/tfg_upgrade.sh deleted file mode 100755 index 39825a78..00000000 --- a/backend/tecs/tfg_upgrade.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -scriptsdir=$(cd $(dirname $0) && pwd) -ISODIR=`mktemp -d /mnt/TFG_ISOXXXXXX` -mount -o loop $scriptsdir/*CGSL_VPLAT*.iso ${ISODIR} -cp ${ISODIR}/*CGSL_VPLAT*.bin $scriptsdir -umount ${ISODIR} -[ -e ${ISODIR} ] && rm -rf ${ISODIR} -$scriptsdir/*CGSL_VPLAT*.bin upgrade reboot diff --git a/backend/tecs/trustme.sh b/backend/tecs/trustme.sh deleted file mode 100755 index 77303894..00000000 --- a/backend/tecs/trustme.sh +++ /dev/null @@ -1,93 +0,0 @@ -#!/bin/sh -# 让某个主机彻底信任我,以后ssh登录过去不需要密码 - -#检查参数是否合法 -logfile=/var/log/trustme.log -function print_log -{ - local promt="$1" - echo -e "$promt" - echo -e "`date -d today +"%Y-%m-%d %H:%M:%S"` $promt" >> $logfile -} - -ip=$1 -if [ -z $ip ]; then - print_log "Usage: `basename $0` ipaddr passwd" - exit 1 -fi - -passwd=$2 -if [ -z $passwd ]; then - print_log "Usage: `basename $0` ipaddr passwd" - exit 1 -fi - -rpm -qi sshpass >/dev/null -if [ $? != 0 ]; then - print_log "Please install sshpass first" - exit 1 -fi - -#试试对端能不能ping得通 -unreachable=`ping $ip -c 1 -W 3 | grep -c "100% packet loss"` -if [ $unreachable -eq 1 ]; then - print_log "host $ip is unreachable" - exit 1 -fi - -#如果本机还没有ssh公钥,就生成一个 -if [ ! -e ~/.ssh/id_dsa.pub ]; then - print_log "generating ssh public key ..." - ssh-keygen -t dsa -f /root/.ssh/id_dsa -N "" - if [ $? != 0 ]; then - print_log "ssh-keygen failed" - exit 1 - fi -fi - -#首先在对端删除原来保存的信任公钥 -user=`whoami` -host=`hostname` -keyend="$user@$host" -print_log "my keyend = $keyend" -cmd="sed '/$keyend$/d' -i ~/.ssh/authorized_keys" -#echo cmd:$cmd -print_log "clear my old pub key on $ip ..." -sshpass -p $passwd ssh -o StrictHostKeyChecking=no $ip "rm -rf /root/.ssh/known_hosts" -if [ $? != 0 ]; then - print_log "ssh $ip to delete known_hosts failed" - exit 1 -fi -sshpass -p $passwd ssh -o StrictHostKeyChecking=no $ip "touch ~/.ssh/authorized_keys" -if [ $? != 0 ]; then - print_log "ssh $ip to create file authorized_keys failed" - exit 1 -fi -sshpass -p $passwd ssh -o StrictHostKeyChecking=no $ip "$cmd" -if [ $? != 0 ]; then - print_log "ssh $ip to edit authorized_keys failed" - exit 1 -fi -#把新生成的拷贝过去 -print_log "copy my public key to $ip ..." -tmpfile=/tmp/`hostname`.key.pub -sshpass -p $passwd scp -o StrictHostKeyChecking=no ~/.ssh/id_dsa.pub $ip:$tmpfile -if [ $? != 0 ]; then - print_log "scp file to $ip failed" - exit 1 -fi -#在对端将其追加到authorized_keys -print_log "on $ip, append my public key to ~/.ssh/authorized_keys ..." -sshpass -p $passwd ssh -o StrictHostKeyChecking=no $ip "cat $tmpfile >> ~/.ssh/authorized_keys" -if [ $? 
!= 0 ]; then - print_log "ssh $ip to add public key for authorized_keys failed" - exit 1 -fi -print_log "rm tmp file $ip:$tmpfile" -sshpass -p $passwd ssh -o StrictHostKeyChecking=no $ip "rm $tmpfile" -if [ $? != 0 ]; then - print_log "ssh $ip to delete tmp file failed" - exit 1 -fi -print_log "trustme ok!" - diff --git a/backend/zenic/trustme.sh b/backend/zenic/trustme.sh deleted file mode 100755 index 54bd7cb3..00000000 --- a/backend/zenic/trustme.sh +++ /dev/null @@ -1,62 +0,0 @@ -#!/bin/sh -# 让某个主机彻底信任我,以后ssh登录过去不需要密码 - -#检查参数是否合法 -ip=$1 -if [ -z $ip ]; then - echo "Usage: `basename $0` ipaddr passwd" >&2 - exit 1 -fi - -passwd=$2 -if [ -z $passwd ]; then - echo "Usage: `basename $0` ipaddr passwd" >&2 - exit 1 -fi - -rpm -qi sshpass >/dev/null -if [ $? != 0 ]; then - echo "Please install sshpass first!" >&2 - exit 1 -fi - -#试试对端能不能ping得通 -unreachable=`ping $ip -c 1 -W 3 | grep -c "100% packet loss"` -if [ $unreachable -eq 1 ]; then - echo "host $ip is unreachable!!!" - exit 1 -fi - -#如果本机还没有ssh公钥,就生成一个 -if [ ! -e ~/.ssh/id_dsa.pub ]; then - echo "generating ssh public key ..." - ssh-keygen -t dsa -f /root/.ssh/id_dsa -N "" -fi - -#首先在对端删除原来保存的信任公钥 -user=`whoami` -host=`hostname` -keyend="$user@$host" -echo "my keyend = $keyend" -cmd="sed '/$keyend$/d' -i ~/.ssh/authorized_keys" -#echo cmd:$cmd -echo "clear my old pub key on $ip ..." -sshpass -p $passwd ssh $ip "rm -rf /root/.ssh/known_hosts" -sshpass -p $passwd ssh $ip "touch ~/.ssh/authorized_keys" -sshpass -p $passwd ssh $ip "$cmd" - -#把新生成的拷贝过去 -echo "copy my public key to $ip ..." -tmpfile=/tmp/`hostname`.key.pub -sshpass -p $passwd scp ~/.ssh/id_dsa.pub $ip:$tmpfile - -#在对端将其追加到authorized_keys -echo "on $ip, append my public key to ~/.ssh/authorized_keys ..." -sshpass -p $passwd ssh $ip "cat $tmpfile >> ~/.ssh/authorized_keys" -echo "rm tmp file $ip:$tmpfile" -sshpass -p $passwd ssh $ip "rm $tmpfile" -echo "trustme ok!" - - - - diff --git a/backend/zenic/zenic.conf b/backend/zenic/zenic.conf deleted file mode 100755 index ca243272..00000000 --- a/backend/zenic/zenic.conf +++ /dev/null @@ -1,17 +0,0 @@ -[general] -nodeip=192.168.3.1 -nodeid=1 -hostname=sdn59 -needzamp=y -zbpips=192.168.3.1 -zbp_node_num=1 -zbpnodelist=1,256 -zampips=192.168.3.1 -zamp_node_num=1 -mongodbips=192.168.3.1 -mongodb_node_num=1 -zamp_vip= -mongodb_vip= -MacName=eth1 -netid=1234 -memmode=tiny diff --git a/code/daisy/daisy/api/authorization.py b/code/daisy/daisy/api/authorization.py deleted file mode 100755 index 015eca95..00000000 --- a/code/daisy/daisy/api/authorization.py +++ /dev/null @@ -1,899 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# Copyright 2013 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
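The module below wraps images, tasks, and metadef objects in "immutable proxy" classes whose attributes raise Forbidden on any write. A standalone sketch of that read-only-property pattern, separate from the module's own _immutable_attr helper further down; all names here are illustrative.

# Minimal sketch: expose an attribute of a wrapped object read-only and turn
# assignment or deletion into an error, as the proxies below do.
class Forbidden(Exception):
    pass

def read_only(target, attr):
    def getter(self):
        return getattr(getattr(self, target), attr)
    def forbidden(self, *args, **kwargs):
        raise Forbidden("You are not permitted to modify '%s'." % attr)
    return property(getter, forbidden, forbidden)

class ImmutableView(object):
    def __init__(self, base):
        self.base = base
    name = read_only('base', 'name')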
- -import copy - -from daisy.common import exception -import daisy.domain.proxy -from daisy import i18n - -_ = i18n._ - - -def is_image_mutable(context, image): - """Return True if the image is mutable in this context.""" - if context.is_admin: - return True - - if image.owner is None or context.owner is None: - return False - - return image.owner == context.owner - - -def proxy_image(context, image): - if is_image_mutable(context, image): - return ImageProxy(image, context) - else: - return ImmutableImageProxy(image, context) - - -def is_member_mutable(context, member): - """Return True if the image is mutable in this context.""" - if context.is_admin: - return True - - if context.owner is None: - return False - - return member.member_id == context.owner - - -def proxy_member(context, member): - if is_member_mutable(context, member): - return member - else: - return ImmutableMemberProxy(member) - - -def is_task_mutable(context, task): - """Return True if the task is mutable in this context.""" - if context.is_admin: - return True - - if context.owner is None: - return False - - return task.owner == context.owner - - -def is_task_stub_mutable(context, task_stub): - """Return True if the task stub is mutable in this context.""" - if context.is_admin: - return True - - if context.owner is None: - return False - - return task_stub.owner == context.owner - - -def proxy_task(context, task): - if is_task_mutable(context, task): - return task - else: - return ImmutableTaskProxy(task) - - -def proxy_task_stub(context, task_stub): - if is_task_stub_mutable(context, task_stub): - return task_stub - else: - return ImmutableTaskStubProxy(task_stub) - - -class ImageRepoProxy(daisy.domain.proxy.Repo): - - def __init__(self, image_repo, context): - self.context = context - self.image_repo = image_repo - proxy_kwargs = {'context': self.context} - super(ImageRepoProxy, self).__init__(image_repo, - item_proxy_class=ImageProxy, - item_proxy_kwargs=proxy_kwargs) - - def get(self, image_id): - image = self.image_repo.get(image_id) - return proxy_image(self.context, image) - - def list(self, *args, **kwargs): - images = self.image_repo.list(*args, **kwargs) - return [proxy_image(self.context, i) for i in images] - - -class ImageMemberRepoProxy(daisy.domain.proxy.Repo): - - def __init__(self, member_repo, image, context): - self.member_repo = member_repo - self.image = image - self.context = context - super(ImageMemberRepoProxy, self).__init__(member_repo) - - def get(self, member_id): - if (self.context.is_admin or - self.context.owner in (self.image.owner, member_id)): - member = self.member_repo.get(member_id) - return proxy_member(self.context, member) - else: - message = _("You cannot get image member for %s") - raise exception.Forbidden(message % member_id) - - def list(self, *args, **kwargs): - members = self.member_repo.list(*args, **kwargs) - if (self.context.is_admin or - self.context.owner == self.image.owner): - return [proxy_member(self.context, m) for m in members] - for member in members: - if member.member_id == self.context.owner: - return [proxy_member(self.context, member)] - message = _("You cannot get image member for %s") - raise exception.Forbidden(message % self.image.image_id) - - def remove(self, image_member): - if (self.image.owner == self.context.owner or - self.context.is_admin): - self.member_repo.remove(image_member) - else: - message = _("You cannot delete image member for %s") - raise exception.Forbidden(message - % self.image.image_id) - - def add(self, image_member): - if 
(self.image.owner == self.context.owner or - self.context.is_admin): - self.member_repo.add(image_member) - else: - message = _("You cannot add image member for %s") - raise exception.Forbidden(message - % self.image.image_id) - - def save(self, image_member, from_state=None): - if (self.context.is_admin or - self.context.owner == image_member.member_id): - self.member_repo.save(image_member, from_state=from_state) - else: - message = _("You cannot update image member %s") - raise exception.Forbidden(message % image_member.member_id) - - -class ImageFactoryProxy(daisy.domain.proxy.ImageFactory): - - def __init__(self, image_factory, context): - self.image_factory = image_factory - self.context = context - kwargs = {'context': self.context} - super(ImageFactoryProxy, self).__init__(image_factory, - proxy_class=ImageProxy, - proxy_kwargs=kwargs) - - def new_image(self, **kwargs): - owner = kwargs.pop('owner', self.context.owner) - - if not self.context.is_admin: - if owner is None or owner != self.context.owner: - message = _("You are not permitted to create images " - "owned by '%s'.") - raise exception.Forbidden(message % owner) - - return super(ImageFactoryProxy, self).new_image(owner=owner, **kwargs) - - -class ImageMemberFactoryProxy(object): - - def __init__(self, image_member_factory, context): - self.image_member_factory = image_member_factory - self.context = context - - def new_image_member(self, image, member_id): - owner = image.owner - - if not self.context.is_admin: - if owner is None or owner != self.context.owner: - message = _("You are not permitted to create image members " - "for the image.") - raise exception.Forbidden(message) - - if image.visibility == 'public': - message = _("Public images do not have members.") - raise exception.Forbidden(message) - - return self.image_member_factory.new_image_member(image, member_id) - - -def _immutable_attr(target, attr, proxy=None): - - def get_attr(self): - value = getattr(getattr(self, target), attr) - if proxy is not None: - value = proxy(value) - return value - - def forbidden(self, *args, **kwargs): - resource = getattr(self, 'resource_name', 'resource') - message = _("You are not permitted to modify '%(attr)s' on this " - "%(resource)s.") - raise exception.Forbidden(message % {'attr': attr, - 'resource': resource}) - - return property(get_attr, forbidden, forbidden) - - -class ImmutableLocations(list): - def forbidden(self, *args, **kwargs): - message = _("You are not permitted to modify locations " - "for this image.") - raise exception.Forbidden(message) - - def __deepcopy__(self, memo): - return ImmutableLocations(copy.deepcopy(list(self), memo)) - - append = forbidden - extend = forbidden - insert = forbidden - pop = forbidden - remove = forbidden - reverse = forbidden - sort = forbidden - __delitem__ = forbidden - __delslice__ = forbidden - __iadd__ = forbidden - __imul__ = forbidden - __setitem__ = forbidden - __setslice__ = forbidden - - -class ImmutableProperties(dict): - def forbidden_key(self, key, *args, **kwargs): - message = _("You are not permitted to modify '%s' on this image.") - raise exception.Forbidden(message % key) - - def forbidden(self, *args, **kwargs): - message = _("You are not permitted to modify this image.") - raise exception.Forbidden(message) - - __delitem__ = forbidden_key - __setitem__ = forbidden_key - pop = forbidden - popitem = forbidden - setdefault = forbidden - update = forbidden - - -class ImmutableTags(set): - def forbidden(self, *args, **kwargs): - message = _("You are not permitted 
to modify tags on this image.") - raise exception.Forbidden(message) - - add = forbidden - clear = forbidden - difference_update = forbidden - intersection_update = forbidden - pop = forbidden - remove = forbidden - symmetric_difference_update = forbidden - update = forbidden - - -class ImmutableImageProxy(object): - def __init__(self, base, context): - self.base = base - self.context = context - self.resource_name = 'image' - - name = _immutable_attr('base', 'name') - image_id = _immutable_attr('base', 'image_id') - name = _immutable_attr('base', 'name') - status = _immutable_attr('base', 'status') - created_at = _immutable_attr('base', 'created_at') - updated_at = _immutable_attr('base', 'updated_at') - visibility = _immutable_attr('base', 'visibility') - min_disk = _immutable_attr('base', 'min_disk') - min_ram = _immutable_attr('base', 'min_ram') - protected = _immutable_attr('base', 'protected') - locations = _immutable_attr('base', 'locations', proxy=ImmutableLocations) - checksum = _immutable_attr('base', 'checksum') - owner = _immutable_attr('base', 'owner') - disk_format = _immutable_attr('base', 'disk_format') - container_format = _immutable_attr('base', 'container_format') - size = _immutable_attr('base', 'size') - virtual_size = _immutable_attr('base', 'virtual_size') - extra_properties = _immutable_attr('base', 'extra_properties', - proxy=ImmutableProperties) - tags = _immutable_attr('base', 'tags', proxy=ImmutableTags) - - def delete(self): - message = _("You are not permitted to delete this image.") - raise exception.Forbidden(message) - - def get_member_repo(self): - member_repo = self.base.get_member_repo() - return ImageMemberRepoProxy(member_repo, self, self.context) - - def get_data(self, *args, **kwargs): - return self.base.get_data(*args, **kwargs) - - def set_data(self, *args, **kwargs): - message = _("You are not permitted to upload data for this image.") - raise exception.Forbidden(message) - - -class ImmutableMemberProxy(object): - def __init__(self, base): - self.base = base - self.resource_name = 'image member' - - id = _immutable_attr('base', 'id') - image_id = _immutable_attr('base', 'image_id') - member_id = _immutable_attr('base', 'member_id') - status = _immutable_attr('base', 'status') - created_at = _immutable_attr('base', 'created_at') - updated_at = _immutable_attr('base', 'updated_at') - - -class ImmutableTaskProxy(object): - def __init__(self, base): - self.base = base - self.resource_name = 'task' - - task_id = _immutable_attr('base', 'task_id') - type = _immutable_attr('base', 'type') - status = _immutable_attr('base', 'status') - owner = _immutable_attr('base', 'owner') - expires_at = _immutable_attr('base', 'expires_at') - created_at = _immutable_attr('base', 'created_at') - updated_at = _immutable_attr('base', 'updated_at') - input = _immutable_attr('base', 'input') - message = _immutable_attr('base', 'message') - result = _immutable_attr('base', 'result') - - def run(self, executor): - self.base.run(executor) - - def begin_processing(self): - message = _("You are not permitted to set status on this task.") - raise exception.Forbidden(message) - - def succeed(self, result): - message = _("You are not permitted to set status on this task.") - raise exception.Forbidden(message) - - def fail(self, message): - message = _("You are not permitted to set status on this task.") - raise exception.Forbidden(message) - - -class ImmutableTaskStubProxy(object): - def __init__(self, base): - self.base = base - self.resource_name = 'task stub' - - task_id = 
_immutable_attr('base', 'task_id') - type = _immutable_attr('base', 'type') - status = _immutable_attr('base', 'status') - owner = _immutable_attr('base', 'owner') - expires_at = _immutable_attr('base', 'expires_at') - created_at = _immutable_attr('base', 'created_at') - updated_at = _immutable_attr('base', 'updated_at') - - -class ImageProxy(daisy.domain.proxy.Image): - - def __init__(self, image, context): - self.image = image - self.context = context - super(ImageProxy, self).__init__(image) - - def get_member_repo(self, **kwargs): - if self.image.visibility == 'public': - message = _("Public images do not have members.") - raise exception.Forbidden(message) - else: - member_repo = self.image.get_member_repo(**kwargs) - return ImageMemberRepoProxy(member_repo, self, self.context) - - -class TaskProxy(daisy.domain.proxy.Task): - - def __init__(self, task): - self.task = task - super(TaskProxy, self).__init__(task) - - -class TaskFactoryProxy(daisy.domain.proxy.TaskFactory): - - def __init__(self, task_factory, context): - self.task_factory = task_factory - self.context = context - super(TaskFactoryProxy, self).__init__( - task_factory, - task_proxy_class=TaskProxy) - - def new_task(self, **kwargs): - owner = kwargs.get('owner', self.context.owner) - - # NOTE(nikhil): Unlike Images, Tasks are expected to have owner. - # We currently do not allow even admins to set the owner to None. - if owner is not None and (owner == self.context.owner - or self.context.is_admin): - return super(TaskFactoryProxy, self).new_task(**kwargs) - else: - message = _("You are not permitted to create this task with " - "owner as: %s") - raise exception.Forbidden(message % owner) - - -class TaskRepoProxy(daisy.domain.proxy.TaskRepo): - - def __init__(self, task_repo, context): - self.task_repo = task_repo - self.context = context - super(TaskRepoProxy, self).__init__(task_repo) - - def get(self, task_id): - task = self.task_repo.get(task_id) - return proxy_task(self.context, task) - - -class TaskStubRepoProxy(daisy.domain.proxy.TaskStubRepo): - - def __init__(self, task_stub_repo, context): - self.task_stub_repo = task_stub_repo - self.context = context - super(TaskStubRepoProxy, self).__init__(task_stub_repo) - - def list(self, *args, **kwargs): - task_stubs = self.task_stub_repo.list(*args, **kwargs) - return [proxy_task_stub(self.context, t) for t in task_stubs] - - -# Metadef Namespace classes -def is_namespace_mutable(context, namespace): - """Return True if the namespace is mutable in this context.""" - if context.is_admin: - return True - - if context.owner is None: - return False - - return namespace.owner == context.owner - - -def proxy_namespace(context, namespace): - if is_namespace_mutable(context, namespace): - return namespace - else: - return ImmutableMetadefNamespaceProxy(namespace) - - -class ImmutableMetadefNamespaceProxy(object): - - def __init__(self, base): - self.base = base - self.resource_name = 'namespace' - - namespace_id = _immutable_attr('base', 'namespace_id') - namespace = _immutable_attr('base', 'namespace') - display_name = _immutable_attr('base', 'display_name') - description = _immutable_attr('base', 'description') - owner = _immutable_attr('base', 'owner') - visibility = _immutable_attr('base', 'visibility') - protected = _immutable_attr('base', 'protected') - created_at = _immutable_attr('base', 'created_at') - updated_at = _immutable_attr('base', 'updated_at') - - def delete(self): - message = _("You are not permitted to delete this namespace.") - raise 
exception.Forbidden(message) - - def save(self): - message = _("You are not permitted to update this namespace.") - raise exception.Forbidden(message) - - -class MetadefNamespaceProxy(daisy.domain.proxy.MetadefNamespace): - - def __init__(self, namespace): - self.namespace_input = namespace - super(MetadefNamespaceProxy, self).__init__(namespace) - - -class MetadefNamespaceFactoryProxy( - daisy.domain.proxy.MetadefNamespaceFactory): - - def __init__(self, meta_namespace_factory, context): - self.meta_namespace_factory = meta_namespace_factory - self.context = context - super(MetadefNamespaceFactoryProxy, self).__init__( - meta_namespace_factory, - meta_namespace_proxy_class=MetadefNamespaceProxy) - - def new_namespace(self, **kwargs): - owner = kwargs.pop('owner', self.context.owner) - - if not self.context.is_admin: - if owner is None or owner != self.context.owner: - message = _("You are not permitted to create namespace " - "owned by '%s'") - raise exception.Forbidden(message % (owner)) - - return super(MetadefNamespaceFactoryProxy, self).new_namespace( - owner=owner, **kwargs) - - -class MetadefNamespaceRepoProxy(daisy.domain.proxy.MetadefNamespaceRepo): - - def __init__(self, namespace_repo, context): - self.namespace_repo = namespace_repo - self.context = context - super(MetadefNamespaceRepoProxy, self).__init__(namespace_repo) - - def get(self, namespace): - namespace_obj = self.namespace_repo.get(namespace) - return proxy_namespace(self.context, namespace_obj) - - def list(self, *args, **kwargs): - namespaces = self.namespace_repo.list(*args, **kwargs) - return [proxy_namespace(self.context, namespace) for - namespace in namespaces] - - -# Metadef Object classes -def is_object_mutable(context, object): - """Return True if the object is mutable in this context.""" - if context.is_admin: - return True - - if context.owner is None: - return False - - return object.namespace.owner == context.owner - - -def proxy_object(context, object): - if is_object_mutable(context, object): - return object - else: - return ImmutableMetadefObjectProxy(object) - - -class ImmutableMetadefObjectProxy(object): - - def __init__(self, base): - self.base = base - self.resource_name = 'object' - - object_id = _immutable_attr('base', 'object_id') - name = _immutable_attr('base', 'name') - required = _immutable_attr('base', 'required') - description = _immutable_attr('base', 'description') - properties = _immutable_attr('base', 'properties') - created_at = _immutable_attr('base', 'created_at') - updated_at = _immutable_attr('base', 'updated_at') - - def delete(self): - message = _("You are not permitted to delete this object.") - raise exception.Forbidden(message) - - def save(self): - message = _("You are not permitted to update this object.") - raise exception.Forbidden(message) - - -class MetadefObjectProxy(daisy.domain.proxy.MetadefObject): - - def __init__(self, meta_object): - self.meta_object = meta_object - super(MetadefObjectProxy, self).__init__(meta_object) - - -class MetadefObjectFactoryProxy(daisy.domain.proxy.MetadefObjectFactory): - - def __init__(self, meta_object_factory, context): - self.meta_object_factory = meta_object_factory - self.context = context - super(MetadefObjectFactoryProxy, self).__init__( - meta_object_factory, - meta_object_proxy_class=MetadefObjectProxy) - - def new_object(self, **kwargs): - owner = kwargs.pop('owner', self.context.owner) - - if not self.context.is_admin: - if owner is None or owner != self.context.owner: - message = _("You are not permitted to create object 
" - "owned by '%s'") - raise exception.Forbidden(message % (owner)) - - return super(MetadefObjectFactoryProxy, self).new_object(**kwargs) - - -class MetadefObjectRepoProxy(daisy.domain.proxy.MetadefObjectRepo): - - def __init__(self, object_repo, context): - self.object_repo = object_repo - self.context = context - super(MetadefObjectRepoProxy, self).__init__(object_repo) - - def get(self, namespace, object_name): - meta_object = self.object_repo.get(namespace, object_name) - return proxy_object(self.context, meta_object) - - def list(self, *args, **kwargs): - objects = self.object_repo.list(*args, **kwargs) - return [proxy_object(self.context, meta_object) for - meta_object in objects] - - -# Metadef ResourceType classes -def is_meta_resource_type_mutable(context, meta_resource_type): - """Return True if the meta_resource_type is mutable in this context.""" - if context.is_admin: - return True - - if context.owner is None: - return False - - # (lakshmiS): resource type can exist without an association with - # namespace and resource type cannot be created/update/deleted directly( - # they have to be associated/de-associated from namespace) - if meta_resource_type.namespace: - return meta_resource_type.namespace.owner == context.owner - else: - return False - - -def proxy_meta_resource_type(context, meta_resource_type): - if is_meta_resource_type_mutable(context, meta_resource_type): - return meta_resource_type - else: - return ImmutableMetadefResourceTypeProxy(meta_resource_type) - - -class ImmutableMetadefResourceTypeProxy(object): - - def __init__(self, base): - self.base = base - self.resource_name = 'meta_resource_type' - - namespace = _immutable_attr('base', 'namespace') - name = _immutable_attr('base', 'name') - prefix = _immutable_attr('base', 'prefix') - properties_target = _immutable_attr('base', 'properties_target') - created_at = _immutable_attr('base', 'created_at') - updated_at = _immutable_attr('base', 'updated_at') - - def delete(self): - message = _("You are not permitted to delete this meta_resource_type.") - raise exception.Forbidden(message) - - -class MetadefResourceTypeProxy(daisy.domain.proxy.MetadefResourceType): - - def __init__(self, meta_resource_type): - self.meta_resource_type = meta_resource_type - super(MetadefResourceTypeProxy, self).__init__(meta_resource_type) - - -class MetadefResourceTypeFactoryProxy( - daisy.domain.proxy.MetadefResourceTypeFactory): - - def __init__(self, resource_type_factory, context): - self.meta_resource_type_factory = resource_type_factory - self.context = context - super(MetadefResourceTypeFactoryProxy, self).__init__( - resource_type_factory, - resource_type_proxy_class=MetadefResourceTypeProxy) - - def new_resource_type(self, **kwargs): - owner = kwargs.pop('owner', self.context.owner) - - if not self.context.is_admin: - if owner is None or owner != self.context.owner: - message = _("You are not permitted to create resource_type " - "owned by '%s'") - raise exception.Forbidden(message % (owner)) - - return super(MetadefResourceTypeFactoryProxy, self).new_resource_type( - **kwargs) - - -class MetadefResourceTypeRepoProxy( - daisy.domain.proxy.MetadefResourceTypeRepo): - - def __init__(self, meta_resource_type_repo, context): - self.meta_resource_type_repo = meta_resource_type_repo - self.context = context - super(MetadefResourceTypeRepoProxy, self).__init__( - meta_resource_type_repo) - - def list(self, *args, **kwargs): - meta_resource_types = self.meta_resource_type_repo.list( - *args, **kwargs) - return 
[proxy_meta_resource_type(self.context, meta_resource_type) for - meta_resource_type in meta_resource_types] - - def get(self, *args, **kwargs): - meta_resource_type = self.meta_resource_type_repo.get(*args, **kwargs) - return proxy_meta_resource_type(self.context, meta_resource_type) - - -# Metadef namespace properties classes -def is_namespace_property_mutable(context, namespace_property): - """Return True if the object is mutable in this context.""" - if context.is_admin: - return True - - if context.owner is None: - return False - - return namespace_property.namespace.owner == context.owner - - -def proxy_namespace_property(context, namespace_property): - if is_namespace_property_mutable(context, namespace_property): - return namespace_property - else: - return ImmutableMetadefPropertyProxy(namespace_property) - - -class ImmutableMetadefPropertyProxy(object): - - def __init__(self, base): - self.base = base - self.resource_name = 'namespace_property' - - property_id = _immutable_attr('base', 'property_id') - name = _immutable_attr('base', 'name') - schema = _immutable_attr('base', 'schema') - - def delete(self): - message = _("You are not permitted to delete this property.") - raise exception.Forbidden(message) - - def save(self): - message = _("You are not permitted to update this property.") - raise exception.Forbidden(message) - - -class MetadefPropertyProxy(daisy.domain.proxy.MetadefProperty): - - def __init__(self, namespace_property): - self.meta_object = namespace_property - super(MetadefPropertyProxy, self).__init__(namespace_property) - - -class MetadefPropertyFactoryProxy(daisy.domain.proxy.MetadefPropertyFactory): - - def __init__(self, namespace_property_factory, context): - self.meta_object_factory = namespace_property_factory - self.context = context - super(MetadefPropertyFactoryProxy, self).__init__( - namespace_property_factory, - property_proxy_class=MetadefPropertyProxy) - - def new_namespace_property(self, **kwargs): - owner = kwargs.pop('owner', self.context.owner) - - if not self.context.is_admin: - if owner is None or owner != self.context.owner: - message = _("You are not permitted to create property " - "owned by '%s'") - raise exception.Forbidden(message % (owner)) - - return super(MetadefPropertyFactoryProxy, self).new_namespace_property( - **kwargs) - - -class MetadefPropertyRepoProxy(daisy.domain.proxy.MetadefPropertyRepo): - - def __init__(self, namespace_property_repo, context): - self.namespace_property_repo = namespace_property_repo - self.context = context - super(MetadefPropertyRepoProxy, self).__init__(namespace_property_repo) - - def get(self, namespace, object_name): - namespace_property = self.namespace_property_repo.get(namespace, - object_name) - return proxy_namespace_property(self.context, namespace_property) - - def list(self, *args, **kwargs): - namespace_properties = self.namespace_property_repo.list( - *args, **kwargs) - return [proxy_namespace_property(self.context, namespace_property) for - namespace_property in namespace_properties] - - -# Metadef Tag classes -def is_tag_mutable(context, tag): - """Return True if the tag is mutable in this context.""" - if context.is_admin: - return True - - if context.owner is None: - return False - - return tag.namespace.owner == context.owner - - -def proxy_tag(context, tag): - if is_tag_mutable(context, tag): - return tag - else: - return ImmutableMetadefTagProxy(tag) - - -class ImmutableMetadefTagProxy(object): - - def __init__(self, base): - self.base = base - self.resource_name = 'tag' - - tag_id 
= _immutable_attr('base', 'tag_id') - name = _immutable_attr('base', 'name') - created_at = _immutable_attr('base', 'created_at') - updated_at = _immutable_attr('base', 'updated_at') - - def delete(self): - message = _("You are not permitted to delete this tag.") - raise exception.Forbidden(message) - - def save(self): - message = _("You are not permitted to update this tag.") - raise exception.Forbidden(message) - - -class MetadefTagProxy(daisy.domain.proxy.MetadefTag): - pass - - -class MetadefTagFactoryProxy(daisy.domain.proxy.MetadefTagFactory): - - def __init__(self, meta_tag_factory, context): - self.meta_tag_factory = meta_tag_factory - self.context = context - super(MetadefTagFactoryProxy, self).__init__( - meta_tag_factory, - meta_tag_proxy_class=MetadefTagProxy) - - def new_tag(self, **kwargs): - owner = kwargs.pop('owner', self.context.owner) - if not self.context.is_admin: - if owner is None: - message = _("Owner must be specified to create a tag.") - raise exception.Forbidden(message) - elif owner != self.context.owner: - message = _("You are not permitted to create a tag" - " in the namespace owned by '%s'") - raise exception.Forbidden(message % (owner)) - - return super(MetadefTagFactoryProxy, self).new_tag(**kwargs) - - -class MetadefTagRepoProxy(daisy.domain.proxy.MetadefTagRepo): - - def __init__(self, tag_repo, context): - self.tag_repo = tag_repo - self.context = context - super(MetadefTagRepoProxy, self).__init__(tag_repo) - - def get(self, namespace, tag_name): - meta_tag = self.tag_repo.get(namespace, tag_name) - return proxy_tag(self.context, meta_tag) - - def list(self, *args, **kwargs): - tags = self.tag_repo.list(*args, **kwargs) - return [proxy_tag(self.context, meta_tag) for - meta_tag in tags] diff --git a/code/daisy/daisy/api/backends/common.py b/code/daisy/daisy/api/backends/common.py index 745baf9a..cb76652f 100755 --- a/code/daisy/daisy/api/backends/common.py +++ b/code/daisy/daisy/api/backends/common.py @@ -1,235 +1,367 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -/install endpoint for tecs API -""" -import copy -import subprocess -import time - -import traceback -import webob.exc -from oslo_config import cfg -from oslo_log import log as logging -from webob.exc import HTTPBadRequest -from webob.exc import HTTPForbidden - -from threading import Thread - -from daisy import i18n -from daisy import notifier - -from daisy.api import policy -import daisy.api.v1 - -from daisy.common import exception -import daisy.registry.client.v1.api as registry - - -try: - import simplejson as json -except ImportError: - import json - -LOG = logging.getLogger(__name__) -_ = i18n._ -_LE = i18n._LE -_LI = i18n._LI -_LW = i18n._LW - -daisy_path = '/var/lib/daisy/' -tecs_backend_name = "tecs" -zenic_backend_name = "zenic" -proton_backend_name = "proton" -os_install_start_time = 0.0 - -def subprocess_call(command,file=None): - if file: - return_code = subprocess.call(command, - shell=True, - stdout=file, - stderr=file) - else: - return_code = subprocess.call(command, - shell=True, - stdout=open('/dev/null', 'w'), - stderr=subprocess.STDOUT) - if return_code != 0: - msg = "execute '%s' failed by subprocess call." % command - raise exception.SubprocessCmdFailed(msg) - -def get_host_detail(req, host_id): - try: - host_detail = registry.get_host_metadata(req.context, host_id) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - return host_detail - -def get_roles_detail(req): - try: - roles = registry.get_roles_detail(req.context) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - return roles - -def get_cluster_roles_detail(req, cluster_id): - try: - params = {'cluster_id':cluster_id} - roles = registry.get_roles_detail(req.context, **params) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - return roles - -def get_hosts_of_role(req, role_id): - try: - hosts = registry.get_role_host_metadata(req.context, role_id) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - return hosts - -def get_role_detail(req, role_id): - try: - role = registry.get_role_metadata(req.context, role_id) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - return role - -def update_role(req, role_id,role_meta): - try: - registry.update_role_metadata(req.context, role_id, role_meta) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - -def update_role_host(req, role_id, role_host): - try: - registry.update_role_host_metadata(req.context, role_id, role_host) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - -def delete_role_hosts(req, role_id): - try: - registry.delete_role_host_metadata(req.context, role_id) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - -def get_cluster_networks_detail(req, cluster_id): - try: - networks = registry.get_networks_detail(req.context, cluster_id) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - return networks - -def get_assigned_network(req, host_interface_id, network_id): - try: - assigned_network = registry.get_assigned_network(req.context, host_interface_id, network_id) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - return assigned_network - -def _ping_hosts_test(ips): - ping_cmd = 'fping' - for ip in set(ips): - ping_cmd = ping_cmd + ' ' + ip - obj = 
subprocess.Popen(ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - (stdoutput, erroutput) = obj.communicate() - _returncode = obj.returncode - if _returncode == 0 or _returncode == 1: - ping_result = stdoutput.split('\n') - unreachable_hosts = [result.split()[0] for result in ping_result if result and result.split()[2] != 'alive'] - else: - msg = "ping failed beaceuse there is invlid ip in %s" % ips - raise exception.InvalidIP(msg) - return unreachable_hosts - - -def check_ping_hosts(ping_ips, max_ping_times): - if not ping_ips: - LOG.info(_("no ip got for ping test")) - return ping_ips - ping_count = 0 - time_step = 5 - LOG.info(_("begin ping test for %s" % ','.join(ping_ips))) - while True: - if ping_count == 0: - ips = _ping_hosts_test(ping_ips) - else: - ips = _ping_hosts_test(ips) - - ping_count += 1 - if ips: - LOG.debug(_("ping host %s for %s times" % (','.join(ips), ping_count))) - if ping_count >= max_ping_times: - LOG.info(_("ping host %s timeout for %ss" % (','.join(ips), ping_count*time_step))) - return ips - time.sleep(time_step) - else: - LOG.info(_("ping %s successfully" % ','.join(ping_ips))) - return ips - -def _ping_reachable_to_unreachable_host_test(ip,max_ping_times): - ping_cmd = 'fping' - ping_cmd = ping_cmd + ' ' + ip - ping_count = 0 - time_step = 5 - while True: - obj = subprocess.Popen(ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - (stdoutput, erroutput) = obj.communicate() - _returncode = obj.returncode - if _returncode != 0: - return True - ping_count += 1 - if ping_count >= max_ping_times: - LOG.info(_("ping host %s timeout for %ss" % (ip, ping_count*time_step))) - return False - time.sleep(time_step) - return False - -def _ping_unreachable_to_reachable_host_test(ip, max_ping_times): - ping_count = 0 - time_step = 5 - ping_cmd = 'fping' - ping_cmd = ping_cmd + ' ' + ip - while True: - obj = subprocess.Popen(ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - (stdoutput, erroutput) = obj.communicate() - _returncode = obj.returncode - if _returncode == 0: - return True - ping_count += 1 - if ping_count >= max_ping_times: - LOG.info(_("ping host %s timeout for %ss" % (ip, ping_count*time_step))) - return False - time.sleep(time_step) - return False - -def check_reboot_ping(ip): - stop_max_ping_times = 360 #ha host reboot may spend 20 min,so timeout time is 30min - start_max_ping_times = 60 - _ping_reachable_to_unreachable_host_test(ip, stop_max_ping_times) - _ping_unreachable_to_reachable_host_test(ip, start_max_ping_times) - time.sleep(5) - -def cidr_to_netmask(cidr): - ip_netmask = cidr.split('/') - if len(ip_netmask) != 2 or not ip_netmask[1]: - raise exception.InvalidNetworkConfig("cidr is not valid") - - cidr_end = ip_netmask[1] - mask = ~(2**(32 - int(cidr_end)) - 1) - inter_ip = lambda x: '.'.join([str(x/(256**i)%256) for i in range(3,-1,-1)]) - netmask = inter_ip(mask) - - return netmask \ No newline at end of file +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +/install endpoint for tecs API +""" +import subprocess +import time +from oslo_log import log as logging +from webob.exc import HTTPBadRequest +from daisy import i18n + +from daisy.common import exception +import daisy.registry.client.v1.api as registry + + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW + +daisy_path = '/var/lib/daisy/' +tecs_backend_name = "tecs" +zenic_backend_name = "zenic" +proton_backend_name = "proton" +os_install_start_time = 0.0 + + +def subprocess_call(command, file=None): + if file: + return_code = subprocess.call(command, + shell=True, + stdout=file, + stderr=file) + else: + return_code = subprocess.call(command, + shell=True, + stdout=open('/dev/null', 'w'), + stderr=subprocess.STDOUT) + if return_code != 0: + msg = "execute '%s' failed by subprocess call." % command + raise exception.SubprocessCmdFailed(msg) + + +def get_host_detail(req, host_id): + try: + host_detail = registry.get_host_metadata(req.context, host_id) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return host_detail + + +def get_roles_detail(req): + try: + roles = registry.get_roles_detail(req.context) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return roles + + +def get_cluster_roles_detail(req, cluster_id): + try: + params = {'cluster_id': cluster_id} + roles = registry.get_roles_detail(req.context, **params) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return roles + + +def get_hosts_of_role(req, role_id): + try: + hosts = registry.get_role_host_metadata(req.context, role_id) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return hosts + + +def get_role_detail(req, role_id): + try: + role = registry.get_role_metadata(req.context, role_id) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return role + + +def get_cluster_configs_list(req, cluster_id): + roles = get_cluster_roles_detail(req, cluster_id) + config_set_list = [role['config_set_id'] for role in roles] + cluster_configs_list = [] + for config_set_id in config_set_list: + config_set_metadata = registry.get_config_set_metadata(req.context, + config_set_id) + if config_set_metadata.get('config', None): + cluster_configs_list.extend(config_set_metadata['config']) + return cluster_configs_list + + +def update_role(req, role_id, role_meta): + try: + registry.update_role_metadata(req.context, role_id, role_meta) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + + +def update_role_host(req, role_id, role_host): + try: + registry.update_role_host_metadata(req.context, role_id, role_host) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + + +def set_role_status_and_progress(req, cluster_id, opera, status, + backend_name='tecs'): + """ + set information in role of some backend. 
+ :status:key in host_role tables, such as: + {'messages':'Waiting','progress': '0'} + """ + roles = get_cluster_roles_detail(req, cluster_id) + for role in roles: + if role.get('deployment_backend') == backend_name: + role_hosts = get_hosts_of_role(req, role['id']) + for role_host in role_hosts: + if (opera == 'upgrade' and role_host['status'] in ['active']) \ + or (opera == 'install' and role_host['status'] not in + ['active', 'updating', 'update-failed']): + update_role_host(req, role_host['id'], status) + + +def delete_role_hosts(req, role_id): + try: + registry.delete_role_host_metadata(req.context, role_id) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + + +def get_cluster_networks_detail(req, cluster_id): + try: + networks = registry.get_networks_detail(req.context, cluster_id) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return networks + + +def get_assigned_network(req, host_interface_id, network_id): + try: + assigned_network = registry.get_assigned_network( + req.context, host_interface_id, network_id) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return assigned_network + + +def _ping_hosts_test(ips): + ping_cmd = 'fping' + for ip in set(ips): + ping_cmd = ping_cmd + ' ' + ip + obj = subprocess.Popen( + ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (stdoutput, erroutput) = obj.communicate() + _returncode = obj.returncode + if _returncode == 0 or _returncode == 1: + ping_result = stdoutput.split('\n') + unreachable_hosts = [result.split( + )[0] for result in ping_result if result and + result.split()[2] != 'alive'] + else: + msg = "ping failed beaceuse there is invlid ip in %s" % ips + raise exception.InvalidIP(msg) + return unreachable_hosts + + +def check_ping_hosts(ping_ips, max_ping_times): + if not ping_ips: + LOG.info(_("no ip got for ping test")) + return ping_ips + ping_count = 0 + time_step = 5 + LOG.info(_("begin ping test for %s" % ','.join(ping_ips))) + while True: + if ping_count == 0: + ips = _ping_hosts_test(ping_ips) + else: + ips = _ping_hosts_test(ips) + + ping_count += 1 + if ips: + LOG.debug( + _("ping host %s for %s times" % (','.join(ips), ping_count))) + if ping_count >= max_ping_times: + LOG.info(_("ping host %s timeout for %ss" % + (','.join(ips), ping_count * time_step))) + return ips + time.sleep(time_step) + else: + LOG.info(_("ping %s successfully" % ','.join(ping_ips))) + return ips + + +def _ping_reachable_to_unreachable_host_test(ip, max_ping_times): + ping_cmd = 'fping' + ping_cmd = ping_cmd + ' ' + ip + ping_count = 0 + time_step = 5 + while True: + obj = subprocess.Popen( + ping_cmd, shell=True, stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + (stdoutput, erroutput) = obj.communicate() + _returncode = obj.returncode + if _returncode != 0: + return True + ping_count += 1 + if ping_count >= max_ping_times: + LOG.info( + _("ping host %s timeout for %ss" + % (ip, ping_count * time_step))) + return False + time.sleep(time_step) + return False + + +def _ping_unreachable_to_reachable_host_test(ip, max_ping_times): + ping_count = 0 + time_step = 5 + ping_cmd = 'fping' + ping_cmd = ping_cmd + ' ' + ip + while True: + obj = subprocess.Popen( + ping_cmd, shell=True, stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + (stdoutput, erroutput) = obj.communicate() + _returncode = obj.returncode + if _returncode == 0: + return True + ping_count += 1 + if ping_count >= max_ping_times: + LOG.info( + 
_("ping host %s timeout for %ss" + % (ip, ping_count * time_step))) + return False + time.sleep(time_step) + return False + + +def check_reboot_ping(ip): + # ha host reboot may spend 20 min,so timeout time is 30min + stop_max_ping_times = 360 + start_max_ping_times = 60 + _ping_reachable_to_unreachable_host_test(ip, stop_max_ping_times) + _ping_unreachable_to_reachable_host_test(ip, start_max_ping_times) + time.sleep(5) + + +def cidr_to_netmask(cidr): + ip_netmask = cidr.split('/') + if len(ip_netmask) != 2 or not ip_netmask[1]: + raise exception.InvalidNetworkConfig("cidr is not valid") + + cidr_end = ip_netmask[1] + mask = ~(2 ** (32 - int(cidr_end)) - 1) + inter_ip = lambda x: '.'.join( + [str(x / (256 ** i) % 256) for i in range(3, -1, -1)]) + netmask = inter_ip(mask) + return netmask + + +def get_rpm_package_by_name(path, rpm_name): + cmd = "ls %s | grep ^%s.*\.rpm" % (path, rpm_name) + try: + rpm_name = subprocess.check_output( + cmd, shell=True, stderr=subprocess.STDOUT).split('\n')[0] + except subprocess.CalledProcessError: + msg = _("Get rpm %s failed in %s!" % (rpm_name, path)) + raise exception.SubprocessCmdFailed(message=msg) + return rpm_name + + +def remote_remove_rpm(rpm_name, dest_ip): + remove_cmd = 'clush -S -w %s "rpm -q %s && rpm -e %s"' % (dest_ip, + rpm_name, + rpm_name) + subprocess.call(remove_cmd, + shell=True, + stdout=open('/dev/null', 'w'), + stderr=subprocess.STDOUT) + + +def remote_install_rpm(rpm_name, rpm_src_path, rpm_dest_path, dest_ips): + rpm_package = get_rpm_package_by_name(rpm_src_path, rpm_name) + for dest_ip in dest_ips: + scp_rpm = "scp -o ConnectTimeout=10 %s/%s root@%s:%s" \ + % (rpm_src_path, rpm_package, dest_ip, rpm_dest_path) + subprocess_call(scp_rpm) + + remote_remove_rpm(rpm_name, dest_ip) + + install_cmd = 'clush -S -w %s "rpm -i %s/%s"' % (dest_ip, + rpm_dest_path, + rpm_package) + subprocess_call(install_cmd) + + +def remote_upgrade_rpm(rpm_name, rpm_src_path, rpm_dest_path, dest_ip): + rpm_package = get_rpm_package_by_name(rpm_src_path, rpm_name) + scp_rpm = "scp -o ConnectTimeout=10 %s/%s root@%s:%s" \ + % (rpm_src_path, rpm_package, dest_ip, rpm_dest_path) + subprocess_call(scp_rpm) + + upgrade_cmd = 'clush -S -w %s "rpm -U %s/%s"' % (dest_ip, + rpm_dest_path, + rpm_package) + subprocess.call(upgrade_cmd, + shell=True, + stdout=open('/dev/null', 'w'), + stderr=subprocess.STDOUT) + + +def trust_me(host_ips, root_passwd): + for host_ip in host_ips: + count = 0 + try_times = 10 + while count < try_times: + try: + trust_me_cmd = "/var/lib/daisy/tecs/trustme.sh\ + %s %s" % (host_ip, root_passwd) + subprocess_call(trust_me_cmd) + except: + count += 1 + LOG.info("Trying to trust '%s' for %s times" % + (host_ip, count)) + time.sleep(2) + if count >= try_times: + message = "Setup trust for '%s' failed,"\ + "see '/var/log/trustme.log' please" % (host_ip) + raise exception.TrustMeFailed(message=message) + else: + message = "Setup trust to '%s' successfully" % (host_ip) + LOG.info(message) + break + + +def calc_host_iqn(min_mac): + cmd = "echo -n %s |openssl md5" % min_mac + obj = subprocess.Popen(cmd, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + (stdoutput, erroutput) = obj.communicate() + iqn = "" + if stdoutput: + get_uuid = stdoutput.split('=')[1] + iqn = "iqn.opencos.rh:" + get_uuid.strip() + return iqn diff --git a/code/daisy/daisy/api/backends/driver.py b/code/daisy/daisy/api/backends/driver.py index bb065d54..283086ad 100755 --- a/code/daisy/daisy/api/backends/driver.py +++ b/code/daisy/daisy/api/backends/driver.py 
@@ -17,18 +17,15 @@ """ Driver base-classes: - (Beginning of) the contract that deployment backends drivers must follow, and shared - types that support that contract + (Beginning of) the contract that deployment backends drivers must follow, + and shared types that support that contract """ -import sys -from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils from daisy import i18n -from daisy.common import exception _ = i18n._ _LE = i18n._LE @@ -36,10 +33,13 @@ _LI = i18n._LI _LW = i18n._LW LOG = logging.getLogger(__name__) + class DeploymentDriver(object): + """base class for deployment interface. """ + def install(self, req, cluster_id): raise NotImplementedError() @@ -48,11 +48,11 @@ class DeploymentDriver(object): def uninstall(self, req, cluster_id): raise NotImplementedError() - + def uninstall_progress(self, req, cluster_id): LOG.info(_("driver no interface for 'uninstall_progress'")) return {} - + def upgrade_progress(self, req, cluster_id): LOG.info(_("driver no interface for 'upgrade_progress'")) return {} @@ -60,17 +60,19 @@ class DeploymentDriver(object): def exprot_db(self, req, cluster_id): LOG.info(_("driver no interface for 'exprot_db'")) return {} - + def update_disk_array(self, req, cluster_id): LOG.info(_("driver no interface for 'update_disk_array'")) return {} + def check_isinstance(obj, cls): """Checks that obj is of type cls, and lets PyLint infer types.""" if isinstance(obj, cls): return obj raise Exception(_('Expected object of type: %s') % (str(cls))) - + + def load_deployment_dirver(backend_name): """Load a cluster backend installation driver. """ @@ -78,8 +80,11 @@ def load_deployment_dirver(backend_name): LOG.info(_("Loading deployment backend '%s'") % backend_driver) try: - driver = importutils.import_object_ns('daisy.api.backends',backend_driver) + driver = importutils.import_object_ns( + 'daisy.api.backends', backend_driver) return check_isinstance(driver, DeploymentDriver) except ImportError: - LOG.exception(_("Error, unable to load the deployment backends '%s'" % backend_driver)) + LOG.exception( + _("Error, unable to load the deployment backends '%s'" + % backend_driver)) return None diff --git a/code/daisy/daisy/api/backends/os.py b/code/daisy/daisy/api/backends/os.py index 65f4ddc1..5bb8721c 100755 --- a/code/daisy/daisy/api/backends/os.py +++ b/code/daisy/daisy/api/backends/os.py @@ -1,742 +1,920 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
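# Illustrative sketch only (not part of the driver.py change above): under
# the DeploymentDriver contract it defines, a concrete backend subclasses
# DeploymentDriver, overrides the hooks it actually supports (install() and
# uninstall() raise NotImplementedError in the base class, while the
# *_progress hooks are logged no-ops returning {}), and must live under the
# daisy.api.backends namespace so load_deployment_dirver() can resolve it
# with importutils.import_object_ns('daisy.api.backends', ...). The backend
# name "demo" and the class name DemoDriver below are hypothetical.
from oslo_log import log as logging

from daisy.api.backends import driver

LOG = logging.getLogger(__name__)


class DemoDriver(driver.DeploymentDriver):
    """Hypothetical 'demo' deployment backend."""

    def install(self, req, cluster_id):
        LOG.info("demo backend: install requested for cluster %s"
                 % cluster_id)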
- -""" -/install endpoint for tecs API -""" -import copy -import subprocess -import time - -import traceback -import webob.exc -from oslo_config import cfg -from oslo_log import log as logging -from webob.exc import HTTPBadRequest -from webob.exc import HTTPForbidden -from webob.exc import HTTPServerError -import threading -from threading import Thread - -from daisy import i18n -from daisy import notifier - -from daisy.api import policy -import daisy.api.v1 - -from daisy.common import exception -import daisy.registry.client.v1.api as registry -from daisy.api.backends.tecs import config -from daisy.api.backends import driver -from daisy.api.network_api import network as neutron -from ironicclient import client as ironic_client -import daisy.api.backends.common as daisy_cmn -import daisy.api.backends.tecs.common as tecs_cmn - - -try: - import simplejson as json -except ImportError: - import json - -LOG = logging.getLogger(__name__) -_ = i18n._ -_LE = i18n._LE -_LI = i18n._LI -_LW = i18n._LW - -CONF = cfg.CONF -install_opts = [ - cfg.StrOpt('max_parallel_os_number', default=10, - help='Maximum number of hosts install os at the same time.'), -] -CONF.register_opts(install_opts) -upgrade_opts = [ - cfg.StrOpt('max_parallel_os_upgrade_number', default=10, - help='Maximum number of hosts upgrade os at the same time.'), -] -CONF.register_opts(upgrade_opts) - -host_os_status = { - 'INIT' : 'init', - 'INSTALLING' : 'installing', - 'ACTIVE' : 'active', - 'INSTALL_FAILED': 'install-failed', - 'UPDATING': 'updating', - 'UPDATE_FAILED': 'update-failed' -} - -LINUX_BOND_MODE = {'balance-rr':'0', 'active-backup':'1', 'balance-xor':'2', 'broadcast':'3','802.3ad':'4', 'balance-tlb':'5', 'balance-alb':'6'} - -daisy_tecs_path = tecs_cmn.daisy_tecs_path - -def get_ironicclient(): # pragma: no cover - """Get Ironic client instance.""" - args = {'os_auth_token': 'fake', - 'ironic_url':'http://127.0.0.1:6385/v1'} - return ironic_client.get_client(1, **args) - -def pxe_server_build(req, install_meta): - cluster_id = install_meta['cluster_id'] - try: - networks = registry.get_networks_detail(req.context, cluster_id) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - - try: - ip_inter = lambda x:sum([256**j*int(i) for j,i in enumerate(x.split('.')[::-1])]) - inter_ip = lambda x: '.'.join([str(x/(256**i)%256) for i in range(3,-1,-1)]) - network_cidr = [network['cidr'] for network in networks if network['name'] == 'DEPLOYMENT'][0] - if not network_cidr: - msg = "Error:The CIDR is blank of DEPLOYMENT!" 
- raise exception.Forbidden(msg) - cidr_end=network_cidr.split('/')[1] - ip_addr = network_cidr.split('/')[0] - ip_addr_int=ip_inter(ip_addr) - mask = ~(2**(32 - int(cidr_end)) - 1) - net_mask=inter_ip(mask) - ip_addr_min = inter_ip(ip_addr_int & (mask & 0xffffffff)) - ip_addr_max = inter_ip(ip_addr_int | (~mask & 0xffffffff)) - pxe_server_ip=inter_ip((ip_inter(ip_addr_min))+1) - client_ip_begin=inter_ip((ip_inter(ip_addr_min))+2) - client_ip_end=ip_addr_max - args = {'build_pxe': 'yes', 'eth_name': install_meta['deployment_interface'], 'ip_address': pxe_server_ip, 'net_mask': net_mask, - 'client_ip_begin': client_ip_begin, 'client_ip_end': client_ip_end} - ironic = get_ironicclient() - ironic.daisy.build_pxe(**args) - except exception.Invalid as e: - msg = "build pxe server failed" - raise exception.InvalidNetworkConfig(msg) - -def _get_network_plat(req,host_config, cluster_networks, dhcp_mac): - host_config['dhcp_mac'] = dhcp_mac - if host_config['interfaces']: - count = 0 - host_config_orig = copy.deepcopy(host_config) - for interface in host_config['interfaces']: - count += 1 - if (interface.has_key('assigned_networks') and - interface['assigned_networks']): - assigned_networks = copy.deepcopy(interface['assigned_networks']) - host_config['interfaces'][count-1]['assigned_networks'] = [] - alias = [] - for assigned_network in assigned_networks: - network_name = assigned_network['name'] - cluster_network = [network for network in cluster_networks if network['name'] in network_name][0] - alias.append(cluster_network['alias']) - # convert cidr to netmask - cidr_to_ip = "" - assigned_networks_ip=tecs_cmn.get_host_network_ip(req, host_config_orig, cluster_networks, network_name) - if cluster_network.get('cidr', None): - inter_ip = lambda x: '.'.join([str(x/(256**i)%256) for i in range(3,-1,-1)]) - cidr_to_ip = inter_ip(2**32-2**(32-int(cluster_network['cidr'].split('/')[1]))) - if cluster_network['alias'] == None or len(alias) == 1: - network_type = cluster_network['network_type'] - network_plat = dict(network_type=network_type, - ml2_type=cluster_network['ml2_type'], - capability=cluster_network['capability'], - physnet_name=cluster_network['physnet_name'], - gateway=cluster_network.get('gateway', ""), - ip=assigned_networks_ip, - #ip=cluster_network.get('ip', ""), - netmask=cidr_to_ip, - vlan_id=cluster_network.get('vlan_id', "")) - if network_type == "MANAGEMENT" and cluster_network.get('gateway', "") == "": - msg = "Error: The gateway of network 'MANAGEMENT' is not given!" 
- raise exception.Forbidden(msg) - host_config['interfaces'][count-1]['assigned_networks'].append(network_plat) - interface['ip']="" - interface['netmask']="" - interface['gateway']="" - - return host_config - -def get_cluster_hosts_config(req, cluster_id): - params = dict(limit=1000000) - try: - cluster_data = registry.get_cluster_metadata(req.context, cluster_id) - networks = registry.get_networks_detail(req.context, cluster_id) - all_roles = registry.get_roles_detail(req.context) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - - roles = [role for role in all_roles if role['cluster_id'] == cluster_id] - all_hosts_ids = cluster_data['nodes'] - hosts_config = [] - for host_id in all_hosts_ids: - host_detail = daisy_cmn.get_host_detail(req, host_id) - role_host_db_lv_size_lists = list() - if host_detail.has_key('role') and host_detail['role']: - host_roles = host_detail['role'] - for role in roles: - if role['name'] in host_detail['role'] and role['glance_lv_size']: - host_detail['glance_lv_size'] = role['glance_lv_size'] - if role.get('db_lv_size', None) and host_roles and role['name'] in host_roles: - role_host_db_lv_size_lists.append(role['db_lv_size']) - if role['name'] == 'COMPUTER' and role['name'] in host_detail['role'] and role['nova_lv_size']: - host_detail['nova_lv_size'] = role['nova_lv_size'] - service_disks = tecs_cmn.get_service_disk_list(req, {'role_id':role['id']}) - for service_disk in service_disks: - if service_disk['disk_location'] == 'local' and service_disk['service'] == 'mongodb': - host_detail['mongodb_lv_size'] = service_disk['size'] - break - if role_host_db_lv_size_lists: - host_detail['db_lv_size'] = max(role_host_db_lv_size_lists) - else: - host_detail['db_lv_size'] = 0 - - for interface in host_detail['interfaces']: - if interface['type'] == 'bond'and interface['mode'] in LINUX_BOND_MODE.keys(): - interface['mode'] = LINUX_BOND_MODE[interface['mode']] - - if (host_detail['os_status'] == host_os_status['INIT'] or - host_detail['os_status'] == host_os_status['INSTALLING'] or - host_detail['os_status'] == host_os_status['INSTALL_FAILED']): - host_dhcp_interface = [hi for hi in host_detail['interfaces'] if hi['is_deployment']] - if not host_dhcp_interface: - msg = "cann't find dhcp interface on host %s" % host_detail['id'] - raise exception.InvalidNetworkConfig(msg) - if len(host_dhcp_interface) > 1: - msg = "dhcp interface should only has one on host %s" % host_detail['id'] - raise exception.InvalidNetworkConfig(msg) - - host_config_detail = copy.deepcopy(host_detail) - host_config = _get_network_plat(req,host_config_detail, - networks, - host_dhcp_interface[0]['mac']) - hosts_config.append(tecs_cmn.sort_interfaces_by_pci(host_config)) - return hosts_config - -def check_tfg_exist(): - get_tfg_patch = "ls %s|grep CGSL_VPLAT-.*\.iso$" % daisy_tecs_path - obj = subprocess.Popen(get_tfg_patch, - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - (stdoutput, erroutput) = obj.communicate() - tfg_patch_pkg_file = "" - tfg_patch_pkg_name = "" - if stdoutput: - tfg_patch_pkg_name = stdoutput.split('\n')[0] - tfg_patch_pkg_file = daisy_tecs_path + tfg_patch_pkg_name - chmod_for_tfg_bin = 'chmod +x %s' % tfg_patch_pkg_file - daisy_cmn.subprocess_call(chmod_for_tfg_bin) - - if not stdoutput or not tfg_patch_pkg_name: - LOG.info(_("no CGSL_VPLAT iso file got in %s" % daisy_tecs_path)) - return "" - return tfg_patch_pkg_file - -def update_db_host_status(req, host_id, host_status): - """ - Update host status and 
intallation progress to db. - :return: - """ - try: - host_meta = {} - host_meta['os_progress'] = host_status['os_progress'] - host_meta['os_status'] = host_status['os_status'] - host_meta['messages'] = host_status['messages'] - registry.update_host_metadata(req.context, - host_id, - host_meta) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - -class OSInstall(): - """ - Class for install OS. - """ - """ Definition for install states.""" - def __init__(self, req, cluster_id): - self.req = req - self.cluster_id = cluster_id - #5s - self.time_step = 5 - # 30 min - self.single_host_install_timeout = 30 * (12*self.time_step) - - self.max_parallel_os_num = int(CONF.max_parallel_os_number) - self.cluster_hosts_install_timeout = (self.max_parallel_os_num/4 + 2 )* 60 * (12*self.time_step) - self.ironicclient = get_ironicclient() - - def _set_boot_or_power_state(self, user, passwd, addr, action): - count = 0 - repeat_times = 24 - while count < repeat_times: - set_obj = self.ironicclient.daisy.set_boot_or_power_state(user, - passwd, - addr, - action) - set_dict = dict([(f, getattr(set_obj, f, '')) for f in ['return_code', 'info']]) - rc = int(set_dict['return_code']) - if rc == 0: - LOG.info(_("set %s to '%s' successfully for %s times by ironic" % (addr,action,count+1))) - break - else: - count += 1 - LOG.info(_("try setting %s to '%s' failed for %s times by ironic" % (addr,action,count))) - time.sleep(count*2) - if count >= repeat_times: - message = "set %s to '%s' failed for 10 mins" % (addr,action) - raise exception.IMPIOprationFailed(message=message) - - def _baremetal_install_os(self, host_detail): - # os_install_disk = 'sda' - os_version_file = host_detail['os_version_file'] - if os_version_file: - test_os_version_exist = 'test -f %s' % os_version_file - daisy_cmn.subprocess_call(test_os_version_exist) - else: - self.message = "no OS version file configed for host %s" % host_detail['id'] - raise exception.NotFound(message=self.message) - - if host_detail.get('root_disk',None): - root_disk = host_detail['root_disk'] - else: - root_disk = 'sda' - if host_detail.get('root_lv_size',None): - root_lv_size_m = host_detail['root_lv_size'] - else: - root_lv_size_m = 51200 - memory_size_b_str = str(host_detail['memory']['total']) - memory_size_b_int = int(memory_size_b_str.strip().split()[0]) - memory_size_m = memory_size_b_int//1024 - memory_size_g = memory_size_m//1024 - swap_lv_size_m = host_detail['swap_lv_size'] - cinder_vg_size_m = 0 - disk_list = [] - disk_storage_size_b = 0 - for key in host_detail['disks']: - disk_list.append(host_detail['disks'][key]['name']) - stroage_size_str = host_detail['disks'][key]['size'] - stroage_size_b_int = int(stroage_size_str.strip().split()[0]) - disk_storage_size_b = disk_storage_size_b + stroage_size_b_int - - disk_list = ','.join(disk_list) - disk_storage_size_m = disk_storage_size_b//(1024*1024) - if host_detail.has_key('root_pwd') and host_detail['root_pwd']: - root_pwd = host_detail['root_pwd'] - else: - root_pwd = 'ossdbg1' - - if host_detail.has_key('isolcpus') and host_detail['isolcpus']: - isolcpus = host_detail['isolcpus'] - else: - isolcpus = None - - if host_detail.get('hugepages',None): - hugepages = host_detail['hugepages'] - else: - hugepages = 0 - - if host_detail.get('hugepagesize',None): - hugepagesize = host_detail['hugepagesize'] - else: - hugepagesize = '1G' - - - - #tfg_patch_pkg_file = check_tfg_exist() - - if (not host_detail['ipmi_user'] or - not host_detail['ipmi_passwd'] or - not 
host_detail['ipmi_addr'] ): - self.message = "Invalid ipmi information configed for host %s" % host_detail['id'] - raise exception.NotFound(message=self.message) - - - - self._set_boot_or_power_state(host_detail['ipmi_user'], - host_detail['ipmi_passwd'], - host_detail['ipmi_addr'], - 'pxe') - - kwargs = {'hostname':host_detail['name'], - 'iso_path':os_version_file, - #'tfg_bin':tfg_patch_pkg_file, - 'dhcp_mac':host_detail['dhcp_mac'], - 'storage_size':disk_storage_size_m, - 'memory_size':memory_size_g, - 'interfaces':host_detail['interfaces'], - 'root_lv_size':root_lv_size_m, - 'swap_lv_size':swap_lv_size_m, - 'cinder_vg_size':cinder_vg_size_m, - 'disk_list':disk_list, - 'root_disk':root_disk, - 'root_pwd':root_pwd, - 'isolcpus':isolcpus, - 'hugepagesize':hugepagesize, - 'hugepages':hugepages, - 'reboot':'no'} - - if host_detail.has_key('glance_lv_size'): - kwargs['glance_lv_size'] = host_detail['glance_lv_size'] - else: - kwargs['glance_lv_size'] = 0 - - if host_detail.has_key('db_lv_size') and host_detail['db_lv_size']: - kwargs['db_lv_size'] = host_detail['db_lv_size'] - else: - kwargs['db_lv_size'] = 0 - - if host_detail.has_key('mongodb_lv_size') and host_detail['mongodb_lv_size']: - kwargs['mongodb_lv_size'] = host_detail['mongodb_lv_size'] - else: - kwargs['mongodb_lv_size'] = 0 - - if host_detail.has_key('nova_lv_size') and host_detail['nova_lv_size']: - kwargs['nova_lv_size'] = host_detail['nova_lv_size'] - else: - kwargs['nova_lv_size'] = 0 - install_os_obj = self.ironicclient.daisy.install_os(**kwargs) - install_os_dict = dict([(f, getattr(install_os_obj, f, '')) for f in ['return_code', 'info']]) - rc = int(install_os_dict['return_code']) - if rc != 0: - install_os_description = install_os_dict['info'] - LOG.info(_("install os config failed because of '%s'" % (install_os_description))) - host_status = {'os_status':host_os_status['INSTALL_FAILED'], - 'os_progress':0, - 'messages':install_os_description} - update_db_host_status(self.req, host_detail['id'],host_status) - msg = "ironic install os return failed for host %s" % host_detail['id'] - raise exception.OSInstallFailed(message=msg) - - self._set_boot_or_power_state(host_detail['ipmi_user'], - host_detail['ipmi_passwd'], - host_detail['ipmi_addr'], - 'reset') - - - - def _install_os_by_rousource_type(self, hosts_detail): - # all hosts status set to 'init' before install os - for host_detail in hosts_detail: - host_status = {'os_status':host_os_status['INIT'], - 'os_progress':0, - 'messages':''} - update_db_host_status(self.req, host_detail['id'],host_status) - - for host_detail in hosts_detail: - self._baremetal_install_os(host_detail) - - - def _set_disk_start_mode(self, host_detail): - LOG.info(_("Set boot from disk for host %s" % (host_detail['id']))) - self._set_boot_or_power_state(host_detail['ipmi_user'], - host_detail['ipmi_passwd'], - host_detail['ipmi_addr'], - 'disk') - LOG.info(_("reboot host %s" % (host_detail['id']))) - self._set_boot_or_power_state(host_detail['ipmi_user'], - host_detail['ipmi_passwd'], - host_detail['ipmi_addr'], - 'reset') - - def _init_progress(self, host_detail, hosts_status): - host_id = host_detail['id'] - - host_status = hosts_status[host_id] = {} - host_status['os_status'] = host_os_status['INSTALLING'] - host_status['os_progress'] = 0 - host_status['count'] = 0 - if host_detail['resource_type'] == 'docker': - host_status['messages'] = "docker container is creating" - else: - host_status['messages'] = "OS installing" - - update_db_host_status(self.req, host_id, host_status) - - def 
_query_host_progress(self, host_detail, host_status, host_last_status): - host_id = host_detail['id'] - install_result_obj = \ - self.ironicclient.daisy.get_install_progress(host_detail['dhcp_mac']) - install_result = dict([(f, getattr(install_result_obj, f, '')) - for f in ['return_code', 'info', 'progress']]) - rc = int(install_result['return_code']) - host_status['os_progress'] = int(install_result['progress']) - if rc == 0: - if host_status['os_progress'] == 100: - time_cost = str(round((time.time() - daisy_cmn.os_install_start_time)/60, 2)) - LOG.info(_("It takes %s min for host %s to install os" % (time_cost, host_id))) - LOG.info(_("host %s install os completely." % host_id)) - host_status['os_status'] = host_os_status['ACTIVE'] - host_status['messages'] = "OS installed successfully" - # wait for nicfix script complete - time.sleep(10) - self._set_disk_start_mode(host_detail) - else: - if host_status['os_progress'] == host_last_status['os_progress']: - host_status['count'] = host_status['count'] + 1 - LOG.debug(_("host %s has kept %ss when progress is %s." % (host_id, - host_status['count']*self.time_step, host_status['os_progress']))) - else: - LOG.info(_("host %s install failed." % host_id)) - host_status['os_status'] = host_os_status['INSTALL_FAILED'] - host_status['messages'] = install_result['info'] - - def _query_progress(self, hosts_last_status, hosts_detail): - hosts_status = copy.deepcopy(hosts_last_status) - for host_detail in hosts_detail: - host_id = host_detail['id'] - if not hosts_status.has_key(host_id): - self._init_progress(host_detail, hosts_status) - continue - - host_status = hosts_status[host_id] - host_last_status = hosts_last_status[host_id] - #only process installing hosts after init, other hosts info will be kept in hosts_status - if host_status['os_status'] != host_os_status['INSTALLING']: - continue - - self._query_host_progress(host_detail, host_status, host_last_status) - - if host_status['count']*self.time_step >= self.single_host_install_timeout: - host_status['os_status'] = host_os_status['INSTALL_FAILED'] - if host_detail['resource_type'] == 'docker': - host_status['messages'] = "docker container created timeout" - else: - host_status['messages'] = "os installed timeout" - if (host_status['os_progress'] != host_last_status['os_progress'] or\ - host_status['os_status'] != host_last_status['os_status']): - host_status['count'] = 0 - update_db_host_status(self.req, host_id,host_status) - return hosts_status - - def _get_install_status(self, hosts_detail): - query_count = 0 - hosts_last_status = {} - while True: - hosts_install_status = self._query_progress(hosts_last_status, hosts_detail) - # if all hosts install over, break - installing_hosts = [id for id in hosts_install_status.keys() - if hosts_install_status[id]['os_status'] == host_os_status['INSTALLING']] - if not installing_hosts: - break - #after 3h, if some hosts are not 'active', label them to 'failed'. 
- elif query_count*self.time_step >= self.cluster_hosts_install_timeout: - for host_id,host_status in hosts_install_status.iteritems(): - if (host_status['os_status'] != host_os_status['ACTIVE'] and - host_status['os_status'] != host_os_status['INSTALL_FAILED']): - # label the host install failed because of time out for 3h - host_status['os_status'] = host_os_status['INSTALL_FAILED'] - host_status['messages'] = "cluster os installed timeout" - update_db_host_status(self.req, host_id, host_status) - break - else: - query_count += 1 - hosts_last_status = hosts_install_status - time.sleep(self.time_step) - return hosts_install_status - - def install_os(self, hosts_detail, role_hosts_ids): - if len(hosts_detail) > self.max_parallel_os_num: - install_hosts = hosts_detail[:self.max_parallel_os_num] - hosts_detail = hosts_detail[self.max_parallel_os_num:] - else: - install_hosts = hosts_detail - hosts_detail = [] - - install_hosts_id = [host_detail['id'] for host_detail in install_hosts] - LOG.info(_("Begin install os for hosts %s." % ','.join(install_hosts_id))) - daisy_cmn.os_install_start_time = time.time() - self._install_os_by_rousource_type(install_hosts) - LOG.info(_("Begin to query install progress...")) - # wait to install completely - cluster_install_status = self._get_install_status(install_hosts) - total_time_cost = str(round((time.time() - daisy_cmn.os_install_start_time)/60, 2)) - LOG.info(_("It totally takes %s min for all host to install os" % total_time_cost)) - LOG.info(_("OS install in cluster %s result is:" % self.cluster_id)) - LOG.info(_("%s %s %s" % ('host-id', 'os-status', 'description'))) - - for host_id,host_status in cluster_install_status.iteritems(): - LOG.info(_("%s %s %s" % (host_id, host_status['os_status'], host_status['messages']))) - if host_id in role_hosts_ids: - if host_status['os_status'] == host_os_status['INSTALL_FAILED']: - break - else: - role_hosts_ids.remove(host_id) - return (hosts_detail, role_hosts_ids) - - -def _os_thread_bin(req, host_ip, host_id): - host_meta = {} - password = "ossdbg1" - LOG.info(_("Begin update os for host %s." % (host_ip))) - cmd = 'mkdir -p /var/log/daisy/daisy_update/' - daisy_cmn.subprocess_call(cmd) - - var_log_path = "/var/log/daisy/daisy_update/%s_update_tfg.log" % host_ip - with open(var_log_path, "w+") as fp: - cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password) - daisy_cmn.subprocess_call(cmd,fp) - cmd = 'clush -S -w %s "mkdir -p /home/daisy_update"' % (host_ip,) - daisy_cmn.subprocess_call(cmd,fp) - cmd = 'clush -S -b -w %s "rm -rf /home/daisy_update/*"' % (host_ip,) - daisy_cmn.subprocess_call(cmd,fp) - cmd = 'clush -S -w %s -c /var/lib/daisy/tecs/*CGSL_VPLAT*.iso /var/lib/daisy/tecs/tfg_upgrade.sh --dest=/home/daisy_update' % (host_ip,) - daisy_cmn.subprocess_call(cmd,fp) - cmd = 'clush -S -w %s "chmod 777 /home/daisy_update/*"' % (host_ip,) - daisy_cmn.subprocess_call(cmd,fp) - host_meta['os_progress'] = 30 - host_meta['os_status'] = host_os_status['UPDATING'] - host_meta['messages'] = "" - update_db_host_status(req, host_id, host_meta) - try: - exc_result = subprocess.check_output( - 'clush -S -w %s "/home/daisy_update/tfg_upgrade.sh"' % (host_ip,), - shell=True, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - if e.returncode == 255 and "reboot" in e.output.strip(): - host_meta['os_progress'] = 100 - host_meta['os_status'] = host_os_status['ACTIVE'] - host_meta['messages'] = "upgrade tfg successfully,os reboot" - LOG.info(_("Update tfg for %s successfully,os reboot!" 
% host_ip)) - daisy_cmn.check_reboot_ping(host_ip) - else: - host_meta['os_progress'] = 0 - host_meta['os_status'] = host_os_status['UPDATE_FAILED'] - host_meta['messages'] = e.output.strip()[-400:-200].replace('\n',' ') - LOG.error(_("Update tfg for %s failed!" % host_ip)) - update_db_host_status(req, host_id, host_meta) - fp.write(e.output.strip()) - else: - host_meta['os_progress'] = 100 - host_meta['os_status'] = host_os_status['ACTIVE'] - host_meta['messages'] = "upgrade tfg successfully" - update_db_host_status(req, host_id, host_meta) - LOG.info(_("Update os for %s successfully!" % host_ip)) - fp.write(exc_result) - if "reboot" in exc_result: - daisy_cmn.check_reboot_ping(host_ip) -# this will be raise raise all the exceptions of the thread to log file -def os_thread_bin(req, host_ip, host_id): - try: - _os_thread_bin(req, host_ip, host_id) - except Exception as e: - LOG.exception(e.message) - raise exception.ThreadBinException(message=e.message) - - -def _get_host_os_version(host_ip, host_pwd='ossdbg1'): - version = "" - tfg_version_file = '/usr/sbin/tfg_showversion' - try: - subprocess.check_output("sshpass -p %s ssh -o StrictHostKeyChecking=no" - " %s test -f %s" % (host_pwd, host_ip, - tfg_version_file), - shell=True, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError: - LOG.info(_("Host %s os version is TFG" % host_ip)) - return version - try: - process = subprocess.Popen(["sshpass", "-p", "%s" % host_pwd, "ssh", - "-o StrictHostKeyChecking=no", "%s" % host_ip, - 'tfg_showversion'], shell=False, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) - version = process.stdout.read().strip('\n') - except subprocess.CalledProcessError: - msg = _("Get host %s os version by subprocess failed!" % host_ip) - raise exception.SubprocessCmdFailed(message=msg) - - if version: - LOG.info(_("Host %s os version is %s" % (host_ip, version))) - return version - else: - msg = _("Get host %s os version by tfg_showversion failed!" 
% host_ip) - LOG.error(msg) - raise exception.Invalid(message=msg) - - -def _cmp_os_version(new_os_file, old_os_version, target_host_ip, password='ossdbg1'): - shell_file = '/usr/sbin/tfg_showversion' - if old_os_version: - try: - subprocess.check_output("test -f %s" % shell_file, shell=True, - stderr=subprocess.STDOUT) - except subprocess.CalledProcessError: - scripts = ["sshpass -p %s scp -r -o StrictHostKeyChecking=no %s:%s " - "/usr/sbin/" % (password, target_host_ip, shell_file)] - tecs_cmn.run_scrip(scripts) - - cmp_script = "tfg_showversion %s %s" % (new_os_file, old_os_version) - try: - result = subprocess.check_output(cmp_script, shell=True, - stderr=subprocess.STDOUT) - except subprocess.CalledProcessError: - return -1 - else: - if new_os_file.find("Mimosa") != -1: - return 0 - else: - msg = _("Please use Mimosa os to upgrade instead of TFG") - LOG.error(msg) - raise exception.Forbidden(message=msg) - return result.find("yes") - - -def upgrade_os(req, hosts_list): - upgrade_hosts = [] - max_parallel_os_upgrade_number = int(CONF.max_parallel_os_upgrade_number) - while hosts_list: - host_meta = {} - threads = [] - if len(hosts_list) > max_parallel_os_upgrade_number: - upgrade_hosts = hosts_list[:max_parallel_os_upgrade_number] - hosts_list = hosts_list[max_parallel_os_upgrade_number:] - else: - upgrade_hosts = hosts_list - hosts_list = [] - - new_os_file = check_tfg_exist() - for host_info in upgrade_hosts: - host_id = host_info.keys()[0] - host_ip = host_info.values()[0] - host_detail = daisy_cmn.get_host_detail(req, host_id) - target_host_os = _get_host_os_version(host_ip, host_detail['root_pwd']) - - if _cmp_os_version(new_os_file, target_host_os, host_ip) == 0: - host_meta['os_progress'] = 10 - host_meta['os_status'] = host_os_status['UPDATING'] - host_meta['messages'] = "os updating,begin copy iso" - update_db_host_status(req, host_id, host_meta) - t = threading.Thread(target=os_thread_bin, args=(req, host_ip, - host_id)) - t.setDaemon(True) - t.start() - threads.append(t) - else: - LOG.warn(_("new os version is lower than or equal to that of " - "host %s, don't need to upgrade!" % host_ip)) - try: - for t in threads: - t.join() - except: - LOG.warn(_("Join update thread %s failed!" % t)) - else: - for host_info in upgrade_hosts: - update_failed_flag = False - host_id = host_info.keys()[0] - host_ip = host_info.values()[0] - host = registry.get_host_metadata(req.context, host_id) - if host['os_status'] == host_os_status['UPDATE_FAILED'] or host['os_status'] == host_os_status['INIT']: - update_failed_flag = True - raise exception.ThreadBinException("%s update tfg failed! %s" % (host_ip, host['messages'])) - if not update_failed_flag: - host_meta = {} - host_meta['os_progress'] = 100 - host_meta['os_status'] = host_os_status['ACTIVE'] - host_meta['messages'] = "os upgrade successfully" - update_db_host_status(req, host_id,host_meta) +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +/install endpoint for tecs API +""" +import copy +import subprocess +import time + +from oslo_config import cfg +from oslo_log import log as logging +from webob.exc import HTTPBadRequest +import threading + +from daisy import i18n + +from daisy.common import exception +from daisy.api import common +from daisy.common import utils +import daisy.registry.client.v1.api as registry +from ironicclient import client as ironic_client +from daisyclient.v1 import client as daisy_client +import daisy.api.backends.common as daisy_cmn +import daisy.api.backends.tecs.common as tecs_cmn + + +import ConfigParser +DISCOVER_DEFAULTS = { + 'listen_port': '5050', + 'ironic_url': 'http://127.0.0.1:6385/v1', +} + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW + +CONF = cfg.CONF +install_opts = [ + cfg.StrOpt('max_parallel_os_number', default=10, + help='Maximum number of hosts install os at the same time.'), +] +CONF.register_opts(install_opts) +upgrade_opts = [ + cfg.StrOpt('max_parallel_os_upgrade_number', default=10, + help='Maximum number of hosts upgrade os at the same time.'), +] +CONF.register_opts(upgrade_opts) + +host_os_status = { + 'INIT': 'init', + 'PRE_INSTALL': 'pre-install', + 'INSTALLING': 'installing', + 'ACTIVE': 'active', + 'INSTALL_FAILED': 'install-failed', + 'UPDATING': 'updating', + 'UPDATE_FAILED': 'update-failed' +} + +LINUX_BOND_MODE = {'balance-rr': '0', 'active-backup': '1', + 'balance-xor': '2', 'broadcast': '3', + '802.3ad': '4', 'balance-tlb': '5', + 'balance-alb': '6'} + +daisy_tecs_path = tecs_cmn.daisy_tecs_path + + +def get_ironicclient(): # pragma: no cover + """Get Ironic client instance.""" + config_discoverd = ConfigParser.ConfigParser(defaults=DISCOVER_DEFAULTS) + config_discoverd.read("/etc/ironic-discoverd/discoverd.conf") + ironic_url = config_discoverd.get("discoverd", "ironic_url") + args = {'os_auth_token': 'fake', + 'ironic_url': ironic_url} + return ironic_client.get_client(1, **args) + + +def get_daisyclient(): + """Get Daisy client instance.""" + config_daisy = ConfigParser.ConfigParser() + config_daisy.read("/etc/daisy/daisy-api.conf") + daisy_port = config_daisy.get("DEFAULT", "bind_port") + args = {'version': 1.0, 'endpoint': 'http://127.0.0.1:' + daisy_port} + return daisy_client.Client(**args) + + +def pxe_server_build(req, install_meta): + params = {'filters': {'type': 'system'}} + try: + networks = registry.get_all_networks(req.context, **params) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + + try: + ip_inter = lambda x: sum([256 ** j * int(i) + for j, i in enumerate(x.split('.')[::-1])]) + inter_ip = lambda x: '.'.join( + [str(x / (256**i) % 256) for i in range(3, -1, -1)]) + for network in networks: + if 'system' in network['type']: + network_cidr = network.get('cidr') + if not network_cidr: + msg = "Error:The CIDR is blank of pxe server!" 
+ LOG.error(msg) + raise exception.Forbidden(msg) + cidr_end = network_cidr.split('/')[1] + mask = ~(2**(32 - int(cidr_end)) - 1) + net_mask = inter_ip(mask) + pxe_server_ip = network.get('ip') + ip_ranges = network.get('ip_ranges') + for ip_range in ip_ranges: + client_ip_begin = ip_range.get('start') + client_ip_end = ip_range.get('end') + ip_addr = network_cidr.split('/')[0] + ip_addr_int = ip_inter(ip_addr) + ip_addr_min = inter_ip(ip_addr_int & (mask & 0xffffffff)) + ip_addr_max = inter_ip(ip_addr_int | (~mask & 0xffffffff)) + if not client_ip_begin and not client_ip_end: + client_ip_begin = inter_ip((ip_inter(ip_addr_min)) + 2) + client_ip_end = ip_addr_max + if pxe_server_ip: + ip_in_cidr = utils.is_ip_in_cidr(pxe_server_ip, + network_cidr) + if not ip_in_cidr: + msg = "Error:The ip '%s' is not in cidr '%s'" \ + " range." % (pxe_server_ip, network_cidr) + LOG.error(msg) + raise HTTPBadRequest(explanation=msg) + else: + pxe_server_ip = inter_ip((ip_inter(ip_addr_min)) + 1) + + eth_name = install_meta.get('deployment_interface') + if not eth_name: + msg = "Error:The nic name is blank of build pxe server!" + LOG.error(msg) + raise exception.Forbidden(msg) + args = {'build_pxe': 'yes', + 'eth_name': eth_name, + 'ip_address': pxe_server_ip, + 'net_mask': net_mask, + 'client_ip_begin': client_ip_begin, + 'client_ip_end': client_ip_end} + ironic = get_ironicclient() + ironic.daisy.build_pxe(**args) + except exception.Invalid as e: + msg = "build pxe server failed" + LOG.error(msg) + raise exception.InvalidNetworkConfig(msg) + + +def _get_network_plat(req, host_config, cluster_networks, dhcp_mac): + host_config['dhcp_mac'] = dhcp_mac + if host_config['interfaces']: + count = 0 + host_config_orig = copy.deepcopy(host_config) + for interface in host_config['interfaces']: + count += 1 + # if (interface.has_key('assigned_networks') and + if ('assigned_networks' in interface and + interface['assigned_networks']): + assigned_networks = copy.deepcopy( + interface['assigned_networks']) + host_config['interfaces'][count - 1]['assigned_networks'] = [] + alias = [] + for assigned_network in assigned_networks: + network_name = assigned_network['name'] + cluster_network = [ + network for network in cluster_networks + if network['name'] in network_name][0] + alias.append(cluster_network['alias']) + # convert cidr to netmask + cidr_to_ip = "" + assigned_networks_ip = tecs_cmn.get_host_network_ip( + req, host_config_orig, cluster_networks, network_name) + if cluster_network.get('cidr', None): + inter_ip = lambda x: '.'.join( + [str(x / (256**i) % 256) for i in + range(3, -1, -1)]) + cidr_to_ip = inter_ip( + 2**32 - 2**(32 - int( + cluster_network['cidr'].split('/')[1]))) + if cluster_network['alias'] is None or len(alias) == 1: + network_type = cluster_network['network_type'] + network_plat = dict(network_type=network_type, + ml2_type=cluster_network[ + 'ml2_type'], + capability=cluster_network[ + 'capability'], + physnet_name=cluster_network[ + 'physnet_name'], + gateway=cluster_network.get( + 'gateway', ""), + ip=assigned_networks_ip, + # ip=cluster_network.get('ip', ""), + netmask=cidr_to_ip, + vlan_id=cluster_network.get( + 'vlan_id', "")) + host_config['interfaces'][ + count - 1][ + 'assigned_networks'].append(network_plat) + interface['ip'] = "" + interface['netmask'] = "" + interface['gateway'] = "" + + return host_config + + +def get_cluster_hosts_config(req, cluster_id): + # params = dict(limit=1000000) + try: + cluster_data = registry.get_cluster_metadata(req.context, cluster_id) + networks = 
registry.get_networks_detail(req.context, cluster_id)
+        all_roles = registry.get_roles_detail(req.context)
+    except exception.Invalid as e:
+        raise HTTPBadRequest(explanation=e.msg, request=req)
+
+    roles = [role for role in all_roles if role['cluster_id'] == cluster_id]
+    all_hosts_ids = cluster_data['nodes']
+    hosts_config = []
+    for host_id in all_hosts_ids:
+        host_detail = daisy_cmn.get_host_detail(req, host_id)
+        role_host_db_lv_size_lists = list()
+        # if host_detail.has_key('role') and host_detail['role']:
+        if 'role' in host_detail and host_detail['role']:
+            host_roles = host_detail['role']
+            for role in roles:
+                if role['name'] in host_detail['role'] and\
+                        role['glance_lv_size']:
+                    host_detail['glance_lv_size'] = role['glance_lv_size']
+                if role.get('db_lv_size', None) and host_roles and\
+                        role['name'] in host_roles:
+                    role_host_db_lv_size_lists.append(role['db_lv_size'])
+                if role['name'] == 'COMPUTER' and\
+                        role['name'] in host_detail['role'] and\
+                        role['nova_lv_size']:
+                    host_detail['nova_lv_size'] = role['nova_lv_size']
+                service_disks = tecs_cmn.get_service_disk_list(
+                    req, {'role_id': role['id']})
+                for service_disk in service_disks:
+                    if service_disk['disk_location'] == 'local' and\
+                            service_disk['service'] == 'mongodb':
+                        host_detail['mongodb_lv_size'] = service_disk['size']
+                        break
+            if role_host_db_lv_size_lists:
+                host_detail['db_lv_size'] = max(role_host_db_lv_size_lists)
+            else:
+                host_detail['db_lv_size'] = 0
+
+        for interface in host_detail['interfaces']:
+            if interface['type'] == 'bond'and\
+                    interface['mode'] in LINUX_BOND_MODE.keys():
+                interface['mode'] = LINUX_BOND_MODE[interface['mode']]
+
+        if (host_detail['os_status'] == host_os_status['INIT'] or
+                host_detail['os_status'] == host_os_status['PRE_INSTALL'] or
+                host_detail['os_status'] == host_os_status['INSTALLING'] or
+                host_detail['os_status'] == host_os_status['INSTALL_FAILED']):
+            pxe_macs = common.get_pxe_mac(host_detail)
+            if not pxe_macs:
+                msg = "can't find dhcp interface on host %s" % host_detail[
+                    'id']
+                raise exception.InvalidNetworkConfig(msg)
+            if len(pxe_macs) > 1:
+                msg = "there should be only one dhcp interface on host %s"\
+                    % host_detail['id']
+                raise exception.InvalidNetworkConfig(msg)
+
+            host_config_detail = copy.deepcopy(host_detail)
+            host_config = _get_network_plat(req, host_config_detail,
+                                            networks,
+                                            pxe_macs[0])
+            hosts_config.append(tecs_cmn.sort_interfaces_by_pci(networks,
+                                                                host_config))
+    return hosts_config
+
+
+def check_tfg_exist():
+    get_tfg_patch = "ls %s|grep CGSL_VPLAT-.*\.iso$" % daisy_tecs_path
+    obj = subprocess.Popen(get_tfg_patch,
+                           shell=True,
+                           stdout=subprocess.PIPE,
+                           stderr=subprocess.PIPE)
+    (stdoutput, erroutput) = obj.communicate()
+    tfg_patch_pkg_file = ""
+    tfg_patch_pkg_name = ""
+    if stdoutput:
+        tfg_patch_pkg_name = stdoutput.split('\n')[0]
+        tfg_patch_pkg_file = daisy_tecs_path + tfg_patch_pkg_name
+        chmod_for_tfg_bin = 'chmod +x %s' % tfg_patch_pkg_file
+        daisy_cmn.subprocess_call(chmod_for_tfg_bin)
+
+    if not stdoutput or not tfg_patch_pkg_name:
+        LOG.info(_("no CGSL_VPLAT iso file found in %s" % daisy_tecs_path))
+        return ""
+    return tfg_patch_pkg_file
+
+
+def update_db_host_status(req, host_id, host_status):
+    """
+    Update host status and installation progress to db.
+ :return: + """ + try: + host_meta = {} + host_meta['os_progress'] = host_status['os_progress'] + host_meta['os_status'] = host_status['os_status'] + host_meta['messages'] = host_status['messages'] + registry.update_host_metadata(req.context, + host_id, + host_meta) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + + +class OSInstall(): + + """ + Class for install OS. + """ + """ Definition for install states.""" + + def __init__(self, req, cluster_id): + self.req = req + self.cluster_id = cluster_id + # 5s + self.time_step = 5 + # 30 min + self.single_host_install_timeout = 30 * (12 * self.time_step) + + self.max_parallel_os_num = int(CONF.max_parallel_os_number) + self.cluster_hosts_install_timeout = ( + self.max_parallel_os_num / 4 + 2) * 60 * (12 * self.time_step) + self.ironicclient = get_ironicclient() + self.daisyclient = get_daisyclient() + + def _set_boot_or_power_state(self, user, passwd, addr, action): + count = 0 + repeat_times = 24 + while count < repeat_times: + set_obj = self.ironicclient.daisy.set_boot_or_power_state(user, + passwd, + addr, + action) + set_dict = dict([(f, getattr(set_obj, f, '')) + for f in ['return_code', 'info']]) + rc = int(set_dict['return_code']) + if rc == 0: + LOG.info( + _("Set %s to '%s' successfully for %s times by ironic" % ( + addr, action, count + 1))) + # One host set 'disk' return success, but it still 'pxe' + # mode in German site. If we have a method to confirm, + # this can be deleted. + if action == 'pxe' or action == 'disk': + self.ironicclient.daisy.set_boot_or_power_state(user, + passwd, + addr, + action) + break + else: + count += 1 + LOG.info( + _("Try setting %s to '%s' failed for %s times by ironic" + % (addr, action, count))) + time.sleep(count * 2) + if count >= repeat_times: + message = "Set %s to '%s' failed for 10 mins" % (addr, action) + raise exception.IMPIOprationFailed(message=message) + + def _install_os_for_baremetal(self, host_detail): + # os_install_disk = 'sda' + os_version_file = host_detail['os_version_file'] + if os_version_file: + test_os_version_exist = 'test -f %s' % os_version_file + daisy_cmn.subprocess_call(test_os_version_exist) + else: + self.message = "no OS version file configed for host %s"\ + % host_detail['id'] + raise exception.NotFound(message=self.message) + + if host_detail.get('root_disk', None): + root_disk = host_detail['root_disk'] + else: + root_disk = 'sda' + if host_detail.get('root_lv_size', None): + root_lv_size_m = host_detail['root_lv_size'] + else: + root_lv_size_m = 102400 + memory_size_b_str = str(host_detail['memory']['total']) + memory_size_b_int = int(memory_size_b_str.strip().split()[0]) + memory_size_m = memory_size_b_int // 1024 + memory_size_g = memory_size_m // 1024 + swap_lv_size_m = host_detail['swap_lv_size'] + cinder_vg_size_m = 0 + disk_list = [] + disk_storage_size_b = 0 + for key in host_detail['disks']: + if host_detail['disks'][key]['disk'].find("-fc-") != -1 \ + or host_detail['disks'][key]['disk'].\ + find("-iscsi-") != -1 \ + or host_detail['disks'][key]['name'].\ + find("mpath") != -1 \ + or host_detail['disks'][key]['name'].\ + find("spath") != -1: + continue + disk_list.append(host_detail['disks'][key]['name']) + stroage_size_str = host_detail['disks'][key]['size'] + stroage_size_b_int = int(stroage_size_str.strip().split()[0]) + disk_storage_size_b = disk_storage_size_b + stroage_size_b_int + disk_list = ','.join(disk_list) + disk_storage_size_m = disk_storage_size_b // (1024 * 1024) + + if 'root_pwd' in host_detail and 
host_detail['root_pwd']: + root_pwd = host_detail['root_pwd'] + else: + root_pwd = 'ossdbg1' + + isolcpus = None + if 'os_cpus' in host_detail and host_detail['os_cpus']: + os_cpus = utils.cpu_str_to_list(host_detail['os_cpus']) + host_cpu = host_detail.get('cpu', {}) + if 'total' in host_cpu: + total_cpus = range(0, host_cpu['total']) + isolcpus_list = list(set(total_cpus) - set(os_cpus)) + isolcpus_list.sort() + isolcpus = utils.cpu_list_to_str(isolcpus_list) + + if host_detail.get('hugepages', None): + hugepages = host_detail['hugepages'] + else: + hugepages = 0 + + if host_detail.get('hugepagesize', None): + hugepagesize = host_detail['hugepagesize'] + else: + hugepagesize = '1G' + # tfg_patch_pkg_file = check_tfg_exist() + + if host_detail.get('hwm_id'): + host_hwm_meta = { + "hwm_ip": host_detail.get('hwm_ip'), + "hwm_id": host_detail.get('hwm_id'), + "boot_type": "pxe" + } + self.daisyclient.node.set_boot(**host_hwm_meta) + else: + if (not host_detail['ipmi_user'] or + not host_detail['ipmi_passwd'] or + not host_detail['ipmi_addr']): + self.message = "Invalid ipmi information configed for host %s"\ + % host_detail['id'] + raise exception.NotFound(message=self.message) + + self._set_boot_or_power_state(host_detail['ipmi_user'], + host_detail['ipmi_passwd'], + host_detail['ipmi_addr'], + 'pxe') + + kwargs = {'hostname': host_detail['name'], + 'iso_path': os_version_file, + # 'tfg_bin':tfg_patch_pkg_file, + 'dhcp_mac': host_detail['dhcp_mac'], + 'storage_size': disk_storage_size_m, + 'memory_size': memory_size_g, + 'interfaces': host_detail['interfaces'], + 'root_lv_size': root_lv_size_m, + 'swap_lv_size': swap_lv_size_m, + 'cinder_vg_size': cinder_vg_size_m, + 'disk_list': disk_list, + 'root_disk': root_disk, + 'root_pwd': root_pwd, + 'isolcpus': isolcpus, + 'hugepagesize': hugepagesize, + 'hugepages': hugepages, + 'reboot': 'no'} + + # if host_detail.has_key('glance_lv_size'): + if 'glance_lv_size' in host_detail: + kwargs['glance_lv_size'] = host_detail['glance_lv_size'] + else: + kwargs['glance_lv_size'] = 0 + + # if host_detail.has_key('db_lv_size') and host_detail['db_lv_size']: + if 'db_lv_size' in host_detail and host_detail['db_lv_size']: + kwargs['db_lv_size'] = host_detail['db_lv_size'] + else: + kwargs['db_lv_size'] = 0 + + # if host_detail.has_key('mongodb_lv_size') and + # host_detail['mongodb_lv_size']: + if 'mongodb_lv_size' in host_detail and host_detail['mongodb_lv_size']: + kwargs['mongodb_lv_size'] = host_detail['mongodb_lv_size'] + else: + kwargs['mongodb_lv_size'] = 0 + + # if host_detail.has_key('nova_lv_size') and + # host_detail['nova_lv_size']: + if 'nova_lv_size' in host_detail and host_detail['nova_lv_size']: + kwargs['nova_lv_size'] = host_detail['nova_lv_size'] + else: + kwargs['nova_lv_size'] = 0 + install_os_obj = self.ironicclient.daisy.install_os(**kwargs) + install_os_dict = dict( + [(f, getattr(install_os_obj, f, '')) for f in + ['return_code', 'info']]) + rc = int(install_os_dict['return_code']) + if rc != 0: + install_os_description = install_os_dict['info'] + LOG.info( + _("install os config failed because of '%s'" + % (install_os_description))) + host_status = {'os_status': host_os_status['INSTALL_FAILED'], + 'os_progress': 0, + 'messages': install_os_description} + update_db_host_status(self.req, host_detail['id'], host_status) + msg = "ironic install os return failed for host %s" % host_detail[ + 'id'] + raise exception.OSInstallFailed(message=msg) + + if host_detail.get('hwm_id'): + host_hwm_meta = { + "hwm_ip": host_detail.get('hwm_ip'), + 
"hwm_id": host_detail.get('hwm_id') + } + self.daisyclient.node.restart(**host_hwm_meta) + else: + self._set_boot_or_power_state(host_detail['ipmi_user'], + host_detail['ipmi_passwd'], + host_detail['ipmi_addr'], + 'reset') + + def _begin_install_os(self, hosts_detail): + # all hosts status is set to 'pre-install' before os installing + for host_detail in hosts_detail: + host_status = {'os_status': host_os_status['PRE_INSTALL'], + 'os_progress': 0, + 'messages': 'Preparing for OS installation'} + update_db_host_status(self.req, host_detail['id'], host_status) + + for host_detail in hosts_detail: + self._install_os_for_baremetal(host_detail) + + def _set_disk_start_mode(self, host_detail): + LOG.info(_("Set boot from disk for host %s" % (host_detail['id']))) + if host_detail.get('hwm_id'): + host_hwm_meta = { + "hwm_ip": host_detail.get('hwm_ip'), + "hwm_id": host_detail.get('hwm_id'), + "boot_type": "disk" + } + self.daisyclient.node.set_boot(**host_hwm_meta) + LOG.info(_("reboot host %s" % (host_detail['id']))) + host_hwm_meta.pop('boot_type') + self.daisyclient.node.restart(**host_hwm_meta) + else: + self._set_boot_or_power_state(host_detail['ipmi_user'], + host_detail['ipmi_passwd'], + host_detail['ipmi_addr'], + 'disk') + LOG.info(_("reboot host %s" % (host_detail['id']))) + self._set_boot_or_power_state(host_detail['ipmi_user'], + host_detail['ipmi_passwd'], + host_detail['ipmi_addr'], + 'reset') + + def _init_progress(self, host_detail, hosts_status): + host_id = host_detail['id'] + + host_status = hosts_status[host_id] = {} + host_status['os_status'] = host_os_status['INSTALLING'] + host_status['os_progress'] = 0 + host_status['count'] = 0 + if host_detail['resource_type'] == 'docker': + host_status['messages'] = "docker container is creating" + else: + host_status['messages'] = "OS installing" + + update_db_host_status(self.req, host_id, host_status) + + def _query_host_progress(self, host_detail, host_status, host_last_status): + host_id = host_detail['id'] + install_result_obj = \ + self.ironicclient.daisy.get_install_progress( + host_detail['dhcp_mac']) + install_result = dict([(f, getattr(install_result_obj, f, '')) + for f in ['return_code', 'info', 'progress']]) + rc = int(install_result['return_code']) + host_status['os_progress'] = int(install_result['progress']) + if rc == 0: + if host_status['os_progress'] == 100: + time_cost = str( + round((time.time() - + daisy_cmn.os_install_start_time) / 60, 2)) + LOG.info( + _("It takes %s min for host %s to install os" + % (time_cost, host_id))) + LOG.info(_("host %s install os completely." % host_id)) + host_status['os_status'] = host_os_status['ACTIVE'] + host_status['messages'] = "OS installed successfully" + # wait for nicfix script complete + time.sleep(10) + self._set_disk_start_mode(host_detail) + else: + if host_status['os_progress'] ==\ + host_last_status['os_progress']: + host_status['count'] = host_status['count'] + 1 + LOG.debug(_("host %s has kept %ss when progress is %s." + % (host_id, + host_status['count'] * self.time_step, + host_status['os_progress']))) + else: + LOG.info(_("host %s install failed." 
% host_id)) + host_status['os_status'] = host_os_status['INSTALL_FAILED'] + host_status['messages'] = install_result['info'] + + def _query_progress(self, hosts_last_status, hosts_detail): + hosts_status = copy.deepcopy(hosts_last_status) + for host_detail in hosts_detail: + host_id = host_detail['id'] + # if not hosts_status.has_key(host_id): + if host_id not in hosts_status: + self._init_progress(host_detail, hosts_status) + continue + + host_status = hosts_status[host_id] + host_last_status = hosts_last_status[host_id] + # only process installing hosts after init, other hosts info will + # be kept in hosts_status + if host_status['os_status'] != host_os_status['INSTALLING']: + continue + + self._query_host_progress( + host_detail, host_status, host_last_status) + + if host_status['count'] * self.time_step >=\ + self.single_host_install_timeout: + host_status['os_status'] = host_os_status['INSTALL_FAILED'] + if host_detail['resource_type'] == 'docker': + host_status[ + 'messages'] = "docker container created timeout" + else: + host_status['messages'] = "os installed timeout" + if (host_status['os_progress'] != + host_last_status['os_progress'] or + host_status['os_status'] != host_last_status['os_status']): + host_status['count'] = 0 + update_db_host_status(self.req, host_id, host_status) + return hosts_status + + def _get_install_status(self, hosts_detail): + query_count = 0 + hosts_last_status = {} + while True: + hosts_install_status = self._query_progress( + hosts_last_status, hosts_detail) + # if all hosts install over, break + installing_hosts = [id for id in hosts_install_status.keys() + if hosts_install_status[id]['os_status'] == + host_os_status['INSTALLING']] + if not installing_hosts: + break + # after 3h, if some hosts are not 'active', label them to 'failed'. + elif query_count * self.time_step >=\ + self.cluster_hosts_install_timeout: + for host_id, host_status in hosts_install_status.iteritems(): + if (host_status['os_status'] != + host_os_status['ACTIVE'] and + host_status['os_status'] != + host_os_status['INSTALL_FAILED']): + # label the host install failed because of time out for + # 3h + host_status['os_status'] = host_os_status[ + 'INSTALL_FAILED'] + host_status[ + 'messages'] = "cluster os installed timeout" + update_db_host_status(self.req, host_id, host_status) + break + else: + query_count += 1 + hosts_last_status = hosts_install_status + time.sleep(self.time_step) + return hosts_install_status + + def install_os(self, hosts_detail, role_hosts_ids): + if len(hosts_detail) > self.max_parallel_os_num: + install_hosts = hosts_detail[:self.max_parallel_os_num] + hosts_detail = hosts_detail[self.max_parallel_os_num:] + else: + install_hosts = hosts_detail + hosts_detail = [] + + install_hosts_id = [host_detail['id'] for host_detail in install_hosts] + LOG.info( + _("Begin install os for hosts %s." 
% ','.join(install_hosts_id))) + daisy_cmn.os_install_start_time = time.time() + self._begin_install_os(install_hosts) + LOG.info(_("Begin to query install progress...")) + # wait to install completely + cluster_install_status = self._get_install_status(install_hosts) + total_time_cost = str( + round((time.time() - daisy_cmn.os_install_start_time) / 60, 2)) + LOG.info( + _("It totally takes %s min for all host to install os" + % total_time_cost)) + LOG.info(_("OS install in cluster %s result is:" % self.cluster_id)) + LOG.info(_("%s %s %s" % + ('host-id', 'os-status', 'description'))) + + for host_id, host_status in cluster_install_status.iteritems(): + LOG.info( + _("%s %s %s" % (host_id, host_status['os_status'], + host_status['messages']))) + if host_id in role_hosts_ids: + if host_status['os_status'] ==\ + host_os_status['INSTALL_FAILED']: + break + else: + role_hosts_ids.remove(host_id) + return (hosts_detail, role_hosts_ids) + + +def _os_thread_bin(req, host_ip, host_id): + host_meta = {} + password = "ossdbg1" + LOG.info(_("Begin update os for host %s." % (host_ip))) + cmd = 'mkdir -p /var/log/daisy/daisy_update/' + daisy_cmn.subprocess_call(cmd) + + var_log_path = "/var/log/daisy/daisy_update/%s_update_tfg.log" % host_ip + with open(var_log_path, "w+") as fp: + cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password) + daisy_cmn.subprocess_call(cmd, fp) + cmd = 'clush -S -w %s "mkdir -p /home/daisy_update/"' % (host_ip,) + daisy_cmn.subprocess_call(cmd, fp) + cmd = 'clush -S -b -w %s "rm -rf /home/daisy_update/*"' % (host_ip,) + daisy_cmn.subprocess_call(cmd, fp) + cmd = 'clush -S -w %s -c /var/lib/daisy/tecs/*CGSL_VPLAT*.iso\ + /var/lib/daisy/tecs/tfg_upgrade.sh \ + --dest=/home/daisy_update' % ( + host_ip,) + daisy_cmn.subprocess_call(cmd, fp) + cmd = 'clush -S -w %s "chmod 777 /home/daisy_update/*"' % (host_ip,) + daisy_cmn.subprocess_call(cmd, fp) + host_meta['os_progress'] = 30 + host_meta['os_status'] = host_os_status['UPDATING'] + host_meta['messages'] = "os updating,copy iso successfully" + update_db_host_status(req, host_id, host_meta) + try: + exc_result = subprocess.check_output( + 'clush -S -w %s "/home/daisy_update/tfg_upgrade.sh"' % ( + host_ip,), + shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + if e.returncode == 255 and "reboot" in e.output.strip(): + host_meta['os_progress'] = 100 + host_meta['os_status'] = host_os_status['ACTIVE'] + host_meta['messages'] = "upgrade tfg successfully,os reboot" + LOG.info( + _("Update tfg for %s successfully,os reboot!" % host_ip)) + daisy_cmn.check_reboot_ping(host_ip) + else: + host_meta['os_progress'] = 0 + host_meta['os_status'] = host_os_status['UPDATE_FAILED'] + host_meta[ + 'messages'] =\ + e.output.strip()[-400:-200].replace('\n', ' ') + LOG.error(_("Update tfg for %s failed!" % host_ip)) + update_db_host_status(req, host_id, host_meta) + fp.write(e.output.strip()) + else: + host_meta['os_progress'] = 100 + host_meta['os_status'] = host_os_status['ACTIVE'] + host_meta['messages'] = "upgrade tfg successfully" + update_db_host_status(req, host_id, host_meta) + LOG.info(_("Update os for %s successfully!" 
% host_ip))
+            fp.write(exc_result)
+            if "reboot" in exc_result:
+                daisy_cmn.check_reboot_ping(host_ip)
+
+
+# this will raise all the exceptions of the thread to the log file
+def os_thread_bin(req, host_ip, host_id):
+    try:
+        _os_thread_bin(req, host_ip, host_id)
+    except Exception as e:
+        LOG.exception(e.message)
+        raise exception.ThreadBinException(message=e.message)
+
+
+def _get_host_os_version(host_ip, host_pwd='ossdbg1'):
+    version = ""
+    tfg_version_file = '/usr/sbin/tfg_showversion'
+    try:
+        subprocess.check_output("sshpass -p %s ssh -o StrictHostKeyChecking=no"
+                                " %s test -f %s" % (host_pwd, host_ip,
+                                                    tfg_version_file),
+                                shell=True, stderr=subprocess.STDOUT)
+    except subprocess.CalledProcessError:
+        LOG.info(_("Host %s os version is TFG" % host_ip))
+        return version
+    try:
+        process =\
+            subprocess.Popen(["sshpass", "-p", "%s" % host_pwd, "ssh",
+                              "-o StrictHostKeyChecking=no", "%s" % host_ip,
+                              'tfg_showversion'], shell=False,
+                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        version = process.stdout.read().strip('\n')
+    except subprocess.CalledProcessError:
+        msg = _("Get host %s os version by subprocess failed!" % host_ip)
+        raise exception.SubprocessCmdFailed(message=msg)
+
+    if version:
+        LOG.info(_("Host %s os version is %s" % (host_ip, version)))
+        return version
+    else:
+        msg = _("Get host %s os version by tfg_showversion failed!" % host_ip)
+        LOG.error(msg)
+        raise exception.Invalid(message=msg)
+
+
+def _cmp_os_version(new_os_file, old_os_version,
+                    target_host_ip, password='ossdbg1'):
+    shell_file = '/usr/sbin/tfg_showversion'
+    if old_os_version:
+        try:
+            subprocess.check_output("test -f %s" % shell_file, shell=True,
+                                    stderr=subprocess.STDOUT)
+        except subprocess.CalledProcessError:
+            scripts = ["sshpass -p %s scp -r -o\
+                        StrictHostKeyChecking=no %s:%s "
+                       "/usr/sbin/" % (password, target_host_ip, shell_file)]
+            tecs_cmn.run_scrip(scripts)
+
+        cmp_script = "tfg_showversion %s %s" % (new_os_file, old_os_version)
+        try:
+            result = subprocess.check_output(cmp_script, shell=True,
+                                             stderr=subprocess.STDOUT)
+        except subprocess.CalledProcessError:
+            return -1
+    else:
+        if new_os_file.find("Mimosa") != -1:
+            return 0
+        else:
+            msg = _("Please use Mimosa os to upgrade instead of TFG")
+            LOG.error(msg)
+            raise exception.Forbidden(message=msg)
+    return result.find("yes")
+
+
+def upgrade_os(req, hosts_list):
+    upgrade_hosts = []
+    max_parallel_os_upgrade_number = int(CONF.max_parallel_os_upgrade_number)
+    while hosts_list:
+        host_meta = {}
+        threads = []
+        if len(hosts_list) > max_parallel_os_upgrade_number:
+            upgrade_hosts = hosts_list[:max_parallel_os_upgrade_number]
+            hosts_list = hosts_list[max_parallel_os_upgrade_number:]
+        else:
+            upgrade_hosts = hosts_list
+            hosts_list = []
+
+        new_os_file = check_tfg_exist()
+        for host_info in upgrade_hosts:
+            host_id = host_info.keys()[0]
+            host_ip = host_info.values()[0]
+            host_detail = daisy_cmn.get_host_detail(req, host_id)
+            target_host_os = _get_host_os_version(
+                host_ip, host_detail['root_pwd'])
+
+            if _cmp_os_version(new_os_file, target_host_os, host_ip) != -1:
+                host_meta['os_progress'] = 10
+                host_meta['os_status'] = host_os_status['UPDATING']
+                host_meta['messages'] = "os updating,begin copy iso"
+                update_db_host_status(req, host_id, host_meta)
+                t = threading.Thread(target=os_thread_bin, args=(req, host_ip,
+                                                                 host_id))
+                t.setDaemon(True)
+                t.start()
+                threads.append(t)
+            else:
+                LOG.warn(_("new os version is lower than or equal to that of "
+                           "host %s, don't need to upgrade!"
% host_ip)) + try: + for t in threads: + t.join() + except: + LOG.warn(_("Join update thread %s failed!" % t)) + else: + for host_info in upgrade_hosts: + update_failed_flag = False + host_id = host_info.keys()[0] + host_ip = host_info.values()[0] + host = registry.get_host_metadata(req.context, host_id) + if host['os_status'] == host_os_status['UPDATE_FAILED'] or\ + host['os_status'] == host_os_status['INIT']: + update_failed_flag = True + raise exception.ThreadBinException( + "%s update tfg failed! %s" % ( + host_ip, host['messages'])) + if not update_failed_flag: + host_meta = {} + host_meta['os_progress'] = 100 + host_meta['os_status'] = host_os_status['ACTIVE'] + host_meta['messages'] = "upgrade tfg successfully" + update_db_host_status(req, host_id, host_meta) diff --git a/code/daisy/daisy/api/backends/proton/install.py b/code/daisy/daisy/api/backends/proton/install.py index 59780054..5b6d2372 100755 --- a/code/daisy/daisy/api/backends/proton/install.py +++ b/code/daisy/daisy/api/backends/proton/install.py @@ -54,7 +54,6 @@ def get_proton_ip(req, role_hosts): return proton_ip_list - def get_proton_hosts(req, cluster_id): all_roles = proton_cmn.get_roles_detail(req) for role in all_roles: diff --git a/code/daisy/daisy/api/backends/tecs/api.py b/code/daisy/daisy/api/backends/tecs/api.py index fa7472f5..23e846b9 100755 --- a/code/daisy/daisy/api/backends/tecs/api.py +++ b/code/daisy/daisy/api/backends/tecs/api.py @@ -1,382 +1,427 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -/install endpoint for tecs API -""" -import os -import copy -import subprocess -import time -import commands - -import traceback -import webob.exc -from oslo_config import cfg -from oslo_log import log as logging -from webob.exc import HTTPBadRequest -from webob.exc import HTTPForbidden -from webob.exc import HTTPServerError - -import threading -from threading import Thread - -from daisy import i18n -from daisy import notifier - -from daisy.api import policy -import daisy.api.v1 - -from daisy.common import exception -import daisy.registry.client.v1.api as registry -from daisy.api.backends.tecs import config -from daisy.api.backends import driver -from daisy.api.network_api import network as neutron -from ironicclient import client as ironic_client -import daisy.api.backends.os as os_handle -import daisy.api.backends.common as daisy_cmn -import daisy.api.backends.tecs.common as tecs_cmn -import daisy.api.backends.tecs.install as instl -import daisy.api.backends.tecs.uninstall as unstl -import daisy.api.backends.tecs.upgrade as upgrd -import daisy.api.backends.tecs.disk_array as disk_array - -try: - import simplejson as json -except ImportError: - import json - -LOG = logging.getLogger(__name__) -_ = i18n._ -_LE = i18n._LE -_LI = i18n._LI -_LW = i18n._LW -CONF = cfg.CONF -upgrade_opts = [ - cfg.StrOpt('max_parallel_os_upgrade_number', default=10, - help='Maximum number of hosts upgrade os at the same time.'), -] -CONF.register_opts(upgrade_opts) - -tecs_state = tecs_cmn.TECS_STATE - -class API(driver.DeploymentDriver): - """ - The hosts API is a RESTful web service for host data. The API - is as follows:: - - GET /hosts -- Returns a set of brief metadata about hosts - GET /hosts/detail -- Returns a set of detailed metadata about - hosts - HEAD /hosts/ -- Return metadata about an host with id - GET /hosts/ -- Return host data for host with id - POST /hosts -- Store host data and return metadata about the - newly-stored host - PUT /hosts/ -- Update host metadata and/or upload host - data for a previously-reserved host - DELETE /hosts/ -- Delete the host with id - """ - - def __init__(self): - super(API, self).__init__() - return - - def install(self, req, cluster_id): - """ - Install TECS to a cluster. 
- - param req: The WSGI/Webob Request object - cluster_id:cluster id - """ - - tecs_install_task = instl.TECSInstallTask(req, cluster_id) - tecs_install_task.start() - - def _get_roles_and_hosts_ip_list(self, req, cluster_id): - host_ha_list = set() - host_ip_list = set() - role_id_list = set() - hosts_id_list = [] - hosts_list = [] - - roles = daisy_cmn.get_cluster_roles_detail(req,cluster_id) - cluster_networks = daisy_cmn.get_cluster_networks_detail(req, cluster_id) - for role in roles: - if role['deployment_backend'] != daisy_cmn.tecs_backend_name: - continue - role_hosts = daisy_cmn.get_hosts_of_role(req, role['id']) - if role_hosts: - for role_host in role_hosts: - host = daisy_cmn.get_host_detail(req, role_host['host_id']) - host_ip = tecs_cmn.get_host_network_ip(req, host, cluster_networks, 'MANAGEMENT') - if role['name'] == "CONTROLLER_HA": - host_ha_list.add(host_ip) - host_ip_list.add(host_ip) - hosts_id_list.append({host['id']:host_ip}) - role_id_list.add(role['id']) - for host in hosts_id_list: - if host not in hosts_list: - hosts_list.append(host) - return (role_id_list, host_ip_list, host_ha_list, hosts_list) - - def _query_progress(self, req, cluster_id, action=""): - nodes_list = [] - roles = daisy_cmn.get_roles_detail(req) - (role_id_list,host_ip_list,host_ha_list,hosts_list) = self._get_roles_and_hosts_ip_list(req, cluster_id) - for host in hosts_list: - node = {} - host_id = host.keys()[0] - host = daisy_cmn.get_host_detail(req, host_id) - node['id'] = host['id'] - node['name'] = host['name'] - - if 0 == cmp("upgrade", action): - node['os-progress'] = host['os_progress'] - node['os-status'] = host['os_status'] - node['os-messages'] = host['messages'] - - if host['status'] == "with-role": - host_roles = [ role for role in roles if role['name'] in host['role'] and role['cluster_id'] == cluster_id] - if host_roles: - node['role-status'] = host_roles[0]['status'] - node['role-progress'] = str(host_roles[0]['progress']) - # node['role-message'] = host_roles[0]['messages'] - nodes_list.append(node) - if nodes_list: - return {'tecs_nodes': nodes_list} - else: - return {'tecs_nodes': "TECS uninstall successfully, the host has been removed from the host_roles table"} - - def uninstall(self, req, cluster_id): - """ - Uninstall TECS to a cluster. - - :param req: The WSGI/Webob Request object - - :raises HTTPBadRequest if x-install-cluster is missing - """ - (role_id_list, host_ip_list,host_ha_list, hosts_list) = self._get_roles_and_hosts_ip_list(req, cluster_id) - if role_id_list: - if not host_ip_list: - msg = _("there is no host in cluster %s") % cluster_id - raise exception.ThreadBinException(msg) - - unstl.update_progress_to_db(req, role_id_list, tecs_state['UNINSTALLING'], hosts_list) - - threads = [] - for host_ip in host_ip_list: - t = threading.Thread(target=unstl.thread_bin,args=(req,host_ip,role_id_list,hosts_list)) - t.setDaemon(True) - t.start() - threads.append(t) - LOG.info(_("Uninstall threads have started, please waiting....")) - - try: - for t in threads: - t.join() - except: - LOG.warn(_("Join uninstall thread %s failed!" 
% t)) - else: - uninstall_failed_flag = False - for role_id in role_id_list: - role_hosts=daisy_cmn.get_hosts_of_role(req,role_id) - for role_host in role_hosts: - if role_host['status'] == tecs_state['UNINSTALL_FAILED']: - unstl.update_progress_to_db(req, role_id_list, tecs_state['UNINSTALL_FAILED'],hosts_list) - uninstall_failed_flag = True - break - if not uninstall_failed_flag: - LOG.info(_("All uninstall threads have done, set all roles status to 'init'!")) - unstl.update_progress_to_db(req, role_id_list, tecs_state['INIT'], hosts_list) - try: - (status, output) = commands.getstatusoutput('rpm -e --nodeps openstack-packstack\ - openstack-packstack-puppet openstack-puppet-modules puppet') - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - - def uninstall_progress(self, req, cluster_id): - return self._query_progress(req, cluster_id, "uninstall") - - def upgrade(self, req, cluster_id): - """ - update TECS to a cluster. - - :param req: The WSGI/Webob Request object - - :raises HTTPBadRequest if x-install-cluster is missing - """ - (role_id_list,host_ip_list,host_ha_list,hosts_list) = self._get_roles_and_hosts_ip_list(req, cluster_id) - if role_id_list: - if not host_ip_list: - msg = _("there is no host in cluster %s") % cluster_id - raise exception.ThreadBinException(msg) - unreached_hosts = daisy_cmn.check_ping_hosts(host_ip_list, 1) - if unreached_hosts: - self.message = "hosts %s ping failed" % unreached_hosts - raise exception.NotFound(message=self.message) - - daisy_cmn.subprocess_call('rm -rf /root/.ssh/known_hosts') - - if os_handle.check_tfg_exist(): - os_handle.upgrade_os(req, hosts_list) - unreached_hosts = daisy_cmn.check_ping_hosts(host_ip_list, 30) - if unreached_hosts: - self.message = "hosts %s ping failed after tfg upgrade" % unreached_hosts - raise exception.NotFound(message=self.message) - # check and get TECS version - tecs_version_pkg_file = tecs_cmn.check_and_get_tecs_version(tecs_cmn.daisy_tecs_path) - if not tecs_version_pkg_file: - self.state = tecs_state['INSTALL_FAILED'] - self.message = "TECS version file not found in %s" % tecs_cmn.daisy_tecs_path - raise exception.NotFound(message=self.message) - threads = [] - LOG.info(_("Begin to update TECS controller nodes, please waiting....")) - upgrd.update_progress_to_db(req, role_id_list, tecs_state['UPDATING'], hosts_list) - for host_ip in host_ha_list: - LOG.info(_("Update TECS controller node %s..." % host_ip)) - rc = upgrd.thread_bin(req,role_id_list,host_ip,hosts_list) - if rc == 0: - LOG.info(_("Update TECS for %s successfully" % host_ip)) - else: - LOG.info(_("Update TECS failed for %s, return %s" % (host_ip,rc))) - return - LOG.info(_("Begin to update TECS other nodes, please waiting....")) - max_parallel_upgrade_number = int(CONF.max_parallel_os_upgrade_number) - compute_ip_list = host_ip_list - host_ha_list - while compute_ip_list: - threads = [] - if len(compute_ip_list) > max_parallel_upgrade_number: - upgrade_hosts = compute_ip_list[:max_parallel_upgrade_number] - compute_ip_list = compute_ip_list[max_parallel_upgrade_number:] - else: - upgrade_hosts = compute_ip_list - compute_ip_list = [] - for host_ip in upgrade_hosts: - t = threading.Thread(target=upgrd.thread_bin,args=(req,role_id_list,host_ip,hosts_list)) - t.setDaemon(True) - t.start() - threads.append(t) - try: - for t in threads: - t.join() - except: - LOG.warn(_("Join update thread %s failed!" 
% t)) - - for role_id in role_id_list: - role_hosts=daisy_cmn.get_hosts_of_role(req,role_id) - for role_host in role_hosts: - if (role_host['status'] == tecs_state['UPDATE_FAILED'] or - role_host['status'] == tecs_state['UPDATING']): - role_id = [role_host['role_id']] - upgrd.update_progress_to_db(req, - role_id, - tecs_state['UPDATE_FAILED'], - hosts_list) - break - elif role_host['status'] == tecs_state['ACTIVE']: - role_id = [role_host['role_id']] - upgrd.update_progress_to_db(req, - role_id, - tecs_state['ACTIVE'], - hosts_list) - - def upgrade_progress(self, req, cluster_id): - return self._query_progress(req, cluster_id, "upgrade") - - - def export_db(self, req, cluster_id): - """ - Export daisy db data to tecs.conf and HA.conf. - - :param req: The WSGI/Webob Request object - - :raises HTTPBadRequest if x-install-cluster is missing - """ - - (tecs_config, mgnt_ip_list) =\ - instl.get_cluster_tecs_config(req, cluster_id) - - config_files = {'tecs_conf':'','ha_conf':''} - tecs_install_path = "/home/tecs_install" - tecs_config_file = '' - if tecs_config: - cluster_conf_path = tecs_install_path + "/" + cluster_id - create_cluster_conf_path =\ - "rm -rf %s;mkdir %s" %(cluster_conf_path, cluster_conf_path) - daisy_cmn.subprocess_call(create_cluster_conf_path) - config.update_tecs_config(tecs_config, cluster_conf_path) - - get_tecs_conf = "ls %s|grep tecs.conf" % cluster_conf_path - obj = subprocess.Popen(get_tecs_conf, - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - (stdoutput, erroutput) = obj.communicate() - tecs_conf_file = "" - if stdoutput: - tecs_conf_file = stdoutput.split('\n')[0] - config_files['tecs_conf'] =\ - cluster_conf_path + "/" + tecs_conf_file - - get_ha_conf_cmd = "ls %s|grep HA_1.conf" % cluster_conf_path - obj = subprocess.Popen(get_ha_conf_cmd, - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - (stdoutput, erroutput) = obj.communicate() - ha_conf_file = "" - if stdoutput: - ha_conf_file = stdoutput.split('\n')[0] - config_files['ha_conf'] =\ - cluster_conf_path + "/" + ha_conf_file - else: - LOG.info(_("No TECS config files generated.")) - - return config_files - - def update_disk_array(self, req, cluster_id): - (share_disk_info, volume_disk_info) =\ - disk_array.get_disk_array_info(req, cluster_id) - (controller_ha_nodes, computer_ips) =\ - disk_array.get_ha_and_compute_ips(req, cluster_id) - all_nodes_ip = computer_ips + controller_ha_nodes.keys() - - if all_nodes_ip: - compute_error_msg =\ - disk_array.config_compute_multipath(all_nodes_ip) - if compute_error_msg: - return compute_error_msg - else: - LOG.info(_("Config Disk Array multipath successfully")) - - if share_disk_info: - ha_error_msg =\ - disk_array.config_ha_share_disk(share_disk_info, - controller_ha_nodes) - if ha_error_msg: - return ha_error_msg - else: - LOG.info(_("Config Disk Array for HA nodes successfully")) - - if volume_disk_info: - cinder_error_msg =\ - disk_array.config_ha_cinder_volume(volume_disk_info, - controller_ha_nodes.keys()) - if cinder_error_msg: - return cinder_error_msg - else: - LOG.info(_("Config cinder volume for HA nodes successfully")) - - return 'update successfully' +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +/install endpoint for tecs API +""" +import subprocess +import commands + +from oslo_config import cfg +from oslo_log import log as logging +from webob.exc import HTTPBadRequest + +import threading + +from daisy import i18n + +from daisy.common import exception +from daisy.api.backends.tecs import config +from daisy.api.backends import driver +import daisy.api.backends.os as os_handle +import daisy.api.backends.common as daisy_cmn +import daisy.api.backends.tecs.common as tecs_cmn +import daisy.api.backends.tecs.install as instl +import daisy.api.backends.tecs.uninstall as unstl +import daisy.api.backends.tecs.upgrade as upgrd +import daisy.api.backends.tecs.disk_array as disk_array +from daisy.api.backends.tecs import write_configs +import daisy.registry.client.v1.api as registry + + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW +CONF = cfg.CONF +upgrade_opts = [ + cfg.StrOpt('max_parallel_os_upgrade_number', default=10, + help='Maximum number of hosts upgrade os at the same time.'), +] +CONF.register_opts(upgrade_opts) + +tecs_state = tecs_cmn.TECS_STATE +daisy_tecs_path = tecs_cmn.daisy_tecs_path + + +class API(driver.DeploymentDriver): + + """ + The hosts API is a RESTful web service for host data. The API + is as follows:: + + GET /hosts -- Returns a set of brief metadata about hosts + GET /hosts/detail -- Returns a set of detailed metadata about + hosts + HEAD /hosts/ -- Return metadata about an host with id + GET /hosts/ -- Return host data for host with id + POST /hosts -- Store host data and return metadata about the + newly-stored host + PUT /hosts/ -- Update host metadata and/or upload host + data for a previously-reserved host + DELETE /hosts/ -- Delete the host with id + """ + + def __init__(self): + super(API, self).__init__() + return + + def install(self, req, cluster_id): + """ + Install TECS to a cluster. 
+ + param req: The WSGI/Webob Request object + cluster_id:cluster id + """ + write_configs.update_configset(req, cluster_id) + + tecs_install_task = instl.TECSInstallTask(req, cluster_id) + tecs_install_task.start() + + def _get_roles_and_hosts_ip_list(self, req, cluster_id): + role_host_ips = {'ha': set(), 'lb': set(), 'all': set()} + role_id_list = set() + hosts_id_list = [] + hosts_list = [] + tecs_install_failed_list = set() + + roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id) + cluster_networks = daisy_cmn.get_cluster_networks_detail( + req, cluster_id) + for role in roles: + if role['deployment_backend'] != daisy_cmn.tecs_backend_name: + continue + role_hosts = daisy_cmn.get_hosts_of_role(req, role['id']) + if role_hosts: + for role_host in role_hosts: + host = daisy_cmn.get_host_detail(req, role_host['host_id']) + host_ip = tecs_cmn.get_host_network_ip( + req, host, cluster_networks, 'MANAGEMENT') + if role['name'] == "CONTROLLER_HA": + role_host_ips['ha'].add(host_ip) + if role['name'] == "CONTROLLER_LB": + role_host_ips['lb'].add(host_ip) + role_host_ips['all'].add(host_ip) + hosts_id_list.append({host['id']: host_ip}) + if role_host['status'] == tecs_state['INSTALL_FAILED']: + tecs_install_failed_list.add(host_ip) + role_id_list.add(role['id']) + for host in hosts_id_list: + if host not in hosts_list: + hosts_list.append(host) + return (role_id_list, role_host_ips, + hosts_list, tecs_install_failed_list) + + def _query_progress(self, req, cluster_id, action=""): + nodes_list = [] + roles = daisy_cmn.get_roles_detail(req) + (role_id_list, role_host_ips, hosts_list, tecs_install_failed_list) =\ + self._get_roles_and_hosts_ip_list(req, cluster_id) + for host in hosts_list: + node = {} + host_id = host.keys()[0] + host = daisy_cmn.get_host_detail(req, host_id) + node['id'] = host['id'] + node['name'] = host['name'] + + if 0 == cmp("upgrade", action): + node['os-progress'] = host['os_progress'] + node['os-status'] = host['os_status'] + node['os-messages'] = host['messages'] + + if host['status'] == "with-role": + host_roles = [role for role in roles if role['name'] in host[ + 'role'] and role['cluster_id'] == cluster_id] + if host_roles: + node['role-status'] = host_roles[0]['status'] + node['role-progress'] = str(host_roles[0]['progress']) + # node['role-message'] = host_roles[0]['messages'] + nodes_list.append(node) + if nodes_list: + return {'tecs_nodes': nodes_list} + else: + return {'tecs_nodes': "TECS uninstall successfully,\ + the host has been removed from the host_roles table"} + + def _modify_running_version_of_configs(self, req, + running_version, cluster_id): + cluster_configs_list = daisy_cmn.get_cluster_configs_list(req, + cluster_id) + if cluster_configs_list: + for cluster_config in cluster_configs_list: + registry.update_config_metadata(req.context, + cluster_config['id'], + {'running_version': + running_version}) + + def uninstall(self, req, cluster_id): + """ + Uninstall TECS to a cluster. 
+ + :param req: The WSGI/Webob Request object + + :raises HTTPBadRequest if x-install-cluster is missing + """ + (role_id_list, role_host_ips, hosts_list, tecs_install_failed_list) =\ + self._get_roles_and_hosts_ip_list(req, cluster_id) + if role_id_list: + if not role_host_ips['all']: + msg = _("there is no host in cluster %s") % cluster_id + raise exception.ThreadBinException(msg) + + unstl.update_progress_to_db( + req, role_id_list, tecs_state['UNINSTALLING'], hosts_list) + + threads = [] + for host_ip in role_host_ips['all']: + t = threading.Thread( + target=unstl.thread_bin, args=(req, host_ip, role_id_list, + hosts_list)) + t.setDaemon(True) + t.start() + threads.append(t) + LOG.info(_("Uninstall threads have started, please waiting....")) + + try: + for t in threads: + t.join() + except: + LOG.warn(_("Join uninstall thread %s failed!" % t)) + else: + uninstall_failed_flag = False + for role_id in role_id_list: + role_hosts = daisy_cmn.get_hosts_of_role(req, role_id) + for role_host in role_hosts: + if role_host['status'] ==\ + tecs_state['UNINSTALL_FAILED']: + unstl.update_progress_to_db( + req, role_id_list, tecs_state[ + 'UNINSTALL_FAILED'], hosts_list) + uninstall_failed_flag = True + break + if not uninstall_failed_flag: + LOG.info( + _("All uninstall threads have done,\ + set all roles status to 'init'!")) + unstl.update_progress_to_db( + req, role_id_list, tecs_state['INIT'], hosts_list) + LOG.info(_("modify the running_version of configs to 0")) + running_version = 0 + self._modify_running_version_of_configs( + req, running_version, cluster_id) + tecs_cmn.inform_provider_cloud_state(req.context, cluster_id, + operation='delete') + try: + (status, output) = commands.getstatusoutput('rpm -e --nodeps openstack-packstack\ + openstack-packstack-puppet \ + openstack-puppet-modules puppet') + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + + def uninstall_progress(self, req, cluster_id): + return self._query_progress(req, cluster_id, "uninstall") + + def upgrade(self, req, cluster_id): + """ + update TECS to a cluster. 
+ + :param req: The WSGI/Webob Request object + + :raises HTTPBadRequest if x-install-cluster is missing + """ + # daisy_update_path = '/home/daisy_update/' + + (role_id_list, role_host_ips, hosts_list, tecs_install_failed_list) =\ + self._get_roles_and_hosts_ip_list(req, cluster_id) + if role_id_list: + if not role_host_ips['all']: + msg = _("there is no host in cluster %s") % cluster_id + raise exception.ThreadBinException(msg) + unreached_hosts = daisy_cmn.check_ping_hosts( + role_host_ips['all'], 1) + if unreached_hosts: + self.message = "hosts %s ping failed" % unreached_hosts + raise exception.NotFound(message=self.message) + daisy_cmn.subprocess_call('rm -rf /root/.ssh/known_hosts') + if os_handle.check_tfg_exist(): + os_handle.upgrade_os(req, hosts_list) + unreached_hosts = daisy_cmn.check_ping_hosts( + role_host_ips['all'], 30) + if unreached_hosts: + self.message = "hosts %s ping failed after tfg upgrade" \ + % unreached_hosts + raise exception.NotFound(message=self.message) + # check and get TECS version + tecs_version_pkg_file = tecs_cmn.check_and_get_tecs_version( + tecs_cmn.daisy_tecs_path) + if not tecs_version_pkg_file: + self.state = tecs_state['UPDATE_FAILED'] + self.message = "TECS version file not found in %s"\ + % tecs_cmn.daisy_tecs_path + raise exception.NotFound(message=self.message) + threads = [] + LOG.info( + _("Begin to update TECS controller nodes, please waiting....")) + upgrd.update_progress_to_db( + req, role_id_list, tecs_state['UPDATING'], hosts_list) + for host_ip in role_host_ips['ha']: + if host_ip in tecs_install_failed_list: + continue + LOG.info(_("Update TECS controller node %s..." % host_ip)) + rc = upgrd.thread_bin(req, role_id_list, host_ip, hosts_list) + if rc == 0: + LOG.info(_("Update TECS for %s successfully" % host_ip)) + else: + LOG.info( + _("Update TECS failed for %s, return %s" + % (host_ip, rc))) + return + + LOG.info(_("Begin to update TECS other nodes, please waiting....")) + max_parallel_upgrade_number = int( + CONF.max_parallel_os_upgrade_number) + compute_ip_list = role_host_ips[ + 'all'] - role_host_ips['ha'] - tecs_install_failed_list + while compute_ip_list: + threads = [] + if len(compute_ip_list) > max_parallel_upgrade_number: + upgrade_hosts = compute_ip_list[ + :max_parallel_upgrade_number] + compute_ip_list = compute_ip_list[ + max_parallel_upgrade_number:] + else: + upgrade_hosts = compute_ip_list + compute_ip_list = [] + for host_ip in upgrade_hosts: + t = threading.Thread( + target=upgrd.thread_bin, + args=(req, role_id_list, host_ip, hosts_list)) + t.setDaemon(True) + t.start() + threads.append(t) + try: + for t in threads: + t.join() + except: + LOG.warn(_("Join update thread %s failed!" % t)) + + for role_id in role_id_list: + role_hosts = daisy_cmn.get_hosts_of_role(req, role_id) + for role_host in role_hosts: + if (role_host['status'] == tecs_state['UPDATE_FAILED'] or + role_host['status'] == tecs_state['UPDATING']): + role_id = [role_host['role_id']] + upgrd.update_progress_to_db(req, + role_id, + tecs_state[ + 'UPDATE_FAILED'], + hosts_list) + break + elif role_host['status'] == tecs_state['ACTIVE']: + role_id = [role_host['role_id']] + upgrd.update_progress_to_db(req, + role_id, + tecs_state['ACTIVE'], + hosts_list) + + def upgrade_progress(self, req, cluster_id): + return self._query_progress(req, cluster_id, "upgrade") + + def export_db(self, req, cluster_id): + """ + Export daisy db data to tecs.conf and HA.conf. 
+ + :param req: The WSGI/Webob Request object + + :raises HTTPBadRequest if x-install-cluster is missing + """ + + tecs_config =\ + instl.get_cluster_tecs_config(req, cluster_id) + + config_files = {'tecs_conf': '', 'ha_conf': ''} + tecs_install_path = "/home/tecs_install" + if tecs_config: + cluster_conf_path = tecs_install_path + "/" + cluster_id + create_cluster_conf_path =\ + "rm -rf %s;mkdir %s" % (cluster_conf_path, cluster_conf_path) + daisy_cmn.subprocess_call(create_cluster_conf_path) + config.update_tecs_config(tecs_config, cluster_conf_path) + + get_tecs_conf = "ls %s|grep tecs.conf" % cluster_conf_path + obj = subprocess.Popen(get_tecs_conf, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + (stdoutput, erroutput) = obj.communicate() + tecs_conf_file = "" + if stdoutput: + tecs_conf_file = stdoutput.split('\n')[0] + config_files['tecs_conf'] =\ + cluster_conf_path + "/" + tecs_conf_file + + get_ha_conf_cmd = "ls %s|grep HA_1.conf" % cluster_conf_path + obj = subprocess.Popen(get_ha_conf_cmd, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + (stdoutput, erroutput) = obj.communicate() + ha_conf_file = "" + if stdoutput: + ha_conf_file = stdoutput.split('\n')[0] + config_files['ha_conf'] =\ + cluster_conf_path + "/" + ha_conf_file + else: + LOG.info(_("No TECS config files generated.")) + + return config_files + + def update_disk_array(self, req, cluster_id): + (share_disk_info, volume_disk_info) =\ + disk_array.get_disk_array_info(req, cluster_id) + array_nodes_addr =\ + tecs_cmn.get_disk_array_nodes_addr(req, cluster_id) + + ha_nodes_ip = array_nodes_addr['ha'].keys() + all_nodes_ip = list(array_nodes_addr['computer']) + ha_nodes_ip + + if all_nodes_ip: + compute_error_msg =\ + disk_array.config_compute_multipath(all_nodes_ip) + if compute_error_msg: + return compute_error_msg + else: + LOG.info(_("Config Disk Array multipath successfully")) + + if share_disk_info: + ha_error_msg =\ + disk_array.config_ha_share_disk(share_disk_info, + array_nodes_addr['ha']) + if ha_error_msg: + return ha_error_msg + else: + LOG.info(_("Config Disk Array for HA nodes successfully")) + + if volume_disk_info: + cinder_error_msg =\ + disk_array.config_ha_cinder_volume(volume_disk_info, + ha_nodes_ip) + if cinder_error_msg: + return cinder_error_msg + else: + LOG.info(_("Config cinder volume for HA nodes successfully")) + + return 'update successfully' diff --git a/code/daisy/daisy/api/backends/tecs/common.py b/code/daisy/daisy/api/backends/tecs/common.py index 6f05b6cb..0c36872d 100755 --- a/code/daisy/daisy/api/backends/tecs/common.py +++ b/code/daisy/daisy/api/backends/tecs/common.py @@ -19,33 +19,21 @@ import os import copy import subprocess -import time import re -import traceback -import webob.exc -from oslo_config import cfg from oslo_log import log as logging from webob.exc import HTTPBadRequest from webob.exc import HTTPForbidden - -from threading import Thread - from daisy import i18n -from daisy import notifier - -from daisy.api import policy -import daisy.api.v1 +from daisy.common import utils from daisy.common import exception import daisy.registry.client.v1.api as registry import daisy.api.backends.common as daisy_cmn +from daisyclient.v1 import client as daisy_client +import ConfigParser -try: - import simplejson as json -except ImportError: - import json - +STR_MASK = '*' * 8 LOG = logging.getLogger(__name__) _ = i18n._ _LE = i18n._LE @@ -53,11 +41,12 @@ _LI = i18n._LI _LW = i18n._LW daisy_tecs_path = '/var/lib/daisy/tecs/' +tecs_install_path = 
'/home/tecs_install' TECS_STATE = { - 'INIT' : 'init', - 'INSTALLING' : 'installing', - 'ACTIVE' : 'active', + 'INIT': 'init', + 'INSTALLING': 'installing', + 'ACTIVE': 'active', 'INSTALL_FAILED': 'install-failed', 'UNINSTALLING': 'uninstalling', 'UNINSTALL_FAILED': 'uninstall-failed', @@ -66,42 +55,65 @@ TECS_STATE = { } +def get_daisyclient(): + """Get Daisy client instance.""" + config_daisy = ConfigParser.ConfigParser() + config_daisy.read("/etc/daisy/daisy-api.conf") + daisy_port = config_daisy.get("DEFAULT", "bind_port") + args = {'version': 1.0, 'endpoint': 'http://127.0.0.1:' + daisy_port} + return daisy_client.Client(**args) + + +def mkdir_tecs_install(host_ips=None): + if not host_ips: + cmd = "mkdir -p %s" % tecs_install_path + daisy_cmn.subprocess_call(cmd) + return + for host_ip in host_ips: + cmd = 'clush -S -w %s "mkdir -p %s"' % (host_ip, tecs_install_path) + daisy_cmn.subprocess_call(cmd) + + def _get_cluster_network(cluster_networks, network_name): - network = [cn for cn in cluster_networks - if cn['name'] in network_name] + network = [cn for cn in cluster_networks if cn['name'] == network_name] if not network or not network[0]: msg = "network %s is not exist" % (network_name) raise exception.InvalidNetworkConfig(msg) else: return network[0] + def get_host_interface_by_network(host_detail, network_name): host_detail_info = copy.deepcopy(host_detail) - interface_list = [hi for hi in host_detail_info['interfaces'] - for assigned_network in hi['assigned_networks'] - if assigned_network and network_name == assigned_network['name']] + interface_list = [hi for hi in host_detail_info['interfaces'] + for assigned_network in hi['assigned_networks'] + if assigned_network and + network_name == assigned_network['name']] interface = {} if interface_list: interface = interface_list[0] - + if not interface and 'MANAGEMENT' == network_name: - msg = "network %s of host %s is not exist" % (network_name, host_detail_info['id']) + msg = "network %s of host %s is not exist" % ( + network_name, host_detail_info['id']) raise exception.InvalidNetworkConfig(msg) return interface + def get_host_network_ip(req, host_detail, cluster_networks, network_name): interface_network_ip = '' host_interface = get_host_interface_by_network(host_detail, network_name) if host_interface: network = _get_cluster_network(cluster_networks, network_name) - assigned_network = daisy_cmn.get_assigned_network(req, - host_interface['id'], - network['id']) + assigned_network = daisy_cmn.get_assigned_network(req, + host_interface['id'], + network['id']) interface_network_ip = assigned_network['ip'] - if not interface_network_ip and 'MANAGEMENT' == network_name : - msg = "%s network ip of host %s can't be empty" % (network_name, host_detail['id']) + if not interface_network_ip and 'MANAGEMENT' == network_name: + msg = "%s network ip of host %s can't be empty" % ( + network_name, host_detail['id']) raise exception.InvalidNetworkConfig(msg) return interface_network_ip @@ -147,26 +159,36 @@ def get_network_netmask(cluster_networks, network_name): raise exception.InvalidNetworkConfig(msg) return netmask + # every host only have one gateway def get_network_gateway(cluster_networks, network_name): network = _get_cluster_network(cluster_networks, network_name) gateway = network['gateway'] - if not gateway and 'MANAGEMENT' == network_name: - msg = "gateway of network %s can't be empty" % (network_name) - raise exception.InvalidNetworkConfig(msg) return gateway + +def get_network_cidr(cluster_networks, network_name): + network = 
_get_cluster_network(cluster_networks, network_name) + cidr = network['cidr'] + if not cidr: + msg = "cidr of network %s is not exist" % (network_name) + raise exception.InvalidNetworkConfig(msg) + return cidr + + def get_mngt_network_vlan_id(cluster_networks): mgnt_vlan_id = "" - management_network = [network for network in cluster_networks if network['network_type'] == 'MANAGEMENT'] + management_network = [network for network in cluster_networks if network[ + 'network_type'] == 'MANAGEMENT'] if (not management_network or - not management_network[0] or - not management_network[0].has_key('vlan_id')): + not management_network[0] or + # not management_network[0].has_key('vlan_id')): + 'vlan_id' not in management_network[0]): msg = "can't get management network vlan id" raise exception.InvalidNetworkConfig(msg) else: mgnt_vlan_id = management_network[0]['vlan_id'] - return mgnt_vlan_id + return mgnt_vlan_id def get_network_vlan_id(cluster_networks, network_type): @@ -174,7 +196,8 @@ def get_network_vlan_id(cluster_networks, network_type): general_network = [network for network in cluster_networks if network['network_type'] == network_type] if (not general_network or not general_network[0] or - not general_network[0].has_key('vlan_id')): + # not general_network[0].has_key('vlan_id')): + 'vlan_id' not in general_network[0]): msg = "can't get %s network vlan id" % network_type raise exception.InvalidNetworkConfig(msg) else: @@ -182,7 +205,7 @@ def get_network_vlan_id(cluster_networks, network_type): return vlan_id -def sort_interfaces_by_pci(host_detail): +def sort_interfaces_by_pci(networks, host_detail): """ Sort interfaces by pci segment, if interface type is bond, user the pci of first memeber nic.This function is fix bug for @@ -192,61 +215,45 @@ def sort_interfaces_by_pci(host_detail): :return: """ interfaces = eval(host_detail.get('interfaces', None)) \ - if isinstance(host_detail, unicode) else host_detail.get('interfaces', None) + if isinstance(host_detail, unicode) else \ + host_detail.get('interfaces', None) if not interfaces: - LOG.info("This host don't have /interfaces info.") + LOG.info("This host has no interfaces info.") return host_detail tmp_interfaces = copy.deepcopy(interfaces) - if not [interface for interface in tmp_interfaces - if interface.get('name', None) and len(interface['name']) > 8]: - LOG.info("The interfaces name of host is all less than 9 character, no need sort.") - return host_detail - # add pci segment for the bond nic, the pci is equal to the first member nic pci slaves_name_list = [] for interface in tmp_interfaces: - if interface.get('type', None) == "bond" and \ - interface.get('slave1', None) and interface.get('slave2', None): - + if interface.get('type', None) == "bond" and\ + interface.get('slave1', None) and\ + interface.get('slave2', None): slaves_name_list.append(interface['slave1']) slaves_name_list.append(interface['slave2']) - first_member_nic_name = interface['slave1'] - tmp_pci = [interface_tmp['pci'] - for interface_tmp in tmp_interfaces - if interface_tmp.get('name', None) and - interface_tmp.get('pci', None) and - interface_tmp['name'] == first_member_nic_name] + for interface in interfaces: + if interface.get('name') not in slaves_name_list: + vlan_id_len_list = [len(network['vlan_id']) + for assigned_network in interface.get( + 'assigned_networks', []) + for network in networks + if assigned_network.get('name') == + network.get('name') and network.get('vlan_id')] + max_vlan_id_len = max(vlan_id_len_list) if vlan_id_len_list else 0 + 
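            # Worked example for the truncation below (values assumed, for
            # illustration only): an interface named "enp129s0f0d1" (12 chars)
            # carrying an assigned network whose vlan_id is "1024" (4 chars)
            # gives redundant_bit = 12 + 4 - 14 = 2, so the name is trimmed
            # from the left to "p129s0f0d1"; a shorter name such as "enp3s0f0"
            # (8 chars) gives redundant_bit = -2 and is kept unchanged.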
interface_name_len = len(interface['name']) + redundant_bit = interface_name_len + max_vlan_id_len - 14 + interface['name'] = interface['name'][ + redundant_bit:] if redundant_bit > 0 else interface['name'] + return host_detail - if len(tmp_pci) != 1: - LOG.error("This host have two nics with same pci.") - continue - interface['pci'] = tmp_pci[0] - tmp_interfaces = [interface for interface in tmp_interfaces - if interface.get('name', None) and - interface['name'] not in slaves_name_list] - - tmp_interfaces = sorted(tmp_interfaces, key = lambda interface: interface['pci']) - for index in range(0, len(tmp_interfaces)): - for interface in interfaces: - if interface['name'] != tmp_interfaces[index]['name']: - continue - - interface['name'] = "b" + str(index) if interface['type'] == "bond" else "e" + str(index) - - tmp_host_detail = copy.deepcopy(host_detail) - tmp_host_detail.update({'interfaces': interfaces}) - return tmp_host_detail - def check_and_get_tecs_version(daisy_tecs_pkg_path): tecs_version_pkg_file = "" get_tecs_version_pkg = "ls %s| grep ^ZXTECS.*\.bin$" % daisy_tecs_pkg_path obj = subprocess.Popen(get_tecs_version_pkg, - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) (stdoutput, erroutput) = obj.communicate() if stdoutput: tecs_version_pkg_name = stdoutput.split('\n')[0] @@ -255,33 +262,60 @@ def check_and_get_tecs_version(daisy_tecs_pkg_path): daisy_cmn.subprocess_call(chmod_for_tecs_version) return tecs_version_pkg_file + def get_service_disk_list(req, params): try: - service_disks = registry.list_service_disk_metadata(req.context, **params) + service_disks = registry.list_service_disk_metadata( + req.context, **params) except exception.Invalid as e: raise HTTPBadRequest(explanation=e.msg, request=req) return service_disks + def get_cinder_volume_list(req, params): try: - cinder_volumes = registry.list_cinder_volume_metadata(req.context, **params) + cinder_volumes = registry.list_cinder_volume_metadata( + req.context, **params) except exception.Invalid as e: raise HTTPBadRequest(explanation=e.msg, request=req) return cinder_volumes -def get_network_configuration_rpm_name(): - cmd = "ls %s | grep ^network-configuration.*\.rpm" % daisy_tecs_path +def mask_string(unmasked, mask_list=None, replace_list=None): + """ + Replaces words from mask_list with MASK in unmasked string. + If words are needed to be transformed before masking, transformation + could be describe in replace list. For example [("'","'\\''")] + replaces all ' characters with '\\''. + """ + mask_list = mask_list or [] + replace_list = replace_list or [] + + masked = unmasked + for word in sorted(mask_list, lambda x, y: len(y) - len(x)): + if not word: + continue + for before, after in replace_list: + word = word.replace(before, after) + masked = masked.replace(word, STR_MASK) + return masked + + +def run_scrip(script, ip=None, password=None, msg=None): try: - network_rpm_name = subprocess.check_output( - cmd, shell=True, stderr=subprocess.STDOUT).split('\n')[0] - except subprocess.CalledProcessError: - msg = _("Get network-configuration rpm name by subprocess failed!") - raise exception.SubprocessCmdFailed(message=msg) - return network_rpm_name + _run_scrip(script, ip, password) + except: + msg1 = 'Error occurred during running scripts.' 
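        # mask_string (defined above) replaces every word from mask_list with
        # STR_MASK; _run_scrip below passes command output through it before
        # logging or raising. Illustrative call with assumed values:
        # mask_string("sshpass -p ossdbg1 ssh host", ["ossdbg1"])
        # returns "sshpass -p ******** ssh host".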
+ message = msg1 + msg if msg else msg1 + LOG.error(message) + raise HTTPForbidden(explanation=message) + else: + LOG.info('Running scripts successfully!') -def run_scrip(script, ip=None, password=None): +def _run_scrip(script, ip=None, password=None): + mask_list = [] + repl_list = [("'", "'\\''")] script = "\n".join(script) _PIPE = subprocess.PIPE if ip: @@ -297,31 +331,117 @@ def run_scrip(script, ip=None, password=None): script = "function t(){ exit $? ; } \n trap t ERR \n" + script out, err = obj.communicate(script) - return out, err + masked_out = mask_string(out, mask_list, repl_list) + masked_err = mask_string(err, mask_list, repl_list) + if obj.returncode: + pattern = (r'^ssh\:') + if re.search(pattern, err): + LOG.error(_("Network error occured when run script.")) + raise exception.NetworkError(masked_err, stdout=out, stderr=err) + else: + msg = ('Failed to run remote script, stdout: %s\nstderr: %s' % + (masked_out, masked_err)) + LOG.error(msg) + raise exception.ScriptRuntimeError(msg, stdout=out, stderr=err) + return obj.returncode, out + + +def inform_provider_cloud_state(context, cluster_id, **kwargs): + params = dict() + daisyclient = get_daisyclient() + cluster = registry.get_cluster_metadata(context, cluster_id) + params['operation'] = kwargs.get('operation') + params['name'] = cluster.get('name') + params['url'] = "http://" + cluster.get('public_vip') + params['provider_ip'] = cluster.get('hwm_ip') + daisyclient.node.cloud_state(**params) + + +def get_disk_array_nodes_addr(req, cluster_id): + controller_ha_nodes = {} + computer_ips = set() + + roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id) + cluster_networks =\ + daisy_cmn.get_cluster_networks_detail(req, cluster_id) + for role in roles: + if role['deployment_backend'] != daisy_cmn.tecs_backend_name: + continue + role_hosts = daisy_cmn.get_hosts_of_role(req, role['id']) + for role_host in role_hosts: + # host has installed tecs are exclusive + if (role_host['status'] == TECS_STATE['ACTIVE'] or + role_host['status'] == TECS_STATE['UPDATING'] or + role_host['status'] == TECS_STATE['UPDATE_FAILED']): + continue + host_detail = daisy_cmn.get_host_detail(req, + role_host['host_id']) + host_ip = get_host_network_ip(req, + host_detail, + cluster_networks, + 'MANAGEMENT') + if role['name'] == "CONTROLLER_HA": + min_mac = utils.get_host_min_mac(host_detail['interfaces']) + controller_ha_nodes[host_ip] = min_mac + if role['name'] == "COMPUTER": + computer_ips.add(host_ip) + return {'ha': controller_ha_nodes, 'computer': computer_ips} + + +def get_ctl_ha_nodes_min_mac(req, cluster_id): + ''' + ctl_ha_nodes_min_mac = {'host_name1':'min_mac1', ...} + ''' + ctl_ha_nodes_min_mac = {} + roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id) + cluster_networks =\ + daisy_cmn.get_cluster_networks_detail(req, cluster_id) + for role in roles: + if role['deployment_backend'] != daisy_cmn.tecs_backend_name: + continue + role_hosts = daisy_cmn.get_hosts_of_role(req, role['id']) + for role_host in role_hosts: + # host has installed tecs are exclusive + if (role_host['status'] == TECS_STATE['ACTIVE'] or + role_host['status'] == TECS_STATE['UPDATING'] or + role_host['status'] == TECS_STATE['UPDATE_FAILED']): + continue + host_detail = daisy_cmn.get_host_detail(req, + role_host['host_id']) + host_name = host_detail['name'] + if role['name'] == "CONTROLLER_HA": + min_mac = utils.get_host_min_mac(host_detail['interfaces']) + ctl_ha_nodes_min_mac[host_name] = min_mac + return ctl_ha_nodes_min_mac class TecsShellExector(object): + """ 
Class config task before install tecs bin. """ - def __init__(self, mgnt_ip, task_type, params={}): + + def __init__(self, mgnt_ip, task_type, params={}): self.task_type = task_type self.mgnt_ip = mgnt_ip self.params = params self.clush_cmd = "" - self.rpm_name = get_network_configuration_rpm_name() + self.rpm_name =\ + daisy_cmn.get_rpm_package_by_name(daisy_tecs_path, + 'network-configuration') self.NETCFG_RPM_PATH = daisy_tecs_path + self.rpm_name self.oper_type = { - 'install_rpm' : self._install_netcfg_rpm, - 'uninstall_rpm' : self._uninstall_netcfg_rpm, - 'update_rpm' : self._update_netcfg_rpm, + 'install_rpm': self._install_netcfg_rpm, + 'uninstall_rpm': self._uninstall_netcfg_rpm, + 'update_rpm': self._update_netcfg_rpm, } self.oper_shell = { 'CMD_SSHPASS_PRE': "sshpass -p ossdbg1 %(ssh_ip)s %(cmd)s", 'CMD_RPM_UNINSTALL': "rpm -e network-configuration", 'CMD_RPM_INSTALL': "rpm -i /home/%(rpm)s" % {'rpm': self.rpm_name}, 'CMD_RPM_UPDATE': "rpm -U /home/%(rpm)s" % {'rpm': self.rpm_name}, - 'CMD_RPM_SCP': "scp -o StrictHostKeyChecking=no %(path)s root@%(ssh_ip)s:/home" % + 'CMD_RPM_SCP': "scp -o StrictHostKeyChecking=no \ + %(path)s root@%(ssh_ip)s:/home" % {'path': self.NETCFG_RPM_PATH, 'ssh_ip': mgnt_ip} } LOG.info(_("<<>>" % self.rpm_name)) @@ -329,13 +449,17 @@ class TecsShellExector(object): def _uninstall_netcfg_rpm(self): self.clush_cmd = self.oper_shell['CMD_SSHPASS_PRE'] % \ - {"ssh_ip":"ssh -o StrictHostKeyChecking=no " + self.mgnt_ip, "cmd":self.oper_shell['CMD_RPM_UNINSTALL']} - subprocess.check_output(self.clush_cmd, shell = True, stderr=subprocess.STDOUT) + {"ssh_ip": "ssh -o StrictHostKeyChecking=no " + self.mgnt_ip, + "cmd": self.oper_shell['CMD_RPM_UNINSTALL']} + subprocess.check_output( + self.clush_cmd, shell=True, stderr=subprocess.STDOUT) def _update_netcfg_rpm(self): self.clush_cmd = self.oper_shell['CMD_SSHPASS_PRE'] % \ - {"ssh_ip":"ssh -o StrictHostKeyChecking=no " + self.mgnt_ip, "cmd":self.oper_shell['CMD_RPM_UPDATE']} - subprocess.check_output(self.clush_cmd, shell = True, stderr=subprocess.STDOUT) + {"ssh_ip": "ssh -o StrictHostKeyChecking=no " + self.mgnt_ip, + "cmd": self.oper_shell['CMD_RPM_UPDATE']} + subprocess.check_output( + self.clush_cmd, shell=True, stderr=subprocess.STDOUT) def _install_netcfg_rpm(self): if not os.path.exists(self.NETCFG_RPM_PATH): @@ -343,22 +467,30 @@ class TecsShellExector(object): return self.clush_cmd = "%s;%s" % \ - (self.oper_shell['CMD_SSHPASS_PRE'] % - {"ssh_ip":"", "cmd":self.oper_shell['CMD_RPM_SCP']}, \ - self.oper_shell['CMD_SSHPASS_PRE'] % - {"ssh_ip":"ssh -o StrictHostKeyChecking=no " + self.mgnt_ip, "cmd":self.oper_shell['CMD_RPM_INSTALL']}) - subprocess.check_output(self.clush_cmd, shell = True, stderr=subprocess.STDOUT) + (self.oper_shell['CMD_SSHPASS_PRE'] % + {"ssh_ip": "", "cmd": self.oper_shell['CMD_RPM_SCP']}, + self.oper_shell['CMD_SSHPASS_PRE'] % + {"ssh_ip": "ssh -o StrictHostKeyChecking=no " + + self.mgnt_ip, "cmd": self.oper_shell['CMD_RPM_INSTALL']}) + subprocess.check_output( + self.clush_cmd, shell=True, stderr=subprocess.STDOUT) def _execute(self): try: - if not self.task_type or not self.mgnt_ip : - LOG.error(_("<<>>" % self.mgnt_ip, )) + if not self.task_type or not self.mgnt_ip: + LOG.error( + _("<<>>" % self.mgnt_ip, )) return self.oper_type[self.task_type]() except subprocess.CalledProcessError as e: - LOG.warn(_("<<>>" % (self.mgnt_ip, e.output.strip()))) + LOG.warn(_("<<>>" % ( + self.mgnt_ip, e.output.strip()))) except Exception as e: LOG.exception(_(e.message)) else: - LOG.info(_("<<>>" % 
(self.clush_cmd, self.mgnt_ip))) + LOG.info(_("<<>>" % ( + self.clush_cmd, self.mgnt_ip))) diff --git a/code/daisy/daisy/api/backends/tecs/config.py b/code/daisy/daisy/api/backends/tecs/config.py index e594917c..1869b41d 100755 --- a/code/daisy/daisy/api/backends/tecs/config.py +++ b/code/daisy/daisy/api/backends/tecs/config.py @@ -4,6 +4,8 @@ import re import commands import types import subprocess +import socket +import netaddr from oslo_log import log as logging from ConfigParser import ConfigParser from daisy.common import exception @@ -21,16 +23,18 @@ service_map = { 'ha': '', 'mariadb': 'mariadb', 'amqp': 'rabbitmq-server', - 'ceilometer-api':'openstack-ceilometer-api', - 'ceilometer-collector':'openstack-ceilometer-collector,openstack-ceilometer-mend', - 'ceilometer-central':'openstack-ceilometer-central', - 'ceilometer-notification':'openstack-ceilometer-notification', - 'ceilometer-alarm':'openstack-ceilometer-alarm-evaluator,openstack-ceilometer-alarm-notifier', + 'ceilometer-api': 'openstack-ceilometer-api', + 'ceilometer-collector': 'openstack-ceilometer-collector,\ + openstack-ceilometer-mend', + 'ceilometer-central': 'openstack-ceilometer-central', + 'ceilometer-notification': 'openstack-ceilometer-notification', + 'ceilometer-alarm': 'openstack-ceilometer-alarm-evaluator,\ + openstack-ceilometer-alarm-notifier', 'heat-api': 'openstack-heat-api', 'heat-api-cfn': 'openstack-heat-api-cfn', 'heat-engine': 'openstack-heat-engine', 'ironic': 'openstack-ironic-api,openstack-ironic-conductor', - 'horizon': 'httpd', + 'horizon': 'httpd,opencos-alarmmanager', 'keystone': 'openstack-keystone', 'glance': 'openstack-glance-api,openstack-glance-registry', 'cinder-volume': 'openstack-cinder-volume', @@ -47,8 +51,9 @@ service_map = { 'nova-vncproxy': 'openstack-nova-novncproxy,openstack-nova-consoleauth', 'nova-conductor': 'openstack-nova-conductor', 'nova-api': 'openstack-nova-api', - 'nova-cells': 'openstack-nova-cells' - } + 'nova-cells': 'openstack-nova-cells', + 'camellia-api': 'camellia-api' +} def add_service_with_host(services, name, host): @@ -63,36 +68,33 @@ def add_service_with_hosts(services, name, hosts): for h in hosts: services[name].append(h['management']['ip']) + def test_ping(ping_src_nic, ping_desc_ips): ping_cmd = 'fping' for ip in set(ping_desc_ips): ping_cmd = ping_cmd + ' -I ' + ping_src_nic + ' ' + ip - obj = subprocess.Popen(ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + obj = subprocess.Popen( + ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (stdoutput, erroutput) = obj.communicate() _returncode = obj.returncode if _returncode == 0 or _returncode == 1: ping_result = stdoutput.split('\n') - unreachable_hosts = [result.split()[0] for result in ping_result if result and result.split()[2] != 'alive'] + if "No such device" in erroutput: + return [] + reachable_hosts = [result.split( + )[0] for result in ping_result if result and + result.split()[2] == 'alive'] else: msg = "ping failed beaceuse there is invlid ip in %s" % ping_desc_ips raise exception.InvalidIP(msg) - return unreachable_hosts + return reachable_hosts -def get_local_deployment_ip(tecs_deployment_ip): - def _get_ip_segment(full_ip): - if not full_ip: - return None - match = re.search('([0-9]{1,3}\.){3}', full_ip) - if match: - return match.group() - else: - print "can't find ip segment" - return None - + +def get_local_deployment_ip(tecs_deployment_ips): (status, output) = commands.getstatusoutput('ifconfig') netcard_pattern = re.compile('\S*: ') ip_str = 
'([0-9]{1,3}\.){3}[0-9]{1,3}' - ip_pattern = re.compile('(inet %s)' % ip_str) + # ip_pattern = re.compile('(inet %s)' % ip_str) pattern = re.compile(ip_str) nic_ip = {} for netcard in re.finditer(netcard_pattern, str(output)): @@ -108,20 +110,20 @@ def get_local_deployment_ip(tecs_deployment_ip): nic_ip[nic_name] = ip.group() deployment_ip = '' - ip_segment = _get_ip_segment(tecs_deployment_ip) for nic in nic_ip.keys(): - if ip_segment == _get_ip_segment(nic_ip[nic]): + if nic_ip[nic] in tecs_deployment_ips: deployment_ip = nic_ip[nic] break if not deployment_ip: - for nic,ip in nic_ip.items(): - if not test_ping(nic,[tecs_deployment_ip]): + for nic, ip in nic_ip.items(): + if test_ping(nic, tecs_deployment_ips): deployment_ip = nic_ip[nic] break return deployment_ip class AnalsyConfig(object): + def __init__(self, all_configs): self.all_configs = all_configs @@ -139,24 +141,39 @@ class AnalsyConfig(object): self.glance_vip = '' self.public_vip = '' self.share_disk_services = [] + self.share_cluster_disk_services = [] self.ha_conf = {} self.child_cell_dict = {} self.ha_master_host = {} def get_heartbeats(self, host_interfaces): for network in host_interfaces: - #if network.has_key("deployment") and network["deployment"]["ip"]: - # self.heartbeats[0].append(network["deployment"]["ip"]) self.heartbeats[0].append(network["management"]["ip"]) - if network.has_key("storage") and network["storage"]["ip"]: - self.heartbeats[1].append(network["storage"]["ip"]) + # if network.has_key("heartbeat1") and network["heartbeat1"]["ip"]: + if "heartbeat1" in network and network["heartbeat1"]["ip"]: + self.heartbeats[1].append(network["heartbeat1"]["ip"]) - #delete empty heartbeat line + # if network.has_key("heartbeat2") and network["heartbeat2"]["ip"]: + if "heartbeat2" in network and network["heartbeat2"]["ip"]: + self.heartbeats[2].append(network["heartbeat2"]["ip"]) + + # if network.has_key("storage") and network["storage"]["ip"]: + if "storage" in network and network["storage"]["ip"]: + # if not network.has_key("heartbeat1"): + if "heartbeat1" not in network: + self.heartbeats[1].append(network["storage"]["ip"]) + # if network.has_key("heartbeat1") and not \ + # network.has_key("heartbeat2"): + if "heartbeat1" in network and \ + "heartbeat2" not in network: + self.heartbeats[2].append(network["storage"]["ip"]) + + # delete empty heartbeat line if not self.heartbeats[0]: - self.heartbeats[0] = self.heartbeats[1] - self.heartbeats[1] = self.heartbeats[2] + self.heartbeats[0] = self.heartbeats[1] + self.heartbeats[1] = self.heartbeats[2] if not self.heartbeats[1]: - self.heartbeats[1] = self.heartbeats[2] + self.heartbeats[1] = self.heartbeats[2] # remove repeated ip if set(self.heartbeats[1]) == set(self.heartbeats[0]): @@ -164,7 +181,8 @@ class AnalsyConfig(object): if set(self.heartbeats[2]) != set(self.heartbeats[0]): self.heartbeats[1] = self.heartbeats[2] self.heartbeats[2] = [] - if set(self.heartbeats[2]) == set(self.heartbeats[0]) or set(self.heartbeats[2]) == set(self.heartbeats[1]): + if set(self.heartbeats[2]) == set(self.heartbeats[0]) or \ + set(self.heartbeats[2]) == set(self.heartbeats[1]): self.heartbeats[2] = [] def prepare_child_cell(self, child_cell_name, configs): @@ -181,69 +199,105 @@ class AnalsyConfig(object): child_cell_host = configs['host_interfaces'][0]['management']['ip'] self.child_cell_dict[repr(child_cell_host).strip("u'")] \ = repr(cell_compute_hosts).strip("u'") - add_service_with_host(self.services, 'CONFIG_CHILD_CELL_DICT', - str(self.child_cell_dict)) def 
prepare_ha_lb(self, role_configs, is_ha, is_lb): - if is_lb: - self.ha_master_host['ip'] = role_configs['host_interfaces'][0]['management']['ip'] - self.ha_master_host['hostname'] = role_configs['host_interfaces'][0]['name'] - self.components.append('CONFIG_LB_INSTALL') - add_service_with_hosts(self.services, - 'CONFIG_LB_BACKEND_HOSTS', - role_configs['host_interfaces']) - self.lb_vip = role_configs['vip'] - if is_ha: - self.ha_vip = role_configs['vip'] - self.share_disk_services += role_configs['share_disk_services'] - local_deployment_ip = get_local_deployment_ip( - role_configs['host_interfaces'][0]['management']['ip']) - if local_deployment_ip: + if is_lb: + self.ha_master_host['ip'] = role_configs[ + 'host_interfaces'][0]['management']['ip'] + self.ha_master_host['hostname'] = role_configs[ + 'host_interfaces'][0]['name'] + self.components.append('CONFIG_LB_INSTALL') + add_service_with_hosts(self.services, + 'CONFIG_LB_BACKEND_HOSTS', + role_configs['host_interfaces']) + self.lb_vip = role_configs['vip'] + if is_ha: + # convert dns to ip + manage_ips = [] + for host_interface in role_configs['host_interfaces']: + manage_ip = '' + management_addr =\ + host_interface['management']['ip'] + try: + ip_lists = socket.gethostbyname_ex(management_addr) + manage_ip = ip_lists[2][0] + except Exception: + if netaddr.IPAddress(management_addr).version == 6: + manage_ip = management_addr + else: + raise exception.InvalidNetworkConfig( + "manage ip is not valid %s" % management_addr) + finally: + manage_ips.append(manage_ip) + + self.ha_vip = role_configs['vip'] + self.share_disk_services += role_configs['share_disk_services'] + self.share_cluster_disk_services += \ + role_configs['share_cluster_disk_services'] + local_deployment_ip = get_local_deployment_ip(manage_ips) + filename = r'/etc/zte-docker' + if local_deployment_ip: + if os.path.exists(filename): add_service_with_host( self.services, 'CONFIG_REPO', - 'http://'+local_deployment_ip+'/tecs_install/') + 'http://' + local_deployment_ip + + ':18080' + '/tecs_install/') else: - msg = "can't find ip for yum repo" - raise exception.InvalidNetworkConfig(msg) - self.components.append('CONFIG_HA_INSTALL') + add_service_with_host( + self.services, 'CONFIG_REPO', + 'http://' + local_deployment_ip + '/tecs_install/') + else: + msg = "can't find ip for yum repo" + raise exception.InvalidNetworkConfig(msg) + self.components.append('CONFIG_HA_INSTALL') + add_service_with_host( + self.services, 'CONFIG_HA_HOST', + role_configs['host_interfaces'][0]['management']['ip']) + add_service_with_hosts(self.services, 'CONFIG_HA_HOSTS', + role_configs['host_interfaces']) + ntp_host = role_configs['ntp_server'] \ + if role_configs['ntp_server'] else role_configs['vip'] + add_service_with_host(self.services, 'CONFIG_NTP_SERVERS', + ntp_host) + + if role_configs['db_vip']: + self.db_vip = role_configs['db_vip'] add_service_with_host( - self.services, 'CONFIG_HA_HOST', - role_configs['host_interfaces'][0]['management']['ip']) - add_service_with_hosts(self.services, 'CONFIG_HA_HOSTS', - role_configs['host_interfaces']) - ntp_host = role_configs['ntp_server'] \ - if role_configs['ntp_server'] else role_configs['vip'] - add_service_with_host(self.services, 'CONFIG_NTP_SERVERS', - ntp_host) + self.services, 'CONFIG_MARIADB_HOST', + role_configs['db_vip']) + else: + self.db_vip = role_configs['vip'] + add_service_with_host( + self.services, 'CONFIG_MARIADB_HOST', role_configs['vip']) - if role_configs['db_vip']: - self.db_vip = role_configs['db_vip'] - 
add_service_with_host(self.services, 'CONFIG_MARIADB_HOST', role_configs['db_vip']) - else: - self.db_vip = role_configs['vip'] - add_service_with_host(self.services, 'CONFIG_MARIADB_HOST', role_configs['vip']) + if role_configs['glance_vip']: + self.glance_vip = role_configs['glance_vip'] + add_service_with_host( + self.services, 'CONFIG_GLANCE_HOST', + role_configs['glance_vip']) + else: + self.glance_vip = role_configs['vip'] + add_service_with_host( + self.services, 'CONFIG_GLANCE_HOST', role_configs['vip']) - if role_configs['glance_vip']: - self.glance_vip = role_configs['glance_vip'] - add_service_with_host(self.services, 'CONFIG_GLANCE_HOST', role_configs['glance_vip']) - else: - self.glance_vip = role_configs['vip'] - add_service_with_host(self.services, 'CONFIG_GLANCE_HOST', role_configs['vip']) + if role_configs['public_vip']: + self.public_vip = role_configs['public_vip'] + else: + self.public_vip = role_configs['vip'] - if role_configs['public_vip']: - vip = role_configs['public_vip'] - self.public_vip = role_configs['public_vip'] - else: - vip = role_configs['vip'] - - self.public_vip = vip - add_service_with_host(self.services, - 'CONFIG_NOVA_VNCPROXY_HOST', vip) - add_service_with_host(self.services, 'CONFIG_PUBLIC_IP', vip) - add_service_with_host(self.services, 'CONFIG_HORIZON_HOST', vip) - - add_service_with_host(self.services, 'CONFIG_ADMIN_IP', vip) - add_service_with_host(self.services, 'CONFIG_INTERNAL_IP', vip) + add_service_with_host(self.services, + 'CONFIG_NOVA_VNCPROXY_HOST', + self.public_vip) + add_service_with_host(self.services, 'CONFIG_PUBLIC_IP', + self.public_vip) + add_service_with_host(self.services, 'CONFIG_HORIZON_HOST', + self.public_vip) + ''' + add_service_with_host(self.services, 'CONFIG_ADMIN_IP', + role_configs['vip']) + add_service_with_host(self.services, 'CONFIG_INTERNAL_IP', + role_configs['vip']) + ''' def prepare_role_service(self, is_ha, service, role_configs): host_key_name = "CONFIG_%s_HOST" % service @@ -251,7 +305,8 @@ class AnalsyConfig(object): add_service_with_hosts(self.services, hosts_key_name, role_configs['host_interfaces']) - if service != 'LB' and service not in ['NOVA_VNCPROXY', 'MARIADB', 'GLANCE', 'HORIZON']: + if service != 'LB' and service not in ['NOVA_VNCPROXY', 'MARIADB', + 'GLANCE', 'HORIZON']: add_service_with_host(self.services, host_key_name, role_configs['vip']) @@ -272,11 +327,12 @@ class AnalsyConfig(object): {'CONFIG_GLANCE_API_INSTALL_MODE': 'LB'}) self.modes.update( {'CONFIG_GLANCE_REGISTRY_INSTALL_MODE': 'LB'}) - #if s == 'HEAT': + # if s == 'HEAT': # self.modes.update({'CONFIG_HEAT_API_INSTALL_MODE': 'LB'}) # self.modes.update({'CONFIG_HEAT_API_CFN_INSTALL_MODE': 'LB'}) - #if s == 'CEILOMETER': - # self.modes.update({'CONFIG_CEILOMETER_API_INSTALL_MODE': 'LB'}) + # if s == 'CEILOMETER': + # self.modes.update({ + # 'CONFIG_CEILOMETER_API_INSTALL_MODE': 'LB'}) if service == 'IRONIC': self.modes.update( {'CONFIG_IRONIC_API_INSTALL_MODE': 'LB'}) @@ -287,8 +343,8 @@ class AnalsyConfig(object): if component not in self.services_in_component.keys(): self.services_in_component[component] = {} self.services_in_component[component]["service"] = [] - self.services_in_component[component]["service"].append(service_map[service]) - + self.services_in_component[component][ + "service"].append(service_map[service]) if component == "horizon": self.services_in_component[component]["fip"] = self.public_vip @@ -296,13 +352,13 @@ class AnalsyConfig(object): self.services_in_component[component]["fip"] = self.db_vip elif 
component == "glance": self.services_in_component[component]["fip"] = self.glance_vip - else: + else: self.services_in_component[component]["fip"] = role_configs["vip"] - - + network_name = '' - if component in ['horizon'] and role_configs["host_interfaces"][0].has_key('public'): - network_name = 'public' + if component in ['horizon'] and\ + 'publicapi' in role_configs["host_interfaces"][0]: + network_name = 'publicapi' else: network_name = 'management' @@ -311,10 +367,10 @@ class AnalsyConfig(object): self.services_in_component[component]["nic_name"] = \ role_configs["host_interfaces"][0][network_name]["name"] if component == 'loadbalance' and \ - self.all_configs.has_key('CONTROLLER_LB') and \ + 'CONTROLLER_LB' in self.all_configs and \ self.all_configs['CONTROLLER_LB']['vip']: - self.services_in_component[component]["fip"] = \ - self.all_configs['CONTROLLER_LB']['vip'] + self.services_in_component[component]["fip"] = \ + self.all_configs['CONTROLLER_LB']['vip'] def prepare_amqp_mariadb(self): if self.lb_vip: @@ -331,15 +387,20 @@ class AnalsyConfig(object): else: amqp_vip = self.ha_vip amqp_dict = "{'%s':'%s,%s,%s,%s'}" % (amqp_vip, self.ha_vip, - self.lb_vip, self.glance_vip, self.public_vip) + self.lb_vip, self.glance_vip, + self.public_vip) mariadb_dict = "{'%s':'%s,%s,%s,%s'}" % (self.db_vip, self.ha_vip, - self.lb_vip, self.glance_vip, self.public_vip) + self.lb_vip, + self.glance_vip, + self.public_vip) add_service_with_host(self.services, 'CONFIG_LB_HOST', self.lb_vip) elif self.ha_vip: amqp_dict = "{'%s':'%s,%s,%s'}" % (self.ha_vip, self.ha_vip, - self.glance_vip, self.public_vip) + self.glance_vip, + self.public_vip) mariadb_dict = "{'%s':'%s,%s,%s'}" % (self.db_vip, self.ha_vip, - self.glance_vip, self.public_vip) + self.glance_vip, + self.public_vip) else: amqp_dict = "{}" mariadb_dict = "{}" @@ -382,50 +443,51 @@ class AnalsyConfig(object): self.prepare_amqp_mariadb() + if self.child_cell_dict: + add_service_with_host(self.services, 'CONFIG_CHILD_CELL_DICT', + str(self.child_cell_dict)) + def update_conf_with_services(self, tecs): for s in self.services: if tecs.has_option("general", s): - print "%s is update" % s - if type(self.services[s]) is types.ListType: + # if type(self.services[s]) is types.ListType: + if isinstance(self.services[s], types.ListType): if self.services[s] and not self.services[s][0]: return tecs.set("general", s, ','.join(self.services[s])) else: - print "service %s is not exit in conf file" % s + msg = "service %s is not exit in conf file" % s + LOG.info(msg) def update_conf_with_components(self, tecs): for s in self.components: if tecs.has_option("general", s): - print "Component %s is update" % s tecs.set("general", s, 'y') else: - print "component %s is not exit in conf file" % s + msg = "component %s is not exit in conf file" % s + LOG.info(msg) def update_conf_with_modes(self, tecs): for k, v in self.modes.items(): if tecs.has_option("general", k): - print "mode %s is update" % k tecs.set("general", k, v) else: - print "mode %s is not exit in conf file" % k + msg = "mode %s is not exit in conf file" % k + LOG.info(msg) def update_tecs_conf(self, tecs): self.update_conf_with_services(tecs) self.update_conf_with_components(tecs) self.update_conf_with_modes(tecs) - + def update_ha_conf(self, ha, ha_nic_name, tecs=None): - print "heartbeat line is update" - heart_beat_list = [] if self.all_configs['OTHER'].get('dns_config'): for heartbeat in self.heartbeats: - tmp_list = [] for name_ip in self.all_configs['OTHER']['dns_config']: for tmp in heartbeat: 
if tmp == name_ip.keys()[0]: - tmp_list.append(name_ip.values()[0]) - heart_beat_list.append(tmp_list) - self.heartbeats = heart_beat_list + heartbeat.remove(tmp) + heartbeat.append(name_ip.values()[0]) for k, v in self.services_in_component.items(): for name_ip in self.all_configs['OTHER']['dns_config']: @@ -435,65 +497,110 @@ class AnalsyConfig(object): ha.set('DEFAULT', 'heartbeat_link2', ','.join(self.heartbeats[1])) ha.set('DEFAULT', 'heartbeat_link3', ','.join(self.heartbeats[2])) - ha.set('DEFAULT', 'components', ','.join(self.services_in_component.keys())) + ha.set('DEFAULT', 'components', ','.join( + self.services_in_component.keys())) for k, v in self.services_in_component.items(): - print "component %s is update" % k ha.set('DEFAULT', k, ','.join(v['service'])) if k == 'glance': if 'glance' in self.share_disk_services: ha.set('DEFAULT', 'glance_device_type', 'iscsi') - ha.set('DEFAULT', 'glance_device', '/dev/mapper/vg_glance-lv_glance') + ha.set( + 'DEFAULT', 'glance_device', + '/dev/mapper/vg_glance-lv_glance') ha.set('DEFAULT', 'glance_fs_type', 'ext4') else: ha.set('DEFAULT', 'glance_device_type', 'drbd') - ha.set('DEFAULT', 'glance_device', '/dev/vg_data/lv_glance') + ha.set( + 'DEFAULT', 'glance_device', '/dev/vg_data/lv_glance') ha.set('DEFAULT', 'glance_fs_type', 'ext4') # mariadb now not support db cluster, don't support share disk. if k == "database": if 'db' in self.share_disk_services: - ha.set('DEFAULT', 'database_device', '/dev/mapper/vg_db-lv_db') + ha.set( + 'DEFAULT', 'database_device', + '/dev/mapper/vg_db-lv_db') ha.set('DEFAULT', 'database_fs_type', 'ext4') - + ha.set('DEFAULT', 'database_device_type', 'share') + if tecs: + tecs.set( + "general", + 'CONFIG_HA_INSTALL_MARIADB_LOCAL', + 'n') + elif 'db' in self.share_cluster_disk_services: + ha.set( + 'DEFAULT', 'database_device', + '/dev/mapper/vg_db-lv_db') + ha.set('DEFAULT', 'database_fs_type', 'ext4') + ha.set('DEFAULT', 'database_device_type', 'share_cluster') + if tecs: + tecs.set( + "general", + 'CONFIG_HA_INSTALL_MARIADB_LOCAL', + 'y') + else: + ha.set('DEFAULT', 'database_device_type', 'local_cluster') + if tecs: + tecs.set( + "general", + 'CONFIG_HA_INSTALL_MARIADB_LOCAL', + 'y') + + if 'db_backup' in self.share_disk_services: + ha.set( + 'DEFAULT', + 'backup_database_device', + '/dev/mapper/vg_db_backup-lv_db_backup') + ha.set('DEFAULT', 'backup_database_fs_type', 'ext4') + if "mongod" in v['service']: if 'mongodb' in self.share_disk_services: - ha.set('DEFAULT', 'mongod_device', '/dev/mapper/vg_mongodb-lv_mongodb') + ha.set( + 'DEFAULT', 'mongod_device', + '/dev/mapper/vg_mongodb-lv_mongodb') ha.set('DEFAULT', 'mongod_fs_type', 'ext4') ha.set('DEFAULT', 'mongod_local', '') if tecs: - tecs.set("general", 'CONFIG_HA_INSTALL_MONGODB_LOCAL', 'n') + tecs.set( + "general", + 'CONFIG_HA_INSTALL_MONGODB_LOCAL', 'n') else: ha.set('DEFAULT', 'mongod_fs_type', 'ext4') ha.set('DEFAULT', 'mongod_local', 'yes') if tecs: - tecs.set("general", 'CONFIG_HA_INSTALL_MONGODB_LOCAL', 'y') + tecs.set( + "general", + 'CONFIG_HA_INSTALL_MONGODB_LOCAL', 'y') if k not in self.lb_components: # if "bond" in v['nic_name']: # v['nic_name'] = "vport" - ha.set('DEFAULT', k+'_fip', v['fip']) + ha.set('DEFAULT', k + '_fip', v['fip']) if ha_nic_name and k not in ['horizon']: nic_name = ha_nic_name else: nic_name = v['nic_name'] - ha.set('DEFAULT', k+'_nic', nic_name) + ha.set('DEFAULT', k + '_nic', nic_name) cidr_netmask = reduce(lambda x, y: x + y, - [bin(int(i)).count('1') for i in v['netmask'].split('.')]) - ha.set('DEFAULT', 
k+'_netmask', cidr_netmask) + [bin(int(i)).count('1') + for i in v['netmask'].split('.')]) + ha.set('DEFAULT', k + '_netmask', cidr_netmask) + def update_conf(tecs, key, value): tecs.set("general", key, value) + def get_conf(tecs_conf_file, **kwargs): result = {} if not kwargs: - return result + return result tecs = ConfigParser() tecs.optionxform = str tecs.read(tecs_conf_file) - result = {key : tecs.get("general", kwargs.get(key, None)) + result = {key: tecs.get("general", kwargs.get(key, None)) for key in kwargs.keys() if tecs.has_option("general", kwargs.get(key, None))} return result @@ -563,6 +670,7 @@ class DvsDaisyConfig(object): # common self.dvs_network_type = [] self.dvs_vswitch_type = {} + self.dvs_cpu_sets = [] self.dvs_physnics = [] self.enable_sdn = False @@ -586,6 +694,9 @@ class DvsDaisyConfig(object): return self.dvs_vswitch_type.update(vswitch_type) + dvs_cpu_sets = network.get('dvs_cpu_sets') + self.dvs_cpu_sets.extend(dvs_cpu_sets) + network_type = network['network_config'].get('network_type') if network_type in ['vlan']: @@ -601,13 +712,16 @@ class DvsDaisyConfig(object): self.dvs_vswitch_type.get('ovs_agent_patch')) and ( len(self.dvs_vswitch_type.get('ovs_agent_patch')) > 0): return - - if not self.dvs_vswitch_type.get('ovs_agent_patch') and not self.dvs_vswitch_type.get('ovdk'): + + if not self.dvs_vswitch_type.get('ovs_agent_patch') and not\ + self.dvs_vswitch_type.get('ovdk'): return - + update_conf(self.tecs, 'CONFIG_DVS_TYPE', self.dvs_vswitch_type) update_conf(self.tecs, 'CONFIG_DVS_PHYSICAL_NICS', ",".join(set(self.dvs_physnics))) + # cpu sets for dvs, add CONFIG_DVS_CPU_SETS to tecs.conf firstly + update_conf(self.tecs, 'CONFIG_DVS_CPU_SETS', self.dvs_cpu_sets) if 'vlan' in self.dvs_network_type: update_conf(self.tecs, 'CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS', @@ -693,12 +807,13 @@ class DvsDaisyConfig(object): default_tecs_conf_template_path = "/var/lib/daisy/tecs/" tecs_conf_template_path = default_tecs_conf_template_path + def private_network_conf(tecs, private_networks_config): if private_networks_config: mode_str = { - '0':'(active-backup;off;"%s-%s")', - '1':'(balance-slb;off;"%s-%s")', - '2':'(balance-tcp;active;"%s-%s")' + '0': '(active-backup;off;"%s-%s")', + '1': '(balance-slb;off;"%s-%s")', + '2': '(balance-tcp;active;"%s-%s")' } config_neutron_sriov_bridge_mappings = [] @@ -709,10 +824,11 @@ def private_network_conf(tecs, private_networks_config): type = private_network.get('type', None) name = private_network.get('name', None) assign_networks = private_network.get('assigned_networks', None) - slave1 = private_network.get('slave1', None) - slave2 = private_network.get('slave2', None) + slave1 = private_network.get('slave1', None) + slave2 = private_network.get('slave2', None) mode = private_network.get('mode', None) - if not type or not name or not assign_networks or not slave1 or not slave2 or not mode: + if not type or not name or not assign_networks or not\ + slave1 or not slave2 or not mode: break for assign_network in assign_networks: @@ -724,23 +840,33 @@ def private_network_conf(tecs, private_networks_config): break # ether - if 0 == cmp(type, 'ether') and 0 == cmp(network_type, 'PRIVATE'): + if 0 == cmp(type, 'ether') and\ + 0 == cmp(network_type, 'DATAPLANE'): if 0 == cmp(ml2_type, 'sriov'): - config_neutron_sriov_bridge_mappings.append("%s:%s" % (physnet_name, "br-" + name)) - config_neutron_sriov_physnet_ifaces.append("%s:%s" % (physnet_name, name)) - elif 0 == cmp(ml2_type, 'ovs'): - config_neutron_ovs_bridge_mappings.append("%s:%s" % 
(physnet_name, "br-" + name)) - config_neutron_ovs_physnet_ifaces.append("%s:%s" % (physnet_name, name)) - # bond - elif 0 == cmp(type, 'bond') and 0 == cmp(network_type, 'PRIVATE'): - if 0 == cmp(ml2_type, 'sriov'): - config_neutron_sriov_bridge_mappings.append("%s:%s" % (physnet_name, "br-" + name)) + config_neutron_sriov_bridge_mappings.append( + "%s:%s" % (physnet_name, "br-" + name)) config_neutron_sriov_physnet_ifaces.append( - "%s:%s" % (physnet_name, name + mode_str[mode] % (slave1, slave2))) + "%s:%s" % (physnet_name, name)) elif 0 == cmp(ml2_type, 'ovs'): - config_neutron_ovs_bridge_mappings.append("%s:%s" % (physnet_name, "br-" + name)) + config_neutron_ovs_bridge_mappings.append( + "%s:%s" % (physnet_name, "br-" + name)) config_neutron_ovs_physnet_ifaces.append( - "%s:%s" % (physnet_name, name + mode_str[mode] % (slave1, slave2))) + "%s:%s" % (physnet_name, name)) + # bond + elif 0 == cmp(type, 'bond') and\ + 0 == cmp(network_type, 'DATAPLANE'): + if 0 == cmp(ml2_type, 'sriov'): + config_neutron_sriov_bridge_mappings.append( + "%s:%s" % (physnet_name, "br-" + name)) + config_neutron_sriov_physnet_ifaces.append( + "%s:%s" % (physnet_name, name + mode_str[mode] + % (slave1, slave2))) + elif 0 == cmp(ml2_type, 'ovs'): + config_neutron_ovs_bridge_mappings.append( + "%s:%s" % (physnet_name, "br-" + name)) + config_neutron_ovs_physnet_ifaces.append( + "%s:%s" % (physnet_name, name + mode_str[mode] + % (slave1, slave2))) if config_neutron_sriov_bridge_mappings: update_conf(tecs, @@ -750,18 +876,18 @@ def private_network_conf(tecs, private_networks_config): update_conf(tecs, 'CONFIG_NEUTRON_SRIOV_PHYSNET_IFACES', ",".join(config_neutron_sriov_physnet_ifaces)) - if config_neutron_ovs_bridge_mappings : - update_conf(tecs, 'CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS', ",".join(config_neutron_ovs_bridge_mappings)) + if config_neutron_ovs_bridge_mappings: + update_conf(tecs, 'CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS', + ",".join(config_neutron_ovs_bridge_mappings)) if config_neutron_ovs_physnet_ifaces: - update_conf(tecs, 'CONFIG_NEUTRON_OVS_PHYSNET_IFACES', ",".join(config_neutron_ovs_physnet_ifaces)) + update_conf(tecs, 'CONFIG_NEUTRON_OVS_PHYSNET_IFACES', + ",".join(config_neutron_ovs_physnet_ifaces)) + def update_tecs_config(config_data, cluster_conf_path): - print "tecs config data is:" - import pprint - pprint.pprint(config_data) - msg="tecs config data is: %s" % config_data + msg = "tecs config data is: %s" % config_data LOG.info(msg) - + daisy_tecs_path = tecs_conf_template_path tecs_conf_template_file = os.path.join(daisy_tecs_path, "tecs.conf") ha_conf_template_file = os.path.join(daisy_tecs_path, "HA.conf") @@ -773,49 +899,67 @@ def update_tecs_config(config_data, cluster_conf_path): tecs = ConfigParser() tecs.optionxform = str tecs.read(tecs_conf_template_file) - + cluster_data = config_data['OTHER']['cluster_data'] update_conf(tecs, 'CLUSTER_ID', cluster_data['id']) - if cluster_data.has_key('networking_parameters'): + # if cluster_data.has_key('networking_parameters'): + if 'networking_parameters' in cluster_data: networking_parameters = cluster_data['networking_parameters'] - if networking_parameters.has_key('base_mac') and networking_parameters['base_mac']: - update_conf(tecs, 'CONFIG_NEUTRON_BASE_MAC', networking_parameters['base_mac']) - if networking_parameters.has_key('gre_id_range') and len(networking_parameters['gre_id_range'])>1 \ - and networking_parameters['gre_id_range'][0] and networking_parameters['gre_id_range'][1]: - update_conf(tecs, 'CONFIG_NEUTRON_ML2_TUNNEL_ID_RANGES', 
("%s:%s" % (networking_parameters['gre_id_range'][0],networking_parameters['gre_id_range'][1]))) - if networking_parameters.get("vni_range",['1000','3000']) and len(networking_parameters['vni_range'])>1 \ - and networking_parameters['vni_range'][0] and networking_parameters['vni_range'][1]: - update_conf(tecs, 'CONFIG_NEUTRON_ML2_VNI_RANGES', ("%s:%s" % (networking_parameters['vni_range'][0],networking_parameters['vni_range'][1]))) - if networking_parameters.get("segmentation_type","vlan"): - segmentation_type = networking_parameters.get("segmentation_type","vlan") - update_conf(tecs, 'CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES', segmentation_type) - update_conf(tecs, 'CONFIG_NEUTRON_ML2_TYPE_DRIVERS', segmentation_type) + # if networking_parameters.has_key('base_mac') and\ + if 'base_mac'in networking_parameters and\ + networking_parameters['base_mac']: + update_conf( + tecs, 'CONFIG_NEUTRON_BASE_MAC', + networking_parameters['base_mac']) + # if networking_parameters.has_key('gre_id_range') and\ + if 'gre_id_range' in networking_parameters and\ + len(networking_parameters['gre_id_range']) > 1 \ + and networking_parameters['gre_id_range'][0] and\ + networking_parameters['gre_id_range'][1]: + update_conf(tecs, 'CONFIG_NEUTRON_ML2_TUNNEL_ID_RANGES', + ("%s:%s" % (networking_parameters['gre_id_range'][0], + networking_parameters['gre_id_range'][1]))) + if 'vxlan' in config_data['OTHER'].get('segmentation_type', {}): + update_conf( + tecs, 'CONFIG_NEUTRON_ML2_VNI_RANGES', + config_data['OTHER']['segmentation_type']['vxlan']['vni_range']) + update_conf(tecs, 'CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES', 'vxlan') + update_conf(tecs, 'CONFIG_NEUTRON_ML2_TYPE_DRIVERS', 'vxlan') + else: + update_conf(tecs, 'CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES', 'vlan') + update_conf(tecs, 'CONFIG_NEUTRON_ML2_TYPE_DRIVERS', 'vlan') physic_network_cfg = config_data['OTHER']['physic_network_config'] if physic_network_cfg.get('json_path', None): - update_conf(tecs, 'CONFIG_NEUTRON_ML2_JSON_PATH', physic_network_cfg['json_path']) + update_conf( + tecs, 'CONFIG_NEUTRON_ML2_JSON_PATH', + physic_network_cfg['json_path']) if physic_network_cfg.get('vlan_ranges', None): - update_conf(tecs, 'CONFIG_NEUTRON_ML2_VLAN_RANGES',physic_network_cfg['vlan_ranges']) + update_conf(tecs, 'CONFIG_NEUTRON_ML2_VLAN_RANGES', + physic_network_cfg['vlan_ranges']) if config_data['OTHER']['tecs_installed_hosts']: - update_conf(tecs, 'EXCLUDE_SERVERS', ",".join(config_data['OTHER']['tecs_installed_hosts'])) + update_conf(tecs, 'EXCLUDE_SERVERS', ",".join( + config_data['OTHER']['tecs_installed_hosts'])) ha = ConfigParser() ha.optionxform = str ha.read(ha_conf_template_file) config = AnalsyConfig(config_data) - if config_data['OTHER'].has_key('ha_nic_name'): + # if config_data['OTHER'].has_key('ha_nic_name'): + if 'ha_nic_name'in config_data['OTHER']: ha_nic_name = config_data['OTHER']['ha_nic_name'] else: ha_nic_name = "" config.prepare() - + config.update_tecs_conf(tecs) config.update_ha_conf(ha, ha_nic_name, tecs) update_conf_with_zenic(tecs, config_data['OTHER']['zenic_config']) - if config_data['OTHER']['dvs_config'].has_key('network_config'): + # if config_data['OTHER']['dvs_config'].has_key('network_config'): + if 'network_config' in config_data['OTHER']['dvs_config']: config_data['OTHER']['dvs_config']['network_config']['enable_sdn'] = \ config_data['OTHER']['zenic_config'].get('vip', False) dvs_config = DvsDaisyConfig(tecs, config_data['OTHER']['dvs_config']) @@ -824,7 +968,7 @@ def update_tecs_config(config_data, cluster_conf_path): 
tecs.write(open(tecs_conf_out, "w+")) ha.write(open(ha_config_out, "w+")) - + return diff --git a/code/daisy/daisy/api/backends/tecs/disk_array.py b/code/daisy/daisy/api/backends/tecs/disk_array.py index 17dc469e..4ea8c064 100755 --- a/code/daisy/daisy/api/backends/tecs/disk_array.py +++ b/code/daisy/daisy/api/backends/tecs/disk_array.py @@ -16,25 +16,12 @@ """ /install endpoint for tecs API """ -import os -import copy import subprocess -import time - -import traceback -import webob.exc -from oslo_config import cfg from oslo_log import log as logging -from webob.exc import HTTPBadRequest -from webob.exc import HTTPForbidden - -from threading import Thread from daisy import i18n -from daisy import notifier from daisy.common import exception -import daisy.registry.client.v1.api as registry import daisy.api.backends.common as daisy_cmn import daisy.api.backends.tecs.common as tecs_cmn @@ -48,183 +35,281 @@ _ = i18n._ _LE = i18n._LE _LI = i18n._LI _LW = i18n._LW -tecs_state = tecs_cmn.TECS_STATE + + def _get_service_disk_for_disk_array(req, role_id): disk_info = [] - service_disks = tecs_cmn.get_service_disk_list(req, {'filters': {'role_id': role_id}}) + service_disks = tecs_cmn.get_service_disk_list(req, + {'filters': { + 'role_id': role_id}}) for service_disk in service_disks: share_disk = {} if service_disk['disk_location'] == 'share': share_disk['service'] = service_disk['service'] + share_disk['protocol_type'] = service_disk['protocol_type'] share_disk['lun'] = service_disk['lun'] - share_disk['data_ips'] = service_disk['data_ips'].split(',') + if service_disk['protocol_type'] == 'FIBER': + share_disk['fc_hba_wwpn'] = \ + service_disk['data_ips'].split(',') + else: + share_disk['data_ips'] = service_disk['data_ips'].split(',') share_disk['lvm_config'] = {} share_disk['lvm_config']['size'] = service_disk['size'] - share_disk['lvm_config']['vg_name'] = 'vg_%s' % service_disk['service'] - share_disk['lvm_config']['lv_name'] = 'lv_%s' % service_disk['service'] + share_disk['lvm_config']['vg_name'] =\ + 'vg_%s' % service_disk['service'] + share_disk['lvm_config']['lv_name'] =\ + 'lv_%s' % service_disk['service'] share_disk['lvm_config']['fs_type'] = 'ext4' disk_info.append(share_disk) return disk_info + +def _get_share_cluster_disk_for_disk_array(req, role_id): + ''' + disk_info = [{'service': 'db', 'lun': 'lun1', 'data_ips':'data_ip1'}, + {'service': 'db', 'lun': 'lun2', 'data_ips':'data_ip2'}, + {'service': 'glance', 'lun': 'lun3', 'data_ips':'data_ip3'}, + {'service': 'glance', 'lun': 'lun4', 'data_ips':'data_ip4'},] + ''' + disk_info = [] + service_disks = \ + tecs_cmn.get_service_disk_list(req, {'filters': {'role_id': role_id}}) + service_name = 'db' + for service_disk in service_disks: + share_cluster_disk = {} + if service_disk['disk_location'] == 'share_cluster': + share_cluster_disk['service'] = service_disk['service'] + share_cluster_disk['protocol_type'] = service_disk['protocol_type'] + share_cluster_disk['lun'] = service_disk['lun'] + if service_disk['protocol_type'] == 'FIBER': + share_cluster_disk['fc_hba_wwpn'] = \ + service_disk['data_ips'].split(',') + else: + share_cluster_disk['data_ips'] = \ + service_disk['data_ips'].split(',') + share_cluster_disk['lvm_config'] = {} + share_cluster_disk['lvm_config']['size'] = service_disk['size'] + share_cluster_disk['lvm_config']['vg_name'] =\ + 'vg_%s' % service_disk['service'] + share_cluster_disk['lvm_config']['lv_name'] =\ + 'lv_%s' % service_disk['service'] + share_cluster_disk['lvm_config']['fs_type'] = 'ext4' + 
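            # At this point (illustrative, using the sample values from the
            # docstring above) a finished entry for a non-FIBER disk looks like:
            # {'service': 'db', 'protocol_type': <protocol>, 'lun': 'lun1',
            #  'data_ips': ['data_ip1'],
            #  'lvm_config': {'size': <size>, 'vg_name': 'vg_db',
            #                 'lv_name': 'lv_db', 'fs_type': 'ext4'}}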
disk_info.append(share_cluster_disk) + return disk_info + + def _get_cinder_volume_for_disk_array(req, role_id): cinder_volume_info = [] - cinder_volumes = tecs_cmn.get_cinder_volume_list(req, {'filters': {'role_id': role_id}}) + cinder_volumes = tecs_cmn.get_cinder_volume_list(req, + {'filters': { + 'role_id': role_id}}) for cinder_volume in cinder_volumes: cv_info = {} - cv_info['management_ips'] = cinder_volume['management_ips'].split(',') + cv_info['management_ips'] =\ + cinder_volume['management_ips'].split(',') cv_info['data_ips'] = cinder_volume['data_ips'].split(',') cv_info['user_name'] = cinder_volume['user_name'] cv_info['user_pwd'] = cinder_volume['user_pwd'] index = cinder_volume['backend_index'] - cv_info['backend'] = {index:{}} - cv_info['backend'][index]['volume_driver'] = cinder_volume['volume_driver'] - cv_info['backend'][index]['volume_type'] = cinder_volume['volume_type'] - cv_info['backend'][index]['pools'] = cinder_volume['pools'].split(',') + cv_info['backend'] = {index: {}} + cv_info['backend'][index]['volume_driver'] =\ + cinder_volume['volume_driver'] + cv_info['backend'][index]['volume_type'] =\ + cinder_volume['volume_type'] + cv_info['backend'][index]['pools'] =\ + cinder_volume['pools'].split(',') cinder_volume_info.append(cv_info) return cinder_volume_info + def get_disk_array_info(req, cluster_id): share_disk_info = [] + share_cluster_disk_info = [] volume_disk_info = {} cinder_volume_disk_list = [] - roles = daisy_cmn.get_cluster_roles_detail(req,cluster_id) + roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id) for role in roles: if role['deployment_backend'] != daisy_cmn.tecs_backend_name: continue if role['name'] == 'CONTROLLER_HA': - share_disks = _get_service_disk_for_disk_array(req, role['id']) - share_disk_info += share_disks - cinder_volumes = _get_cinder_volume_for_disk_array(req, role['id']) + share_disks = _get_service_disk_for_disk_array(req, role['id']) + share_cluster_disks = \ + _get_share_cluster_disk_for_disk_array(req, role['id']) + share_disk_info += share_disks + share_cluster_disk_info += share_cluster_disks + cinder_volumes =\ + _get_cinder_volume_for_disk_array(req, role['id']) cinder_volume_disk_list += cinder_volumes if cinder_volume_disk_list: volume_disk_info['disk_array'] = cinder_volume_disk_list - return (share_disk_info, volume_disk_info) + return (share_disk_info, volume_disk_info, share_cluster_disk_info) -def get_host_min_mac(host_interfaces): - macs = [interface['mac'] for interface in host_interfaces - if interface['type'] == 'ether' and interface['mac']] - macs.sort() - return macs[0] -def get_ha_and_compute_ips(req, cluster_id): - controller_ha_nodes = {} - computer_ips = [] +def config_ha_share_disk(share_disk_info, + controller_ha_nodes, + share_cluster_disk_info=None): + ''' + share_disk_info = \ + [{'service': 'db', 'lun': 'lun1', 'data_ips':'data_ip1'}, + {'service': 'glance', 'lun': 'lun3', 'data_ips':'data_ip3'},] + share_cluster_disk_info = \ + [{'service': 'db', 'lun': 'lun1', 'data_ips':'data_ip1', ...}, + {'service': 'db', 'lun': 'lun2', 'data_ips':'data_ip2', ...}, + {'service': 'glance', 'lun': 'lun3', 'data_ips':'data_ip3'}, + {'service': 'glance', 'lun': 'lun4', 'data_ips':'data_ip4'},] + controller_ha_nodes[host_ip] = min_mac + ''' + sorted_db_share_cluster = [] + if share_cluster_disk_info: + db_share_cluster_disk = \ + [disk for disk in share_cluster_disk_info + if disk['service'] == 'db'] + if len(db_share_cluster_disk) != 2: + error_msg = 'share cluster disk: %s must be existed in pair.' 
% \ + db_share_cluster_disk + LOG.error(error_msg) + raise exception.InstallException(error_msg) + sorted_db_share_cluster = \ + sorted(db_share_cluster_disk, key=lambda s: s['lun']) + sorted_ha_nodes = \ + sorted(controller_ha_nodes.iteritems(), key=lambda d: d[1]) + sorted_ha_nodes_ip = [node[0] for node in sorted_ha_nodes] - roles = daisy_cmn.get_cluster_roles_detail(req,cluster_id) - cluster_networks = daisy_cmn.get_cluster_networks_detail(req, cluster_id) - for role in roles: - if role['deployment_backend'] != daisy_cmn.tecs_backend_name: - continue - role_hosts = daisy_cmn.get_hosts_of_role(req, role['id']) - for role_host in role_hosts: - #host has installed tecs are exclusive - if (role_host['status'] == tecs_state['ACTIVE'] or - role_host['status'] == tecs_state['UPDATING'] or - role_host['status'] == tecs_state['UPDATE_FAILED']): - continue - host_detail = daisy_cmn.get_host_detail(req, - role_host['host_id']) - host_ip = tecs_cmn.get_host_network_ip(req, - host_detail, - cluster_networks, - 'MANAGEMENT') - if role['name'] == "CONTROLLER_HA": - pxe_mac = [interface['mac'] for interface in host_detail['interfaces'] - if interface['is_deployment'] == True] - if pxe_mac and pxe_mac[0]: - controller_ha_nodes[host_ip] = pxe_mac[0] - else: - min_mac = get_host_min_mac(host_detail['interfaces']) - controller_ha_nodes[host_ip] = min_mac - if role['name'] == "COMPUTER": - computer_ips.append(host_ip) - return (controller_ha_nodes, computer_ips) - -def config_ha_share_disk(share_disk_info, controller_ha_nodes): - - error_msg = "" + all_share_disk_info = [] + if sorted_db_share_cluster: + all_share_disk_info = \ + [[disk] + share_disk_info for disk in sorted_db_share_cluster] + # all_share_disk_info = \ + # [[{'lun': 'lun1', 'service': 'db', 'data_ips': 'data_ip1'}, + # {'lun': 'lun3', 'service': 'glance', 'data_ips': 'data_ip3'}], + # [{'lun': 'lun2', 'service': 'db', 'data_ips': 'data_ip2'}, + # {'lun': 'lun3', 'service': 'glance', 'data_ips': 'data_ip3'}]] + else: + for index in range(len(sorted_ha_nodes)): + all_share_disk_info.append(share_disk_info) + # all_share_disk_info = \ + # [{'lun': 'lun3', 'service': 'glance', 'data_ips': 'data_ip3'}, + # {'lun': 'lun3', 'service': 'glance', 'data_ips': 'data_ip3'}] + + ''' cmd = 'rm -rf /var/lib/daisy/tecs/storage_auto_config/base/*.json' daisy_cmn.subprocess_call(cmd) - with open("/var/lib/daisy/tecs/storage_auto_config/base/control.json", "w") as fp: + with open("/var/lib/daisy/tecs/storage_auto_config/base/control.json",\ + "w") as fp: json.dump(share_disk_info, fp, indent=2) - - + for host_ip in controller_ha_nodes.keys(): - password = "ossdbg1" - cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password) - daisy_cmn.subprocess_call(cmd) - cmd = 'clush -S -w %s "mkdir -p /home/tecs_install"' % (host_ip,) - daisy_cmn.subprocess_call(cmd) try: scp_bin_result = subprocess.check_output( - 'scp -o StrictHostKeyChecking=no -r /var/lib/daisy/tecs/storage_auto_config %s:/home/tecs_install' % (host_ip,), + 'scp -o StrictHostKeyChecking=no -r\ + /var/lib/daisy/tecs/storage_auto_config\ + %s:/home/tecs_install' % (host_ip,), shell=True, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: - error_msg = "scp /var/lib/daisy/tecs/storage_auto_config to %s failed!" % host_ip - return error_msg + error_msg = "scp /var/lib/daisy/tecs/storage_auto_config\ + to %s failed!" 
% host_ip + raise exception.InstallException(error_msg) try: LOG.info(_("Config share disk for host %s" % host_ip)) - cmd = "cd /home/tecs_install/storage_auto_config/; python storage_auto_config.py share_disk %s" % controller_ha_nodes[host_ip] - exc_result = subprocess.check_output('clush -S -w %s "%s"' % (host_ip,cmd), - shell=True, stderr=subprocess.STDOUT) + cmd = "cd /home/tecs_install/storage_auto_config/;\ + python storage_auto_config.py share_disk %s"\ + % controller_ha_nodes[host_ip] + exc_result = subprocess.check_output( + 'clush -S -w %s "%s"' % (host_ip,cmd), + shell=True, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: LOG.info(_("Storage script error message: %s" % e.output)) - error_msg = "config Disk Array share disks on %s failed!" % host_ip - return error_msg - return error_msg + error_msg = "config Disk Array share disks\ + on %s failed!" % host_ip + raise exception.InstallException(error_msg) + ''' -def config_ha_cinder_volume(volume_disk_info, controller_ha_ips): - error_msg = "" cmd = 'rm -rf /var/lib/daisy/tecs/storage_auto_config/base/*.json' daisy_cmn.subprocess_call(cmd) - with open("/var/lib/daisy/tecs/storage_auto_config/base/cinder.json", "w") as fp: + + for (host_ip, share_disk) in zip(sorted_ha_nodes_ip, all_share_disk_info): + with open("/var/lib/daisy/tecs/storage_auto_config/base/control.json", + "w") as fp: + json.dump(share_disk, fp, indent=2) + + try: + subprocess.check_output( + 'scp -o StrictHostKeyChecking=no -r\ + /var/lib/daisy/tecs/storage_auto_config\ + %s:/home/tecs_install' % (host_ip,), + shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + error_msg = "scp /var/lib/daisy/tecs/storage_auto_config\ + to %s failed!" % host_ip + raise exception.InstallException(error_msg) + + try: + LOG.info(_("Config share disk for host %s" % host_ip)) + cmd = "cd /home/tecs_install/storage_auto_config/;\ + python storage_auto_config.py share_disk %s"\ + % controller_ha_nodes[host_ip] + subprocess.check_output( + 'clush -S -w %s "%s"' % (host_ip, cmd), + shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + LOG.info(_("Storage script error message: %s" % e.output)) + error_msg = "config Disk Array share disks\ + on %s failed!" % host_ip + raise exception.InstallException(error_msg) + + +def config_ha_cinder_volume(volume_disk_info, controller_ha_ips): + cmd = 'rm -rf /var/lib/daisy/tecs/storage_auto_config/base/*.json' + daisy_cmn.subprocess_call(cmd) + with open("/var/lib/daisy/tecs/storage_auto_config/base/cinder.json", + "w") as fp: json.dump(volume_disk_info, fp, indent=2) for host_ip in controller_ha_ips: - password = "ossdbg1" - cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password) - daisy_cmn.subprocess_call(cmd) - cmd = 'clush -S -w %s "mkdir -p /home/tecs_install"' % (host_ip,) - daisy_cmn.subprocess_call(cmd) try: - scp_bin_result = subprocess.check_output( - 'scp -o StrictHostKeyChecking=no -r /var/lib/daisy/tecs/storage_auto_config %s:/home/tecs_install' % (host_ip,), + subprocess.check_output( + 'scp -o StrictHostKeyChecking=no -r\ + /var/lib/daisy/tecs/storage_auto_config\ + %s:/home/tecs_install' % (host_ip,), shell=True, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: - error_msg = "scp /var/lib/daisy/tecs/storage_auto_config to %s failed!" % host_ip - return error_msg + error_msg = "scp /var/lib/daisy/tecs/storage_auto_config\ + to %s failed!" 
% host_ip + raise exception.InstallException(error_msg) try: LOG.info(_("Config cinder volume for host %s" % host_ip)) - cmd = 'cd /home/tecs_install/storage_auto_config/; python storage_auto_config.py cinder_conf %s' % host_ip - exc_result = subprocess.check_output('clush -S -w %s "%s"' % (host_ip,cmd), - shell=True, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - LOG.info(_("Storage script error message: %s" % e.output)) - error_msg = "config Disk Array cinder volumes on %s failed!" % host_ip - return error_msg - return error_msg - -def config_compute_multipath(all_nodes_ip): - error_msg = "" - for host_ip in all_nodes_ip: - password = "ossdbg1" - cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password) - daisy_cmn.subprocess_call(cmd) - cmd = 'clush -S -w %s "mkdir -p /home/tecs_install"' % (host_ip,) - daisy_cmn.subprocess_call(cmd) - try: - scp_bin_result = subprocess.check_output( - 'scp -o StrictHostKeyChecking=no -r /var/lib/daisy/tecs/storage_auto_config %s:/home/tecs_install' % (host_ip,), + cmd = 'cd /home/tecs_install/storage_auto_config/;\ + python storage_auto_config.py cinder_conf %s' % host_ip + subprocess.check_output( + 'clush -S -w %s "%s"' % (host_ip, cmd), shell=True, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: - error_msg = "scp /var/lib/daisy/tecs/storage_auto_config to %s failed!" % host_ip - return error_msg + LOG.info(_("Storage script error message: %s" % e.output)) + error_msg = "config Disk Array cinder volumes\ + on %s failed!" % host_ip + raise exception.InstallException(error_msg) + + +def config_compute_multipath(hosts_ip): + for host_ip in hosts_ip: + try: + subprocess.check_output( + 'scp -o StrictHostKeyChecking=no -r\ + /var/lib/daisy/tecs/storage_auto_config\ + %s:/home/tecs_install' % (host_ip,), + shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + error_msg = "scp /var/lib/daisy/tecs/storage_auto_config\ + to %s failed!" % host_ip + raise exception.InstallException(error_msg) try: LOG.info(_("Config multipath for host %s" % host_ip)) - cmd = 'cd /home/tecs_install/storage_auto_config/; python storage_auto_config.py check_multipath' - exc_result = subprocess.check_output('clush -S -w %s "%s"' % (host_ip,cmd), - shell=True, stderr=subprocess.STDOUT) + cmd = 'cd /home/tecs_install/storage_auto_config/;\ + python storage_auto_config.py check_multipath' + subprocess.check_output( + 'clush -S -w %s "%s"' % (host_ip, cmd), + shell=True, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: LOG.info(_("Storage script error message: %s" % e.output)) - error_msg = "config Disk Array multipath on %s failed!" % host_ip - return error_msg - return error_msg \ No newline at end of file + error_msg = "config Disk Array multipath\ + on %s failed!" % host_ip + raise exception.InstallException(error_msg) diff --git a/code/daisy/daisy/api/backends/tecs/install.py b/code/daisy/daisy/api/backends/tecs/install.py index 9477c944..9e32579c 100755 --- a/code/daisy/daisy/api/backends/tecs/install.py +++ b/code/daisy/daisy/api/backends/tecs/install.py @@ -1,1279 +1,1526 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -/install endpoint for tecs API -""" -import os -import re -import copy -import subprocess -import time - -import traceback -import webob.exc -from oslo_config import cfg -from oslo_log import log as logging -from webob.exc import HTTPBadRequest -from webob.exc import HTTPForbidden -from webob.exc import HTTPServerError - -from threading import Thread - -from daisy import i18n -from daisy import notifier - -from daisy.api import policy -import daisy.api.v1 - -from daisy.common import exception -import daisy.registry.client.v1.api as registry -from daisy.api.backends.tecs import config -from daisy.api.backends import driver -from daisy.api.network_api import network as neutron -from ironicclient import client as ironic_client -import daisy.api.backends.common as daisy_cmn -import daisy.api.backends.tecs.common as tecs_cmn -import daisy.api.backends.tecs.disk_array as disk_array -from daisy.api.configset import manager - -try: - import simplejson as json -except ImportError: - import json - -LOG = logging.getLogger(__name__) -_ = i18n._ -_LE = i18n._LE -_LI = i18n._LI -_LW = i18n._LW -SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS -SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS -ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE - -CONF = cfg.CONF -install_opts = [ - cfg.StrOpt('max_parallel_os_number', default=10, - help='Maximum number of hosts install os at the same time.'), -] -CONF.register_opts(install_opts) - -CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format') -CONF.import_opt('container_formats', 'daisy.common.config', - group='image_format') -CONF.import_opt('image_property_quota', 'daisy.common.config') - - -tecs_state = tecs_cmn.TECS_STATE -daisy_tecs_path = tecs_cmn.daisy_tecs_path - - -def _invalid_bond_type(network_type, vswitch_type, bond_mode): - msg = "Invalid bond_mode(%s) for %s in %s network" % ( - bond_mode, vswitch_type, network_type) - raise_exception = False - if bond_mode in ['0', '1', '2', '3', '4']: - return - - if bond_mode and (2 == len(bond_mode.split(';'))): - bond_mode, lacp_mode = bond_mode.split(';') - if network_type in ['vxlan'] and vswitch_type in ['dvs', 'DVS']: - if bond_mode in ['active-backup', 'balance-slb']: - if lacp_mode not in ['off']: - raise_exception = True - else: - raise_exception = True - - elif network_type in ['vlan'] and vswitch_type in ['dvs', 'DVS', - 'ovs', 'OVS']: - if bond_mode in ['balance-tcp']: - if lacp_mode not in ['active', 'passive', 'off']: - raise_exception = True - elif bond_mode in ['active-backup', 'balance-slb']: - if lacp_mode not in ['off']: - raise_exception = True - else: - raise_exception = True - else: - raise_exception = True - - if raise_exception: - raise exception.InstallException(msg) - - -def _get_host_private_networks(host_detail, cluster_private_networks_name): - """ - User member nic pci segment replace the bond pci, we use it generate the mappings.json. 
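For reference, the bond-mode rules enforced by _invalid_bond_type above (and re-added in the new module further down) can be restated as a small table-driven check. This is a reading aid, not the daisy implementation: numeric kernel bond modes '0' through '4' always pass, and anything else must be an '<ovs_bond_mode>;<lacp_mode>' pair allowed for that network and vswitch combination.

VALID_COMBOS = {
    ('vxlan', 'dvs'): {'active-backup': {'off'}, 'balance-slb': {'off'}},
    ('vlan', 'dvs'): {'balance-tcp': {'active', 'passive', 'off'},
                      'active-backup': {'off'}, 'balance-slb': {'off'}},
    ('vlan', 'ovs'): {'balance-tcp': {'active', 'passive', 'off'},
                      'active-backup': {'off'}, 'balance-slb': {'off'}},
}


def bond_mode_is_valid(network_type, vswitch_type, bond_mode):
    # kernel bond modes are always accepted
    if bond_mode in ['0', '1', '2', '3', '4']:
        return True
    if not bond_mode or len(bond_mode.split(';')) != 2:
        return False
    ovs_mode, lacp_mode = bond_mode.split(';')
    allowed = VALID_COMBOS.get((network_type, vswitch_type.lower()), {})
    return lacp_mode in allowed.get(ovs_mode, set())


assert bond_mode_is_valid('vlan', 'OVS', 'balance-tcp;active')
assert not bond_mode_is_valid('vxlan', 'DVS', 'balance-tcp;active')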
- :param host_detail: host infos - :param cluster_private_networks_name: network info in cluster - :return: - """ - host_private_networks = [hi for pn in cluster_private_networks_name - for hi in host_detail['interfaces'] - for assigned_network in hi['assigned_networks'] - if assigned_network and pn == assigned_network['name']] - - # If port type is bond,use pci segment of member port replace pci1 & pci2 segments of bond port - for interface_outer in host_private_networks: - if 0 != cmp(interface_outer.get('type', None), "bond"): - continue - slave1 = interface_outer.get('slave1', None) - slave2 = interface_outer.get('slave2', None) - if not slave1 or not slave2: - continue - interface_outer.pop('pci') - - for interface_inner in host_detail['interfaces']: - if 0 == cmp(interface_inner.get('name', None), slave1): - interface_outer['pci1'] = interface_inner['pci'] - elif 0 == cmp(interface_inner.get('name', None), slave2): - interface_outer['pci2'] = interface_inner['pci'] - return host_private_networks - -def _write_private_network_cfg_to_json(req, cluster_id, private_networks): - """ - Generate cluster private network json. We use the json file after tecs is installed. - :param private_networks: cluster private network params set. - :return: - """ - if not private_networks: - LOG.error("private networks can't be empty!") - return False - - cluster_hosts_network_cfg = {} - hosts_network_cfg = {} - for k in private_networks.keys(): - private_network_info = {} - for private_network in private_networks[k]: - # host_interface - type = private_network.get('type', None) - name = private_network.get('name', None) - assign_networks = private_network.get('assigned_networks', None) - slave1 = private_network.get('slave1', None) - slave2 = private_network.get('slave2', None) - pci = private_network.get('pci', None) - pci1 = private_network.get('pci1', None) - pci2 = private_network.get('pci2', None) - mode = private_network.get('mode', None) - if not type or not name or not assign_networks: - LOG.error("host_interface params invalid in private networks!") - continue - - for assign_network in assign_networks: - # network - #network_type = assign_network.get('network_type', None) - vswitch_type_network = daisy_cmn.get_assigned_network( - req, private_network['id'], assign_network['id']) - - vswitch_type = vswitch_type_network['vswitch_type'] - physnet_name = assign_network.get('name', None) - mtu = assign_network.get('mtu', None) - if not vswitch_type or not physnet_name: - LOG.error("private networks vswitch_type or physnet name is invalid!") - continue - - physnet_name_conf = {} - physnet_name_conf['type'] = type - physnet_name_conf['name'] = name - physnet_name_conf['vswitch_type'] = vswitch_type - if mtu: - physnet_name_conf['mtu'] = mtu - # physnet_name_conf['ml2'] = ml2_type + "(direct)" - if 0 == cmp("bond", type): - if not pci1 or not pci2 or not slave1 or not slave2 or not mode: - LOG.error("when type is 'bond',input params is invalid in private networks!") - continue - physnet_name_conf['slave1'] = slave1 - physnet_name_conf['slave2'] = slave2 - physnet_name_conf['pci1'] = pci1 - physnet_name_conf['pci2'] = pci2 - physnet_name_conf['mode'] = mode - _invalid_bond_type('vlan', 'OVS', mode) - elif 0 == cmp("ether", type): - if not pci: - LOG.error("when type is 'ether',input params is invalid in private networks!") - continue - physnet_name_conf['pci'] = pci - - if not physnet_name_conf: - continue - private_network_info[physnet_name] = physnet_name_conf - - if not private_network_info: - 
continue - hosts_network_cfg[k] = private_network_info - - if not hosts_network_cfg: - return False - cluster_hosts_network_cfg['hosts'] = hosts_network_cfg - mkdir_daisy_tecs_path = "mkdir -p " + daisy_tecs_path + cluster_id - daisy_cmn.subprocess_call(mkdir_daisy_tecs_path) - mapping_json = daisy_tecs_path + "/" + cluster_id + "/" + "mappings.json" - with open(mapping_json, "w+") as fp: - fp.write(json.dumps(cluster_hosts_network_cfg)) - return True - -def _conf_private_network(req, cluster_id, host_private_networks_dict, cluster_private_network_dict): - if not host_private_networks_dict: - LOG.info(_("No private network need config")) - return {} - - # different host(with ip) in host_private_networks_dict - config_neutron_ml2_vlan_ranges = [] - for k in host_private_networks_dict.keys(): - host_private_networks = host_private_networks_dict[k] - # different private network plane in host_interface - for host_private_network in host_private_networks: - assigned_networks = host_private_network.get('assigned_networks', None) - if not assigned_networks: - break - private_network_info = \ - [network for assigned_network in assigned_networks - for network in cluster_private_network_dict - if assigned_network and assigned_network['name'] == network['name']] - - host_private_network['assigned_networks'] = private_network_info - config_neutron_ml2_vlan_ranges += \ - ["%(name)s:%(vlan_start)s:%(vlan_end)s" % - {'name':network['name'], 'vlan_start':network['vlan_start'], 'vlan_end':network['vlan_end']} - for network in private_network_info - if network['name'] and network['vlan_start'] and network['vlan_end']] - - physic_network_cfg = {} - if _write_private_network_cfg_to_json(req, cluster_id, host_private_networks_dict): - physic_network_cfg['json_path'] = daisy_tecs_path + "/" + cluster_id + "/" + "mappings.json" - if config_neutron_ml2_vlan_ranges: - host_private_networks_vlan_range = ",".join(list(set(config_neutron_ml2_vlan_ranges))) - physic_network_cfg['vlan_ranges'] = host_private_networks_vlan_range - return physic_network_cfg - - -def _enable_network(host_networks_dict): - for network in host_networks_dict: - if network != []: - return True - return False - - -def _get_dvs_network_type(vxlan, vlan): - if _enable_network(vxlan): - return 'vxlan', vxlan - elif _enable_network(vlan): - return 'vlan', vlan - else: - return None, None - - -def _get_vtep_ip_ranges(ip_ranges): - vtep_ip_ranges = [] - for ip_range in ip_ranges: - ip_range_start = ip_range.get('start') - ip_range_end = ip_range.get('end') - if ip_range_start and ip_range_end: - vtep_ip_ranges.append( - [ip_range_start.encode('utf8'), - ip_range_end.encode('utf8')]) - return vtep_ip_ranges - - -def _get_dvs_vxlan_info(interfaces, mode_str): - vxlan_nic_info = '' - for interface in interfaces: - if interface['type'] == 'ether': - vxlan_nic_info = interface['name'] - elif interface['type'] == 'bond': - _invalid_bond_type('vxlan', 'DVS', interface.get('mode')) - name = interface.get('name', 'bond1') - if interface.get('mode') in ['0', '1', '2', '3', '4']: - try: - bond_mode = mode_str[ - 'vxlan'].get(interface.get('mode')) - except: - bond_mode = mode_str['vxlan']['0'] - vxlan_nic_info = name + bond_mode % ( - interface['slave1'], interface['slave2']) - else: - vxlan_nic_info = "%s(%s;%s-%s)" % ( - name, interface.get('mode'), - interface['slave1'], interface['slave2']) - return vxlan_nic_info - - -def _get_dvs_domain_id(assign_network, dvs_domain_id, host_ip): - domain_id = assign_network.get('dvs_domain_id') - if not domain_id: - 
domain_id = '0' - - domain_ip = dvs_domain_id.get(domain_id, []) - domain_ip.append(host_ip) - domain_ip = {domain_id.encode('utf8'): domain_ip} - return domain_ip - - -def _get_bridge_mappings(interface): - try: - interface = interface['assigned_networks'][0] - except: - return {} - - bridge_mappings = {} - if interface.get('network_type') in ['PRIVATE']: - phynet_name, nic = interface.get( - 'physnet_name').split('_') - phynet_name = interface.get('name') - if phynet_name and nic: - bridge_mappings.update({nic: phynet_name}) - return bridge_mappings - - -def _convert_bridge_mappings2list(bridge_mappings): - bridge_maps = [] - for nic, phynet in bridge_mappings.items(): - bridge_maps.append('%s:br_%s' % (phynet, nic)) - return set(bridge_maps) - - -def _convert_physical_mappings2list(physical_mappings): - physical_maps = [] - for phynet, nic_info in physical_mappings.items(): - physical_maps.append('%s:%s' % (phynet, nic_info)) - return set(physical_maps) - - -def _get_physical_mappings(interface, mode_str, bridge_mappings): - # bridge_mappings = {'eth0':'phynet1': 'bond0':'phynet2'} - vlan_nic_map_info = {} - phynic_name = interface.get('name') - physnet_name = bridge_mappings.get(phynic_name) - if interface['type'] == 'bond': - _invalid_bond_type('vlan', 'DVS', interface.get('mode')) - if interface.get('mode') in ['0', '1', '2', '3', '4']: - try: - bond_mode = mode_str['vlan'].get(interface.get('mode')) - except: - bond_mode = mode_str['vlan']['0'] - vlan_nic_map_info[physnet_name] = phynic_name + bond_mode % ( - interface['slave1'], interface['slave2']) - else: - # interface.get('mode') = active-backup;off - vlan_nic_map_info[physnet_name] = "%s(%s;%s-%s)" % ( - phynic_name, interface.get('mode'), - interface['slave1'], interface['slave2']) - else: - vlan_nic_map_info[physnet_name] = phynic_name - - return vlan_nic_map_info - - -def get_network_config_for_dvs(host_private_networks_dict, - cluster_private_network_dict): - # different private network plane in host_interface - host_private_networks_dict_for_dvs = copy.deepcopy( - host_private_networks_dict) - - for host_private_network in host_private_networks_dict_for_dvs: - private_networks = host_private_network.get( - 'assigned_networks', None) - if not private_networks: - break - private_network_info = \ - [network for private_network in private_networks - for network in cluster_private_network_dict - if private_network and private_network['name'] == network['name']] - host_private_network['assigned_networks'] = private_network_info - return host_private_networks_dict_for_dvs - - -def conf_dvs(req, host_vxlan_networks_dict, host_private_networks_dict): - mode_str = { - 'vxlan': - { - '0': '(active-backup;off;%s-%s)', - '1': '(balance-slb;off;%s-%s)', - }, - 'vlan': { - '0': '(active-backup;off;%s-%s)', - '1': '(balance-slb;off;%s-%s)', - '2': '(balance-tcp;active;%s-%s)' - } - } - - network_type, networks_dict = _get_dvs_network_type( - host_vxlan_networks_dict, host_private_networks_dict) - - if not network_type: - return {} - - dvs_config = {} - - network_config = {} - vswitch_type = {} - physnics_config = {} - installed_dvs = [] - installed_ovs = [] - network_config['network_type'] = network_type - - # for vxlan - network_config['vtep_ip_ranges'] = [] - dvs_domain_id = {} - - # for vlan - bridge_mappings = {} - physical_mappings = {} - - for host_ip, interfaces in networks_dict.items(): - host_ip = host_ip.encode('utf8') - assign_network = daisy_cmn.get_assigned_network( - req, interfaces[0]['id'], - 
interfaces[0]['assigned_networks'][0].get('id')) - - if assign_network['vswitch_type'] in ['dvs', 'DVS']: - installed_dvs.append(host_ip) - elif assign_network['vswitch_type'] in ['ovs', 'OVS']: - installed_ovs.append(host_ip) - - if network_type == 'vxlan': - network_config['vtep_ip_ranges'].extend( - _get_vtep_ip_ranges( - interfaces[0]['assigned_networks'][0].get('ip_ranges'))) - - dvs_domain_id.update( - _get_dvs_domain_id(assign_network, dvs_domain_id, host_ip)) - - if not physnics_config.get('vxlan_info'): - physnics_config['vxlan_info'] = _get_dvs_vxlan_info( - interfaces, mode_str) - - if network_type == 'vlan': - for interface in interfaces: - bridge_mapping = _get_bridge_mappings(interface) - physical_mapping = _get_physical_mappings( - interface, mode_str, bridge_mapping) - bridge_mappings.update(bridge_mapping) - physical_mappings.update(physical_mapping) - - vswitch_type['ovdk'] = installed_dvs - vswitch_type['ovs_agent_patch'] = installed_ovs - physnics_config['dvs_domain_id'] = dvs_domain_id - physnics_config['physical_mappings'] = ",".join( - _convert_physical_mappings2list(physical_mappings)) - physnics_config['bridge_mappings'] = ",".join( - _convert_bridge_mappings2list(bridge_mappings)) - - dvs_config['vswitch_type'] = vswitch_type - dvs_config['network_config'] = network_config - dvs_config['physnics_config'] = physnics_config - - return dvs_config - - -def _get_interfaces_network(req, host_detail, cluster_networks): - has_interfaces = {} - host_mngt_network = tecs_cmn.get_host_interface_by_network(host_detail, 'MANAGEMENT') - host_mgnt_ip = tecs_cmn.get_host_network_ip(req, host_detail, cluster_networks, 'MANAGEMENT') - host_mgnt_netmask = tecs_cmn.get_network_netmask(cluster_networks, 'MANAGEMENT') - host_mngt_network['ip'] = host_mgnt_ip - host_mngt_network['netmask'] = host_mgnt_netmask - has_interfaces['management'] = host_mngt_network - - host_deploy_network = tecs_cmn.get_host_interface_by_network(host_detail, 'DEPLOYMENT') - host_deploy_network_info = tecs_cmn.get_host_interface_by_network(host_detail, 'DEPLOYMENT') - #note:"is_deployment" can't label delpoyment network, it only used to label dhcp mac - if host_deploy_network_info: - host_deploy_ip = tecs_cmn.get_host_network_ip(req, host_detail, cluster_networks, 'DEPLOYMENT') - host_deploy_netmask = tecs_cmn.get_network_netmask(cluster_networks, 'DEPLOYMENT') - host_deploy_network_info['ip'] = host_deploy_ip - host_deploy_network_info['netmask'] = host_deploy_netmask - has_interfaces['deployment'] = host_deploy_network_info - - - host_storage_network_info = tecs_cmn.get_host_interface_by_network(host_detail, 'STORAGE') - if host_storage_network_info: - host_storage_ip = tecs_cmn.get_host_network_ip(req, host_detail, cluster_networks, 'STORAGE') - host_storage_netmask = tecs_cmn.get_network_netmask(cluster_networks, 'STORAGE') - host_storage_network_info['ip'] = host_storage_ip - host_storage_network_info['netmask'] = host_storage_netmask - has_interfaces['storage'] = host_storage_network_info - - host_public_network_info = tecs_cmn.get_host_interface_by_network(host_detail, 'PUBLIC') - - if host_public_network_info: - public_vlan_id = tecs_cmn.get_network_vlan_id(cluster_networks, 'PUBLIC') - - if public_vlan_id: - public_nic_name = host_public_network_info['name'] + '.' 
+ public_vlan_id - else: - public_nic_name = host_public_network_info['name'] - - host_public_ip = tecs_cmn.get_host_network_ip(req, host_detail, cluster_networks, 'PUBLIC') - host_public_netmask = tecs_cmn.get_network_netmask(cluster_networks, 'PUBLIC') - host_public_network_info['ip'] = host_public_ip - host_public_network_info['name'] = public_nic_name - host_public_network_info['netmask'] = host_public_netmask - has_interfaces['public'] = host_public_network_info - return has_interfaces - -def _get_host_nic_name(cluster_network, host_detail): - """ - Different networking will generate different ha port name, the rule of generation - is describe in comment. - :param cluster_network: Network info in cluster. - :param host_detail: - :return: - """ - copy_host_detail = copy.deepcopy(host_detail) - - mgr_interface_info = tecs_cmn.get_host_interface_by_network(copy_host_detail, 'MANAGEMENT') - nic_info = [network - for network in cluster_network - for netname in mgr_interface_info.get('assigned_networks', None) - if network.get('name', None) == netname] - - nic_capability = [info['capability'] for info in nic_info if info['network_type'] != "PRIVATE"] - if not nic_capability or nic_capability == [None]: - return mgr_interface_info['name'] - - mgr_nic_info = [mgr_net for mgr_net in nic_info if mgr_net['network_type'] == "MANAGEMENT"][0] - # if private and management plane is unifier - if set(["PRIVATE", "MANAGEMENT"]).issubset(set([info['network_type'] for info in nic_info])): - # if type = 'ether' and 'ovs' not in ml2 and management is 'high' - if "ether" == mgr_interface_info.get('type', None) and \ - "ovs" not in [mgr_interface_info.get('vswitch_type', None)] and \ - "high" == mgr_nic_info['capability']: - return mgr_interface_info['name'] - - # if ip at outer - if mgr_interface_info.get('ip', None) and mgr_interface_info.get('name', None): - return "v_" + mgr_interface_info['name'] - # ip at inner - elif mgr_nic_info.get('ip', None): - return "managent" - - if "low" not in nic_capability: - return mgr_interface_info['name'] - - # if ip at outer - if mgr_interface_info.get('ip', None) and mgr_interface_info.get('name', None): - return "v_" + mgr_interface_info['name'] - - # ip at inner - elif mgr_nic_info.get('ip', None): - return "managent" - -def get_share_disk_services(req, role_id): - service_disks = tecs_cmn.get_service_disk_list(req, {'role_id':role_id}) - share_disk_services = [] - - for service_disk in service_disks: - if service_disk['disk_location'] == 'share': - share_disk_services.append(service_disk['service']) - return share_disk_services - -def get_cluster_tecs_config(req, cluster_id): - LOG.info(_("Get tecs config from database...")) - params = dict(limit=1000000) - roles = daisy_cmn.get_cluster_roles_detail(req,cluster_id) - cluster_networks = daisy_cmn.get_cluster_networks_detail(req, cluster_id) - try: - all_services = registry.get_services_detail(req.context, **params) - all_components = registry.get_components_detail(req.context, **params) - cluster_data = registry.get_cluster_metadata(req.context, cluster_id) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - - cluster_private_network_dict = [network for network in cluster_networks if network['network_type'] == 'PRIVATE'] - cluster_private_networks_name = [network['name'] for network in cluster_private_network_dict] - - cluster_vxlan_network_dict = [network for network in cluster_networks if network['network_type'] == 'VXLAN'] - - tecs_config = {} - 
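_get_host_nic_name above is the trickiest naming rule in the removed file, and its docstring only says the rule "is describe in comment". A condensed restatement in plain Python, not the daisy code itself and using only the fields visible in the diff, reads:

def ha_port_name(mgmt, networks):
    # 'mgmt' is the management interface dict, 'networks' the cluster
    # networks assigned to it; capability/ip fields as in the diff
    capabilities = [n.get('capability') for n in networks
                    if n['network_type'] != 'PRIVATE']
    if not capabilities or capabilities == [None]:
        return mgmt['name']

    mgmt_net = [n for n in networks if n['network_type'] == 'MANAGEMENT'][0]
    types = set(n['network_type'] for n in networks)
    if {'PRIVATE', 'MANAGEMENT'}.issubset(types):
        # management and private planes share the port
        if (mgmt.get('type') == 'ether' and
                mgmt.get('vswitch_type') != 'ovs' and
                mgmt_net.get('capability') == 'high'):
            return mgmt['name']
        if mgmt.get('ip'):                 # IP configured on the port itself
            return 'v_' + mgmt['name']
        if mgmt_net.get('ip'):             # IP carried by the network plane
            return 'managent'              # sic, literal value from the source

    if 'low' not in capabilities:
        return mgmt['name']
    if mgmt.get('ip'):
        return 'v_' + mgmt['name']
    if mgmt_net.get('ip'):
        return 'managent'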
tecs_config.update({'OTHER':{}}) - other_config = tecs_config['OTHER'] - other_config.update({'cluster_data':cluster_data}) - tecs_installed_hosts = set() - host_private_networks_dict = {} - host_vxlan_network_dict = {} - mgnt_ip_list = set() - host_private_networks_dict_for_dvs = {} - zenic_cfg = {} - - for role in roles: - if role['name'] == 'ZENIC_NFM': - zenic_cfg['vip'] = role['vip'] - if role['deployment_backend'] != daisy_cmn.tecs_backend_name: - continue - try: - role_service_ids = registry.get_role_services(req.context, role['id']) - except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) - - role_services_detail = [asc for rsci in role_service_ids for asc in all_services if asc['id'] == rsci['service_id']] - component_id_to_name = dict([(ac['id'], ac['name']) for ac in all_components]) - service_components = dict([(scd['name'], component_id_to_name[scd['component_id']]) for scd in role_services_detail]) - - role_hosts = daisy_cmn.get_hosts_of_role(req, role['id']) - - host_interfaces = [] - for role_host in role_hosts: - host_detail = daisy_cmn.get_host_detail(req, role_host['host_id']) - - sorted_host_detail = tecs_cmn.sort_interfaces_by_pci(host_detail) - host_private_networks_list = _get_host_private_networks(sorted_host_detail, - cluster_private_networks_name) - # get ha nic port name - if not other_config.has_key('ha_nic_name') and role['name'] == "CONTROLLER_HA": - mgr_nic_name = _get_host_nic_name(cluster_networks, sorted_host_detail) - mgr_vlan_id = tecs_cmn.get_mngt_network_vlan_id(cluster_networks) - if mgr_vlan_id: - mgr_nic_name = mgr_nic_name + '.' + mgr_vlan_id - other_config.update({'ha_nic_name':mgr_nic_name}) - - has_interfaces = _get_interfaces_network(req, host_detail, cluster_networks) - has_interfaces.update({'name':host_detail['name']}) - host_interfaces.append(has_interfaces) - # mangement network must be configed - host_mgnt_ip = has_interfaces['management']['ip'] - - # host_mgnt_ip used to label who the private networks is - host_private_networks_dict[host_mgnt_ip] = host_private_networks_list - if role['name'] == 'COMPUTER': - host_vxlan_network_list = _get_host_private_networks(sorted_host_detail, ['VXLAN']) - if host_vxlan_network_list: - host_private_networks_dict_for_dvs = {} - host_vxlan_network_dict[host_mgnt_ip] = get_network_config_for_dvs( - host_vxlan_network_list, cluster_vxlan_network_dict) - elif host_private_networks_list: - host_vxlan_network_dict = {} - host_private_networks_dict_for_dvs[host_mgnt_ip] = get_network_config_for_dvs( - host_private_networks_list, cluster_private_network_dict) - - #get host ip of tecs is active - if (role_host['status'] == tecs_state['ACTIVE'] or - role_host['status'] == tecs_state['UPDATING'] or - role_host['status'] == tecs_state['UPDATE_FAILED']): - tecs_installed_hosts.add(host_mgnt_ip) - else: - mgnt_ip_list.add(host_mgnt_ip) - - share_disk_services = get_share_disk_services(req, role['id']) - is_ha = re.match(".*_HA$", role['name']) is not None - if host_interfaces: - if role['public_vip'] and not host_interfaces[0].has_key('public'): - msg = "no public networkplane found while role has public vip" - LOG.error(msg) - raise exception.NotFound(message=msg) - - tecs_config.update({role['name']: {'services': service_components, - 'vip': role['vip'], - 'host_interfaces': host_interfaces, - 'share_disk_services': share_disk_services - }}) - if is_ha: - tecs_config[role['name']]['ntp_server'] = role['ntp_server'] - tecs_config[role['name']]['public_vip'] = role['public_vip'] - 
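Each role handled by get_cluster_tecs_config above ends up as one entry of tecs_config keyed by role name. A hypothetical HA entry, with placeholder values but the keys used in the diff, looks like this:

role_entry = {
    'services': {'nova-api': 'nova', 'mariadb': 'database'},  # service -> component
    'vip': '192.160.0.225',
    'host_interfaces': [{'name': 'host-192-160-0-2',
                         'management': {'name': 'ens33',
                                        'ip': '192.160.0.2',
                                        'netmask': '255.255.254.0'}}],
    'share_disk_services': ['db', 'glance'],
}

# roles whose name ends in _HA additionally carry the HA-only settings
is_ha = True
if is_ha:
    role_entry.update({'ntp_server': '192.160.0.225',
                       'public_vip': '10.43.179.230',
                       'glance_vip': '192.160.0.226',
                       'db_vip': '192.160.0.227'})

tecs_config = {'OTHER': {}, 'CONTROLLER_HA': role_entry}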
tecs_config[role['name']]['glance_vip'] = role['glance_vip'] - tecs_config[role['name']]['db_vip'] = role['db_vip'] - - other_config.update({'tecs_installed_hosts':tecs_installed_hosts}) - # replace private network - physic_network_cfg = _conf_private_network(req, cluster_id, host_private_networks_dict, cluster_private_network_dict) - dvs_cfg = conf_dvs(req, host_vxlan_network_dict, host_private_networks_dict_for_dvs) - other_config.update({'physic_network_config':physic_network_cfg}) - other_config.update({'dvs_config':dvs_cfg}) - other_config.update({'zenic_config':zenic_cfg}) - return (tecs_config, mgnt_ip_list) - - -def get_host_name_and_mgnt_ip(tecs_config): - name_ip_list = [] - ip_list = [] - nodes_ips = {'ha': [], 'lb': [], 'computer': []} - - for role_name, role_configs in tecs_config.items(): - if role_name == "OTHER": - continue - for host in role_configs['host_interfaces']: - ip_domain_dict = {} - host_mgt = host['management'] - if host_mgt['ip'] not in ip_list: - ip_list.append(host_mgt['ip']) - ip_domain_dict.update({host['name']: host_mgt['ip']}) - name_ip_list.append(ip_domain_dict) - - if role_name == 'CONTROLLER_HA': - nodes_ips['ha'].append(host_mgt['ip']) - if role_name == 'CONTROLLER_LB': - nodes_ips['lb'].append(host_mgt['ip']) - if role_name == 'COMPUTER': - nodes_ips['computer'].append(host_mgt['ip']) - return name_ip_list, nodes_ips - - -def replace_ip_with_domain_name(req, tecs_config): - domain_ip_list = [] - ip_list = [] - lb_float_ip = tecs_config['CONTROLLER_LB']['vip'] - for role_name, role_configs in tecs_config.items(): - if role_name == "OTHER": - continue - is_ha = re.match(".*_HA$", role_name) is not None - is_lb = re.match(".*_LB$", role_name) is not None - - for host in role_configs['host_interfaces']: - ip_domain_dict = {} - host_mgt = host['management'] - if host_mgt['ip'] not in ip_list: - ip_list.append(host_mgt['ip']) - ip_domain_dict.update({host['name']: host_mgt['ip']}) - domain_ip_list.append(ip_domain_dict) - host_mgt['ip'] = host['name'] - - if is_ha and role_configs.get('vip'): - domain_ip_list.append({'ha-vip': role_configs['vip']}) - if role_configs['ntp_server'] == role_configs['vip']: - role_configs['ntp_server'] = 'ha-vip' - elif role_configs['ntp_server'] == lb_float_ip: - role_configs['ntp_server'] = 'lb-vip' - role_configs['vip'] = 'ha-vip' - - if role_configs.get('public_vip'): - domain_ip_list.append({'public-vip': role_configs['public_vip']}) - role_configs['public_vip'] = 'public-vip' - if role_configs.get('glance_vip'): - domain_ip_list.append({'glance-vip': role_configs['glance_vip']}) - role_configs['glance_vip'] = 'glance-vip' - if role_configs.get('db_vip'): - domain_ip_list.append({'db-vip': role_configs['db_vip']}) - role_configs['db_vip'] = 'db-vip' - - if is_lb and role_configs.get('vip'): - domain_ip_list.append({'lb-vip': role_configs['vip']}) - role_configs['vip'] = 'lb-vip' - return domain_ip_list - - -def config_dnsmasq_server(host_ip_list, domain_ip_list, password='ossdbg1'): - dns_conf = "/etc/dnsmasq.conf" - for host_ip in host_ip_list: - try: - result = subprocess.check_output( - "sshpass -p %s ssh -o StrictHostKeyChecking=no %s " - "test -f %s" % (password, host_ip, dns_conf), - shell=True, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError: - msg = '%s does not exist in %s' % (dns_conf, host_ip) - LOG.error(msg) - raise exception.NotFound(message=msg) - - config_scripts = [ - "sed -i '/^[^#]/s/no-resolv[[:space:]]*/\#no-resolv/' %s" % dns_conf, - "sed -i '/^[^#]/s/no-poll[[:space:]]*/\#no-poll/' 
%s" % dns_conf, - "cache_size_linenumber=`grep -n 'cache-size=' %s| awk -F ':' " - "'{print $1}'` && [ ! -z $cache_size_linenumber ] && sed -i " - "${cache_size_linenumber}d %s" % (dns_conf, dns_conf), - "echo 'cache-size=3000' >> %s" % dns_conf] - - tecs_cmn.run_scrip(config_scripts, host_ip, password) - - config_ip_scripts = [] - for domain_name_ip in domain_ip_list: - domain_name = domain_name_ip.keys()[0] - domain_ip = domain_name_ip.values()[0] - config_ip_scripts.append( - "controller1_linenumber=`grep -n 'address=/%s' %s| awk -F ':' " - "'{print $1}'` && [ ! -z ${controller1_linenumber} ] && " - "sed -i ${controller1_linenumber}d %s" % - (domain_name, dns_conf, dns_conf)) - config_ip_scripts.append("echo 'address=/%s/%s' >> %s" % - (domain_name, domain_ip, dns_conf)) - tecs_cmn.run_scrip(config_ip_scripts, host_ip, password) - - service_start_scripts = [ - "dns_linenumber=`grep -n \"^[[:space:]]*ExecStart=/usr/sbin/dnsmasq -k\" " - "/usr/lib/systemd/system/dnsmasq.service|cut -d \":\" -f 1` && " - "sed -i \"${dns_linenumber}c ExecStart=/usr/sbin/dnsmasq -k " - "--dns-forward-max=50000\" /usr/lib/systemd/system/dnsmasq.service", - "for i in `ps -elf | grep dnsmasq |grep -v grep | awk -F ' ' '{print $4}'`;do kill -9 $i;done ", - "systemctl daemon-reload && systemctl enable dnsmasq.service && " - "systemctl restart dnsmasq.service"] - tecs_cmn.run_scrip(service_start_scripts, host_ip, password) - - -def config_dnsmasq_client(host_ip_list, ha_ip_list, password='ossdbg1'): - dns_client_file = "/etc/resolv.conf" - config_scripts = ["rm -rf %s" % dns_client_file] - for ha_ip in ha_ip_list: - config_scripts.append("echo 'nameserver %s' >> %s" % - (ha_ip, dns_client_file)) - for host_ip in host_ip_list: - tecs_cmn.run_scrip(config_scripts, host_ip, password) - tecs_cmn.run_scrip(config_scripts) - - -def config_nodes_hosts(host_ip_list, domain_ip, password='ossdbg1'): - hosts_file = "/etc/hosts" - config_scripts = [] - for name_ip in domain_ip: - config_scripts.append("linenumber=`grep -n '%s' /etc/hosts | " - "awk -F '' '{print $1}'` && " - "[ ! 
-z $linenumber ] && " - "sed -i ${linenumber}d %s" % - (name_ip.keys()[0], hosts_file)) - config_scripts.append("echo '%s %s' >> %s" % (name_ip.values()[0], - name_ip.keys()[0], - hosts_file)) - - for host_ip in host_ip_list: - tecs_cmn.run_scrip(config_scripts, host_ip, password) - tecs_cmn.run_scrip(config_scripts) - - -def revise_nova_config(computer_nodes, ha_vip, public_vip, compute_ip_domain, - password='ossdbg1'): - nova_file = "/etc/nova/nova.conf" - for host_ip in computer_nodes: - scripts = [] - if public_vip: - scripts.extend(["linenumber=`grep -n '^novncproxy_base_url' %s | " - "awk -F ':' '{print $1}'`" % nova_file, - 'sed -i "${linenumber}s/public-vip/%s/" %s' % - (public_vip, nova_file)]) - else: - scripts.extend(["linenumber=`grep -n '^novncproxy_base_url' %s | " - "awk -F ':' '{print $1}'`" % nova_file, - 'sed -i "${linenumber}s/ha-vip/%s/" %s' % - (ha_vip, nova_file)]) - scripts.extend(["linenumber=`grep -n '^vncserver_proxyclient_address' " - "%s | awk -F ':' '{print $1}'`" % nova_file, - 'sed -i "${linenumber}s/127.0.0.1/%s/" %s' % - (compute_ip_domain[host_ip], nova_file), - "systemctl restart openstack-nova-compute.service "]) - tecs_cmn.run_scrip(scripts, host_ip, password) - - -def revise_horizon_config(ha_nodes, ha_vip, public_vip, password='ossdbg1'): - dashboard_file = "/etc/httpd/conf.d/15-horizon_vhost.conf" - for host_ip in ha_nodes: - config_scripts = ["linenumber1=`grep -n 'ServerAlias %s' " - "%s| awk -F ':' '{print $1}'` && " - "[ ! -z ${linenumber1} ] && sed -i " - "${linenumber1}d %s" % (host_ip, - dashboard_file, - dashboard_file), - "linenumber2=`grep -n 'ServerAlias %s' %s| awk -F ':' '" - "{print $1}'` && [ ! -z ${linenumber2} ] && sed -i " - "${linenumber2}d %s" % (ha_vip, dashboard_file, - dashboard_file), - "linenumber3=`grep -n 'ServerAlias %s' %s| awk -F ':' '" - "{print $1}'` && [ ! -z ${linenumber3} ] && sed -i " - "${linenumber3}d %s" % (public_vip, dashboard_file, - dashboard_file), - 'dasboard_linenumber1=`grep -n "ServerAlias localhost" ' - '%s|cut -d ":" -f 1` && sed -i "${dasboard_linenumber1}a ' - 'ServerAlias %s" %s' % (dashboard_file, host_ip, - dashboard_file), - 'dasboard_linenumber1=`grep -n "ServerAlias localhost" %s' - '|cut -d ":" -f 1` && sed -i "${dasboard_linenumber1}a ' - 'ServerAlias %s" %s' % (dashboard_file, ha_vip, - dashboard_file)] - if public_vip: - config_scripts.append('dasboard_linenumber2=`grep -n ' - '"ServerAlias localhost" %s|cut ' - '-d ":" -f 1` && sed -i ' - '"${dasboard_linenumber2}a ' - 'ServerAlias %s" %s' % - (dashboard_file, public_vip, - dashboard_file)) - - tecs_cmn.run_scrip(config_scripts, host_ip, password) - - restart_http_scripts = ['systemctl daemon-reload &&' - 'systemctl restart httpd.service'] - tecs_cmn.run_scrip(restart_http_scripts, ha_vip, password) - - -class TECSInstallTask(Thread): - """ - Class for install tecs bin. 
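config_nodes_hosts above refreshes /etc/hosts on every node by deleting any stale line for a name and appending a fresh "ip name" line, then pushing the commands with tecs_cmn.run_scrip. The sketch below builds an equivalent command list; it uses a sed pattern delete instead of the grep-line-number sequence in the original, purely for brevity.

def build_hosts_commands(domain_ip, hosts_file='/etc/hosts'):
    cmds = []
    for entry in domain_ip:                    # e.g. [{'ha-vip': '192.160.0.225'}]
        name, ip = list(entry.items())[0]
        # drop any existing line mentioning this name, then append the new one
        cmds.append("sed -i '/\\<%s\\>/d' %s" % (name, hosts_file))
        cmds.append("echo '%s %s' >> %s" % (ip, name, hosts_file))
    return cmds


print(build_hosts_commands([{'ha-vip': '192.160.0.225'}]))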
- """ - """ Definition for install states.""" - - def __init__(self, req, cluster_id): - super(TECSInstallTask, self).__init__() - self.req = req - self.cluster_id = cluster_id - self.progress = 0 - self.state = tecs_state['INIT'] - self.message = "" - self.tecs_config_file = '' - self.mgnt_ip_list = '' - self.install_log_fp = None - self.last_line_num = 0 - self.need_install = False - self.ping_times = 36 - self.log_file = "/var/log/daisy/tecs_%s_install.log" % self.cluster_id - self.dns_name_ip = [] - self.password = 'ossdbg1' - self.nodes_ips = {} - - def _check_install_log(self, tell_pos): - with open(self.log_file, "r") as tmp_fp: - tmp_fp.seek(tell_pos, os.SEEK_SET) - line_num = self.last_line_num - for lnum, lcontent in enumerate(tmp_fp, 1): - tell_pos = tmp_fp.tell() - line_num += 1 - LOG.debug("<<>>", line_num, lcontent) - if -1 != lcontent.find("Preparing servers"): - self.progress = 3 - - if -1 != lcontent.find("successfully"): - self.progress = 100 - elif -1 != lcontent.find("Error") \ - or -1 != lcontent.find("ERROR") \ - or -1 != lcontent.find("error") \ - or -1 != lcontent.find("not found"): - self.state = tecs_state['INSTALL_FAILED'] - self.message = "Tecs install error, see line %s in '%s'" % (line_num,self.log_file) - raise exception.InstallException(self.message) - self.last_line_num = line_num - return tell_pos - - def _calc_progress(self, path): - """ - Calculate the progress of installing bin. - :param path: directory contain ".pp" and ".log" files - :return: installing progress(between 1~100) - """ - ppcount = logcount = 0 - for file in os.listdir(path): - if file.endswith(".log"): - logcount += 1 - elif file.endswith(".pp"): - ppcount += 1 - - progress = 0 - if 0 != ppcount: - progress = (logcount * 100.00)/ ppcount - return progress - - def _update_install_progress_to_db(self): - """ - Update progress of intallation to db. 
- :return: - """ - roles = daisy_cmn.get_cluster_roles_detail(self.req,self.cluster_id) - for role in roles: - if role['deployment_backend'] != daisy_cmn.tecs_backend_name: - continue - role_hosts = daisy_cmn.get_hosts_of_role(self.req, role['id']) - for role_host in role_hosts: - if role_host['status'] != tecs_state['ACTIVE']: - self.need_install = True - role_host['status'] = self.state - role_host['progress'] = self.progress - role_host['messages'] = self.message - daisy_cmn.update_role_host(self.req, role_host['id'], role_host) - role['progress'] = self.progress - role['status'] = self.state - role['messages'] = self.message - daisy_cmn.update_role(self.req, role['id'], role) - - def _generate_tecs_config_file(self, cluster_id, tecs_config): - tecs_config_file = '' - if tecs_config: - cluster_conf_path = daisy_tecs_path + cluster_id - LOG.info(_("Generate tecs config...")) - config.update_tecs_config(tecs_config, cluster_conf_path) - tecs_config_file = cluster_conf_path + "/tecs.conf" - ha_config_file = cluster_conf_path + "/HA_1.conf" - mkdir_tecs_install = "mkdir -p /home/tecs_install/" - daisy_cmn.subprocess_call(mkdir_tecs_install) - cp_ha_conf = "\cp %s /home/tecs_install/" % ha_config_file - tecs_conf = "\cp %s /home/tecs_install/" % ha_config_file - daisy_cmn.subprocess_call(cp_ha_conf) - return tecs_config_file - - def run(self): - try: - start_time = time.time() - self._run() - except Exception as e: - self.state = tecs_state['INSTALL_FAILED'] - self.message = e.message - self._update_install_progress_to_db() - LOG.exception(e.message) - else: - if not self.need_install: - return - self.progress = 100 - self.state = tecs_state['ACTIVE'] - self.message = "Tecs installed successfully" - LOG.info(_("Install TECS for cluster %s successfully." 
- % self.cluster_id)) - time_cost = str(round((time.time() - start_time)/60, 2)) - LOG.info(_("It totally takes %s min for installing tecs" % time_cost)) - - if self.dns_name_ip: - ha_vip = "" - public_vip = "" - compute_ip_domain = {} - for dns_dict in self.dns_name_ip: - domain_name = dns_dict.keys()[0] - domain_ip = dns_dict.values()[0] - if domain_name == "ha-vip": - ha_vip = domain_ip - if domain_name == "public-vip": - public_vip = domain_ip - if domain_ip in self.nodes_ips['computer']: - compute_ip_domain.update({domain_ip: domain_name}) - - revise_nova_config(self.nodes_ips['computer'], ha_vip, - public_vip, compute_ip_domain) - revise_horizon_config(self.nodes_ips['ha'], ha_vip, public_vip) - - # load neutron conf after installation - roles = registry.get_roles_detail(self.req.context) - for role in roles: - if role['cluster_id'] == self.cluster_id: - backend=manager.configBackend('clushshell', self.req, role['id']) - backend.push_config() - result = config.get_conf(self.tecs_config_file, - neutron_float_ip="CONFIG_NEUTRON_SERVER_HOST", - keystone_float_ip="CONFIG_KEYSTONE_HOST", - neutron_install_mode="CONFIG_NEUTRON_SERVER_INSTALL_MODE", - keystone_install_mode="CONFIG_KEYSTONE_INSTALL_MODE", - lb_float_ip="CONFIG_LB_HOST") - if (result.get('keystone_install_mode', None) == "LB" and - result.get('neutron_install_mode', None) == "LB"): - LOG.info(_("<<>>")) - time.sleep(20) - neutron(self.req, - result.get('lb_float_ip', None), - result.get('lb_float_ip', None), - self.cluster_id) - else: - LOG.info(_("<<>>")) - time.sleep(20) - neutron(self.req, - result.get('neutron_float_ip', None), - result.get('keystone_float_ip', None), - self.cluster_id) - finally: - self._update_install_progress_to_db() - if self.install_log_fp: - self.install_log_fp.close() - - def _run(self): - """ - Exectue install file(.bin) with sync mode. - :return: - """ - - def executor(**params): - # if subprocsee is failed, we need break - if os.path.exists(self.log_file): - params['tell_pos'] = self._check_install_log(params.get('tell_pos', 0)) - LOG.debug(_("<<>>")) - if 100 == self.progress: - return params - if 3 == self.progress: - self._update_install_progress_to_db() - # waiting for 'progress_log_location' file exist - if not params.get("if_progress_file_read", None): - if not os.path.exists(self.progress_log_location): - params['if_progress_file_read'] = False - return params - else: - with open(self.progress_log_location, "r") as fp: - line = fp.readline() - self.progress_logs_path = line.split('\n')[0] + "/manifests" - LOG.info(_("TECS installation log path: %s." - % self.progress_logs_path)) - params['if_progress_file_read'] = True - - # waiting for 'self.progress_logs_path' file exist - if not os.path.exists(self.progress_logs_path): - return params - - LOG.debug(_("<<>>")) - - # cacl progress & sync to db - progress = self._calc_progress(self.progress_logs_path) - - if self.progress != progress and progress >= 3: - self.progress = progress - self.state = tecs_state['INSTALLING'] - self._update_install_progress_to_db() - elif progress == 100: - self.progress = 100 - self.state = tecs_state['ACTIVE'] - self.message = "Tecs installed successfully" - return params - - if not self.cluster_id or \ - not self.req: - raise exception.InstallException("invalid params.") - - self.progress = 0 - self.message = "Preparing for TECS installation" - self._update_install_progress_to_db() - if not self.need_install: - LOG.info(_("No host in cluster %s need to install tecs." 
- % self.cluster_id)) - return - - (tecs_config, self.mgnt_ip_list) = get_cluster_tecs_config(self.req, self.cluster_id) - # after os is installed successfully, if ping all role hosts - # management ip successfully, begin to install TECS - unreached_hosts = daisy_cmn.check_ping_hosts(self.mgnt_ip_list, self.ping_times) - if unreached_hosts: - self.message = "ping hosts %s failed" % ','.join(unreached_hosts) - raise exception.InstallException(self.message) - else: - # os maybe not reboot completely, wait for 20s to ensure ssh successfully. - # ssh test until sucess should better here - time.sleep(20) - - name_ip_list, self.nodes_ips = get_host_name_and_mgnt_ip(tecs_config) - all_nodes = list(set(self.nodes_ips['ha'] + self.nodes_ips['lb'] + - self.nodes_ips['computer'])) - # delete daisy server known_hosts file to avoid - # ssh command failed because of incorrect host key. - daisy_cmn.subprocess_call('rm -rf /root/.ssh/known_hosts') - if tecs_config['OTHER']['cluster_data']['use_dns']: - self.dns_name_ip = replace_ip_with_domain_name(self.req, tecs_config) - storage_ip_list = tecs_cmn.get_storage_name_ip_dict( - self.req, self.cluster_id, 'STORAGE') - - self.dns_name_ip.extend(storage_ip_list) - tecs_config['OTHER'].update({'dns_config': self.dns_name_ip}) - - config_dnsmasq_server(self.nodes_ips['ha'], self.dns_name_ip) - config_dnsmasq_client(all_nodes, self.nodes_ips['ha']) - config_nodes_hosts(all_nodes, self.dns_name_ip) - host_domain = [name_ip.keys()[0] for name_ip in self.dns_name_ip - if name_ip.keys()[0] .find('vip') == -1] - unreached_hosts = daisy_cmn.check_ping_hosts(host_domain, - self.ping_times) - if unreached_hosts: - self.message = "ping hosts %s failed after DNS configuration" %\ - ','.join(unreached_hosts) - raise exception.InstallException(self.message) - else: - config_nodes_hosts(all_nodes, name_ip_list) - # generate tecs config must be after ping check - self.tecs_config_file = self._generate_tecs_config_file(self.cluster_id, - tecs_config) - - # install network-configuration-1.1.1-15.x86_64.rpm - if self.mgnt_ip_list: - for mgnt_ip in self.mgnt_ip_list: - LOG.info(_("begin to install network-configuration on %s"% mgnt_ip)) - tecs_cmn.TecsShellExector(mgnt_ip, 'install_rpm') - # network-configuration will restart network, wait until ping test successfully - time.sleep(10) - unreached_hosts = daisy_cmn.check_ping_hosts(self.mgnt_ip_list, self.ping_times) - if unreached_hosts: - self.message = "ping hosts %s failed after network configuration" % ','.join(unreached_hosts) - raise exception.InstallException(self.message) - - (share_disk_info, volume_disk_info) =\ - disk_array.get_disk_array_info(self.req, self.cluster_id) - if share_disk_info or volume_disk_info: - (controller_ha_nodes, computer_ips) =\ - disk_array.get_ha_and_compute_ips(self.req, self.cluster_id) - else: - controller_ha_nodes = {} - computer_ips = [] - - all_nodes_ip = computer_ips + controller_ha_nodes.keys() - if all_nodes_ip: - LOG.info(_("begin to config multipth ...")) - compute_error_msg = disk_array.config_compute_multipath(all_nodes_ip) - if compute_error_msg: - self.message = compute_error_msg - raise exception.InstallException(self.message) - else: - LOG.info(_("config Disk Array multipath successfully")) - - if share_disk_info: - LOG.info(_("begin to config Disk Array ...")) - ha_error_msg = disk_array.config_ha_share_disk(share_disk_info, - controller_ha_nodes) - if ha_error_msg: - self.message = ha_error_msg - raise exception.InstallException(message=self.message) - else: - 
LOG.info(_("config Disk Array for HA nodes successfully")) - - # check and get TECS version - tecs_version_pkg_file = tecs_cmn.check_and_get_tecs_version(daisy_tecs_path) - if not tecs_version_pkg_file: - self.state = tecs_state['INSTALL_FAILED'] - self.message = "TECS version file not found in %s" % daisy_tecs_path - raise exception.NotFound(message=self.message) - - # use pattern 'tecs_%s_install' to distinguish multi clusters installation - LOG.info(_("Open log file for TECS installation.")) - self.install_log_fp = open(self.log_file, "w+") - - # delete cluster_id file before installing, in case getting old log path - self.progress_log_location = "/var/tmp/packstack/%s" % self.cluster_id - if os.path.exists(self.progress_log_location): - os.remove(self.progress_log_location) - - install_cmd = "sudo %s conf_file %s" % (tecs_version_pkg_file, self.tecs_config_file) - LOG.info(_("Begin to install TECS in cluster %s." % self.cluster_id)) - clush_bin = subprocess.Popen( - install_cmd, shell=True, stdout=self.install_log_fp, stderr=self.install_log_fp) - - self.progress = 1 - self.state = tecs_state['INSTALLING'] - self.message = "TECS installing" - self._update_install_progress_to_db() - # if clush_bin is not terminate - # while not clush_bin.returncode: - params = {} # executor params - execute_times = 0 # executor run times - while True: - time.sleep(5) - if self.progress == 100: - if volume_disk_info: - LOG.info(_("Begin to config cinder volume ...")) - ha_error_msg = disk_array.config_ha_cinder_volume( - volume_disk_info, - controller_ha_nodes.keys()) - if ha_error_msg: - self.message = ha_error_msg - raise exception.InstallException(self.message) - else: - LOG.info(_("Config cinder volume for HA nodes successfully")) - break - elif execute_times >= 1440: - self.state = tecs_state['INSTALL_FAILED'] - self.message = "TECS install timeout for 2 hours" - raise exception.InstallTimeoutException(cluster_id=self.cluster_id) - params = executor( - # just read cluster_id file once in 'while' - if_progress_file_read=params.get("if_progress_file_read", False), - # current fp location of tecs_install.log - tell_pos=params.get("tell_pos", 0)) - - # get clush_bin.returncode - # clush_bin.poll() - execute_times += 1 - - +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +/install endpoint for tecs API +""" +import os +import re +import copy +import subprocess +import time + +from oslo_config import cfg +from oslo_log import log as logging +from webob.exc import HTTPBadRequest + +from threading import Thread + +from daisy import i18n +import daisy.api.v1 + +from daisy.common import utils +from daisy.common import exception +import daisy.registry.client.v1.api as registry +from daisy.api.backends.tecs import config +from daisy.api.network_api import network as neutron +import daisy.api.backends.common as daisy_cmn +import daisy.api.backends.tecs.common as tecs_cmn +import daisy.api.backends.tecs.disk_array as disk_array +from daisy.api.configset import manager + +try: + import simplejson as json +except ImportError: + import json + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW +SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS +SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS +ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE + +CONF = cfg.CONF +install_opts = [ + cfg.StrOpt('max_parallel_os_number', default=10, + help='Maximum number of hosts install os at the same time.'), +] +CONF.register_opts(install_opts) + +CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format') +CONF.import_opt('container_formats', 'daisy.common.config', + group='image_format') +CONF.import_opt('image_property_quota', 'daisy.common.config') + + +tecs_state = tecs_cmn.TECS_STATE +daisy_tecs_path = tecs_cmn.daisy_tecs_path +tecs_install_path = tecs_cmn.tecs_install_path + + +def _invalid_bond_type(network_type, vswitch_type, bond_mode): + msg = "Invalid bond_mode(%s) for %s in %s network" % ( + bond_mode, vswitch_type, network_type) + raise_exception = False + if bond_mode in ['0', '1', '2', '3', '4']: + return + + if bond_mode and (2 == len(bond_mode.split(';'))): + bond_mode, lacp_mode = bond_mode.split(';') + if network_type in ['vxlan'] and vswitch_type in ['dvs', 'DVS']: + if bond_mode in ['active-backup', 'balance-slb']: + if lacp_mode not in ['off']: + raise_exception = True + else: + raise_exception = True + + elif network_type in ['vlan'] and vswitch_type in ['dvs', 'DVS', + 'ovs', 'OVS']: + if bond_mode in ['balance-tcp']: + if lacp_mode not in ['active', 'passive', 'off']: + raise_exception = True + elif bond_mode in ['active-backup', 'balance-slb']: + if lacp_mode not in ['off']: + raise_exception = True + else: + raise_exception = True + else: + raise_exception = True + + if raise_exception: + raise exception.InstallException(msg) + + +def _get_host_private_networks(host_detail, cluster_private_networks_name): + """ + User member nic pci segment replace the bond pci, + we use it generate the mappings.json. 
+ :param host_detail: host infos + :param cluster_private_networks_name: network info in cluster + :return: + """ + host_private_networks = [hi for pn in cluster_private_networks_name + for hi in host_detail['interfaces'] + for assigned_network in hi['assigned_networks'] + if assigned_network and + pn == assigned_network['name']] + + # If port type is bond,use pci segment of member port replace pci1 & pci2 + # segments of bond port + for interface_outer in host_private_networks: + if 0 != cmp(interface_outer.get('type', None), "bond"): + continue + slave1 = interface_outer.get('slave1', None) + slave2 = interface_outer.get('slave2', None) + if not slave1 or not slave2: + continue + interface_outer.pop('pci') + + for interface_inner in host_detail['interfaces']: + if 0 == cmp(interface_inner.get('name', None), slave1): + interface_outer['pci1'] = interface_inner['pci'] + elif 0 == cmp(interface_inner.get('name', None), slave2): + interface_outer['pci2'] = interface_inner['pci'] + return host_private_networks + + +def _write_private_network_cfg_to_json(req, cluster_id, private_networks): + """ + Generate cluster private network json. + We use the json file after tecs is installed. + :param private_networks: cluster private network params set. + :return: + """ + if not private_networks: + LOG.error("private networks can't be empty!") + return False + + cluster_hosts_network_cfg = {} + hosts_network_cfg = {} + for k in private_networks.keys(): + private_network_info = {} + for private_network in private_networks[k]: + # host_interface + type = private_network.get('type', None) + name = private_network.get('name', None) + assign_networks = private_network.get('assigned_networks', None) + slave1 = private_network.get('slave1', None) + slave2 = private_network.get('slave2', None) + pci = private_network.get('pci', None) + pci1 = private_network.get('pci1', None) + pci2 = private_network.get('pci2', None) + mode = private_network.get('mode', None) + if not type or not name or not assign_networks: + LOG.error("host_interface params invalid in private networks!") + continue + + for assign_network in assign_networks: + # network + # network_type = assign_network.get('network_type', None) + vswitch_type_network = daisy_cmn.get_assigned_network( + req, private_network['id'], assign_network['id']) + + vswitch_type = vswitch_type_network['vswitch_type'] + physnet_name = assign_network.get('name', None) + mtu = assign_network.get('mtu', None) + if not vswitch_type or not physnet_name: + LOG.error( + "private networks vswitch_type or\ + physnet name is invalid!") + continue + + physnet_name_conf = {} + physnet_name_conf['type'] = type + physnet_name_conf['name'] = name + physnet_name_conf['vswitch_type'] = vswitch_type + if mtu: + physnet_name_conf['mtu'] = mtu + # physnet_name_conf['ml2'] = ml2_type + "(direct)" + if 0 == cmp("bond", type): + if not pci1 or not pci2 or not\ + slave1 or not slave2 or not mode: + LOG.error( + "when type is 'bond',\ + input params is invalid in private networks!") + continue + physnet_name_conf['slave1'] = slave1 + physnet_name_conf['slave2'] = slave2 + physnet_name_conf['pci1'] = pci1 + physnet_name_conf['pci2'] = pci2 + physnet_name_conf['mode'] = mode + _invalid_bond_type('vlan', 'OVS', mode) + elif 0 == cmp("ether", type): + if not pci: + LOG.error( + "when type is 'ether',\ + input params is invalid in private networks!") + continue + physnet_name_conf['pci'] = pci + + if not physnet_name_conf: + continue + private_network_info[physnet_name] = physnet_name_conf + + if 
not private_network_info: + continue + hosts_network_cfg[k] = private_network_info + + if not hosts_network_cfg: + return False + cluster_hosts_network_cfg['hosts'] = hosts_network_cfg + mkdir_daisy_tecs_path = "mkdir -p " + daisy_tecs_path + cluster_id + daisy_cmn.subprocess_call(mkdir_daisy_tecs_path) + mapping_json = daisy_tecs_path + "/" + cluster_id + "/" + "mappings.json" + with open(mapping_json, "w+") as fp: + fp.write(json.dumps(cluster_hosts_network_cfg)) + return True + + +def _conf_private_network(req, cluster_id, host_private_networks_dict, + cluster_private_network_dict): + if not host_private_networks_dict: + LOG.info(_("No private network need config")) + return {} + + # different host(with ip) in host_private_networks_dict + config_neutron_ml2_vlan_ranges = [] + for k in host_private_networks_dict.keys(): + host_private_networks = host_private_networks_dict[k] + # different private network plane in host_interface + for host_private_network in host_private_networks: + assigned_networks = host_private_network.get( + 'assigned_networks', None) + if not assigned_networks: + break + private_network_info = \ + [network for assigned_network in assigned_networks + for network in cluster_private_network_dict + if assigned_network and assigned_network[ + 'name'] == network['name']] + + host_private_network['assigned_networks'] = private_network_info + config_neutron_ml2_vlan_ranges += \ + ["%(name)s:%(vlan_start)s:%(vlan_end)s" % + {'name': network['name'], 'vlan_start':network[ + 'vlan_start'], 'vlan_end':network['vlan_end']} + for network in private_network_info + if network['name'] and network['vlan_start'] and + network['vlan_end']] + + physic_network_cfg = {} + if _write_private_network_cfg_to_json( + req, cluster_id, host_private_networks_dict): + physic_network_cfg['json_path'] = daisy_tecs_path + \ + "/" + cluster_id + "/" + "mappings.json" + if config_neutron_ml2_vlan_ranges: + host_private_networks_vlan_range = ",".join( + list(set(config_neutron_ml2_vlan_ranges))) + physic_network_cfg['vlan_ranges'] = host_private_networks_vlan_range + return physic_network_cfg + + +def _enable_network(host_networks_dict): + for network in host_networks_dict: + if network != []: + return True + return False + + +def _get_dvs_network_type(vxlan, vlan): + if _enable_network(vxlan): + return 'vxlan', vxlan + elif _enable_network(vlan): + return 'vlan', vlan + else: + return None, None + + +def _get_vtep_ip_ranges(ip_ranges): + vtep_ip_ranges = [] + for ip_range in ip_ranges: + ip_range_start = ip_range.get('start') + ip_range_end = ip_range.get('end') + if ip_range_start and ip_range_end: + vtep_ip_ranges.append( + [ip_range_start.encode('utf8'), + ip_range_end.encode('utf8')]) + return vtep_ip_ranges + + +def _get_dvs_vxlan_info(interfaces, mode_str): + vxlan_nic_info = '' + for interface in interfaces: + if interface['type'] == 'ether': + vxlan_nic_info = interface['name'] + elif interface['type'] == 'bond': + _invalid_bond_type('vxlan', 'DVS', interface.get('mode')) + name = interface.get('name', 'bond1') + if interface.get('mode') in ['0', '1', '2', '3', '4']: + try: + bond_mode = mode_str[ + 'vxlan'].get(interface.get('mode')) + except: + bond_mode = mode_str['vxlan']['0'] + vxlan_nic_info = name + bond_mode % ( + interface['slave1'], interface['slave2']) + else: + vxlan_nic_info = "%s(%s;%s-%s)" % ( + name, interface.get('mode'), + interface['slave1'], interface['slave2']) + return vxlan_nic_info + + +def _get_dvs_domain_id(assign_network, dvs_domain_id, host_ip): + domain_id = 
assign_network.get('dvs_domain_id') + if not domain_id: + domain_id = '0' + + domain_ip = dvs_domain_id.get(domain_id, []) + domain_ip.append(host_ip) + domain_ip = {domain_id.encode('utf8'): domain_ip} + return domain_ip + + +def _get_bridge_mappings(interface): + try: + network = interface['assigned_networks'][0] + except: + return {} + + bridge_mappings = {} + if network.get('network_type') in ['DATAPLANE']: + nic = interface.get('name') + phynet_name = network.get('name') + if phynet_name and nic: + bridge_mappings.update({nic: phynet_name}) + return bridge_mappings + + +def _convert_bridge_mappings2list(bridge_mappings): + bridge_maps = [] + for nic, phynet in bridge_mappings.items(): + bridge_maps.append('%s:br_%s' % (phynet, nic)) + return set(bridge_maps) + + +def _convert_physical_mappings2list(physical_mappings): + physical_maps = [] + for phynet, nic_info in physical_mappings.items(): + physical_maps.append('%s:%s' % (phynet, nic_info)) + return set(physical_maps) + + +def _get_physical_mappings(interface, mode_str, bridge_mappings): + # bridge_mappings = {'eth0':'phynet1': 'bond0':'phynet2'} + vlan_nic_map_info = {} + phynic_name = interface.get('name') + physnet_name = bridge_mappings.get(phynic_name) + if interface['type'] == 'bond': + _invalid_bond_type('vlan', 'DVS', interface.get('mode')) + if interface.get('mode') in ['0', '1', '2', '3', '4']: + try: + bond_mode = mode_str['vlan'].get(interface.get('mode')) + except: + bond_mode = mode_str['vlan']['0'] + vlan_nic_map_info[physnet_name] = phynic_name + bond_mode % ( + interface['slave1'], interface['slave2']) + else: + # interface.get('mode') = active-backup;off + vlan_nic_map_info[physnet_name] = "%s(%s;%s-%s)" % ( + phynic_name, interface.get('mode'), + interface['slave1'], interface['slave2']) + else: + vlan_nic_map_info[physnet_name] = phynic_name + + return vlan_nic_map_info + + +def get_network_config_for_dvs(host_private_networks_dict, + cluster_private_network_dict): + # different private network plane in host_interface + host_private_networks_dict_for_dvs =\ + copy.deepcopy(host_private_networks_dict) + + for host_private_network in host_private_networks_dict_for_dvs: + private_networks = host_private_network.get( + 'assigned_networks', None) + if not private_networks: + break + private_network_info = \ + [network for private_network in private_networks + for network in cluster_private_network_dict + if private_network and private_network[ + 'name'] == network['name']] + host_private_network['assigned_networks'] = private_network_info + return host_private_networks_dict_for_dvs + + +def get_dvs_cpu_sets(req, cluster_id, role_hosts): + """ + dvs_cpu_list = [{'IP': 'ip1', 'DVS_CPU': [1,2,3,4]}, + {'IP': 'ip2', 'DVS_CPU': [2,3,4,5]}] + """ + dvs_cpu_list = [] + cluster_networks =\ + daisy_cmn.get_cluster_networks_detail(req, cluster_id) + for role_host in role_hosts: + if (role_host['status'] == tecs_state['ACTIVE'] or + role_host['status'] == tecs_state['UPDATING'] or + role_host['status'] == tecs_state['UPDATE_FAILED']): + continue + host_detail = daisy_cmn.get_host_detail(req, role_host['host_id']) + + dvs_interfaces = utils.get_dvs_interfaces(host_detail['interfaces']) + if dvs_interfaces and 'dvs_cpus' in host_detail: + management_ip = tecs_cmn.get_host_network_ip(req, + host_detail, + cluster_networks, + 'MANAGEMENT') + dvs_cpu_dict = {} + dvs_cpu_dict['IP'] = management_ip + dvs_cpu_dict['DVS_CPU'] =\ + utils.cpu_str_to_list(host_detail['dvs_cpus']) + dvs_cpu_list.append(dvs_cpu_dict) + return dvs_cpu_list + + 
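get_dvs_cpu_sets above relies on utils.cpu_str_to_list to expand the host's dvs_cpus string into a list of integer CPU ids. The project's helper is not shown in this patch; a stand-alone sketch of a parser with that behaviour, assuming the usual "2-5,8" range syntax, could look like:

    def cpu_str_to_list(cpu_str):
        # Expand a CPU-set string such as "2-5,8" into [2, 3, 4, 5, 8].
        cpus = set()
        for chunk in cpu_str.split(','):
            chunk = chunk.strip()
            if not chunk:
                continue
            if '-' in chunk:
                start, end = chunk.split('-', 1)
                cpus.update(range(int(start), int(end) + 1))
            else:
                cpus.add(int(chunk))
        return sorted(cpus)

Under that assumption, each entry appended to dvs_cpu_list takes the shape given in the docstring, e.g. {'IP': '192.168.1.2', 'DVS_CPU': [2, 3, 4, 5, 8]}.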
+def conf_dvs(req, host_vxlan_networks_dict, host_private_networks_dict): + mode_str = { + 'vxlan': + { + '0': '(active-backup;off;%s-%s)', + '1': '(balance-slb;off;%s-%s)', + }, + 'vlan': { + '0': '(active-backup;off;%s-%s)', + '1': '(balance-slb;off;%s-%s)', + '2': '(balance-tcp;active;%s-%s)' + } + } + + network_type, networks_dict = _get_dvs_network_type( + host_vxlan_networks_dict, host_private_networks_dict) + + if not network_type: + return {} + + dvs_config = {} + + network_config = {} + vswitch_type = {} + physnics_config = {} + installed_dvs = [] + installed_ovs = [] + network_config['network_type'] = network_type + + # for vxlan + network_config['vtep_ip_ranges'] = [] + dvs_domain_id = {} + + # for vlan + bridge_mappings = {} + physical_mappings = {} + + for host_ip, interfaces in networks_dict.items(): + host_ip = host_ip.encode('utf8') + assign_network = daisy_cmn.get_assigned_network( + req, interfaces[0]['id'], + interfaces[0]['assigned_networks'][0].get('id')) + + if assign_network['vswitch_type'] in ['dvs', 'DVS']: + installed_dvs.append(host_ip) + elif assign_network['vswitch_type'] in ['ovs', 'OVS']: + installed_ovs.append(host_ip) + + if network_type == 'vxlan': + network_config['vtep_ip_ranges'].extend( + _get_vtep_ip_ranges( + interfaces[0]['assigned_networks'][0].get('ip_ranges'))) + + dvs_domain_id.update( + _get_dvs_domain_id(assign_network, dvs_domain_id, host_ip)) + + if not physnics_config.get('vxlan_info'): + physnics_config['vxlan_info'] = _get_dvs_vxlan_info( + interfaces, mode_str) + + if network_type == 'vlan': + for interface in interfaces: + bridge_mapping = _get_bridge_mappings(interface) + physical_mapping = _get_physical_mappings( + interface, mode_str, bridge_mapping) + bridge_mappings.update(bridge_mapping) + physical_mappings.update(physical_mapping) + + vswitch_type['ovdk'] = installed_dvs + vswitch_type['ovs_agent_patch'] = installed_ovs + physnics_config['dvs_domain_id'] = dvs_domain_id + physnics_config['physical_mappings'] = ",".join( + _convert_physical_mappings2list(physical_mappings)) + physnics_config['bridge_mappings'] = ",".join( + _convert_bridge_mappings2list(bridge_mappings)) + + dvs_config['vswitch_type'] = vswitch_type + dvs_config['network_config'] = network_config + dvs_config['physnics_config'] = physnics_config + + return dvs_config + + +def _get_interfaces_network(req, host_detail, cluster_networks): + has_interfaces = {} + host_mngt_network = tecs_cmn.get_host_interface_by_network( + host_detail, 'MANAGEMENT') + host_mgnt_ip = tecs_cmn.get_host_network_ip( + req, host_detail, cluster_networks, 'MANAGEMENT') + host_mgnt_netmask = tecs_cmn.get_network_netmask( + cluster_networks, 'MANAGEMENT') + host_mngt_network['ip'] = host_mgnt_ip + host_mngt_network['netmask'] = host_mgnt_netmask + has_interfaces['management'] = host_mngt_network + + # host_deploy_network = tecs_cmn.get_host_interface_by_network( + # host_detail, 'DEPLOYMENT') + host_deploy_network_info = tecs_cmn.get_host_interface_by_network( + host_detail, 'DEPLOYMENT') + # note:"is_deployment" can't label delpoyment network, it only used to + # label dhcp mac + if host_deploy_network_info: + host_deploy_ip = tecs_cmn.get_host_network_ip( + req, host_detail, cluster_networks, 'DEPLOYMENT') + host_deploy_netmask = tecs_cmn.get_network_netmask( + cluster_networks, 'DEPLOYMENT') + host_deploy_network_info['ip'] = host_deploy_ip + host_deploy_network_info['netmask'] = host_deploy_netmask + has_interfaces['deployment'] = host_deploy_network_info + + mngt_network_cidr = 
tecs_cmn.get_network_cidr( + cluster_networks, 'MANAGEMENT') + host_storage_networks = [network for network in cluster_networks + if network['network_type'] == 'STORAGE' and + network['cidr'] != mngt_network_cidr] + if host_storage_networks: + host_storage_network_info = tecs_cmn.get_host_interface_by_network( + host_detail, host_storage_networks[0]['name']) + if host_storage_network_info: + host_storage_ip = tecs_cmn.get_host_network_ip( + req, host_detail, cluster_networks, + host_storage_networks[0]['name']) + host_storage_netmask = tecs_cmn.get_network_netmask( + cluster_networks, host_storage_networks[0]['name']) + host_storage_network_info['ip'] = host_storage_ip + host_storage_network_info['netmask'] = host_storage_netmask + has_interfaces['storage'] = host_storage_network_info + + host_public_network_info = tecs_cmn.get_host_interface_by_network( + host_detail, 'PUBLICAPI') + if host_public_network_info: + public_vlan_id = tecs_cmn.get_network_vlan_id( + cluster_networks, 'PUBLICAPI') + if public_vlan_id: + public_nic_name = host_public_network_info[ + 'name'] + '.' + public_vlan_id + else: + public_nic_name = host_public_network_info['name'] + + host_public_ip = tecs_cmn.get_host_network_ip( + req, host_detail, cluster_networks, 'PUBLICAPI') + host_public_netmask = tecs_cmn.get_network_netmask( + cluster_networks, 'PUBLICAPI') + host_public_network_info['ip'] = host_public_ip + host_public_network_info['name'] = public_nic_name + host_public_network_info['netmask'] = host_public_netmask + has_interfaces['publicapi'] = host_public_network_info + + cluster_heartbeat_network_dict = [ + network for network in cluster_networks if network[ + 'network_type'] == 'HEARTBEAT'] + cluster_heartbeat_networks_name = [ + network['name'] for network in cluster_heartbeat_network_dict] + for heartbeat_network_name in cluster_heartbeat_networks_name: + host_heartbeat_network_info = tecs_cmn.get_host_interface_by_network( + host_detail, heartbeat_network_name) + if not host_heartbeat_network_info: + cluster_heartbeat_networks_name.remove(heartbeat_network_name) + if len(cluster_heartbeat_networks_name) >= 1: + host_heartbeat1_network_info = tecs_cmn.get_host_interface_by_network( + host_detail, cluster_heartbeat_networks_name[0]) + host_heartbeat1_network_info['ip'] = tecs_cmn.get_host_network_ip( + req, host_detail, cluster_networks, + cluster_heartbeat_networks_name[0]) + has_interfaces['heartbeat1'] = host_heartbeat1_network_info + if len(cluster_heartbeat_networks_name) == 2: + host_heartbeat2_network_info = tecs_cmn.get_host_interface_by_network( + host_detail, cluster_heartbeat_networks_name[1]) + host_heartbeat2_network_info['ip'] = tecs_cmn.get_host_network_ip( + req, host_detail, cluster_networks, + cluster_heartbeat_networks_name[1]) + has_interfaces['heartbeat2'] = host_heartbeat2_network_info + return has_interfaces + + +def _get_host_nic_name(cluster_network, host_detail): + """ + Different networking will generate different ha port name, + the rule of generation + is describe in comment. + :param cluster_network: Network info in cluster. 
+ :param host_detail: + :return: + """ + copy_host_detail = copy.deepcopy(host_detail) + + mgr_interface_info = tecs_cmn.get_host_interface_by_network( + copy_host_detail, 'MANAGEMENT') + nic_info = [network + for network in cluster_network + for netname in mgr_interface_info.get( + 'assigned_networks', None) + if network.get('name', None) == netname] + + nic_capability = [info['capability'] + for info in nic_info if info[ + 'network_type'] != "DATAPLANE"] + if not nic_capability or nic_capability == [None]: + return mgr_interface_info['name'] + + mgr_nic_info = [mgr_net for mgr_net in nic_info if mgr_net[ + 'network_type'] == "MANAGEMENT"][0] + # if private and management plane is unifier + if set(["DATAPLANE", "MANAGEMENT"]).issubset(set([info[ + 'network_type'] for info in nic_info])): + # if type = 'ether' and 'ovs' not in ml2 and management is 'high' + if "ether" == mgr_interface_info.get('type', None) and \ + "ovs" not in [mgr_interface_info.get('vswitch_type', None)] and \ + "high" == mgr_nic_info['capability']: + return mgr_interface_info['name'] + + # if ip at outer + if mgr_interface_info.get('ip', None) and mgr_interface_info.get( + 'name', None): + return "v_" + mgr_interface_info['name'] + # ip at inner + elif mgr_nic_info.get('ip', None): + return "managent" + + if "low" not in nic_capability: + return mgr_interface_info['name'] + + # if ip at outer + if mgr_interface_info.get('ip', None) and\ + mgr_interface_info.get('name', None): + return "v_" + mgr_interface_info['name'] + + # ip at inner + elif mgr_nic_info.get('ip', None): + return "managent" + + +def get_share_disk_services(req, role_id): + service_disks = tecs_cmn.get_service_disk_list(req, {'role_id': role_id}) + share_disk_services = [] + + for service_disk in service_disks: + if service_disk['disk_location'] == 'share': + share_disk_services.append(service_disk['service']) + return share_disk_services + + +def _get_vxlan_vni_range(network): + if network.get('vni_start') and network.get('vni_end'): + return '%s:%s' % (network['vni_start'], network['vni_end']) + else: + return '1000:3000' + + +def get_share_cluster_disk_services(req, role_id): + service_disks = tecs_cmn.get_service_disk_list(req, {'role_id': role_id}) + share_cluster_disk_services = [] + + for service_disk in service_disks: + if service_disk['disk_location'] == 'share_cluster': + share_cluster_disk_services.append(service_disk['service']) + return share_cluster_disk_services + + +def get_cluster_tecs_config(req, cluster_id): + LOG.info(_("Get tecs config from database...")) + params = dict(limit=1000000) + roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id) + cluster_networks = daisy_cmn.get_cluster_networks_detail(req, cluster_id) + try: + all_services = registry.get_services_detail(req.context, **params) + all_components = registry.get_components_detail(req.context, **params) + cluster_data = registry.get_cluster_metadata(req.context, cluster_id) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + + segment_type = {} + dataplane_network_dict = [network for network in cluster_networks if + network['network_type'] == 'DATAPLANE'] + if dataplane_network_dict[0]['segmentation_type'] in ['vlan']: + cluster_private_network_dict = dataplane_network_dict + cluster_vxlan_network_dict = [] + segment_type.update({'vlan': ''}) + elif dataplane_network_dict[0]['segmentation_type'] in ['vxlan']: + cluster_private_network_dict = [] + cluster_vxlan_network_dict = dataplane_network_dict + segment_type.update({'vxlan': 
{'vni_range': _get_vxlan_vni_range( + dataplane_network_dict[0])}}) + else: + cluster_private_network_dict = [] + cluster_vxlan_network_dict = [] + + cluster_private_networks_name = [network['name'] for network in + cluster_private_network_dict] + cluster_vxlan_networks_name = [network['name'] for network in + cluster_vxlan_network_dict] + + tecs_config = {} + tecs_config.update({'OTHER': {}}) + other_config = tecs_config['OTHER'] + other_config.update({'cluster_data': cluster_data}) + tecs_installed_hosts = set() + host_private_networks_dict = {} + host_vxlan_network_dict = {} + host_private_networks_dict_for_dvs = {} + zenic_cfg = {} + dvs_cpu_sets = [] + for role in roles: + if role['name'] == 'ZENIC_NFM': + zenic_cfg['vip'] = role['vip'] + if role['deployment_backend'] != daisy_cmn.tecs_backend_name: + continue + try: + role_service_ids = registry.get_role_services( + req.context, role['id']) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + + role_services_detail = [asc for rsci in role_service_ids + for asc in all_services if asc[ + 'id'] == rsci['service_id']] + component_id_to_name = dict( + [(ac['id'], ac['name']) for ac in all_components]) + service_components = dict( + [(scd['name'], component_id_to_name[scd['component_id']]) + for scd in role_services_detail]) + + role_hosts = daisy_cmn.get_hosts_of_role(req, role['id']) + ha_nic_name = '' + host_interfaces = [] + for role_host in role_hosts: + host_detail = daisy_cmn.get_host_detail(req, role_host['host_id']) + + sorted_host_detail = tecs_cmn.sort_interfaces_by_pci( + cluster_networks, host_detail) + host_private_networks_list =\ + _get_host_private_networks(sorted_host_detail, + cluster_private_networks_name) + # get ha nic port name + if role['name'] == "CONTROLLER_HA": + mgr_nic_name = _get_host_nic_name( + cluster_networks, sorted_host_detail) + mgr_vlan_id = tecs_cmn.get_mngt_network_vlan_id( + cluster_networks) + if mgr_vlan_id: + mgr_nic_name = mgr_nic_name + '.' 
+ mgr_vlan_id + if ha_nic_name and mgr_nic_name != ha_nic_name: + msg = "management plane nic name is\ + different on hosts with HA role" + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, request=req) + else: + ha_nic_name = mgr_nic_name + # if not other_config.has_key('ha_nic_name'): + if 'ha_nic_name' not in other_config: + other_config.update({'ha_nic_name': mgr_nic_name}) + + has_interfaces = _get_interfaces_network( + req, host_detail, cluster_networks) + has_interfaces.update({'name': host_detail['name']}) + host_interfaces.append(has_interfaces) + # mangement network must be configed + host_mgnt_ip = has_interfaces['management']['ip'] + + host_mgnt = host_detail['name'] if cluster_data[ + 'use_dns'] else host_mgnt_ip + + # host_mgnt_ip used to label who the private networks is + host_private_networks_dict[host_mgnt] = host_private_networks_list + if role['name'] == 'COMPUTER': + host_vxlan_network_list = _get_host_private_networks( + sorted_host_detail, cluster_vxlan_networks_name) + if host_vxlan_network_list: + host_private_networks_dict_for_dvs = {} + host_vxlan_network_dict[host_mgnt] = \ + get_network_config_for_dvs( + host_vxlan_network_list, cluster_vxlan_network_dict) + elif host_private_networks_list: + host_vxlan_network_dict = {} + host_private_networks_dict_for_dvs[host_mgnt] = \ + get_network_config_for_dvs( + host_private_networks_list, + cluster_private_network_dict) + + # get host ip of tecs is active + if (role_host['status'] == tecs_state['ACTIVE'] or + role_host['status'] == tecs_state['UPDATING'] or + role_host['status'] == tecs_state['UPDATE_FAILED']): + tecs_installed_hosts.add(host_mgnt) + + share_disk_services = get_share_disk_services(req, role['id']) + share_cluster_disk_services = \ + get_share_cluster_disk_services(req, role['id']) + + is_ha = re.match(".*_HA$", role['name']) is not None + if host_interfaces: + # if role['public_vip'] and not + # host_interfaces[0].has_key('public'): + if role['public_vip'] and 'publicapi' not in host_interfaces[0]: + msg = "no public networkplane found while role has public vip" + LOG.error(msg) + raise exception.NotFound(message=msg) + + tecs_config.update({role['name']: + {'services': service_components, + 'vip': role['vip'], + 'host_interfaces': host_interfaces, + 'share_disk_services': share_disk_services, + 'share_cluster_disk_services': + share_cluster_disk_services + }}) + if is_ha: + tecs_config[role['name']]['ntp_server'] = role['ntp_server'] + tecs_config[role['name']]['public_vip'] = role['public_vip'] + tecs_config[role['name']]['glance_vip'] = role['glance_vip'] + tecs_config[role['name']]['db_vip'] = role['db_vip'] + if role['name'] == 'COMPUTER': + dvs_cpu_set = get_dvs_cpu_sets(req, cluster_id, role_hosts) + if dvs_cpu_set: + dvs_cpu_sets.extend(dvs_cpu_set) + other_config.update({'tecs_installed_hosts': tecs_installed_hosts}) + # replace private network + physic_network_cfg = _conf_private_network( + req, cluster_id, host_private_networks_dict, + cluster_private_network_dict) + dvs_cfg = conf_dvs( + req, host_vxlan_network_dict, host_private_networks_dict_for_dvs) + # set for dvs_cpu_sets + dvs_cfg['dvs_cpu_sets'] = dvs_cpu_sets + + other_config.update({'physic_network_config': physic_network_cfg}) + other_config.update({'dvs_config': dvs_cfg}) + other_config.update({'segmentation_type': segment_type}) + other_config.update({'zenic_config': zenic_cfg}) + return tecs_config + + +def get_host_name_and_mgnt_ip(tecs_config): + name_ip_list = [] + ip_list = [] + ha_nodes_ip = set() + nodes_ips = {'ha': 
set(), 'lb': set(), 'computer': set()} + + for role_name, role_configs in tecs_config.items(): + if role_name == "OTHER": + continue + for host in role_configs['host_interfaces']: + ip_domain_dict = {} + host_mgt = host['management'] + if host_mgt['ip'] not in ip_list: + ip_list.append(host_mgt['ip']) + ip_domain_dict.update({host['name']: host_mgt['ip']}) + name_ip_list.append(ip_domain_dict) + if role_name == 'CONTROLLER_HA': + ha_nodes_ip.add(host_mgt['ip']) + if host_mgt['ip'] in tecs_config['OTHER']['tecs_installed_hosts'] \ + or host['name'] \ + in tecs_config['OTHER']['tecs_installed_hosts']: + continue + if role_name == 'CONTROLLER_HA': + nodes_ips['ha'].add(host_mgt['ip']) + if role_name == 'CONTROLLER_LB': + nodes_ips['lb'].add(host_mgt['ip']) + if role_name == 'COMPUTER': + nodes_ips['computer'].add(host_mgt['ip']) + return name_ip_list, nodes_ips, ha_nodes_ip + + +def replace_ip_with_domain_name(req, tecs_config): + domain_ip_list = [] + ip_list = [] + lb_float_ip = tecs_config['CONTROLLER_LB']['vip'] + for role_name, role_configs in tecs_config.items(): + if role_name == "OTHER": + continue + is_ha = re.match(".*_HA$", role_name) is not None + is_lb = re.match(".*_LB$", role_name) is not None + + for host in role_configs['host_interfaces']: + ip_domain_dict = {} + host_mgt = host['management'] + if host_mgt['ip'] not in ip_list: + ip_list.append(host_mgt['ip']) + ip_domain_dict.update({host['name']: host_mgt['ip']}) + domain_ip_list.append(ip_domain_dict) + host_mgt['ip'] = host['name'] + + if is_ha and role_configs.get('vip'): + domain_ip_list.append({'ha-vip': role_configs['vip']}) + if role_configs['ntp_server'] == role_configs['vip']: + role_configs['ntp_server'] = 'ha-vip' + elif role_configs['ntp_server'] == lb_float_ip: + role_configs['ntp_server'] = 'lb-vip' + role_configs['vip'] = 'ha-vip' + + if role_configs.get('public_vip'): + domain_ip_list.append({'public-vip': role_configs['public_vip']}) + if role_configs.get('glance_vip'): + domain_ip_list.append({'glance-vip': role_configs['glance_vip']}) + role_configs['glance_vip'] = 'glance-vip' + if role_configs.get('db_vip'): + domain_ip_list.append({'db-vip': role_configs['db_vip']}) + role_configs['db_vip'] = 'db-vip' + + if is_lb and role_configs.get('vip'): + domain_ip_list.append({'lb-vip': role_configs['vip']}) + role_configs['vip'] = 'lb-vip' + return domain_ip_list + + +def config_dnsmasq_server(host_ip_list, domain_ip_list, password='ossdbg1'): + dns_conf = "/etc/dnsmasq.conf" + for host_ip in host_ip_list: + try: + subprocess.check_output( + "sshpass -p %s ssh -o StrictHostKeyChecking=no %s " + "test -f %s" % (password, host_ip, dns_conf), + shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError: + msg = '%s does not exist in %s' % (dns_conf, host_ip) + LOG.error(msg) + raise exception.NotFound(message=msg) + + config_scripts = [ + "sed -i '/^[^#]/s/no-resolv[[:space:]]*/\#no-resolv/'\ + %s" % dns_conf, + "sed -i '/^[^#]/s/no-poll[[:space:]]*/\#no-poll/' %s" % dns_conf, + "cache_size_linenumber=`grep -n 'cache-size=' %s| awk -F ':' " + "'{print $1}'` && [ ! 
-z $cache_size_linenumber ] && sed -i " + "${cache_size_linenumber}d %s" % (dns_conf, dns_conf), + "echo 'cache-size=3000' >> %s" % dns_conf] + + tecs_cmn.run_scrip(config_scripts, host_ip, password, + msg='Failed to config cache of dns server on %s' % + host_ip) + + config_ip_scripts = [] + for domain_name_ip in domain_ip_list: + domain_name = domain_name_ip.keys()[0] + domain_ip = domain_name_ip.values()[0] + config_ip_scripts.append( + "controller1_linenumber=`grep -n 'address=/%s' %s| awk -F ':' " + "'{print $1}'` && [ ! -z ${controller1_linenumber} ] && " + "sed -i ${controller1_linenumber}d %s" % + (domain_name, dns_conf, dns_conf)) + config_ip_scripts.append("echo 'address=/%s/%s' >> %s" % + (domain_name, domain_ip, dns_conf)) + tecs_cmn.run_scrip(config_ip_scripts, host_ip, password, + 'Failed to config domain-ip of dns server on %s' % + host_ip) + + service_start_scripts = [ + "dns_linenumber=`grep -n \"^[\ + [:space:]]*ExecStart=/usr/sbin/dnsmasq -k\" " + "/usr/lib/systemd/system/dnsmasq.service|cut -d \":\" -f 1` && " + "sed -i \"${dns_linenumber}c ExecStart=/usr/sbin/dnsmasq -k " + "--dns-forward-max=150\" /usr/lib/systemd/system/dnsmasq.service", + "for i in `ps -elf | grep dnsmasq |grep -v grep | awk -F ' ' '{\ + print $4}'`;do kill -9 $i;done ", + "systemctl daemon-reload && systemctl enable dnsmasq.service && " + "systemctl restart dnsmasq.service"] + tecs_cmn.run_scrip(service_start_scripts, host_ip, password, + msg='Failed to start service of dns server on %s' % + host_ip) + + +def config_dnsmasq_client(host_ip_list, ha_ip_list, password='ossdbg1'): + dns_client_file = "/etc/resolv.conf" + tmp_dns_client_file = "/etc/resolv.conf.tmp" + config_scripts = [] + for ha_ip in ha_ip_list: + config_scripts.append("echo 'nameserver %s' >> %s" % + (ha_ip, tmp_dns_client_file)) + config_scripts.append('cat %s > %s' % (tmp_dns_client_file, + dns_client_file)) + config_scripts.append('rm -rf %s' % tmp_dns_client_file) + for host_ip in host_ip_list: + if host_ip not in ha_ip_list: + tecs_cmn.run_scrip(config_scripts, host_ip, password, + msg='Failed to config dns client on %s' % + host_ip) + + tecs_cmn.run_scrip(config_scripts, + msg='Failed to config dns client on daisy host') + + +def config_nodes_hosts(host_ip_list, domain_ip, password='ossdbg1'): + hosts_file = "/etc/hosts" + tmp_hosts_file = "/etc/hosts.tmp" + config_scripts = ['cat /etc/hosts > %s' % tmp_hosts_file] + for name_ip in domain_ip: + config_scripts.append("linenumber=`grep -n '%s$' %s | " + "awk -F ':' '{print $1}'` && " + "[ ! 
-z $linenumber ] && " + "sed -i ${linenumber}d %s" % + (name_ip.keys()[0], + tmp_hosts_file, tmp_hosts_file)) + config_scripts.append("echo '%s %s' >> %s" % (name_ip.values()[0], + name_ip.keys()[0], + tmp_hosts_file)) + config_scripts.append('cat %s > %s' % (tmp_hosts_file, hosts_file)) + config_scripts.append('rm -rf %s' % tmp_hosts_file) + for host_ip in host_ip_list: + tecs_cmn.run_scrip(config_scripts, host_ip, password, + msg='Failed to config /etc/hosts on %s' % host_ip) + tecs_cmn.run_scrip(config_scripts, + msg='Failed to config /etc/hosts on daisy host') + + +def revise_nova_config(computer_nodes, ha_vip, public_vip, compute_ip_domain, + password='ossdbg1'): + nova_file = "/etc/nova/nova.conf" + for host_ip in computer_nodes: + scripts = [] + if public_vip: + scripts.extend(["linenumber=`grep -n '^novncproxy_base_url' %s | " + "awk -F ':' '{print $1}'`" % nova_file, + 'sed -i "${linenumber}s/public-vip/%s/" %s' % + (public_vip, nova_file)]) + else: + scripts.extend(["linenumber=`grep -n '^novncproxy_base_url' %s | " + "awk -F ':' '{print $1}'`" % nova_file, + 'sed -i "${linenumber}s/ha-vip/%s/" %s' % + (ha_vip, nova_file)]) + scripts.extend(["linenumber=`grep -n '^vncserver_proxyclient_address' " + "%s | awk -F ':' '{print $1}'`" % nova_file, + 'sed -i "${linenumber}s/127.0.0.1/%s/" %s' % + (compute_ip_domain[host_ip], nova_file), + "systemctl restart openstack-nova-compute.service "]) + tecs_cmn.run_scrip(scripts, host_ip, password, + msg='Failed to config nova on %s' % host_ip) + + +def revise_horizon_config(ha_nodes, ha_vip, public_vip, password='ossdbg1'): + dashboard_file = "/etc/httpd/conf.d/15-horizon_vhost.conf" + for host_ip in ha_nodes: + config_scripts = ["linenumber1=`grep -n 'ServerAlias %s' " + "%s| awk -F ':' '{print $1}'` && " + "[ ! -z ${linenumber1} ] && sed -i " + "${linenumber1}d %s" % (host_ip, + dashboard_file, + dashboard_file), + "linenumber2=`grep -n 'ServerAlias \ + %s' %s| awk -F ':' '" + "{print $1}'` && [ ! -z ${linenumber2} ] && sed -i " + "${linenumber2}d %s" % (ha_vip, dashboard_file, + dashboard_file), + "linenumber3=`grep -n 'ServerAlias \ + %s' %s| awk -F ':' '" + "{print $1}'` && [ ! -z ${linenumber3} ] && sed -i " + "${linenumber3}d %s" % (public_vip, dashboard_file, + dashboard_file), + 'dasboard_linenumber1=`grep \ + -n "ServerAlias localhost" ' + '%s|cut -d ":" -f 1` && sed -i \ + "${dasboard_linenumber1}a ' + 'ServerAlias %s" %s' % (dashboard_file, host_ip, + dashboard_file), + 'dasboard_linenumber1=`grep -n \ + "ServerAlias localhost" %s' + '|cut -d ":" -f 1` && sed -i \ + "${dasboard_linenumber1}a ' + 'ServerAlias %s" %s' % (dashboard_file, ha_vip, + dashboard_file)] + if public_vip: + config_scripts.append('dasboard_linenumber2=`grep -n ' + '"ServerAlias localhost" %s|cut ' + '-d ":" -f 1` && sed -i ' + '"${dasboard_linenumber2}a ' + 'ServerAlias %s" %s' % + (dashboard_file, public_vip, + dashboard_file)) + + tecs_cmn.run_scrip(config_scripts, host_ip, password, + msg='Failed to config horizon on %s' % host_ip) + restart_http_scripts = ['systemctl daemon-reload &&' + 'systemctl restart httpd.service'] + try: + subprocess.check_output(restart_http_scripts, shell=True, + stderr=subprocess.STDOUT) + except: + return + + +class TECSInstallTask(Thread): + + """ + Class for install tecs bin. 
+ """ + """ Definition for install states.""" + + def __init__(self, req, cluster_id): + super(TECSInstallTask, self).__init__() + self.req = req + self.cluster_id = cluster_id + self.progress = 0 + self.state = tecs_state['INIT'] + self.message = "" + self.tecs_config_file = '' + self.mgnt_ip_list = '' + self.install_log_fp = None + self.last_line_num = 0 + self.need_install = False + self.ping_times = 36 + self.log_file = "/var/log/daisy/tecs_%s_install.log" % self.cluster_id + self.dns_name_ip = [] + self.password = 'ossdbg1' + self.nodes_ips = {} + + def _check_install_log(self, tell_pos): + with open(self.log_file, "r") as tmp_fp: + tmp_fp.seek(tell_pos, os.SEEK_SET) + line_num = self.last_line_num + for lnum, lcontent in enumerate(tmp_fp, 1): + tell_pos = tmp_fp.tell() + line_num += 1 + LOG.debug("<<>>", line_num, lcontent) + if -1 != lcontent.find("Preparing servers"): + self.progress = 3 + + if -1 != lcontent.find("successfully"): + self.progress = 100 + self.state = tecs_state['ACTIVE'] + elif -1 != lcontent.find("Error") \ + or -1 != lcontent.find("ERROR") \ + or -1 != lcontent.find("error") \ + or -1 != lcontent.find("not found"): + self.state = tecs_state['INSTALL_FAILED'] + self.message = \ + "Tecs install error, see line %s in '%s'" % ( + line_num, self.log_file) + raise exception.InstallException(self.message) + self.last_line_num = line_num + return tell_pos + + def _calc_progress(self, path): + """ + Calculate the progress of installing bin. + :param path: directory contain ".pp" and ".log" files + :return: installing progress(between 1~100) + """ + ppcount = logcount = 0 + for file in os.listdir(path): + if file.endswith(".log"): + logcount += 1 + elif file.endswith(".pp"): + ppcount += 1 + + progress = 0 + if 0 != ppcount: + progress = (logcount * 100.00) / ppcount + return progress + + def _update_install_progress_to_db(self): + """ + Update progress of intallation to db. + :return: + """ + roles = daisy_cmn.get_cluster_roles_detail(self.req, self.cluster_id) + for role in roles: + if role['deployment_backend'] != daisy_cmn.tecs_backend_name: + continue + role_hosts = daisy_cmn.get_hosts_of_role(self.req, role['id']) + for role_host in role_hosts: + if role_host['status'] != tecs_state['ACTIVE']: + self.need_install = True + role_host['status'] = self.state + role_host['progress'] = self.progress + role_host['messages'] = self.message + daisy_cmn.update_role_host( + self.req, role_host['id'], role_host) + role['progress'] = self.progress + role['status'] = self.state + role['messages'] = self.message + daisy_cmn.update_role(self.req, role['id'], role) + + def _generate_tecs_config_file(self, cluster_id, tecs_config): + tecs_config_file = '' + if tecs_config: + cluster_conf_path = daisy_tecs_path + cluster_id + LOG.info(_("Generate tecs config...")) + config.update_tecs_config(tecs_config, cluster_conf_path) + tecs_config_file = cluster_conf_path + "/tecs.conf" + ha_config_file = cluster_conf_path + "/HA_1.conf" + tecs_cmn.mkdir_tecs_install() + cp_ha_conf = "\cp %s /home/tecs_install/" % ha_config_file + tecs_conf = "\cp %s /home/tecs_install/" % tecs_config_file + daisy_cmn.subprocess_call(cp_ha_conf) + daisy_cmn.subprocess_call(tecs_conf) + return tecs_config_file + + def run(self): + try: + start_time = time.time() + self._run() + except Exception as e: + self.state = tecs_state['INSTALL_FAILED'] + self.message = e.message + self._update_install_progress_to_db() + LOG.info(_("TECS version package installed failed for" + " cluster %s." 
% self.cluster_id)) + LOG.exception(e.message) + else: + if not self.need_install: + return + LOG.info(_("TECS version package installed completely for" + " cluster %s." % self.cluster_id)) + + LOG.info("Config provider ...") + tecs_cmn.inform_provider_cloud_state( + self.req.context, self.cluster_id, operation='add') + time_cost = str(round((time.time() - start_time) / 60, 2)) + LOG.info( + _("It totally takes %s min for installing tecs" % time_cost)) + + if self.dns_name_ip: + LOG.info("Config dns ...") + ha_vip = "" + public_vip = "" + compute_ip_domain = {} + for dns_dict in self.dns_name_ip: + domain_name = dns_dict.keys()[0] + domain_ip = dns_dict.values()[0] + if domain_name == "ha-vip": + ha_vip = domain_ip + if domain_name == "public-vip": + public_vip = domain_ip + if domain_ip in self.nodes_ips['computer']: + compute_ip_domain.update({domain_ip: domain_name}) + + revise_nova_config(self.nodes_ips['computer'], ha_vip, + public_vip, compute_ip_domain) + revise_horizon_config(self.nodes_ips['ha'], ha_vip, public_vip) + + LOG.info("Push configs for installing hosts ...") + config_backend_name = 'clushshell' + backend_driver = manager.configBackend(config_backend_name, + self.req) + + params = {'filters': {'cluster_id': self.cluster_id}} + nodes = registry.get_hosts_detail(self.req.context, **params) + push_nodes_id = [node['id'] for node in nodes + if node['status'] == 'with-role' and + node['role_status'] == tecs_state['INSTALLING']] + components_name = ['nova'] + backend_driver.push_config_by_hosts(push_nodes_id, + components_name) + + LOG.info("Config neutron ...") + result = config.get_conf( + self.tecs_config_file, + neutron_float_ip="CONFIG_NEUTRON_SERVER_HOST", + keystone_float_ip="CONFIG_KEYSTONE_HOST", + neutron_install_mode="CONFIG_NEUTRON_SERVER_INSTALL_MODE", + keystone_install_mode="CONFIG_KEYSTONE_INSTALL_MODE", + lb_float_ip="CONFIG_LB_HOST") + if (result.get('keystone_install_mode', None) == "LB" and + result.get('neutron_install_mode', None) == "LB"): + LOG.info(_("<<>>")) + time.sleep(20) + neutron(self.req, + result.get('lb_float_ip', None), + result.get('lb_float_ip', None), + self.cluster_id) + else: + LOG.info(_("<<>>")) + time.sleep(20) + neutron(self.req, + result.get('neutron_float_ip', None), + result.get('keystone_float_ip', None), + self.cluster_id) + + self.progress = 100 + self.state = tecs_state['ACTIVE'] + self.message = "Tecs installed successfully" + LOG.info(_("TECS installed successfully for cluster %s." + % self.cluster_id)) + finally: + if not self.need_install: + return + self._update_install_progress_to_db() + if self.install_log_fp: + self.install_log_fp.close() + + def _run(self): + """ + Exectue install file(.bin) with sync mode. + :return: + """ + + def executor(**params): + # if subprocsee is failed, we need break + if os.path.exists(self.log_file): + params['tell_pos'] = self._check_install_log( + params.get('tell_pos', 0)) + LOG.debug(_("<<>>")) + if 100 == self.progress: + return params + if 3 == self.progress: + self._update_install_progress_to_db() + # waiting for 'progress_log_location' file exist + if not params.get("if_progress_file_read", None): + if not os.path.exists(self.progress_log_location): + params['if_progress_file_read'] = False + return params + else: + with open(self.progress_log_location, "r") as fp: + line = fp.readline() + self.progress_logs_path = line.split( + '\n')[0] + "/manifests" + LOG.info(_("TECS installation log path: %s." 
+ % self.progress_logs_path)) + params['if_progress_file_read'] = True + + # waiting for 'self.progress_logs_path' file exist + if not os.path.exists(self.progress_logs_path): + return params + + LOG.debug(_("<<>>")) + + # cacl progress & sync to db + progress = self._calc_progress(self.progress_logs_path) + if progress == 100: + self.progress = 100 + elif self.progress != progress and progress >= 3: + self.progress = progress + self.state = tecs_state['INSTALLING'] + self._update_install_progress_to_db() + + return params + + tecs_config = get_cluster_tecs_config(self.req, self.cluster_id) + name_ip_list, self.nodes_ips, ha_nodes_ip =\ + get_host_name_and_mgnt_ip(tecs_config) + + self.mgnt_ip_list = (self.nodes_ips['ha'] | + self.nodes_ips['lb'] | + self.nodes_ips['computer']) + + # after os is installed successfully, if ping all role hosts + # management ip successfully, begin to install TECS + unreached_hosts = daisy_cmn.check_ping_hosts(self.mgnt_ip_list, + self.ping_times) + if unreached_hosts: + self.message =\ + "ping hosts %s failed" % ','.join(unreached_hosts) + raise exception.InstallException(self.message) + else: + # os maybe not reboot completely, wait for 10s. + time.sleep(10) + + # delete daisy server known_hosts file to avoid + # ssh command failed because of incorrect host key. + daisy_cmn.subprocess_call('rm -rf /root/.ssh/known_hosts') + + self.progress = 0 + self.message = "Preparing for TECS installation" + self._update_install_progress_to_db() + if not self.need_install: + LOG.info(_("No host in cluster %s need to install tecs." + % self.cluster_id)) + return + + LOG.info(_("Begin to trust me for all nodes with role...")) + root_passwd = 'ossdbg1' + + daisy_cmn.trust_me(self.mgnt_ip_list, root_passwd) + tecs_cmn.mkdir_tecs_install(self.mgnt_ip_list) + + if tecs_config['OTHER']['cluster_data']['use_dns']: + self.dns_name_ip =\ + replace_ip_with_domain_name(self.req, tecs_config) + storage_ip_list = tecs_cmn.get_storage_name_ip_dict( + self.req, self.cluster_id, 'STORAGE') + + self.dns_name_ip.extend(storage_ip_list) + tecs_config['OTHER'].update({'dns_config': self.dns_name_ip}) + + config_dnsmasq_server(ha_nodes_ip, self.dns_name_ip) + config_dnsmasq_client(self.mgnt_ip_list, ha_nodes_ip) + config_nodes_hosts(self.mgnt_ip_list, self.dns_name_ip) + host_domain = [name_ip.keys()[0] for name_ip in self.dns_name_ip + if name_ip.values()[0] in self.mgnt_ip_list] + + unreached_hosts = daisy_cmn.check_ping_hosts(host_domain, + self.ping_times) + if unreached_hosts: + self.message = "ping hosts %s failed after DNS\ + configuration" % ','.join(unreached_hosts) + raise exception.InstallException(self.message) + else: + config_nodes_hosts(self.mgnt_ip_list, name_ip_list) + # generate tecs config must be after ping check + self.tecs_config_file =\ + self._generate_tecs_config_file(self.cluster_id, + tecs_config) + + # install network-configuration-1.1.1-15.x86_64.rpm + if self.mgnt_ip_list: + for mgnt_ip in self.mgnt_ip_list: + LOG.info(_("Begin to install network-configuration\ + on %s" % mgnt_ip)) + tecs_cmn.TecsShellExector(mgnt_ip, 'install_rpm') + # network-configuration will restart network, + # wait until ping test successfully + time.sleep(10) + unreached_hosts = daisy_cmn.check_ping_hosts(self.mgnt_ip_list, + self.ping_times) + if unreached_hosts: + self.message = "ping hosts %s failed after network\ + configuration" % ','.join(unreached_hosts) + raise exception.InstallException(self.message) + + (share_disk_info, volume_disk_info, share_cluster_disk_info) =\ + 
disk_array.get_disk_array_info(self.req, self.cluster_id) + + if volume_disk_info: + LOG.info(_("Begin to config multipth on compute nodes...")) + disk_array.config_compute_multipath(self.nodes_ips['computer']) + + if share_disk_info or share_cluster_disk_info: + LOG.info(_("Begin to config multipth on HA nodes...")) + disk_array.config_compute_multipath(self.nodes_ips['ha']) + LOG.info(_("Begin to config Disk Array on ha nodes...")) + array_nodes_addr =\ + tecs_cmn.get_disk_array_nodes_addr(self.req, self.cluster_id) + disk_array.config_ha_share_disk(share_disk_info, + array_nodes_addr['ha'], + share_cluster_disk_info) + + # check and get TECS version + tecs_version_pkg_file =\ + tecs_cmn.check_and_get_tecs_version(daisy_tecs_path) + if not tecs_version_pkg_file: + self.state = tecs_state['INSTALL_FAILED'] + self.message =\ + "TECS version file not found in %s" % daisy_tecs_path + raise exception.NotFound(message=self.message) + + # use pattern 'tecs_%s_install' to distinguish + # multi clusters installation + LOG.info(_("Open log file for TECS installation.")) + self.install_log_fp = open(self.log_file, "w+") + + # delete cluster_id file before installing, + # in case getting old log path + self.progress_log_location =\ + "/var/tmp/packstack/%s" % self.cluster_id + if os.path.exists(self.progress_log_location): + os.remove(self.progress_log_location) + + install_cmd = "sudo %s conf_file %s" % (tecs_version_pkg_file, + self.tecs_config_file) + LOG.info(_("Begin to install TECS in cluster %s." % self.cluster_id)) + subprocess.Popen(install_cmd, + shell=True, + stdout=self.install_log_fp, + stderr=self.install_log_fp) + + self.progress = 1 + self.state = tecs_state['INSTALLING'] + self.message = "TECS installing" + self._update_install_progress_to_db() + # if clush_bin is not terminate + # while not clush_bin.returncode: + params = {} # executor params + execute_times = 0 # executor run times + while True: + time.sleep(5) + if self.progress == 100: + if volume_disk_info and self.nodes_ips['ha']: + LOG.info(_("Begin to config cinder volume...")) + disk_array.config_ha_cinder_volume(volume_disk_info, + self.nodes_ips['ha']) + break + elif execute_times >= 1440: + self.state = tecs_state['INSTALL_FAILED'] + self.message = "TECS install timeout for 2 hours" + raise exception.InstallTimeoutException( + cluster_id=self.cluster_id) + params = executor( + # just read cluster_id file once in 'while' + if_progress_file_read=params.get( + "if_progress_file_read", False), + # current fp location of tecs_install.log + tell_pos=params.get("tell_pos", 0)) + + # get clush_bin.returncode + # clush_bin.poll() + execute_times += 1 diff --git a/code/daisy/daisy/api/backends/tecs/uninstall.py b/code/daisy/daisy/api/backends/tecs/uninstall.py index d87d8590..c555d8d0 100755 --- a/code/daisy/daisy/api/backends/tecs/uninstall.py +++ b/code/daisy/daisy/api/backends/tecs/uninstall.py @@ -17,31 +17,12 @@ /hosts endpoint for Daisy v1 API """ -import webob.exc import subprocess -from oslo_config import cfg from oslo_log import log as logging -from webob.exc import HTTPBadRequest -from webob.exc import HTTPForbidden - -from threading import Thread, Lock -import threading from daisy import i18n -from daisy import notifier - -from daisy.api import policy -import daisy.api.v1 - -from daisy.common import exception -from daisy.common import property_utils -from daisy.common import utils -from daisy.common import wsgi -from daisy.api.v1 import controller -from daisy.api.v1 import filters import daisy.api.backends.common as 
daisy_cmn import daisy.api.backends.tecs.common as tecs_cmn -import daisy.registry.client.v1.api as registry LOG = logging.getLogger(__name__) _ = i18n._ @@ -51,9 +32,11 @@ _LW = i18n._LW tecs_state = tecs_cmn.TECS_STATE -def update_progress_to_db(req, role_id_list, status, hosts_list,host_ip=None): + +def update_progress_to_db(req, role_id_list, status, hosts_list, host_ip=None): """ - Write uninstall progress and status to db, we use global lock object 'uninstall_mutex' + Write uninstall progress and status to db, + we use global lock object 'uninstall_mutex' to make sure this function is thread safety. :param req: http req. :param role_id_list: Column neeb be update in role table. @@ -63,26 +46,27 @@ def update_progress_to_db(req, role_id_list, status, hosts_list,host_ip=None): for role_id in role_id_list: role_hosts = daisy_cmn.get_hosts_of_role(req, role_id) for host_id_ip in hosts_list: - host_ip_tmp=host_id_ip.values()[0] - host_id_tmp=host_id_ip.keys()[0] + host_ip_tmp = host_id_ip.values()[0] + host_id_tmp = host_id_ip.keys()[0] if host_ip: for role_host in role_hosts: if (host_ip_tmp == host_ip and - role_host['host_id']== host_id_tmp): + role_host['host_id'] == host_id_tmp): role_host_meta = {} if 0 == cmp(status, tecs_state['UNINSTALLING']): role_host_meta['progress'] = 10 role_host_meta['messages'] = 'TECS uninstalling' - if 0 == cmp(status, tecs_state['UNINSTALL_FAILED']): - role_host_meta['messages'] = 'TECS uninstalled failed' - elif 0 == cmp(status, tecs_state['ACTIVE']): + if 0 == cmp(status, tecs_state['UNINSTALL_FAILED']): + role_host_meta[ + 'messages'] = 'TECS uninstalled failed' + elif 0 == cmp(status, tecs_state['INIT']): role_host_meta['progress'] = 100 - role_host_meta['messages'] = 'TECS uninstalled successfully' + role_host_meta[ + 'messages'] = 'TECS uninstalled successfully' if role_host_meta: role_host_meta['status'] = status - daisy_cmn.update_role_host(req, - role_host['id'], - role_host_meta) + daisy_cmn.update_role_host(req, role_host['id'], + role_host_meta) else: role = {} if 0 == cmp(status, tecs_state['UNINSTALLING']): @@ -91,11 +75,11 @@ def update_progress_to_db(req, role_id_list, status, hosts_list,host_ip=None): role_host_meta['status'] = status role_host_meta['progress'] = 0 daisy_cmn.update_role_host(req, - role_host['id'], - role_host_meta) - role['progress']=0 + role_host['id'], + role_host_meta) + role['progress'] = 0 role['messages'] = 'TECS uninstalling' - if 0 == cmp(status, tecs_state['UNINSTALL_FAILED']): + if 0 == cmp(status, tecs_state['UNINSTALL_FAILED']): role['messages'] = 'TECS uninstalled failed' elif 0 == cmp(status, tecs_state['INIT']): role['progress'] = 100 @@ -103,53 +87,64 @@ def update_progress_to_db(req, role_id_list, status, hosts_list,host_ip=None): if role: role['status'] = status daisy_cmn.update_role(req, role_id, role) - if 0 == cmp(status, tecs_state['INIT']): - daisy_cmn.delete_role_hosts(req, role_id) -def _thread_bin(req, host_ip, role_id_list,hosts_list): + +def _thread_bin(req, host_ip, role_id_list, hosts_list): # uninstall network-configuration-1.1.1-15.x86_64.rpm - update_progress_to_db(req,role_id_list,tecs_state['UNINSTALLING'],hosts_list,host_ip) + update_progress_to_db( + req, role_id_list, tecs_state['UNINSTALLING'], hosts_list, host_ip) tecs_cmn.TecsShellExector(host_ip, 'uninstall_rpm') cmd = 'mkdir -p /var/log/daisy/daisy_uninstall/' daisy_cmn.subprocess_call(cmd) - password = "ossdbg1" - var_log_path = "/var/log/daisy/daisy_uninstall/%s_uninstall_tecs.log" % host_ip + password = "ossdbg1" + 
var_log_path = "/var/log/daisy/daisy_uninstall/\ + %s_uninstall_tecs.log" % host_ip with open(var_log_path, "w+") as fp: cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password) - daisy_cmn.subprocess_call(cmd,fp) + daisy_cmn.subprocess_call(cmd, fp) cmd = 'clush -S -b -w %s "rm -rf /home/daisy_uninstall"' % (host_ip,) - daisy_cmn.subprocess_call(cmd,fp) + daisy_cmn.subprocess_call(cmd, fp) cmd = 'clush -S -w %s "mkdir -p /home/daisy_uninstall"' % (host_ip,) - daisy_cmn.subprocess_call(cmd,fp) - + daisy_cmn.subprocess_call(cmd, fp) + try: - scp_bin_result = subprocess.check_output( - 'clush -S -w %s -c /var/lib/daisy/tecs/ZXTECS*.bin --dest=/home/daisy_uninstall' % (host_ip,), + subprocess.check_output( + 'clush -S -w %s -c /var/lib/daisy/tecs/ZXTECS*.bin \ + --dest=/home/daisy_uninstall' % ( + host_ip,), shell=True, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: - update_progress_to_db(req, role_id_list, tecs_state['UNINSTALL_FAILED'],hosts_list,host_ip) + update_progress_to_db( + req, role_id_list, tecs_state[ + 'UNINSTALL_FAILED'], hosts_list, host_ip) LOG.error(_("scp TECS bin for %s failed!" % host_ip)) fp.write(e.output.strip()) - + cmd = 'clush -S -w %s "chmod 777 /home/daisy_uninstall/*"' % (host_ip,) - daisy_cmn.subprocess_call(cmd,fp) - + daisy_cmn.subprocess_call(cmd, fp) + try: exc_result = subprocess.check_output( - 'clush -S -w %s /home/daisy_uninstall/ZXTECS*.bin clean' % (host_ip,), + 'clush -S -w %s /home/daisy_uninstall/ZXTECS*.bin clean' % ( + host_ip,), shell=True, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: - update_progress_to_db(req, role_id_list, tecs_state['UNINSTALL_FAILED'],hosts_list,host_ip) + update_progress_to_db( + req, role_id_list, tecs_state[ + 'UNINSTALL_FAILED'], hosts_list, host_ip) LOG.error(_("Uninstall TECS for %s failed!" % host_ip)) fp.write(e.output.strip()) else: - update_progress_to_db(req, role_id_list, tecs_state['ACTIVE'], hosts_list,host_ip) + update_progress_to_db(req, role_id_list, tecs_state['INIT'], + hosts_list, host_ip) LOG.info(_("Uninstall TECS for %s successfully!" 
% host_ip)) fp.write(exc_result) # this will be raise raise all the exceptions of the thread to log file + + def thread_bin(req, host_ip, role_id_list, hosts_list): try: _thread_bin(req, host_ip, role_id_list, hosts_list) except Exception as e: - LOG.exception(e.message) \ No newline at end of file + LOG.exception(e.message) diff --git a/code/daisy/daisy/api/backends/tecs/upgrade.py b/code/daisy/daisy/api/backends/tecs/upgrade.py index 70113960..1bf5b60b 100755 --- a/code/daisy/daisy/api/backends/tecs/upgrade.py +++ b/code/daisy/daisy/api/backends/tecs/upgrade.py @@ -17,30 +17,10 @@ /update endpoint for Daisy v1 API """ -import webob.exc import subprocess -from oslo_config import cfg from oslo_log import log as logging -from webob.exc import HTTPBadRequest -from webob.exc import HTTPForbidden - -from threading import Thread, Lock -import threading -import time from daisy import i18n -from daisy import notifier - -from daisy.api import policy -import daisy.api.v1 -import daisy.registry.client.v1.api as registry -from daisy.common import exception -from daisy.common import property_utils -from daisy.common import utils -from daisy.common import wsgi -from daisy.api.v1 import controller -from daisy.api.v1 import filters -from daisy.api.backends import os as os_handle import daisy.api.backends.common as daisy_cmn import daisy.api.backends.tecs.common as tecs_cmn @@ -53,7 +33,8 @@ _LW = i18n._LW tecs_state = tecs_cmn.TECS_STATE -def update_progress_to_db(req,role_id_list,status,hosts_list,host_ip=None): + +def update_progress_to_db(req, role_id_list, status, hosts_list, host_ip=None): """ Write update progress and status to db, to make sure this function is thread safety. @@ -65,40 +46,43 @@ def update_progress_to_db(req,role_id_list,status,hosts_list,host_ip=None): for role_id in role_id_list: role_hosts = daisy_cmn.get_hosts_of_role(req, role_id) for host_id_ip in hosts_list: - host_ip_tmp=host_id_ip.values()[0] - host_id_tmp=host_id_ip.keys()[0] + host_ip_tmp = host_id_ip.values()[0] + host_id_tmp = host_id_ip.keys()[0] if host_ip: for role_host in role_hosts: if (host_ip_tmp == host_ip and - role_host['host_id']== host_id_tmp): + role_host['host_id'] == host_id_tmp): role_host_meta = {} if 0 == cmp(status, tecs_state['UPDATING']): role_host_meta['progress'] = 10 role_host_meta['messages'] = 'TECS upgrading' - if 0 == cmp(status, tecs_state['UPDATE_FAILED']): + if 0 == cmp(status, tecs_state['UPDATE_FAILED']): role_host_meta['messages'] = 'TECS upgraded failed' elif 0 == cmp(status, tecs_state['ACTIVE']): role_host_meta['progress'] = 100 - role_host_meta['messages'] = 'TECS upgraded successfully' + role_host_meta[ + 'messages'] = 'TECS upgraded successfully' if role_host_meta: role_host_meta['status'] = status daisy_cmn.update_role_host(req, - role_host['id'], - role_host_meta) + role_host['id'], + role_host_meta) else: role = {} if 0 == cmp(status, tecs_state['UPDATING']): for role_host in role_hosts: + if role_host['status'] == tecs_state['INSTALL_FAILED']: + continue role_host_meta = {} role_host_meta['status'] = status role_host_meta['progress'] = 0 role_host_meta['messages'] = 'TECS upgrading' daisy_cmn.update_role_host(req, - role_host['id'], - role_host_meta) - role['progress']=0 + role_host['id'], + role_host_meta) + role['progress'] = 0 role['messages'] = 'TECS upgrading' - if 0 == cmp(status, tecs_state['UPDATE_FAILED']): + if 0 == cmp(status, tecs_state['UPDATE_FAILED']): role['messages'] = 'TECS upgraded failed' elif 0 == cmp(status, tecs_state['ACTIVE']): role['progress'] = 100 
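The uninstall and upgrade helpers in these hunks compare states with the Python 2 idiom 0 == cmp(status, ...). cmp() does not exist on Python 3, where a plain equality test (status == tecs_state['UPDATING']) is the equivalent; if the old spelling has to be preserved, a small compatibility shim along these lines (a hypothetical helper, not part of the patch) restores it:

    def cmp(a, b):
        # Python 2 style three-way compare: returns -1, 0 or 1,
        # so the existing "0 == cmp(x, y)" checks keep working.
        return (a > b) - (a < b)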
@@ -106,46 +90,56 @@ def update_progress_to_db(req,role_id_list,status,hosts_list,host_ip=None): if role: role['status'] = status daisy_cmn.update_role(req, role_id, role) - -def thread_bin(req,role_id_list, host_ip,hosts_list): + + +def thread_bin(req, role_id_list, host_ip, hosts_list): # update network-configuration-1.1.1-15.x86_64.rpm - update_progress_to_db(req,role_id_list,tecs_state['UPDATING'],hosts_list,host_ip) + update_progress_to_db( + req, role_id_list, tecs_state['UPDATING'], hosts_list, host_ip) cmd = 'mkdir -p /var/log/daisy/daisy_update/' daisy_cmn.subprocess_call(cmd) password = "ossdbg1" var_log_path = "/var/log/daisy/daisy_update/%s_update_tecs.log" % host_ip with open(var_log_path, "w+") as fp: cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password) - daisy_cmn.subprocess_call(cmd,fp) - cmd = 'clush -S -w %s "mkdir -p /home/daisy_update"' % (host_ip,) - daisy_cmn.subprocess_call(cmd,fp) - cmd = 'clush -S -b -w %s "rm -rf /home/daisy_update/ZXTECS*.bin"' % (host_ip,) - daisy_cmn.subprocess_call(cmd,fp) + daisy_cmn.subprocess_call(cmd, fp) + cmd = 'clush -S -w %s "mkdir -p /home/tecs_update/"' % (host_ip,) + daisy_cmn.subprocess_call(cmd, fp) + cmd = 'clush -S -b -w %s "rm -rf /home/tecs_update/ZXTECS*.bin"' % ( + host_ip,) + daisy_cmn.subprocess_call(cmd, fp) tecs_cmn.TecsShellExector(host_ip, 'update_rpm') try: - scp_bin_result = subprocess.check_output( - 'clush -S -w %s -c /var/lib/daisy/tecs/ZXTECS*.bin --dest=/home/daisy_update' % (host_ip,), + subprocess.check_output( + 'clush -S -w %s -c /var/lib/daisy/tecs/ZXTECS*.bin \ + --dest=/home/tecs_update' % ( + host_ip,), shell=True, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: - update_progress_to_db(req,role_id_list,tecs_state['UPDATE_FAILED'],hosts_list,host_ip) + update_progress_to_db( + req, role_id_list, tecs_state[ + 'UPDATE_FAILED'], hosts_list, host_ip) LOG.error(_("scp TECS bin for %s failed!" % host_ip)) fp.write(e.output.strip()) return 1 - - cmd = 'clush -S -w %s "chmod 777 /home/daisy_update/*"' % (host_ip,) - daisy_cmn.subprocess_call(cmd,fp) - + + cmd = 'clush -S -w %s "chmod 777 /home/tecs_update/*"' % (host_ip,) + daisy_cmn.subprocess_call(cmd, fp) + try: exc_result = subprocess.check_output( - 'clush -S -w %s "/home/daisy_update/ZXTECS*.bin upgrade"' % (host_ip,), + 'clush -S -w %s "/home/tecs_update/ZXTECS*.bin upgrade"' % ( + host_ip,), shell=True, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: - update_progress_to_db(req,role_id_list,tecs_state['UPDATE_FAILED'],hosts_list,host_ip) + update_progress_to_db( + req, role_id_list, tecs_state[ + 'UPDATE_FAILED'], hosts_list, host_ip) LOG.error(_("Update TECS for %s failed!" % host_ip)) fp.write(e.output.strip()) return 2 else: - update_progress_to_db(req,role_id_list,tecs_state['ACTIVE'],hosts_list,host_ip) + update_progress_to_db( + req, role_id_list, tecs_state['ACTIVE'], hosts_list, host_ip) fp.write(exc_result) - return 0 - + return 0 diff --git a/code/daisy/daisy/api/backends/tecs/write_configs.py b/code/daisy/daisy/api/backends/tecs/write_configs.py new file mode 100755 index 00000000..28b44952 --- /dev/null +++ b/code/daisy/daisy/api/backends/tecs/write_configs.py @@ -0,0 +1,142 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +/install endpoint for tecs API +""" +import daisy.registry.client.v1.api as registry +import daisy.api.backends.common as daisy_cmn +from daisy.common import utils + + +def _write_role_configs_to_db(req, cluster_id, role_name, configs): + config_meta = {'cluster': cluster_id, + 'role': role_name, + 'config': configs} + registry.config_interface_metadata(req.context, + config_meta) + + +def _write_host_configs_to_db(req, host_id, configs): + config_meta = {'host_id': host_id, + 'config': configs} + registry.config_interface_metadata(req.context, + config_meta) + + +def _get_config_item(file, section, key, value, description): + return {'file-name': file, + 'section': section, + 'key': key, + 'value': value, + 'description': description} + + +def _add_configs_for_nova(req, host_detail): + config_file = '/etc/nova/nova.conf' + default_section = 'DEFAULT' + + key_name = 'vcpu_pin_set' + key_value = host_detail.get(key_name) + config_items = [] + if not key_value: + key_value = host_detail.get('isolcpus') + + nova_key_name = key_name + description = 'vcpu pin set for all vm' + item = _get_config_item(config_file, + default_section, + nova_key_name, + key_value, + description) + config_items.append(item) + + key_name = 'dvs_high_cpuset' + key_value = host_detail.get(key_name) + + nova_key_name = 'dvs_high_cpu_set' + description = 'vcpu pin set for high-performance dvs vm' + item = _get_config_item(config_file, + default_section, + nova_key_name, + key_value, + description) + config_items.append(item) + + numa_cpus = utils.get_numa_node_cpus(host_detail.get('cpu', {})) + numa_nodes = utils.get_numa_node_from_cpus(numa_cpus, key_value) + if numa_nodes: + libvirt_section = 'libvirt' + nova_key_name = 'reserved_huge_pages' + # only support one NUMA node for DVS now + key_value = 'node:%s,size:1048576,count:4' % numa_nodes[0] + description = 'reserved huges for DVS service '\ + 'on high NUMA node' + config_items.append({'file-name': config_file, + 'key': nova_key_name, + 'section': libvirt_section, + 'value': key_value, + 'description': description}) + + key_name = 'pci_high_cpuset' + pci_key_value = host_detail.get(key_name) + + nova_key_name = 'vsg_card_cpu_set' + description = 'vcpu pin set for high-performance CLC card vm' + item = _get_config_item(config_file, + default_section, + nova_key_name, + pci_key_value, + description) + config_items.append(item) + if pci_key_value: + nova_key_name = 'default_ephemeral_format' + description = 'config for CLC card' + key_value = 'ext3' + item = _get_config_item(config_file, + default_section, + nova_key_name, + key_value, + description) + config_items.append(item) + + nova_key_name = 'pci_passthrough_whitelist' + description = 'config for CLC card' + key_value = '[{"vendor_id": "8086","product_id": "0435"}]' + item = _get_config_item(config_file, + default_section, + nova_key_name, + key_value, + description) + config_items.append(item) + + _write_host_configs_to_db(req, + host_detail['id'], + config_items) + + +def update_configset(req, cluster_id): + roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id) + for role in roles: + # now 
only computer has configs + if role['name'] != 'COMPUTER': + continue + role_meta = {'config_set_update_progress': 0} + daisy_cmn.update_role(req, role['id'], role_meta) + + role_hosts = daisy_cmn.get_hosts_of_role(req, role['id']) + for host in role_hosts: + host_detail = daisy_cmn.get_host_detail(req, host['host_id']) + _add_configs_for_nova(req, host_detail) diff --git a/code/daisy/daisy/api/backends/zenic/api.py b/code/daisy/daisy/api/backends/zenic/api.py index 226144b8..df71c11e 100755 --- a/code/daisy/daisy/api/backends/zenic/api.py +++ b/code/daisy/daisy/api/backends/zenic/api.py @@ -16,46 +16,21 @@ """ /install endpoint for zenic API """ -import os -import copy -import subprocess import time -import commands -import traceback -import webob.exc -from oslo_config import cfg from oslo_log import log as logging -from webob.exc import HTTPBadRequest -from webob.exc import HTTPForbidden -from webob.exc import HTTPServerError import threading -from threading import Thread - from daisy import i18n -from daisy import notifier - -from daisy.api import policy -import daisy.api.v1 from daisy.common import exception -import daisy.registry.client.v1.api as registry -from daisy.api.backends.zenic import config from daisy.api.backends import driver -from daisy.api.network_api import network as neutron -from ironicclient import client as ironic_client -import daisy.api.backends.os as os_handle import daisy.api.backends.common as daisy_cmn import daisy.api.backends.zenic.common as zenic_cmn import daisy.api.backends.zenic.install as instl import daisy.api.backends.zenic.uninstall as unstl import daisy.api.backends.zenic.upgrade as upgrd -try: - import simplejson as json -except ImportError: - import json LOG = logging.getLogger(__name__) _ = i18n._ @@ -65,12 +40,13 @@ _LW = i18n._LW zenic_state = zenic_cmn.ZENIC_STATE + class API(driver.DeploymentDriver): def __init__(self): super(API, self).__init__() return - + def install(self, req, cluster_id): """ Install zenic to a cluster. @@ -79,23 +55,26 @@ class API(driver.DeploymentDriver): cluster_id:cluster id """ - #instl.pxe_server_build(req, install_meta) + # instl.pxe_server_build(req, install_meta) # get hosts config which need to install OS - #hosts_need_os = instl.get_cluster_hosts_config(req, cluster_id) - # if have hosts need to install os, ZENIC installataion executed in OSInstallTask - #if hosts_need_os: - #os_install_obj = instl.OSInstallTask(req, cluster_id, hosts_need_os) - #os_install_thread = Thread(target=os_install_obj.run) - #os_install_thread.start() - #else: - LOG.info(_("No host need to install os, begin install ZENIC for cluster %s." % cluster_id)) + # hosts_need_os = instl.get_cluster_hosts_config(req, cluster_id) + # if have hosts need to install os, ZENIC installataion executed + # in OSInstallTask + # if hosts_need_os: + # os_install_obj = instl.OSInstallTask(req, cluster_id, hosts_need_os) + # os_install_thread = Thread(target=os_install_obj.run) + # os_install_thread.start() + # else: + LOG.info( + _("No host need to install os, begin install ZENIC for cluster %s." + % cluster_id)) zenic_install_task = instl.ZENICInstallTask(req, cluster_id) zenic_install_task.start() - + LOG.info((_("begin install zenic, please waiting...."))) time.sleep(5) - LOG.info((_("install zenic successfully"))) - + LOG.info((_("install zenic successfully"))) + def uninstall(self, req, cluster_id): """ Uninstall ZENIC to a cluster. 
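As a rough illustration of the write_configs.py hunk above: _add_configs_for_nova assembles per-host /etc/nova/nova.conf entries with _get_config_item before writing them to the registry. The sketch below only shows the shape of those items; the helper mirrors the one in the patch, while the host values ('4-31', '24-31') are hypothetical.

# Illustrative only: the dict layout produced by _get_config_item for one
# hypothetical compute host; the real code reads these values from host_detail.
def _get_config_item(file_name, section, key, value, description):
    return {'file-name': file_name,
            'section': section,
            'key': key,
            'value': value,
            'description': description}

config_file = '/etc/nova/nova.conf'
items = [
    _get_config_item(config_file, 'DEFAULT', 'vcpu_pin_set', '4-31',
                     'vcpu pin set for all vm'),
    _get_config_item(config_file, 'DEFAULT', 'dvs_high_cpu_set', '24-31',
                     'vcpu pin set for high-performance dvs vm'),
]
for item in items:
    print('%s [%s] %s = %s' % (item['file-name'], item['section'],
                               item['key'], item['value']))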
@@ -105,18 +84,22 @@ class API(driver.DeploymentDriver): :raises HTTPBadRequest if x-install-cluster is missing """ - (role_id_list, hosts_list) = zenic_cmn.get_roles_and_hosts_list(req, cluster_id) + (role_id_list, hosts_list) = zenic_cmn.get_roles_and_hosts_list( + req, cluster_id) if role_id_list: if not hosts_list: msg = _("there is no host in cluster %s") % cluster_id raise exception.ThreadBinException(msg) - unstl.update_progress_to_db(req, role_id_list, zenic_state['UNINSTALLING'], 0.0) - uninstall_progress_percentage = round(1*1.0/len(hosts_list), 2)*100 - + unstl.update_progress_to_db( + req, role_id_list, zenic_state['UNINSTALLING'], 0.0) + uninstall_progress_percentage =\ + round(1 * 1.0 / len(hosts_list), 2) * 100 + threads = [] for host in hosts_list: - t = threading.Thread(target=unstl.thread_bin,args=(req,host,role_id_list,uninstall_progress_percentage)) + t = threading.Thread(target=unstl.thread_bin, args=( + req, host, role_id_list, uninstall_progress_percentage)) t.setDaemon(True) t.start() threads.append(t) @@ -132,16 +115,20 @@ class API(driver.DeploymentDriver): for role_id in role_id_list: role = daisy_cmn.get_role_detail(req, role_id) if role['progress'] == 100: - unstl.update_progress_to_db(req, role_id_list, zenic_state['UNINSTALL_FAILED']) + unstl.update_progress_to_db( + req, role_id_list, zenic_state['UNINSTALL_FAILED']) uninstall_failed_flag = True break if role['status'] == zenic_state['UNINSTALL_FAILED']: uninstall_failed_flag = True break if not uninstall_failed_flag: - LOG.info(_("all uninstall threads have done, set all roles status to 'init'!")) - unstl.update_progress_to_db(req, role_id_list, zenic_state['INIT']) - + LOG.info( + _("all uninstall threads have done,\ + set all roles status to 'init'!")) + unstl.update_progress_to_db( + req, role_id_list, zenic_state['INIT']) + LOG.info((_("begin uninstall zenic, please waiting...."))) time.sleep(5) LOG.info((_("uninstall zenic successfully"))) @@ -153,19 +140,22 @@ class API(driver.DeploymentDriver): :param req: The WSGI/Webob Request object :raises HTTPBadRequest if x-install-cluster is missing - + """ - (role_id_list, hosts_list) = zenic_cmn.get_roles_and_hosts_list(req, cluster_id) + (role_id_list, hosts_list) = zenic_cmn.get_roles_and_hosts_list( + req, cluster_id) if not hosts_list: msg = _("there is no host in cluster %s") % cluster_id - raise exception.ThreadBinException(msg) + raise exception.ThreadBinException(msg) + + upgrd.update_progress_to_db( + req, role_id_list, zenic_state['UPDATING'], 0.0) + update_progress_percentage = round(1 * 1.0 / len(hosts_list), 2) * 100 - upgrd.update_progress_to_db(req, role_id_list, zenic_state['UPDATING'], 0.0) - update_progress_percentage = round(1*1.0/len(hosts_list), 2)*100 - threads = [] for host in hosts_list: - t = threading.Thread(target=upgrd.thread_bin,args=(req,host,role_id_list,update_progress_percentage)) + t = threading.Thread(target=upgrd.thread_bin, args=( + req, host, role_id_list, update_progress_percentage)) t.setDaemon(True) t.start() threads.append(t) @@ -181,14 +171,16 @@ class API(driver.DeploymentDriver): for role_id in role_id_list: role = daisy_cmn.get_role_detail(req, role_id) if role['progress'] == 0: - upgrd.update_progress_to_db(req, role_id_list, zenic_state['UPDATE_FAILED']) + upgrd.update_progress_to_db( + req, role_id_list, zenic_state['UPDATE_FAILED']) update_failed_flag = True break if role['status'] == zenic_state['UPDATE_FAILED']: update_failed_flag = True break if not update_failed_flag: - LOG.info(_("all update threads have 
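The uninstall and update paths in zenic/api.py above share one fan-out pattern: compute a per-host progress step as round(1 * 1.0 / len(hosts_list), 2) * 100, start a daemon thread per host, join them all, then scan role progress for failures. A minimal, runnable sketch of just that pattern, with a stub worker standing in for unstl.thread_bin / upgrd.thread_bin and made-up host IPs:

import threading

def fan_out(hosts_list, worker):
    # per-host share of the progress bar, as computed in the zenic API code above
    step = round(1 * 1.0 / len(hosts_list), 2) * 100
    threads = []
    for host in hosts_list:
        t = threading.Thread(target=worker, args=(host, step))
        t.setDaemon(True)   # daemon threads, matching the patch
        t.start()
        threads.append(t)
    for t in threads:
        t.join()            # wait for every per-host worker to finish

def demo_worker(host, step):
    print('handled %s, progress step %s%%' % (host, step))

fan_out(['192.160.0.10', '192.160.0.11'], demo_worker)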
done, set all roles status to 'active'!")) - upgrd.update_progress_to_db(req, role_id_list, zenic_state['ACTIVE']) - - \ No newline at end of file + LOG.info( + _("all update threads have done, \ + set all roles status to 'active'!")) + upgrd.update_progress_to_db( + req, role_id_list, zenic_state['ACTIVE']) diff --git a/code/daisy/daisy/api/backends/zenic/common.py b/code/daisy/daisy/api/backends/zenic/common.py index a08c9f74..31b44165 100755 --- a/code/daisy/daisy/api/backends/zenic/common.py +++ b/code/daisy/daisy/api/backends/zenic/common.py @@ -19,33 +19,16 @@ import os import copy import subprocess -import time - -import traceback -import webob.exc -from oslo_config import cfg from oslo_log import log as logging from webob.exc import HTTPBadRequest -from webob.exc import HTTPForbidden - -from threading import Thread from daisy import i18n -from daisy import notifier - -from daisy.api import policy -import daisy.api.v1 from daisy.common import exception import daisy.registry.client.v1.api as registry import daisy.api.backends.common as daisy_cmn -try: - import simplejson as json -except ImportError: - import json - LOG = logging.getLogger(__name__) _ = i18n._ _LE = i18n._LE @@ -54,9 +37,9 @@ _LW = i18n._LW daisy_zenic_path = '/var/lib/daisy/zenic/' ZENIC_STATE = { - 'INIT' : 'init', - 'INSTALLING' : 'installing', - 'ACTIVE' : 'active', + 'INIT': 'init', + 'INSTALLING': 'installing', + 'ACTIVE': 'active', 'INSTALL_FAILED': 'install-failed', 'UNINSTALLING': 'uninstalling', 'UNINSTALL_FAILED': 'uninstall-failed', @@ -64,6 +47,7 @@ ZENIC_STATE = { 'UPDATE_FAILED': 'update-failed', } + def get_cluster_hosts(req, cluster_id): try: cluster_hosts = registry.get_cluster_hosts(req.context, cluster_id) @@ -71,13 +55,15 @@ def get_cluster_hosts(req, cluster_id): raise HTTPBadRequest(explanation=e.msg, request=req) return cluster_hosts + def get_host_detail(req, host_id): try: host_detail = registry.get_host_metadata(req.context, host_id) except exception.Invalid as e: raise HTTPBadRequest(explanation=e.msg, request=req) return host_detail - + + def get_roles_detail(req): try: roles = registry.get_roles_detail(req.context) @@ -85,13 +71,15 @@ def get_roles_detail(req): raise HTTPBadRequest(explanation=e.msg, request=req) return roles + def get_hosts_of_role(req, role_id): try: hosts = registry.get_role_host_metadata(req.context, role_id) except exception.Invalid as e: raise HTTPBadRequest(explanation=e.msg, request=req) return hosts - + + def get_role_detail(req, role_id): try: role = registry.get_role_metadata(req.context, role_id) @@ -99,17 +87,20 @@ def get_role_detail(req, role_id): raise HTTPBadRequest(explanation=e.msg, request=req) return role -def update_role(req, role_id,role_meta): + +def update_role(req, role_id, role_meta): try: registry.update_role_metadata(req.context, role_id, role_meta) except exception.Invalid as e: raise HTTPBadRequest(explanation=e.msg, request=req) + def update_role_host(req, role_id, role_host): try: registry.update_role_host_metadata(req.context, role_id, role_host) except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) + raise HTTPBadRequest(explanation=e.msg, request=req) + def delete_role_hosts(req, role_id): try: @@ -117,67 +108,81 @@ def delete_role_hosts(req, role_id): except exception.Invalid as e: raise HTTPBadRequest(explanation=e.msg, request=req) + def _get_cluster_network(cluster_networks, network_type): - network = [cn for cn in cluster_networks - if cn['name'] in network_type] + network = [cn for cn in 
cluster_networks + if cn['name'] in network_type] if not network or not network[0]: msg = "network %s is not exist" % (network_type) raise exception.InvalidNetworkConfig(msg) else: return network[0] + def get_host_interface_by_network(host_detail, network_type): host_detail_info = copy.deepcopy(host_detail) - interface_list = [hi for hi in host_detail_info['interfaces'] - for assigned_network in hi['assigned_networks'] - if assigned_network and network_type == assigned_network['name']] + interface_list = [hi for hi in host_detail_info['interfaces'] + for assigned_network in hi['assigned_networks'] + if assigned_network and + network_type == assigned_network['name']] interface = {} if interface_list: interface = interface_list[0] - + if not interface: - msg = "network %s of host %s is not exist" % (network_type, host_detail_info['id']) + msg = "network %s of host %s is not exist" % ( + network_type, host_detail_info['id']) raise exception.InvalidNetworkConfig(msg) - return interface + return interface + def get_host_network_ip(req, host_detail, cluster_networks, network_type): interface_network_ip = '' host_interface = get_host_interface_by_network(host_detail, network_type) if host_interface: network = _get_cluster_network(cluster_networks, network_type) - assigned_network = daisy_cmn.get_assigned_network(req, - host_interface['id'], - network['id']) + assigned_network = daisy_cmn.get_assigned_network(req, + host_interface['id'], + network['id']) interface_network_ip = assigned_network['ip'] if not interface_network_ip: - msg = "%s network ip of host %s can't be empty" % (network_type, host_detail['id']) + msg = "%s network ip of host %s can't be empty" % ( + network_type, host_detail['id']) raise exception.InvalidNetworkConfig(msg) return interface_network_ip -def get_deploy_node_cfg(req, host_detail, cluster_networks): - host_deploy_network = get_host_interface_by_network(host_detail, 'DEPLOYMENT') - host_deploy_ip = get_host_network_ip(req, host_detail, cluster_networks, 'DEPLOYMENT') + +def get_deploy_node_cfg(req, host_detail, cluster_networks): + host_deploy_network = get_host_interface_by_network( + host_detail, 'DEPLOYMENT') + host_deploy_ip = get_host_network_ip( + req, host_detail, cluster_networks, 'DEPLOYMENT') if not host_deploy_ip: msg = "deployment ip of host %s can't be empty" % host_detail['id'] raise exception.InvalidNetworkConfig(msg) host_deploy_macname = host_deploy_network['name'] if not host_deploy_macname: - msg = "deployment macname of host %s can't be empty" % host_detail['id'] + msg = "deployment macname of host %s can't be empty" % host_detail[ + 'id'] raise exception.InvalidNetworkConfig(msg) - host_mgt_ip = get_host_network_ip(req, host_detail, cluster_networks, 'MANAGEMENT') + host_mgt_ip = get_host_network_ip( + req, host_detail, cluster_networks, 'MANAGEMENT') if not host_mgt_ip: msg = "management ip of host %s can't be empty" % host_detail['id'] raise exception.InvalidNetworkConfig(msg) - + memmode = 'tiny' host_memory = 0 - - if host_detail.has_key('memory'): - host_memory = (int(host_detail['memory']['total'].strip().split()[0]))/(1024*1024) - + + # if host_detail.has_key('memory'): + if 'memory' in host_detail: + host_memory = ( + int(host_detail['memory'][ + 'total'].strip().split()[0])) / (1024 * 1024) + if host_memory < 8: memmode = 'tiny' elif host_memory < 16: @@ -186,24 +191,24 @@ def get_deploy_node_cfg(req, host_detail, cluster_networks): memmode = 'medium' else: memmode = 'large' - - + deploy_node_cfg = {} - 
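get_deploy_node_cfg above picks a memmode tier from the host's total memory, which daisy reports as a string such as '16332804 kB'; dividing by 1024 * 1024 turns kilobytes into whole gigabytes (the patch relies on Python 2 integer division, written explicitly as // in this sketch). A tiny worked example with a hypothetical memory string:

def host_memory_gb(total_field):
    # daisy reports memory like {'total': '16332804 kB'}; convert kB to whole GB
    return int(total_field.strip().split()[0]) // (1024 * 1024)

# hypothetical sample: 16332804 kB is 15 GB, which falls below the 16 GB branch,
# while anything under 8 GB would stay in the 'tiny' tier
print(host_memory_gb('16332804 kB'))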
deploy_node_cfg.update({'hostid':host_detail['id']}) - deploy_node_cfg.update({'hostname':host_detail['name']}) - deploy_node_cfg.update({'nodeip':host_deploy_ip}) - deploy_node_cfg.update({'MacName':host_deploy_macname}) - deploy_node_cfg.update({'memmode':memmode}) - deploy_node_cfg.update({'mgtip':host_mgt_ip}) + deploy_node_cfg.update({'hostid': host_detail['id']}) + deploy_node_cfg.update({'hostname': host_detail['name']}) + deploy_node_cfg.update({'nodeip': host_deploy_ip}) + deploy_node_cfg.update({'MacName': host_deploy_macname}) + deploy_node_cfg.update({'memmode': memmode}) + deploy_node_cfg.update({'mgtip': host_mgt_ip}) return deploy_node_cfg -def get_roles_and_hosts_list(req, cluster_id): + +def get_roles_and_hosts_list(req, cluster_id): roles_id_list = set() - hosts_id_list = set() + hosts_id_list = set() hosts_list = [] cluster_networks = daisy_cmn.get_cluster_networks_detail(req, cluster_id) - roles = daisy_cmn.get_cluster_roles_detail(req,cluster_id) + roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id) for role in roles: if role['deployment_backend'] != daisy_cmn.zenic_backend_name: continue @@ -212,56 +217,62 @@ def get_roles_and_hosts_list(req, cluster_id): for role_host in role_hosts: if role_host['host_id'] not in hosts_id_list: host = daisy_cmn.get_host_detail(req, role_host['host_id']) - host_ip = get_host_network_ip(req, host, cluster_networks, 'MANAGEMENT') + host_ip = get_host_network_ip( + req, host, cluster_networks, 'MANAGEMENT') hosts_id_list.add(host['id']) - + host_cfg = {} host_cfg['mgtip'] = host_ip host_cfg['rootpwd'] = host['root_pwd'] hosts_list.append(host_cfg) - + roles_id_list.add(role['id']) - + return (roles_id_list, hosts_list) - + + def check_and_get_zenic_version(daisy_zenic_pkg_path): zenic_version_pkg_file = "" zenic_version_pkg_name = "" get_zenic_version_pkg = "ls %s| grep ^ZENIC.*\.zip$" % daisy_zenic_pkg_path obj = subprocess.Popen(get_zenic_version_pkg, - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) (stdoutput, erroutput) = obj.communicate() if stdoutput: zenic_version_pkg_name = stdoutput.split('\n')[0] zenic_version_pkg_file = daisy_zenic_pkg_path + zenic_version_pkg_name chmod_for_zenic_version = 'chmod +x %s' % zenic_version_pkg_file daisy_cmn.subprocess_call(chmod_for_zenic_version) - return (zenic_version_pkg_file,zenic_version_pkg_name) - + return (zenic_version_pkg_file, zenic_version_pkg_name) + + class ZenicShellExector(): + """ Class config task before install zenic bin. 
""" - def __init__(self, mgt_ip, task_type, params={}): + + def __init__(self, mgt_ip, task_type, params={}): self.task_type = task_type self.mgt_ip = mgt_ip self.params = params - self.clush_cmd = "" + self.clush_cmd = "" self.PKG_NAME = self.params['pkg_name'] - self.PKG_PATH = daisy_zenic_path + self.PKG_NAME - self.CFG_PATH =daisy_zenic_path + mgt_ip + "_zenic.conf" + self.PKG_PATH = daisy_zenic_path + self.PKG_NAME + self.CFG_PATH = daisy_zenic_path + mgt_ip + "_zenic.conf" self.oper_type = { - 'install' : self._install_pkg + 'install': self._install_pkg } self.oper_shell = { - 'CMD_SSHPASS_PRE' : "sshpass -p ossdbg1 %(ssh_ip)s %(cmd)s", - 'CMD_CFG_SCP' : "scp %(path)s root@%(ssh_ip)s:/etc/zenic/config" % - {'path': self.CFG_PATH, 'ssh_ip':mgt_ip}, - 'CMD_PKG_UNZIP' : "unzip /home/workspace/%(pkg_name)s -d /home/workspace/PKG" % {'pkg_name':self.PKG_NAME}, - 'CMD_PKG_SCP' : "scp %(path)s root@%(ssh_ip)s:/home/workspace/" % - {'path': self.PKG_PATH, 'ssh_ip':mgt_ip} + 'CMD_SSHPASS_PRE': "sshpass -p ossdbg1 %(ssh_ip)s %(cmd)s", + 'CMD_CFG_SCP': "scp %(path)s root@%(ssh_ip)s:/etc/zenic/config" % + {'path': self.CFG_PATH, 'ssh_ip': mgt_ip}, + 'CMD_PKG_UNZIP': "unzip /home/workspace/%(pkg_name)s \ + -d /home/workspace/PKG" % {'pkg_name': self.PKG_NAME}, + 'CMD_PKG_SCP': "scp %(path)s root@%(ssh_ip)s:/home/workspace/" % + {'path': self.PKG_PATH, 'ssh_ip': mgt_ip} } self._execute() @@ -270,31 +281,39 @@ class ZenicShellExector(): if not os.path.exists(self.CFG_PATH): LOG.error(_("<<>>" % self.CFG_PATH)) return - + if not os.path.exists(self.PKG_PATH): LOG.error(_("<<>>" % self.PKG_PATH)) return - - self.clush_cmd = "%s;%s;%s" % \ - (self.oper_shell['CMD_SSHPASS_PRE'] % - {"ssh_ip":"", "cmd":self.oper_shell['CMD_PKG_SCP']}, \ - self.oper_shell['CMD_SSHPASS_PRE'] % - {"ssh_ip":"", "cmd":self.oper_shell['CMD_CFG_SCP']}, \ - self.oper_shell['CMD_SSHPASS_PRE'] % - {"ssh_ip":"ssh " + self.mgt_ip, "cmd":self.oper_shell['CMD_PKG_UNZIP']}) - subprocess.check_output(self.clush_cmd, shell = True, stderr=subprocess.STDOUT) + self.clush_cmd = "%s;%s;%s" % \ + (self.oper_shell['CMD_SSHPASS_PRE'] % + {"ssh_ip": "", "cmd": self.oper_shell['CMD_PKG_SCP']}, + self.oper_shell['CMD_SSHPASS_PRE'] % + {"ssh_ip": "", "cmd": self.oper_shell['CMD_CFG_SCP']}, + self.oper_shell['CMD_SSHPASS_PRE'] % + {"ssh_ip": "ssh " + self.mgt_ip, "cmd": self.oper_shell[ + 'CMD_PKG_UNZIP']}) + + subprocess.check_output( + self.clush_cmd, shell=True, stderr=subprocess.STDOUT) def _execute(self): try: - if not self.task_type or not self.mgt_ip : - LOG.error(_("<<>>")) + if not self.task_type or not self.mgt_ip: + LOG.error( + _("<<>>")) return self.oper_type[self.task_type]() except subprocess.CalledProcessError as e: - LOG.warn(_("<<>>" % e.output.strip())) + LOG.warn( + _("<<>>" % e.output.strip())) except Exception as e: LOG.exception(_(e.message)) else: - LOG.info(_("<<>>" % self.clush_cmd)) + LOG.info( + _("<<>>" % self.clush_cmd)) diff --git a/code/daisy/daisy/api/backends/zenic/config.py b/code/daisy/daisy/api/backends/zenic/config.py index 3231c80e..d8934e88 100755 --- a/code/daisy/daisy/api/backends/zenic/config.py +++ b/code/daisy/daisy/api/backends/zenic/config.py @@ -1,62 +1,59 @@ # -*- coding: utf-8 -*- import os -import re -import commands -import types -import subprocess from ConfigParser import ConfigParser -from daisy.common import exception - default_zenic_conf_template_path = "/var/lib/daisy/zenic/" zenic_conf_template_path = default_zenic_conf_template_path + def update_conf(zenic, key, value): zenic.set("general", 
key, value) + def get_conf(zenic_conf_file, **kwargs): result = {} if not kwargs: - return result + return result zenic = ConfigParser() zenic.optionxform = str zenic.read(zenic_conf_file) - result = {key : zenic.get("general", kwargs.get(key, None)) + result = {key: zenic.get("general", kwargs.get(key, None)) for key in kwargs.keys() if zenic.has_option("general", kwargs.get(key, None))} return result - -def get_nodeid(deploy_ip,zbp_ips): + + +def get_nodeid(deploy_ip, zbp_ips): nodeid = 0 i = 0 for ip in zbp_ips: if deploy_ip == ip: break else: - i=i+1 - + i = i + 1 + if i == 0: nodeid = 1 elif i == 1: nodeid = 256 else: nodeid = i - + return nodeid - - + + def update_zenic_conf(config_data, cluster_conf_path): print "zenic config data is:" import pprint pprint.pprint(config_data) - + daisy_zenic_path = zenic_conf_template_path zenic_conf_template_file = os.path.join(daisy_zenic_path, "zenic.conf") if not os.path.exists(cluster_conf_path): - os.makedirs(cluster_conf_path) + os.makedirs(cluster_conf_path) zenic = ConfigParser() zenic.optionxform = str @@ -67,15 +64,15 @@ def update_zenic_conf(config_data, cluster_conf_path): if not zbpips: zbpips = ip else: - zbpips = zbpips + ',' + ip + zbpips = zbpips + ',' + ip update_conf(zenic, 'zbpips', zbpips) - update_conf(zenic, 'zbp_node_num', config_data['zbp_node_num']) + update_conf(zenic, 'zbp_node_num', config_data['zbp_node_num']) nodelist = '1,256' if len(config_data['zbp_ips']) > 2: - for i in range(2,len(config_data['zbp_ips'])): - nodelist = nodelist + ',' + 'i' - update_conf(zenic, 'zbpnodelist',nodelist) - + for i in range(2, len(config_data['zbp_ips'])): + nodelist = nodelist + ',' + 'i' + update_conf(zenic, 'zbpnodelist', nodelist) + zampips = '' for ip in config_data['zamp_ips']: if not zampips: @@ -84,52 +81,50 @@ def update_zenic_conf(config_data, cluster_conf_path): zampips = zampips + ',' + ip update_conf(zenic, 'zampips', zampips) update_conf(zenic, 'zamp_node_num', config_data['zamp_node_num']) - mongodbips = '' for ip in config_data['mongodb_ips']: if not mongodbips: mongodbips = ip else: - mongodbips = mongodbips + ',' + ip + mongodbips = mongodbips + ',' + ip update_conf(zenic, 'mongodbips', mongodbips) - update_conf(zenic, 'mongodb_node_num', config_data['mongodb_node_num']) + update_conf(zenic, 'mongodb_node_num', config_data['mongodb_node_num']) update_conf(zenic, 'zamp_vip', config_data['zamp_vip']) update_conf(zenic, 'mongodb_vip', config_data['mongodb_vip']) - deploy_hosts = config_data['deploy_hosts'] - for deploy_host in deploy_hosts: + for deploy_host in deploy_hosts: nodeip = deploy_host['nodeip'] hostname = deploy_host['hostname'] MacName = deploy_host['MacName'] memmode = deploy_host['memmode'] - - update_conf(zenic,'nodeip',nodeip) - update_conf(zenic,'hostname',hostname) - update_conf(zenic,'MacName',MacName) - update_conf(zenic,'memmode',memmode) - - nodeid = get_nodeid(nodeip,config_data['zbp_ips']) - update_conf(zenic,'nodeid',nodeid) + + update_conf(zenic, 'nodeip', nodeip) + update_conf(zenic, 'hostname', hostname) + update_conf(zenic, 'MacName', MacName) + update_conf(zenic, 'memmode', memmode) + + nodeid = get_nodeid(nodeip, config_data['zbp_ips']) + update_conf(zenic, 'nodeid', nodeid) if nodeip in config_data['zamp_ips']: - update_conf(zenic,'needzamp','y') + update_conf(zenic, 'needzamp', 'y') else: - update_conf(zenic,'needzamp','n') - + update_conf(zenic, 'needzamp', 'n') + zenic_conf = "%s_zenic.conf" % deploy_host['mgtip'] zenic_conf_cluster_out = os.path.join(cluster_conf_path, zenic_conf) - 
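get_nodeid in zenic/config.py above derives a node id purely from the position of the deploy IP in zbp_ips: the first entry maps to 1, the second to 256, and every later entry to its index. A short worked example (the addresses are made up):

def get_nodeid(deploy_ip, zbp_ips):
    # position of deploy_ip in zbp_ips decides the node id, as in the patch
    i = 0
    for ip in zbp_ips:
        if deploy_ip == ip:
            break
        i += 1
    if i == 0:
        return 1
    elif i == 1:
        return 256
    return i

zbp_ips = ['10.0.0.1', '10.0.0.2', '10.0.0.3', '10.0.0.4']   # hypothetical
print([get_nodeid(ip, zbp_ips) for ip in zbp_ips])           # [1, 256, 2, 3]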
zenic_conf_out = os.path.join(daisy_zenic_path, zenic_conf) + zenic_conf_out = os.path.join(daisy_zenic_path, zenic_conf) zenic.write(open(zenic_conf_cluster_out, "w+")) - with open(zenic_conf_cluster_out,'r') as fr,open(zenic_conf_out,'w') as fw: + with open(zenic_conf_cluster_out, 'r') as fr,\ + open(zenic_conf_out, 'w') as fw: for line in fr.readlines(): fw.write(line.replace(' ', '')) return - def test(): print("Hello, world!") diff --git a/code/daisy/daisy/api/backends/zenic/install.py b/code/daisy/daisy/api/backends/zenic/install.py index 4485def7..06ef3671 100755 --- a/code/daisy/daisy/api/backends/zenic/install.py +++ b/code/daisy/daisy/api/backends/zenic/install.py @@ -16,43 +16,23 @@ """ /install endpoint for zenic API """ -import os -import copy import subprocess import time -import traceback -import webob.exc from oslo_config import cfg from oslo_log import log as logging -from webob.exc import HTTPBadRequest -from webob.exc import HTTPForbidden -from webob.exc import HTTPServerError - -from threading import Thread, Lock import threading from daisy import i18n -from daisy import notifier -from daisy.api import policy import daisy.api.v1 from daisy.common import exception -import daisy.registry.client.v1.api as registry from daisy.api.backends.zenic import config -from daisy.api.backends import driver -from daisy.api.network_api import network as neutron -from ironicclient import client as ironic_client import daisy.api.backends.common as daisy_cmn import daisy.api.backends.zenic.common as zenic_cmn -try: - import simplejson as json -except ImportError: - import json - LOG = logging.getLogger(__name__) _ = i18n._ _LE = i18n._LE @@ -76,21 +56,24 @@ CONF.import_opt('image_property_quota', 'daisy.common.config') host_os_status = { - 'INIT' : 'init', - 'INSTALLING' : 'installing', - 'ACTIVE' : 'active', + 'INIT': 'init', + 'INSTALLING': 'installing', + 'ACTIVE': 'active', 'FAILED': 'install-failed' } zenic_state = zenic_cmn.ZENIC_STATE daisy_zenic_path = zenic_cmn.daisy_zenic_path -install_zenic_progress=0.0 +install_zenic_progress = 0.0 install_mutex = threading.Lock() -def update_progress_to_db(req, role_id_list, status, progress_percentage_step=0.0): + +def update_progress_to_db(req, role_id_list, + status, progress_percentage_step=0.0): """ - Write install progress and status to db, we use global lock object 'install_mutex' + Write install progress and status to db, + we use global lock object 'install_mutex' to make sure this function is thread safety. :param req: http req. :param role_id_list: Column neeb be update in role table. @@ -107,7 +90,7 @@ def update_progress_to_db(req, role_id_list, status, progress_percentage_step=0. if 0 == cmp(status, zenic_state['INSTALLING']): role['status'] = status role['progress'] = install_zenic_progress - if 0 == cmp(status, zenic_state['INSTALL_FAILED']): + if 0 == cmp(status, zenic_state['INSTALL_FAILED']): role['status'] = status elif 0 == cmp(status, zenic_state['ACTIVE']): role['status'] = status @@ -115,21 +98,26 @@ def update_progress_to_db(req, role_id_list, status, progress_percentage_step=0. 
daisy_cmn.update_role(req, role_id, role) install_mutex.release() + def _ping_hosts_test(ips): ping_cmd = 'fping' for ip in set(ips): ping_cmd = ping_cmd + ' ' + ip - obj = subprocess.Popen(ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + obj = subprocess.Popen( + ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (stdoutput, erroutput) = obj.communicate() _returncode = obj.returncode if _returncode == 0 or _returncode == 1: ping_result = stdoutput.split('\n') - unreachable_hosts = [result.split()[0] for result in ping_result if result and result.split()[2] != 'alive'] + unreachable_hosts = [result.split( + )[0] for result in ping_result if result and + result.split()[2] != 'alive'] else: msg = "ping failed beaceuse there is invlid ip in %s" % ips raise exception.InvalidIP(msg) return unreachable_hosts + def _check_ping_hosts(ping_ips, max_ping_times): if not ping_ips: LOG.info(_("no ip got for ping test")) @@ -145,9 +133,11 @@ def _check_ping_hosts(ping_ips, max_ping_times): ping_count += 1 if ips: - LOG.debug(_("ping host %s for %s times" % (','.join(ips), ping_count))) + LOG.debug( + _("ping host %s for %s times" % (','.join(ips), ping_count))) if ping_count >= max_ping_times: - LOG.info(_("ping host %s timeout for %ss" % (','.join(ips), ping_count*time_step))) + LOG.info(_("ping host %s timeout for %ss" % + (','.join(ips), ping_count * time_step))) return ips time.sleep(time_step) else: @@ -155,13 +145,15 @@ def _check_ping_hosts(ping_ips, max_ping_times): time.sleep(120) LOG.info(_("120s after ping host %s success" % ','.join(ping_ips))) return ips - + def _get_host_private_networks(host_detail, cluster_private_networks_name): host_private_networks = [hi for pn in cluster_private_networks_name - for hi in host_detail['interfaces'] if pn in hi['assigned_networks']] - - # If port type is bond,use pci segment of member port replace pci1 & pci2 segments of bond port + for hi in + host_detail['interfaces'] if pn in + hi['assigned_networks']] + # If port type is bond,use pci segment of member port replace pci1 & pci2 + # segments of bond port for interface_outer in host_private_networks: if 0 != cmp(interface_outer.get('type', None), "bond"): continue @@ -180,38 +172,41 @@ def _get_host_private_networks(host_detail, cluster_private_networks_name): def get_cluster_zenic_config(req, cluster_id): LOG.info(_("get zenic config from database...")) - params = dict(limit=1000000) - + # params = dict(limit=1000000) + zenic_config = {} - + deploy_hosts = [] deploy_host_cfg = {} mgt_ip = '' zbp_ip_list = set() mgt_ip_list = set() - + zamp_ip_list = set() zamp_vip = '' - + mongodb_ip_list = set() - mongodb_vip= '' + mongodb_vip = '' cluster_networks = daisy_cmn.get_cluster_networks_detail(req, cluster_id) - - all_roles = zenic_cmn.get_roles_detail(req) - - roles = [role for role in all_roles if (role['cluster_id'] == cluster_id and role['deployment_backend'] == daisy_cmn.zenic_backend_name)] - for role in roles: + + all_roles = zenic_cmn.get_roles_detail(req) + + roles = [role for role in all_roles if (role['cluster_id'] == + cluster_id and role[ + 'deployment_backend'] == + daisy_cmn.zenic_backend_name)] + for role in roles: if not (role['name'] == 'ZENIC_CTL' or role['name'] == 'ZENIC_NFM'): continue - if role['name'] == 'ZENIC_NFM': + if role['name'] == 'ZENIC_NFM': if not zamp_vip: - zamp_vip = role['vip'] + zamp_vip = role['vip'] if not mongodb_vip: - mongodb_vip = role['mongodb_vip'] + mongodb_vip = role['mongodb_vip'] role_hosts = 
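_ping_hosts_test above shells out to fping and keeps the address from every output line whose third field is not 'alive'; _check_ping_hosts then retries that list until max_ping_times is reached. A minimal sketch of the same parsing with the fping output supplied inline, so it runs without the tool (the sample output is fabricated):

def unreachable_from_fping(fping_output):
    # fping prints lines like '10.0.0.1 is alive' or '10.0.0.2 is unreachable';
    # keep the address of every line whose third field is not 'alive'
    ping_result = fping_output.split('\n')
    return [line.split()[0] for line in ping_result
            if line and line.split()[2] != 'alive']

sample = '10.0.0.1 is alive\n10.0.0.2 is unreachable\n'   # hypothetical output
print(unreachable_from_fping(sample))                     # ['10.0.0.2']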
zenic_cmn.get_hosts_of_role(req, role['id']) - + for role_host in role_hosts: mgt_ip = '' for deploy_host in deploy_hosts: @@ -220,139 +215,157 @@ def get_cluster_zenic_config(req, cluster_id): deploy_ip = deploy_host['nodeip'] break if not mgt_ip: - host_detail = zenic_cmn.get_host_detail(req, role_host['host_id']) - deploy_host_cfg = zenic_cmn.get_deploy_node_cfg(req, host_detail, cluster_networks) + host_detail = zenic_cmn.get_host_detail( + req, role_host['host_id']) + deploy_host_cfg = zenic_cmn.get_deploy_node_cfg( + req, host_detail, cluster_networks) deploy_hosts.append(deploy_host_cfg) mgt_ip = deploy_host_cfg['mgtip'] deploy_ip = deploy_host_cfg['nodeip'] - mgt_ip_list.add(mgt_ip) - if role['name'] == 'ZENIC_CTL': + if role['name'] == 'ZENIC_CTL': zbp_ip_list.add(deploy_ip) elif role['name'] == 'ZENIC_NFM': zamp_ip_list.add(deploy_ip) - mongodb_ip_list.add(deploy_ip) + mongodb_ip_list.add(deploy_ip) else: - LOG.warn(_("<<>>" % role['name'])) + LOG.warn( + _("<<>>" + % role['name'])) - zenic_config.update({'deploy_hosts':deploy_hosts}) - zenic_config.update({'zbp_ips':zbp_ip_list}) - zenic_config.update({'zbp_node_num':len(zbp_ip_list)}) - zenic_config.update({'zamp_ips':zamp_ip_list}) - zenic_config.update({'zamp_node_num':len(zamp_ip_list)}) - zenic_config.update({'mongodb_ips':mongodb_ip_list}) - zenic_config.update({'mongodb_node_num':len(mongodb_ip_list)}) - zenic_config.update({'zamp_vip':zamp_vip}) - zenic_config.update({'mongodb_vip':mongodb_vip}) + zenic_config.update({'deploy_hosts': deploy_hosts}) + zenic_config.update({'zbp_ips': zbp_ip_list}) + zenic_config.update({'zbp_node_num': len(zbp_ip_list)}) + zenic_config.update({'zamp_ips': zamp_ip_list}) + zenic_config.update({'zamp_node_num': len(zamp_ip_list)}) + zenic_config.update({'mongodb_ips': mongodb_ip_list}) + zenic_config.update({'mongodb_node_num': len(mongodb_ip_list)}) + zenic_config.update({'zamp_vip': zamp_vip}) + zenic_config.update({'mongodb_vip': mongodb_vip}) return (zenic_config, mgt_ip_list) - + + def generate_zenic_config_file(cluster_id, zenic_config): LOG.info(_("generate zenic config...")) if zenic_config: cluster_conf_path = daisy_zenic_path + cluster_id config.update_zenic_conf(zenic_config, cluster_conf_path) - -def thread_bin(req,host, role_id_list, pkg_name, install_progress_percentage): + + +def thread_bin(req, host, role_id_list, pkg_name, install_progress_percentage): host_ip = host['mgtip'] password = host['rootpwd'] - + cmd = 'mkdir -p /var/log/daisy/daisy_install/' daisy_cmn.subprocess_call(cmd) - - var_log_path = "/var/log/daisy/daisy_install/%s_install_zenic.log" % host_ip + + var_log_path =\ + "/var/log/daisy/daisy_install/%s_install_zenic.log" % host_ip with open(var_log_path, "w+") as fp: cmd = '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password) - daisy_cmn.subprocess_call(cmd,fp) - + daisy_cmn.subprocess_call(cmd, fp) + cmd = 'clush -S -b -w %s mkdir -p /home/workspace' % (host_ip,) - daisy_cmn.subprocess_call(cmd,fp) - + daisy_cmn.subprocess_call(cmd, fp) + cmd = 'clush -S -b -w %s mkdir -p /etc/zenic' % (host_ip,) - daisy_cmn.subprocess_call(cmd,fp) + daisy_cmn.subprocess_call(cmd, fp) cmd = 'clush -S -b -w %s rm -rf /etc/zenic/config' % (host_ip,) - daisy_cmn.subprocess_call(cmd,fp) + daisy_cmn.subprocess_call(cmd, fp) cmd = 'clush -S -b -w %s rm -rf /home/zenic' % (host_ip,) - daisy_cmn.subprocess_call(cmd,fp) + daisy_cmn.subprocess_call(cmd, fp) cmd = 'clush -S -b -w %s rm -rf /home/workspace/unipack' % (host_ip,) - daisy_cmn.subprocess_call(cmd,fp) + 
daisy_cmn.subprocess_call(cmd, fp) pkg_file = daisy_zenic_path + pkg_name - cmd = 'clush -S -b -w %s rm -rf /home/workspace/%s' % (host_ip,pkg_name) - daisy_cmn.subprocess_call(cmd,fp) + cmd = 'clush -S -b -w %s rm -rf /home/workspace/%s' % ( + host_ip, pkg_name) + daisy_cmn.subprocess_call(cmd, fp) - - cfg_file = daisy_zenic_path + host_ip + "_zenic.conf" + cfg_file = daisy_zenic_path + host_ip + "_zenic.conf" try: exc_result = subprocess.check_output( - 'sshpass -p ossdbg1 scp %s root@%s:/etc/zenic/config' % (cfg_file,host_ip,), + 'sshpass -p ossdbg1 scp %s root@%s:/etc/zenic/config' % ( + cfg_file, host_ip,), shell=True, stderr=fp) except subprocess.CalledProcessError as e: - update_progress_to_db(req, role_id_list, zenic_state['INSTALL_FAILED']) + update_progress_to_db( + req, role_id_list, zenic_state['INSTALL_FAILED']) LOG.info(_("scp zenic pkg for %s failed!" % host_ip)) fp.write(e.output.strip()) exit() - else: + else: LOG.info(_("scp zenic config for %s successfully!" % host_ip)) - fp.write(exc_result) - + fp.write(exc_result) try: exc_result = subprocess.check_output( - 'sshpass -p ossdbg1 scp %s root@%s:/home/workspace/' % (pkg_file,host_ip,), + 'sshpass -p ossdbg1 scp %s root@%s:/home/workspace/' % ( + pkg_file, host_ip,), shell=True, stderr=fp) except subprocess.CalledProcessError as e: - update_progress_to_db(req, role_id_list, zenic_state['INSTALL_FAILED']) + update_progress_to_db( + req, role_id_list, zenic_state['INSTALL_FAILED']) LOG.info(_("scp zenic pkg for %s failed!" % host_ip)) fp.write(e.output.strip()) exit() - else: + else: LOG.info(_("scp zenic pkg for %s successfully!" % host_ip)) fp.write(exc_result) - - cmd = 'clush -S -b -w %s unzip /home/workspace/%s -d /home/workspace/unipack' % (host_ip,pkg_name,) + + cmd = 'clush -S -b -w %s unzip /home/workspace/%s \ + -d /home/workspace/unipack' % ( + host_ip, pkg_name,) daisy_cmn.subprocess_call(cmd) try: exc_result = subprocess.check_output( - 'clush -S -b -w %s /home/workspace/unipack/node_install.sh' % (host_ip,), + 'clush -S -b -w %s /home/workspace/unipack/node_install.sh' + % (host_ip,), shell=True, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: - update_progress_to_db(req, role_id_list, zenic_state['INSTALL_FAILED']) + update_progress_to_db( + req, role_id_list, zenic_state['INSTALL_FAILED']) LOG.info(_("install zenic for %s failed!" % host_ip)) fp.write(e.output.strip()) exit() - else: + else: LOG.info(_("install zenic for %s successfully!" % host_ip)) fp.write(exc_result) - + try: exc_result = subprocess.check_output( 'clush -S -b -w %s /home/zenic/node_start.sh' % (host_ip,), shell=True, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: - update_progress_to_db(req, role_id_list, zenic_state['INSTALL_FAILED']) + update_progress_to_db( + req, role_id_list, zenic_state['INSTALL_FAILED']) LOG.info(_("start zenic for %s failed!" % host_ip)) fp.write(e.output.strip()) exit() else: - update_progress_to_db(req, role_id_list, zenic_state['INSTALLING'], install_progress_percentage) + update_progress_to_db( + req, role_id_list, zenic_state['INSTALLING'], + install_progress_percentage) LOG.info(_("start zenic for %s successfully!" % host_ip)) fp.write(exc_result) - + + class ZENICInstallTask(Thread): + """ Class for install tecs bin. 
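thread_bin above drives each host through a fixed sequence of sshpass/scp and clush commands: trust the host, push the per-host zenic.conf to /etc/zenic/config, push the version package to /home/workspace, unzip it, then run node_install.sh and node_start.sh. A condensed sketch that only builds and prints those command strings; the host IP, package name and config path are hypothetical, and the cleanup (rm -rf) steps from the patch are omitted:

def zenic_install_cmds(host_ip, pkg_name, cfg_file, password='ossdbg1'):
    # Build, but do not run, the per-host command sequence used by thread_bin.
    return [
        '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password),
        'clush -S -b -w %s mkdir -p /home/workspace' % host_ip,
        'clush -S -b -w %s mkdir -p /etc/zenic' % host_ip,
        'sshpass -p %s scp %s root@%s:/etc/zenic/config'
        % (password, cfg_file, host_ip),
        'sshpass -p %s scp /var/lib/daisy/zenic/%s root@%s:/home/workspace/'
        % (password, pkg_name, host_ip),
        'clush -S -b -w %s unzip /home/workspace/%s -d /home/workspace/unipack'
        % (host_ip, pkg_name),
        'clush -S -b -w %s /home/workspace/unipack/node_install.sh' % host_ip,
        'clush -S -b -w %s /home/zenic/node_start.sh' % host_ip,
    ]

for cmd in zenic_install_cmds('192.160.0.10', 'ZENIC_example.zip',
                              '/var/lib/daisy/zenic/192.160.0.10_zenic.conf'):
    print(cmd)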
""" """ Definition for install states.""" INSTALL_STATES = { - 'INIT' : 'init', - 'INSTALLING' : 'installing', - 'ACTIVE' : 'active', + 'INIT': 'init', + 'INSTALLING': 'installing', + 'ACTIVE': 'active', 'FAILED': 'install-failed' } @@ -371,9 +384,6 @@ class ZENICInstallTask(Thread): self.ping_times = 36 self.log_file = "/var/log/daisy/zenic_%s_install.log" % self.cluster_id - - - def run(self): try: self._run() @@ -388,40 +398,47 @@ class ZENICInstallTask(Thread): self.state = zenic_state['ACTIVE'] self.message = "Zenic install successfully" LOG.info(_("install Zenic for cluster %s successfully." - % self.cluster_id)) - + % self.cluster_id)) + def _run(self): - - (zenic_config, self.mgt_ip_list) = get_cluster_zenic_config(self.req, self.cluster_id) + + (zenic_config, self.mgt_ip_list) = get_cluster_zenic_config( + self.req, self.cluster_id) if not self.mgt_ip_list: msg = _("there is no host in cluster %s") % self.cluster_id - raise exception.ThreadBinException(msg) - + raise exception.ThreadBinException(msg) + unreached_hosts = _check_ping_hosts(self.mgt_ip_list, self.ping_times) if unreached_hosts: self.state = zenic_state['INSTALL_FAILED'] self.message = "hosts %s ping failed" % unreached_hosts raise exception.NotFound(message=self.message) - + generate_zenic_config_file(self.cluster_id, zenic_config) - # check and get ZENIC version - (zenic_version_pkg_file,zenic_version_pkg_name) = zenic_cmn.check_and_get_zenic_version(daisy_zenic_path) - if not zenic_version_pkg_file: + (zenic_version_pkg_file, zenic_version_pkg_name) =\ + zenic_cmn.check_and_get_zenic_version( + daisy_zenic_path) + if not zenic_version_pkg_file: self.state = zenic_state['INSTALL_FAILED'] - self.message = "ZENIC version file not found in %s" % daisy_zenic_path + self.message = \ + "ZENIC version file not found in %s" % daisy_zenic_path raise exception.NotFound(message=self.message) - - (role_id_list, hosts_list) = zenic_cmn.get_roles_and_hosts_list(self.req, self.cluster_id) - - update_progress_to_db(self.req, role_id_list, zenic_state['INSTALLING'], 0.0) - install_progress_percentage = round(1*1.0/len(hosts_list), 2)*100 - + + (role_id_list, hosts_list) = zenic_cmn.get_roles_and_hosts_list( + self.req, self.cluster_id) + + update_progress_to_db( + self.req, role_id_list, zenic_state['INSTALLING'], 0.0) + install_progress_percentage = round(1 * 1.0 / len(hosts_list), 2) * 100 + threads = [] for host in hosts_list: - t = threading.Thread(target=thread_bin,args=(self.req,host,role_id_list,zenic_version_pkg_name,install_progress_percentage)) + t = threading.Thread(target=thread_bin, args=( + self.req, host, role_id_list, + zenic_version_pkg_name, install_progress_percentage)) t.setDaemon(True) t.start() threads.append(t) @@ -437,14 +454,16 @@ class ZENICInstallTask(Thread): for role_id in role_id_list: role = daisy_cmn.get_role_detail(self.req, role_id) if role['progress'] == 0: - update_progress_to_db(self.req, role_id_list, zenic_state['INSTALL_FAILED']) + update_progress_to_db( + self.req, role_id_list, zenic_state['INSTALL_FAILED']) install_failed_flag = True break if role['status'] == zenic_state['INSTALL_FAILED']: install_failed_flag = True break if not install_failed_flag: - LOG.info(_("all install threads have done, set all roles status to 'active'!")) - update_progress_to_db(self.req, role_id_list, zenic_state['ACTIVE']) - - + LOG.info( + _("all install threads have done, \ + set all roles status to 'active'!")) + update_progress_to_db( + self.req, role_id_list, zenic_state['ACTIVE']) diff --git 
a/code/daisy/daisy/api/backends/zenic/uninstall.py b/code/daisy/daisy/api/backends/zenic/uninstall.py index 7c492cdc..80d6c6a8 100755 --- a/code/daisy/daisy/api/backends/zenic/uninstall.py +++ b/code/daisy/daisy/api/backends/zenic/uninstall.py @@ -17,30 +17,12 @@ /hosts endpoint for Daisy v1 API """ -import os -import webob.exc + import subprocess -from oslo_config import cfg from oslo_log import log as logging -from webob.exc import HTTPBadRequest -from webob.exc import HTTPForbidden - -from threading import Thread, Lock import threading from daisy import i18n -from daisy import notifier - -from daisy.api import policy -import daisy.api.v1 - -from daisy.common import exception -from daisy.common import property_utils -from daisy.common import utils -from daisy.common import wsgi -from daisy.api.v1 import controller -from daisy.api.v1 import filters -from daisy.api.backends.zenic.common import ZenicShellExector import daisy.api.backends.common as daisy_cmn import daisy.api.backends.zenic.common as zenic_cmn @@ -52,12 +34,15 @@ _LW = i18n._LW zenic_state = zenic_cmn.ZENIC_STATE -uninstall_zenic_progress=100.0 +uninstall_zenic_progress = 100.0 uninstall_mutex = threading.Lock() -def update_progress_to_db(req, role_id_list, status, progress_percentage_step=0.0): + +def update_progress_to_db(req, role_id_list, status, + progress_percentage_step=0.0): """ - Write uninstall progress and status to db, we use global lock object 'uninstall_mutex' + Write uninstall progress and status to db, + we use global lock object 'uninstall_mutex' to make sure this function is thread safety. :param req: http req. :param role_id_list: Column neeb be update in role table. @@ -74,33 +59,36 @@ def update_progress_to_db(req, role_id_list, status, progress_percentage_step=0. 
if 0 == cmp(status, zenic_state['UNINSTALLING']): role['status'] = status role['progress'] = uninstall_zenic_progress - if 0 == cmp(status, zenic_state['UNINSTALL_FAILED']): + if 0 == cmp(status, zenic_state['UNINSTALL_FAILED']): role['status'] = status elif 0 == cmp(status, zenic_state['INIT']): role['status'] = status role['progress'] = 0 daisy_cmn.update_role(req, role_id, role) uninstall_mutex.release() - -def thread_bin(req, host, role_id_list,uninstall_progress_percentage): + + +def thread_bin(req, host, role_id_list, uninstall_progress_percentage): host_ip = host['mgtip'] password = host['rootpwd'] cmd = 'mkdir -p /var/log/daisy/daisy_uninstall/' daisy_cmn.subprocess_call(cmd) - var_log_path = "/var/log/daisy/daisy_uninstall/%s_uninstall_zenic.log" % host_ip + var_log_path =\ + "/var/log/daisy/daisy_uninstall/%s_uninstall_zenic.log" % host_ip with open(var_log_path, "w+") as fp: cmd = '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password) - daisy_cmn.subprocess_call(cmd,fp) - + daisy_cmn.subprocess_call(cmd, fp) + try: exc_result = subprocess.check_output( 'clush -S -b -w %s /home/zenic/node_stop.sh' % (host_ip,), shell=True, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: - update_progress_to_db(req, role_id_list, zenic_state['UNINSTALL_FAILED']) - fp.write(e.output.strip()) + update_progress_to_db( + req, role_id_list, zenic_state['UNINSTALL_FAILED']) + fp.write(e.output.strip()) else: - update_progress_to_db(req, role_id_list, zenic_state['UNINSTALLING'], uninstall_progress_percentage) + update_progress_to_db( + req, role_id_list, zenic_state['UNINSTALLING'], + uninstall_progress_percentage) fp.write(exc_result) - - diff --git a/code/daisy/daisy/api/backends/zenic/upgrade.py b/code/daisy/daisy/api/backends/zenic/upgrade.py index 54f63d35..c8035b0b 100755 --- a/code/daisy/daisy/api/backends/zenic/upgrade.py +++ b/code/daisy/daisy/api/backends/zenic/upgrade.py @@ -17,30 +17,13 @@ /update endpoint for Daisy v1 API """ -import os -import webob.exc import subprocess -from oslo_config import cfg from oslo_log import log as logging -from webob.exc import HTTPBadRequest -from webob.exc import HTTPForbidden - -from threading import Thread, Lock import threading from daisy import i18n -from daisy import notifier - -from daisy.api import policy -import daisy.api.v1 from daisy.common import exception -from daisy.common import property_utils -from daisy.common import utils -from daisy.common import wsgi -from daisy.api.v1 import controller -from daisy.api.v1 import filters -from daisy.api.backends.zenic.common import ZenicShellExector import daisy.api.backends.common as daisy_cmn import daisy.api.backends.zenic.common as zenic_cmn @@ -54,12 +37,15 @@ zenic_state = zenic_cmn.ZENIC_STATE daisy_zenic_path = zenic_cmn.daisy_zenic_path -update_zenic_progress=0.0 +update_zenic_progress = 0.0 update_mutex = threading.Lock() -def update_progress_to_db(req, role_id_list, status, progress_percentage_step=0.0): + +def update_progress_to_db(req, role_id_list, status, + progress_percentage_step=0.0): """ - Write update progress and status to db, we use global lock object 'update_mutex' + Write update progress and status to db, + we use global lock object 'update_mutex' to make sure this function is thread safety. :param req: http req. :param role_id_list: Column neeb be update in role table. @@ -76,7 +62,7 @@ def update_progress_to_db(req, role_id_list, status, progress_percentage_step=0. 
if 0 == cmp(status, zenic_state['UPDATING']): role['status'] = status role['progress'] = update_zenic_progress - if 0 == cmp(status, zenic_state['UPDATE_FAILED']): + if 0 == cmp(status, zenic_state['UPDATE_FAILED']): role['status'] = status elif 0 == cmp(status, zenic_state['ACTIVE']): role['status'] = status @@ -85,60 +71,70 @@ def update_progress_to_db(req, role_id_list, status, progress_percentage_step=0. update_mutex.release() -def thread_bin(req, host,role_id_list,update_progress_percentage): +def thread_bin(req, host, role_id_list, update_progress_percentage): - (zenic_version_pkg_file,zenic_version_pkg_name) = zenic_cmn.check_and_get_zenic_version(daisy_zenic_path) + (zenic_version_pkg_file, zenic_version_pkg_name) = \ + zenic_cmn.check_and_get_zenic_version( + daisy_zenic_path) if not zenic_version_pkg_file: - self.state = zenic_state['INSTALL_FAILED'] - self.message = "ZENIC version file not found in %s" % daisy_zenic_path - raise exception.NotFound(message=self.message) + # selfstate = zenic_state['INSTALL_FAILED'] + selfmessage = "ZENIC version file not found in %s" % daisy_zenic_path + raise exception.NotFound(message=selfmessage) host_ip = host['mgtip'] password = host['rootpwd'] - + cmd = 'mkdir -p /var/log/daisy/daisy_upgrade/' daisy_cmn.subprocess_call(cmd) - var_log_path = "/var/log/daisy/daisy_upgrade/%s_upgrade_zenic.log" % host_ip + var_log_path = \ + "/var/log/daisy/daisy_upgrade/%s_upgrade_zenic.log" % host_ip with open(var_log_path, "w+") as fp: cmd = '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password) - daisy_cmn.subprocess_call(cmd,fp) + daisy_cmn.subprocess_call(cmd, fp) cmd = 'clush -S -b -w %s /home/zenic/node_stop.sh' % (host_ip,) - daisy_cmn.subprocess_call(cmd,fp) + daisy_cmn.subprocess_call(cmd, fp) - - cmd = 'clush -S -b -w %s rm -rf /home/workspace/%s' % (host_ip,zenic_version_pkg_name) - daisy_cmn.subprocess_call(cmd,fp) + cmd = 'clush -S -b -w %s rm -rf /home/workspace/%s' % ( + host_ip, zenic_version_pkg_name) + daisy_cmn.subprocess_call(cmd, fp) cmd = 'clush -S -b -w %s rm -rf /home/workspace/unipack' % (host_ip,) - daisy_cmn.subprocess_call(cmd,fp) - + daisy_cmn.subprocess_call(cmd, fp) + try: exc_result = subprocess.check_output( - 'sshpass -p ossdbg1 scp %s root@%s:/home/workspace/' % (zenic_version_pkg_file,host_ip,), + 'sshpass -p ossdbg1 scp %s root@%s:/home/workspace/' % ( + zenic_version_pkg_file, host_ip,), shell=True, stderr=fp) except subprocess.CalledProcessError as e: - update_progress_to_db(req, role_id_list, zenic_state['INSTALL_FAILED']) + update_progress_to_db( + req, role_id_list, zenic_state['INSTALL_FAILED']) LOG.info(_("scp zenic pkg for %s failed!" % host_ip)) fp.write(e.output.strip()) exit() - else: + else: LOG.info(_("scp zenic pkg for %s successfully!" % host_ip)) fp.write(exc_result) - - cmd = 'clush -S -b -w %s unzip /home/workspace/%s -d /home/workspace/unipack' % (host_ip,zenic_version_pkg_name,) + + cmd = 'clush -S -b -w %s unzip /home/workspace/%s \ + -d /home/workspace/unipack' % (host_ip, zenic_version_pkg_name,) daisy_cmn.subprocess_call(cmd) - + try: exc_result = subprocess.check_output( - 'clush -S -b -w %s /home/workspace/unipack/node_upgrade.sh' % (host_ip,), + 'clush -S -b -w %s /home/workspace/unipack/node_upgrade.sh' + % (host_ip,), shell=True, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: - update_progress_to_db(req, role_id_list, zenic_state['UPDATE_FAILED']) + update_progress_to_db( + req, role_id_list, zenic_state['UPDATE_FAILED']) LOG.info(_("Upgrade zenic for %s failed!" 
% host_ip)) fp.write(e.output.strip()) else: - update_progress_to_db(req, role_id_list, zenic_state['UPDATING'], update_progress_percentage) + update_progress_to_db( + req, role_id_list, zenic_state['UPDATING'], + update_progress_percentage) LOG.info(_("Upgrade zenic for %s successfully!" % host_ip)) fp.write(exc_result) @@ -147,12 +143,13 @@ def thread_bin(req, host,role_id_list,update_progress_percentage): 'clush -S -b -w %s /home/zenic/node_start.sh' % (host_ip,), shell=True, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: - update_progress_to_db(req, role_id_list, zenic_state['UPDATE_FAILED']) + update_progress_to_db( + req, role_id_list, zenic_state['UPDATE_FAILED']) LOG.info(_("Start zenic for %s failed!" % host_ip)) fp.write(e.output.strip()) else: - update_progress_to_db(req, role_id_list, zenic_state['UPDATING'], update_progress_percentage) + update_progress_to_db( + req, role_id_list, zenic_state['UPDATING'], + update_progress_percentage) LOG.info(_("Start zenic for %s successfully!" % host_ip)) fp.write(exc_result) - - diff --git a/code/daisy/daisy/api/common.py b/code/daisy/daisy/api/common.py index 71a98990..c4e21c28 100755 --- a/code/daisy/daisy/api/common.py +++ b/code/daisy/daisy/api/common.py @@ -67,8 +67,8 @@ def size_checked_iter(response, image_meta, expected_size, image_iter, 'bytes_written': bytes_written}) LOG.error(msg) raise exception.DaisyException(_("Corrupt image download for " - "image %(image_id)s") % - {'image_id': image_id}) + "image %(image_id)s") % + {'image_id': image_id}) def image_send_notification(bytes_written, expected_size, image_meta, request, @@ -218,3 +218,9 @@ def get_thread_pool(lock_name, size=1024): return wsgi.get_asynchronous_eventlet_pool(size=size) return _get_thread_pool + + +def get_pxe_mac(host_detail): + pxe_macs = [interface['mac'] for interface in host_detail['interfaces'] + if interface['is_deployment']] + return pxe_macs diff --git a/code/daisy/daisy/api/configset/clush.py b/code/daisy/daisy/api/configset/clush.py index 4663705d..a2cb35bd 100755 --- a/code/daisy/daisy/api/configset/clush.py +++ b/code/daisy/daisy/api/configset/clush.py @@ -1,30 +1,29 @@ import subprocess import daisy.registry.client.v1.api as registry -from daisy.api.backends.tecs import config +from daisy.api.backends.tecs import config as role_service from oslo_log import log as logging import webob.exc +from webob.exc import HTTPBadRequest +from daisy.common import exception +from daisy.common import utils +import daisy.api.backends.common as daisy_cmn LOG = logging.getLogger(__name__) -CONFIG_MAP = { - 'cinder_config': '/etc/cinder/cinder.conf', - 'cinder_api_paste_ini': '/etc/cinder/api-paste.ini', - 'glance_api_config': '/etc/glance/glance-api.conf', - 'glance_api_paste_ini': '/etc/glance/glance-api-paste.ini', - } + class config_clushshell(): + """ Class for clush backend.""" - def __init__(self, req, role_id): - if not req and not role_id: - LOG.error("<<>>") - return + def __init__(self, req): self.context = req.context - self.role_id = role_id - self.CLUSH_CMD = "clush -S -w %(management_ip)s \"%(sub_command)s\"" - self.SUB_COMMAND = "openstack-config --set %(config_file)s %(section)s %(key)s %(value)s" + self.CLUSH_CMD = 'clush -S -w %(management_ip)s "%(sub_command)s"' + self.SUB_COMMAND_SET = "openstack-config --set %(config_file)s"\ + " %(section)s %(key)s '%(value)s'" + self.SUB_COMMAND_DEL = "openstack-config --del %(config_file)s"\ + " %(section)s %(key)s" def _openstack_set_config(self, host_ip, config_set): """ @@ -37,107 
+36,259 @@ class config_clushshell(): LOG.debug('<<>>') return - sub_command_by_one_host = [] + config_cmd = [] for config in config_set['config']: if config['config_version'] == config['running_version']: continue - config_file = registry.get_config_file_metadata(self.context, config['config_file_id']) - sub_command_by_one_host.append( - self.SUB_COMMAND % \ - {'config_file':config_file['name'] ,'section':config['section'], - 'key':config['key'], 'value':config['value']}) + config_file = registry.get_config_file_metadata( + self.context, config['config_file_id']) + if config['value']: + value = utils.translate_quotation_marks_for_shell( + config['value']) + config_cmd.append(self.SUB_COMMAND_SET % + {'config_file': config_file['name'], + 'section': config['section'], + 'key': config['key'], + 'value': value}) + else: + # if value is empty, delete or comment it. + config_cmd.append(self.SUB_COMMAND_DEL % + {'config_file': config_file['name'], + 'section': config['section'], + 'key': config['key']}) try: - sub_command_by_one_host = ";".join(sub_command_by_one_host) - clush_cmd = self.CLUSH_CMD % {'management_ip':host_ip, 'sub_command':sub_command_by_one_host} - subprocess.check_output(clush_cmd, shell=True, stderr=subprocess.STDOUT) + for cmd in config_cmd: + clush_cmd = self.CLUSH_CMD % { + 'management_ip': host_ip, 'sub_command': cmd} + subprocess.check_output( + clush_cmd, shell=True, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: - msg = ("<<>>" % (host_ip, e.output.strip())) + msg = ("<<>>" % + (host_ip, e.output.strip())) LOG.exception(msg) raise webob.exc.HTTPServerError(explanation=msg) else: - msg = ("<<>>" % host_ip) + msg = ("<<>>" % host_ip) LOG.info(msg) - config['running_version'] = config['config_version'] - def push_config(self): + # if push_status = None, we will push configs + # to all hosts in the role + def push_role_configs(self, role_id, push_status): """ Push config to remote host. 
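The two sub-command templates above compose with CLUSH_CMD into one clush invocation per changed config item: --set when a value is supplied, --del when it is empty. A minimal standalone sketch of that composition (the helper name and arguments are illustrative, not part of the patch):

# Sketch of how CLUSH_CMD combines with SUB_COMMAND_SET / SUB_COMMAND_DEL;
# the template strings mirror the ones defined in config_clushshell.__init__.
CLUSH_CMD = 'clush -S -w %(management_ip)s "%(sub_command)s"'
SUB_COMMAND_SET = ("openstack-config --set %(config_file)s"
                   " %(section)s %(key)s '%(value)s'")
SUB_COMMAND_DEL = ("openstack-config --del %(config_file)s"
                   " %(section)s %(key)s")


def build_config_cmd(host_ip, config_file, section, key, value=None):
    """Return the full clush command for one config item."""
    if value:
        sub = SUB_COMMAND_SET % {'config_file': config_file,
                                 'section': section,
                                 'key': key, 'value': value}
    else:
        sub = SUB_COMMAND_DEL % {'config_file': config_file,
                                 'section': section, 'key': key}
    return CLUSH_CMD % {'management_ip': host_ip, 'sub_command': sub}

# build_config_cmd('192.168.1.10', '/etc/nova/nova.conf', 'DEFAULT',
#                  'debug', 'True') returns:
# clush -S -w 192.168.1.10 "openstack-config --set /etc/nova/nova.conf DEFAULT debug 'True'"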
:param req: http req :param role_id: host role id :return: """ - self.role_info = registry.get_role_metadata(self.context, self.role_id) - if not self.role_info or not self.role_info.get('config_set_id'): - LOG.error("<<>>") + role_info = registry.get_role_metadata(self.context, role_id) + if not role_info.get('config_set_id'): + LOG.info("<<>>" + % role_info['name']) return - config_set = registry.get_config_set_metadata(self.context, self.role_info['config_set_id']) - if not config_set or not config_set.has_key('config'): - LOG.info("<<>>") + config_set = registry.get_config_set_metadata( + self.context, role_info['config_set_id']) + if not config_set: + LOG.info("<<>>" + % role_info['name']) return + else: + if 'config' not in config_set: + LOG.info("<<>>" + % role_info['name']) + return - config_set['config'] = \ - [config for config in config_set['config'] - if config.has_key('config_version') and config.has_key('running_version') - and config['config_version'] != config['running_version']] + config_set['config'] = [config for config in config_set['config'] + if config.get('config_version', 0) != + config.get('running_version', 0)] if not config_set['config']: - LOG.info('<<>>' % - self.role_id) + LOG.info("<<>>" + % role_info['name']) return - self.role_hosts = registry.get_role_host_metadata(self.context, self.role_id) + self.role_hosts = registry.get_role_host_metadata( + self.context, role_id) + + total_host_count = 0 + if push_status: + for r_host in self.role_hosts: + if r_host['status'] == push_status: + total_host_count += 1 + else: + total_host_count = len(self.role_hosts) + + if total_host_count > 0: + LOG.info("Begin to push config for role '%s'" + % role_info['name']) + else: + return current_count = 0 - all_host_config_sets = [] + # all_host_config_sets = [] for role_host in self.role_hosts: - host = registry.get_host_metadata(self.context, role_host['host_id']) - #change by 10166727--------start------------- - host_ip=[] + host = registry.get_host_metadata( + self.context, role_host['host_id']) + if push_status and role_host['status'] != push_status: + LOG.debug("<<>>" + % (role_host['host_id'], push_status)) + continue + + host_management_ip = '' for interface in host['interfaces']: - find_flag=interface['ip'].find(':') - if find_flag<0: - host_ip=[interface['ip']] - else: - ip_list_tmp=interface['ip'].split(",") - for ip_list in ip_list_tmp: - if ip_list.split(':')[0] == "MANAGEMENT": - host_ip=[str(ip_list.split(':')[1])] - #change by 10166727--------end--------------- - if not host_ip: - continue - host_ip = host_ip[0] + if ('assigned_networks' in interface and + interface['assigned_networks']): + for assigned_network in interface['assigned_networks']: + if (assigned_network['name'] == 'MANAGEMENT' and + 'ip' in assigned_network): + host_management_ip = assigned_network['ip'] - if 0 != subprocess.call('/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, 'ossdbg1'), - shell=True, - stderr=subprocess.STDOUT): - raise Exception("trustme.sh error!") - if not config_set.has_key("config"): + if not host_management_ip: + msg = "Can't find management ip for host %s"\ + % role_host['host_id'] + raise HTTPBadRequest(explanation=msg) + + root_passwd = 'ossdbg1' + daisy_cmn.trust_me([host_management_ip], root_passwd) + + self._openstack_set_config(host_management_ip, config_set) + + self._role_service_restart(role_info, host_management_ip) + + current_count += 1 + role_info['config_set_update_progress'] =\ + round(current_count * 1.0 / total_host_count, 2) * 100 + 
registry.update_role_metadata( + self.context, role_id, role_info) + + all_config_sets = [] + for config in config_set['config']: + config['running_version'] = config['config_version'] + all_config_sets.append(config_set) + registry.update_configs_metadata_by_role_hosts( + self.context, all_config_sets) + + def _host_service_restart(self, host_ip, components_name): + params = {'limit': '200', 'filters': {}} + try: + services = registry.get_services_detail(self.context, + **params) + components = registry.get_components_detail(self.context, + **params) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg) + + components_id = [comp['id'] for comp in components + for comp_name in components_name + if comp['name'] == comp_name] + + for service in services: + if service['component_id'] not in components_id: continue - self._openstack_set_config(host_ip, config_set) - all_host_config_sets.append(config_set) - registry.update_configs_metadata_by_role_hosts(self.context, all_host_config_sets) + services_name = role_service.service_map.get(service['name']) + if not services_name: + msg = "Can't find service for '%s'" % service + raise HTTPBadRequest(explanation=msg) - LOG.debug("Update config for host:%s successfully!" % host_ip) + for service_name in services_name.split(','): + active_service = "clush -S -w %s 'systemctl is-active\ + %s'" % (host_ip, service_name) + if 0 == utils.simple_subprocess_call(active_service): + restart_service = "clush -S -w %s 'systemctl restart\ + %s'" % (host_ip, service_name) + LOG.info("Restart service %s after pushing config" + % service_name) + if 0 != utils.simple_subprocess_call(restart_service): + msg = "Service %s restart failed on host '%s'."\ + % (service_name, host_ip) + LOG.error(msg) - self._host_service_restart(host_ip) - current_count +=1 - self.role_info['config_set_update_progress'] = round(current_count*1.0/len(self.role_hosts), 2)*100 - registry.update_role_metadata(self.context, self.role_id, self.role_info) + # now i don't known how to find component id by config file, + # so add you must tell me, and it can be deleted if i can find it + # in future. + def push_host_configs(self, host_id, components_name): + """ + Push config to remote host. 
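_host_service_restart above and _role_service_restart further below share the same restart-if-active pattern. A compact sketch of that pattern, with a stand-in for daisy's utils.simple_subprocess_call that simply returns the shell exit status:

# Sketch of the restart-if-active pattern used by both *_service_restart
# helpers (simple_subprocess_call is a stand-in returning the exit status).
import subprocess


def simple_subprocess_call(cmd):
    return subprocess.call(cmd, shell=True,
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)


def restart_if_active(host_ip, service_name):
    """Restart service_name on host_ip only if systemd reports it active."""
    is_active = "clush -S -w %s 'systemctl is-active %s'" % (host_ip,
                                                             service_name)
    if simple_subprocess_call(is_active) == 0:
        restart = "clush -S -w %s 'systemctl restart %s'" % (host_ip,
                                                             service_name)
        return simple_subprocess_call(restart) == 0
    return True  # services that are not active are left untouched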
+ :param req: http req + :param host_id: host id + :return: + """ + host_detail = registry.get_host_metadata(self.context, host_id) - def _host_service_restart(self,host_ip): + if not host_detail.get('config_set_id'): + LOG.info("<<>>" + % host_id) + return + + config_set =\ + registry.get_config_set_metadata(self.context, + host_detail['config_set_id']) + if not config_set: + LOG.info("<<>>" + % host_id) + return + else: + if 'config' not in config_set: + LOG.info("<<>>" % host_id) + return + + config_set['config'] = [config for config in config_set['config'] + if config.get('config_version', 0) != + config.get('running_version', 0)] + + if not config_set['config']: + LOG.info("<<>>" + % host_id) + return + + host_management_ip = '' + for interface in host_detail['interfaces']: + if ('assigned_networks' in interface and + interface['assigned_networks']): + for assigned_network in interface['assigned_networks']: + if (assigned_network['name'] == 'MANAGEMENT' and + 'ip' in assigned_network): + host_management_ip = assigned_network['ip'] + + if not host_management_ip: + msg = "Can't find management ip for host %s"\ + % host_detail['host_id'] + raise HTTPBadRequest(explanation=msg) + + root_passwd = 'ossdbg1' + daisy_cmn.trust_me([host_management_ip], root_passwd) + + self._openstack_set_config(host_management_ip, config_set) + + self._host_service_restart(host_management_ip, components_name) + + all_config_sets = [] + for config in config_set['config']: + config['running_version'] = config['config_version'] + all_config_sets.append(config_set) + registry.update_configs_metadata_by_role_hosts(self.context, + all_config_sets) + + def _role_service_restart(self, role_info, host_ip): """ """ - for service in self.role_info['service_name']: - for service_detail_name in config.service_map.get(service).split(','): - cmd = "" - if self.role_info['name'] == "CONTROLLER_HA": - cmd = "clush -S -w %s [ `systemctl is-active %s` != 'active' ] && systemctl restart %s" % \ - (host_ip, service_detail_name, service_detail_name) - else: - cmd = "clush -S -w %s systemctl restart %s" % (host_ip, service_detail_name) - if 0 != subprocess.call(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE): - LOG.error("Service %s restart failed in host:%s." 
% (service_detail_name, host_ip)) + for service in role_info['service_name']: + services_name = role_service.service_map.get(service) + if not services_name: + msg = "Can't find service for '%s'" % service + raise HTTPBadRequest(explanation=msg) - \ No newline at end of file + for service_name in services_name.split(','): + active_service = "clush -S -w %s 'systemctl is-active\ + %s'" % (host_ip, service_name) + if 0 == utils.simple_subprocess_call(active_service): + restart_service = "clush -S -w %s 'systemctl restart\ + %s'" % (host_ip, service_name) + LOG.info("Restart service %s after pushing config" + % service_name) + if 0 != utils.simple_subprocess_call(restart_service): + msg = "Service %s restart failed on host '%s'."\ + % (service_name, host_ip) + LOG.error(msg) diff --git a/code/daisy/daisy/api/configset/manager.py b/code/daisy/daisy/api/configset/manager.py index afb46a0e..29f62d6c 100755 --- a/code/daisy/daisy/api/configset/manager.py +++ b/code/daisy/daisy/api/configset/manager.py @@ -1,16 +1,24 @@ from daisy.api.configset.clush import config_clushshell + class configBackend(): - def __init__(self, type, req, role_id): + + def __init__(self, type, req): self.type = type self._instance = None - + if type == "clushshell": - self._instance = config_clushshell(req, role_id) + self._instance = config_clushshell(req) elif type == "puppet": pass - - def push_config(self): - self._instance.push_config() - - \ No newline at end of file + + # if push_status = None, we will push configs + # to all hosts in the role + def push_config_by_roles(self, role_ids, push_status=None): + for role_id in role_ids: + self._instance.push_role_configs(role_id, push_status) + + def push_config_by_hosts(self, host_ids, component_names=[]): + for host_id in host_ids: + self._instance.push_host_configs(host_id, + component_names) diff --git a/code/daisy/daisy/api/network_api.py b/code/daisy/daisy/api/network_api.py index 7bc34d7d..267071ec 100755 --- a/code/daisy/daisy/api/network_api.py +++ b/code/daisy/daisy/api/network_api.py @@ -24,10 +24,12 @@ from neutronclient.v2_0 import client as clientv20 from daisy.common import exception LOG = logging.getLogger(__name__) + class network(object): """ network config """ + def __init__(self, req, neutron_host, keystone_host, cluster_id): registry.configure_registry_client() auth_url = 'http://' + keystone_host + ':35357/v2.0' @@ -49,10 +51,12 @@ class network(object): except exception.Invalid as e: LOG.exception(e.msg) raise HTTPBadRequest(explanation=e.msg, request=req) - LOG.info("<<>>", cluster, neutron_host, keystone_host) - if 'logic_networks' in cluster and cluster['logic_networks'] is not None: + LOG.info("<<>>", + cluster, neutron_host, keystone_host) + if 'logic_networks' in cluster and cluster[ + 'logic_networks'] is not None: self.nets = cluster['logic_networks'] - #self._flat_network_uniqueness_check() + # self._flat_network_uniqueness_check() if 'routers' in cluster and cluster['routers'] is not None: self.routers = cluster['routers'] else: @@ -83,7 +87,9 @@ class network(object): for router in self.routers: router_id = self._router_create(router['name']) if 'external_logic_network' in router: - body = {'network_id': self.name_mappings[router['external_logic_network']]} + body = { + 'network_id': self.name_mappings[ + router['external_logic_network']]} self.neutron.add_gateway_router(router_id, body) if 'subnets' in router: for i in router['subnets']: @@ -92,7 +98,8 @@ class network(object): def _net_subnet_same_router_check(self, ex_network, 
subnet): for router in self.routers: - if 'external_logic_network' in router and router['external_logic_network'] == ex_network: + if 'external_logic_network' in router and router[ + 'external_logic_network'] == ex_network: if 'subnets' in router: for i in router['subnets']: if i == subnet: @@ -155,18 +162,25 @@ class network(object): for net in self.nets: body = {} if net['type'] == 'external': - body['network'] = {'name': net['name'], - 'router:external': True, - 'provider:network_type': net['segmentation_type']} + body['network'] = { + 'name': net['name'], + 'router:external': True, + 'provider:network_type': net['segmentation_type']} if net['segmentation_type'].strip() == 'flat': - body['network']['provider:physical_network'] = net['physnet_name'] + body['network']['provider:physical_network'] = net[ + 'physnet_name'] elif net['segmentation_type'].strip() == 'vxlan': - if 'segmentation_id' in net and net['segmentation_id'] is not None: - body['network']['provider:segmentation_id'] = net['segmentation_id'] + if 'segmentation_id' in net and net[ + 'segmentation_id'] is not None: + body['network']['provider:segmentation_id'] = net[ + 'segmentation_id'] else: - if 'segmentation_id' in net and net['segmentation_id'] is not None: - body['network']['provider:segmentation_id'] = net['segmentation_id'] - body['network']['provider:physical_network'] = net['physnet_name'] + if 'segmentation_id' in net and net[ + 'segmentation_id'] is not None: + body['network']['provider:segmentation_id'] = net[ + 'segmentation_id'] + body['network']['provider:physical_network'] = net[ + 'physnet_name'] if net['shared']: body['network']['shared'] = True else: @@ -175,21 +189,28 @@ class network(object): self.name_mappings[net['name']] = external['network']['id'] last_create_subnet = [] for subnet in net['subnets']: - if self._net_subnet_same_router_check(net['name'], subnet['name']): + if self._net_subnet_same_router_check( + net['name'], subnet['name']): last_create_subnet.append(subnet) else: - subnet_id = self._subnet_check_and_create(external['network']['id'], subnet) + subnet_id = self._subnet_check_and_create( + external['network']['id'], subnet) self.name_mappings[subnet['name']] = subnet_id for subnet in last_create_subnet: - subnet_id = self._subnet_check_and_create(external['network']['id'], subnet) + subnet_id = self._subnet_check_and_create( + external['network']['id'], subnet) self.name_mappings[subnet['name']] = subnet_id else: - body['network'] = {'name': net['name'], - 'provider:network_type': net['segmentation_type']} + body['network'] = { + 'name': net['name'], + 'provider:network_type': net['segmentation_type']} if net['segmentation_type'].strip() == 'vlan': - body['network']['provider:physical_network'] = net['physnet_name'] - if 'segmentation_id' in net and net['segmentation_id'] is not None: - body['network']['provider:segmentation_id'] = net['segmentation_id'] + body['network']['provider:physical_network'] = net[ + 'physnet_name'] + if 'segmentation_id' in net and net[ + 'segmentation_id'] is not None: + body['network']['provider:segmentation_id'] = net[ + 'segmentation_id'] if net['shared']: body['network']['shared'] = True else: @@ -197,6 +218,7 @@ class network(object): inner = self.neutron.create_network(body) self.name_mappings[net['name']] = inner['network']['id'] for subnet in net['subnets']: - subnet_id = self._subnet_check_and_create(inner['network']['id'], subnet) + subnet_id = self._subnet_check_and_create( + inner['network']['id'], subnet) self.name_mappings[subnet['name']] = 
subnet_id self._router_link() diff --git a/code/daisy/daisy/api/v1/__init__.py b/code/daisy/daisy/api/v1/__init__.py index 4d042010..b903d6f4 100755 --- a/code/daisy/daisy/api/v1/__init__.py +++ b/code/daisy/daisy/api/v1/__init__.py @@ -13,9 +13,11 @@ # License for the specific language governing permissions and limitations # under the License. -SUPPORTED_FILTERS = ['name', 'status','cluster_id','id','host_id', 'role_id', 'auto_scale','container_format', 'disk_format', +SUPPORTED_FILTERS = ['name', 'status', 'cluster_id', 'id', + 'host_id', 'role_id', + 'auto_scale', 'container_format', 'disk_format', 'min_ram', 'min_disk', 'size_min', 'size_max', - 'is_public', 'changes-since', 'protected'] + 'is_public', 'changes-since', 'protected', 'type'] SUPPORTED_PARAMS = ('limit', 'marker', 'sort_key', 'sort_dir') diff --git a/code/daisy/daisy/api/v1/backup_restore.py b/code/daisy/daisy/api/v1/backup_restore.py new file mode 100755 index 00000000..d8f7ca00 --- /dev/null +++ b/code/daisy/daisy/api/v1/backup_restore.py @@ -0,0 +1,312 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +/hosts endpoint for Daisy v1 API +""" +import datetime +import os +import subprocess +from oslo_log import log as logging +from webob.exc import HTTPBadRequest +from webob.exc import HTTPForbidden + +from daisy import i18n +from daisy import notifier + +from daisy.api import policy +import daisy.api.v1 +from daisy.common import exception +from daisy.common import property_utils +from daisy.common import utils +from daisy.common import wsgi +import daisy.registry.client.v1.api as registry +from daisy.api.v1 import controller +from daisy.api.v1 import filters +import daisy.api.backends.tecs.common as tecs_cmn + + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW +SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS +SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS +ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE +BACK_PATH = '/home/daisy_backup/' + + +class Controller(controller.BaseController): + """ + WSGI controller for hosts resource in Daisy v1 API + + The hosts resource API is a RESTful web service for host data. 
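For the network creation loop in network_api.py above, the body handed to neutron.create_network ends up shaped as below for a shared vlan provider network; all values are placeholders, not taken from any real cluster.

# Worked illustration of the create_network body built for a shared vlan
# provider network (all values are placeholders).
net = {'name': 'phynet1_vlan', 'segmentation_type': 'vlan',
       'physnet_name': 'physnet1', 'segmentation_id': 1001,
       'shared': True}

body = {'network': {'name': net['name'],
                    'provider:network_type': net['segmentation_type']}}
if net['segmentation_type'].strip() == 'vlan':
    body['network']['provider:physical_network'] = net['physnet_name']
    if net.get('segmentation_id') is not None:
        body['network']['provider:segmentation_id'] = net['segmentation_id']
if net['shared']:
    body['network']['shared'] = True
# neutron.create_network(body) is then called and the returned network id
# is recorded in name_mappings[net['name']].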
The API + is as follows:: + + GET /hosts -- Returns a set of brief metadata about hosts + GET /hosts/detail -- Returns a set of detailed metadata about + hosts + HEAD /hosts/ -- Return metadata about an host with id + GET /hosts/ -- Return host data for host with id + POST /hosts -- Store host data and return metadata about the + newly-stored host + PUT /hosts/ -- Update host metadata and/or upload host + data for a previously-reserved host + DELETE /hosts/ -- Delete the host with id + """ + def __init__(self): + self.notifier = notifier.Notifier() + registry.configure_registry_client() + self.policy = policy.Enforcer() + if property_utils.is_property_protection_enabled(): + self.prop_enforcer = property_utils.PropertyRules(self.policy) + else: + self.prop_enforcer = None + + def _enforce(self, req, action, target=None): + """Authorize an action against our policies""" + if target is None: + target = {} + try: + self.policy.enforce(req.context, action, target) + except exception.Forbidden: + raise HTTPForbidden() + + def _get_filters(self, req): + """ + Return a dictionary of query param filters from the request + + :param req: the Request object coming from the wsgi layer + :retval a dict of key/value filters + """ + query_filters = {} + for param in req.params: + if param in SUPPORTED_FILTERS: + query_filters[param] = req.params.get(param) + if not filters.validate(param, query_filters[param]): + raise HTTPBadRequest(_('Bad value passed to filter ' + '%(filter)s got %(val)s') + % {'filter': param, + 'val': query_filters[param]}) + return query_filters + + def _get_query_params(self, req): + """ + Extracts necessary query params from request. + + :param req: the WSGI Request object + :retval dict of parameters that can be used by registry client + """ + params = {'filters': self._get_filters(req)} + + for PARAM in SUPPORTED_PARAMS: + if PARAM in req.params: + params[PARAM] = req.params.get(PARAM) + return params + + def hostname(self): + if os.name == 'posix': + host = os.popen('echo $HOSTNAME') + try: + return host.read() + finally: + host.close() + else: + return 'Unkwon hostname' + + def check_file_format(self, req, file_meta): + if not os.path.exists(file_meta.get('backup_file_path', '')): + msg = 'File not exists!' + LOG.error(msg) + raise HTTPForbidden(explanation=msg, request=req, + content_type="text/plain") + + if not file_meta['backup_file_path'].endswith('.tar.gz'): + msg = 'File format not supported! .tar.gz format is required!' + LOG.error(msg) + raise HTTPForbidden(explanation=msg, request=req, + content_type="text/plain") + + @utils.mutating + def backup(self, req): + """ + Backup daisy data.. 
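check_file_format above only accepts an existing .tar.gz archive; a one-function sketch of the same rule outside the WSGI plumbing (the path in the usage comment is a placeholder):

# Standalone sketch of the validation applied before restore and
# get_backup_file_version run.
import os


def is_valid_backup_archive(path):
    """True only for an existing file whose name ends in .tar.gz."""
    return os.path.exists(path) and path.endswith('.tar.gz')

# is_valid_backup_archive('/home/daisy_backup/node1_20160101_1.0.0-1.1.0.tar.gz')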
+ + :param req: The WSGI/Webob Request object + + :raises HTTPBadRequest if backup failed + """ + version = self.version(req, {'type': 'internal'}) + date_str = filter(lambda x: x.isdigit(), + str(datetime.datetime.now())[:19]) + backup_file_name = '{0}_{1}_{2}.tar.gz'.format( + self.hostname().strip(), date_str, version['daisy_version']) + + scripts = [ + 'test -d {0}daisy_tmp||mkdir -p {0}daisy_tmp'.format(BACK_PATH), + 'echo {0}>{1}daisy_tmp/version.conf'.format( + version['daisy_version'], BACK_PATH), + 'cp /home/daisy_install/daisy.conf {0}/daisy_tmp'.format( + BACK_PATH), + 'mysqldump --all-databases > {0}daisy_tmp/database.sql'.format( + BACK_PATH), + 'tar -zcvf {0}{1} -C {0} daisy_tmp >/dev/null 2>&1'.format( + BACK_PATH, backup_file_name), + 'chmod 777 {0} {0}{1}'.format(BACK_PATH, backup_file_name), + 'rm -rf {0}daisy_tmp'.format(BACK_PATH) + ] + + tecs_cmn.run_scrip(scripts, msg='Backup file failed!') + return {"backup_file": BACK_PATH + backup_file_name} + + @utils.mutating + def restore(self, req, file_meta): + """ + Restore daisy data. + :param req: The WSGI/Webob Request object + :param file_meta: The daisy backup file path + :raises HTTPBadRequest if restore failed + """ + self.check_file_format(req, file_meta) + restore_scripts = [ + 'test -d {0} || mkdir {0}'.format(BACK_PATH), + 'test -d {0} || mkdir {0}'.format('/home/daisy_install/'), + 'tar -zxvf {1} -C {0}>/dev/null 2>&1'.format( + BACK_PATH, file_meta['backup_file_path']), + 'mysql < {0}daisy_tmp/database.sql'.format(BACK_PATH), + 'cp {0}daisy_tmp/daisy.conf /home/daisy_install/'.format( + BACK_PATH), + 'rm -rf {0}daisy_tmp'.format(BACK_PATH) + ] + tecs_cmn.run_scrip(restore_scripts, msg='Restore failed!') + LOG.info('Restore successfully') + + @utils.mutating + def get_backup_file_version(self, req, file_meta): + """ + Get version of daisy backup file. + + :param req: The WSGI/Webob Request object + + :raises HTTPBadRequest if can't get version of backup file + """ + self.check_file_format(req, file_meta) + scripts = [ + 'test -d {0} || mkdir {0}'.format(BACK_PATH), + 'tar -zxvf {0} -C {1}>/dev/null 2>&1'.format( + file_meta['backup_file_path'], BACK_PATH) + ] + + tecs_cmn.run_scrip(scripts, msg='Decompression file failed!') + + try: + version = subprocess.check_output( + 'cat {0}daisy_tmp/version.conf'.format(BACK_PATH), + shell=True, stderr=subprocess.STDOUT).strip() + except: + msg = 'Error occurred when running scripts to get version of' \ + ' backup file!' + LOG.error(msg) + raise HTTPForbidden(explanation=msg, request=req, + content_type="text/plain") + tecs_cmn.run_scrip(['rm -rf {0}daisy_tmp'.format(BACK_PATH)]) + return {"backup_file_version": version} + + @utils.mutating + def version(self, req, version): + """ + Get version of daisy. 
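backup() above is essentially a short shell pipeline; a rough standalone equivalent of the script list, assuming the same BACK_PATH, an example archive name and an example version string (tecs_cmn.run_scrip is replaced by subprocess.check_call):

# Rough standalone equivalent of the backup script list (archive name and
# version string are examples).
import subprocess

BACK_PATH = '/home/daisy_backup/'
backup_file_name = 'node1_20160101120000_1.0.0-1.1.0.tar.gz'

backup_cmds = [
    'mkdir -p {0}daisy_tmp'.format(BACK_PATH),
    'echo 1.0.0-1.1.0 > {0}daisy_tmp/version.conf'.format(BACK_PATH),
    'cp /home/daisy_install/daisy.conf {0}daisy_tmp'.format(BACK_PATH),
    'mysqldump --all-databases > {0}daisy_tmp/database.sql'.format(BACK_PATH),
    'tar -zcvf {0}{1} -C {0} daisy_tmp'.format(BACK_PATH, backup_file_name),
    'rm -rf {0}daisy_tmp'.format(BACK_PATH),
]
for cmd in backup_cmds:
    subprocess.check_call(cmd, shell=True)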
+ + :param req: The WSGI/Webob Request object + + :raises HTTPBadRequest if can't get version of daisy + """ + if version.get('type') == 'internal': + scripts = "rpm -q python-daisy | awk -F'-' '{print $3\"-\"$4}'" + else: + # reserve for external version + return {"daisy_version": '1.0.0-1.1.0'} + try: + version = subprocess.check_output(scripts, shell=True, + stderr=subprocess.STDOUT).strip() + except: + msg = 'Error occurred when running scripts to get version of daisy' + LOG.error(msg) + raise HTTPForbidden(explanation=msg, request=req, + content_type="text/plain") + daisy_version = filter(lambda x: not x.isalpha(), version)[:-1] + return {"daisy_version": daisy_version} + + +class BackupRestoreDeserializer(wsgi.JSONRequestDeserializer): + """Handles deserialization of specific controller method requests.""" + + def _deserialize(self, request): + result = {} + result['file_meta'] = utils.get_dict_meta(request) + return result + + def backup(self, request): + return {} + + def restore(self, request): + return self._deserialize(request) + + def get_backup_file_version(self, request): + return self._deserialize(request) + + def version(self, request): + result = {} + result['version'] = utils.get_dict_meta(request) + return result + + +class BackupRestoreSerializer(wsgi.JSONResponseSerializer): + """Handles serialization of specific controller method responses.""" + + def __init__(self): + self.notifier = notifier.Notifier() + + def backup(self, response, result): + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(result) + return response + + def restore(self, response, result): + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(result) + return response + + def get_backup_file_version(self, response, result): + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(result) + return response + + def version(self, response, result): + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(result) + return response + + +def create_resource(): + """Version resource factory method""" + deserializer = BackupRestoreDeserializer() + serializer = BackupRestoreSerializer() + return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/code/daisy/daisy/api/v1/clusters.py b/code/daisy/daisy/api/v1/clusters.py index 548f3a37..a9dcf9ba 100755 --- a/code/daisy/daisy/api/v1/clusters.py +++ b/code/daisy/daisy/api/v1/clusters.py @@ -38,6 +38,7 @@ from daisy.common import wsgi from daisy import i18n from daisy import notifier import daisy.registry.client.v1.api as registry +from functools import reduce LOG = logging.getLogger(__name__) _ = i18n._ @@ -53,15 +54,16 @@ CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format') CONF.import_opt('container_formats', 'daisy.common.config', group='image_format') CONF.import_opt('image_property_quota', 'daisy.common.config') -CLUSTER_DEFAULT_NETWORKS = ['PUBLIC', 'DEPLOYMENT', 'PRIVATE', 'EXTERNAL', - 'STORAGE', 'VXLAN', 'MANAGEMENT'] +CLUSTER_DEFAULT_NETWORKS = ['PUBLICAPI', 'DEPLOYMENT', 'DATAPLANE', 'EXTERNAL', + 'STORAGE', 'MANAGEMENT'] + class Controller(controller.BaseController): """ WSGI controller for clusters resource in Daisy v1 API - The clusters resource API is a RESTful web service for cluster data. The API - is as follows:: + The clusters resource API is a RESTful web service for cluster data. 
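The internal branch of version() above scrapes the installed rpm name; a worked example of that parsing, assuming a package named something like python-daisy-2015.1-1.el7.noarch (an assumption for illustration; Python 2 string semantics):

# Worked example of the version parsing in version() (Python 2: filter on
# a str returns a str). Assumes rpm -q python-daisy prints
# python-daisy-2015.1-1.el7.noarch.
rpm_output = '2015.1-1.el7.noarch'   # awk -F'-' '{print $3"-"$4}' result
stripped = filter(lambda x: not x.isalpha(), rpm_output)   # '2015.1-1.7.'
daisy_version = stripped[:-1]                              # '2015.1-1.7'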
+ The API is as follows:: GET /clusters -- Returns a set of brief metadata about clusters GET /clusters -- Returns a set of detailed metadata about @@ -86,57 +88,74 @@ class Controller(controller.BaseController): cluster_id = kwargs.get('id', None) errmsg = (_("I'm params checker.")) - LOG.debug(_("Params check for cluster-add or cluster-update begin!")) - + LOG.debug( + _("Params check for cluster-add or cluster-update begin!")) + def check_params_range(param, type=None): ''' param : input a list ,such as [start, end] - check condition: start must less than end, and existed with pair + check condition: start must less than end, + and existed with pair return True of False ''' if len(param) != 2: msg = '%s range must be existed in pairs.' % type raise HTTPForbidden(explanation=msg) - if param[0] == None or param[0] == '': + if param[0] is None or param[0] == '': msg = 'The start value of %s range can not be None.' % type raise HTTPForbidden(explanation=msg) - if param[1] == None: + if param[1] is None: msg = 'The end value of %s range can not be None.' % type raise HTTPForbidden(explanation=msg) if int(param[0]) > int(param[1]): - msg = 'The start value of the %s range must be less than the end value.' % type + msg = 'The start value of the %s range must be less ' \ + 'than the end value.' % type raise HTTPForbidden(explanation=msg) if type not in ['vni']: if int(param[0]) < 0 or int(param[0]) > 4096: - msg = 'Invalid value of the start value(%s) of the %s range .' % (param[0], type) + msg = 'Invalid value of the start value(%s) of ' \ + 'the %s range .' % (param[ + 0], type) raise HTTPForbidden(explanation=msg) if int(param[1]) < 0 or int(param[1]) > 4096: - msg = 'Invalid value of the end value(%s) of the %s range .' % (param[1], type) + msg = 'Invalid value of the end value(%s) of ' \ + 'the %s range .' % (param[ + 1], type) raise HTTPForbidden(explanation=msg) else: if int(param[0]) < 0 or int(param[0]) > 16777216: - msg = 'Invalid value of the start value(%s) of the %s range .' % (param[0], type) + msg = 'Invalid value of the start value(%s) of ' \ + 'the %s range .' % (param[ + 0], type) raise HTTPForbidden(explanation=msg) if int(param[1]) < 0 or int(param[1]) > 16777216: - msg = 'Invalid value of the end value(%s) of the %s range .' % (param[1], type) + msg = 'Invalid value of the end value(%s) of ' \ + 'the %s range .' 
% (param[ + 1], type) raise HTTPForbidden(explanation=msg) return True - + def _check_auto_scale(req, cluster_meta): - if cluster_meta.has_key('auto_scale') and cluster_meta['auto_scale'] =='1': - meta = { "auto_scale":'1' } - params = { 'filters': meta } - clusters = registry.get_clusters_detail(req.context, **params) + if 'auto_scale' in cluster_meta and cluster_meta[ + 'auto_scale'] == '1': + meta = {"auto_scale": '1'} + params = {'filters': meta} + clusters = registry.get_clusters_detail( + req.context, **params) if clusters: if cluster_id: - temp_cluster = [cluster for cluster in clusters if cluster['id'] !=cluster_id] + temp_cluster = [ + cluster for cluster in clusters if + cluster['id'] != cluster_id] if temp_cluster: - errmsg = (_("already exist cluster auto_scale is true")) - raise HTTPBadRequest(explanation=errmsg) + errmsg = ( + _("already exist cluster " + "auto_scale is true")) + raise HTTPBadRequest(explanation=errmsg) else: - errmsg = (_("already exist cluster auto_scale is true")) + errmsg = ( + _("already exist cluster auto_scale is true")) raise HTTPBadRequest(explanation=errmsg) - def _ip_into_int(ip): """ @@ -144,7 +163,8 @@ class Controller(controller.BaseController): :param ip: ip string :return: decimalism integer """ - return reduce(lambda x, y: (x<<8)+y, map(int, ip.split('.'))) + return reduce(lambda x, y: (x << 8) + y, + map(int, ip.split('.'))) def _is_in_network_range(ip, network): """ @@ -155,9 +175,13 @@ class Controller(controller.BaseController): """ network = network.split('/') mask = ~(2**(32 - int(network[1])) - 1) - return (_ip_into_int(ip) & mask) == (_ip_into_int(network[0]) & mask) + return ( + _ip_into_int(ip) & mask) == ( + _ip_into_int( + network[0]) & mask) - def _check_param_nonull_and_valid(values_set, keys_set, valids_set={}): + def _check_param_nonull_and_valid( + values_set, keys_set, valids_set={}): """ Check operation params is not null and valid. :param values_set: Params set. @@ -167,10 +191,10 @@ class Controller(controller.BaseController): """ for k in keys_set: v = values_set.get(k, None) - if type(v) == type(True) and v == None: + if isinstance(v, type(True)) and v is None: errmsg = (_("Segment %s can't be None." % k)) raise HTTPBadRequest(explanation=errmsg) - elif type(v) != type(True) and not v: + elif not isinstance(v, type(True)) and not v: errmsg = (_("Segment %s can't be None." 
% k)) raise HTTPBadRequest(explanation=errmsg) @@ -183,15 +207,18 @@ class Controller(controller.BaseController): def _get_network_detail(req, cluster_id, networks_list): all_network_list = [] if cluster_id: - all_network_list = registry.get_networks_detail(req.context, cluster_id) + all_network_list = registry.get_networks_detail( + req.context, cluster_id) if networks_list: for net_id in networks_list: - network_detail = registry.get_network_metadata(req.context, net_id) + network_detail = registry.get_network_metadata( + req.context, net_id) all_network_list.append(network_detail) - all_private_network_list = \ - [network for network in all_network_list if network['network_type'] == "PRIVATE"] + all_private_network_list = [ + network for network in all_network_list if network[ + 'network_type'] == "DATAPLANE"] return all_private_network_list def _check_cluster_add_parameters(req, cluster_meta): @@ -201,123 +228,92 @@ class Controller(controller.BaseController): :param cluster_meta: params set :return:error message """ - if cluster_meta.has_key('nodes'): + if 'nodes' in cluster_meta: orig_keys = list(eval(cluster_meta['nodes'])) for host_id in orig_keys: controller._raise_404_if_host_deleted(req, host_id) - if cluster_meta.has_key('networks'): + if 'networks' in cluster_meta: orig_keys = list(eval(cluster_meta['networks'])) network_with_same_name = [] for network_id in orig_keys: - network_name = controller._raise_404_if_network_deleted(req, network_id) + network_name = \ + controller._raise_404_if_network_deleted( + req, network_id) if network_name in CLUSTER_DEFAULT_NETWORKS: return (_("Network name %s of %s already exits" " in the cluster, please check." % (network_name, network_id))) if network_name in network_with_same_name: - return (_("Network name can't be same with each other in 'networks[]', " + return (_("Network name can't be same with " + "each other in 'networks[]', " "please check.")) network_with_same_name.append(network_name) - # checkout network_params-------------------------------------------------- + # checkout network_params if cluster_meta.get('networking_parameters', None): - networking_parameters = eval(cluster_meta['networking_parameters']) - _check_param_nonull_and_valid(networking_parameters, - ['segmentation_type']) - segmentation_type_set = networking_parameters['segmentation_type'].split(",") - for segmentation_type in segmentation_type_set: - if segmentation_type not in ['vlan', 'vxlan', 'flat', 'gre']: - return (_("Segmentation_type of networking_parameters is not valid.")) - if segmentation_type =='vxlan': - _check_param_nonull_and_valid(networking_parameters,['vni_range']) - elif segmentation_type =='gre': - _check_param_nonull_and_valid(networking_parameters,['gre_id_range']) + networking_parameters =\ + eval(cluster_meta['networking_parameters']) - vlan_range = networking_parameters.get("vlan_range", None) - vni_range = networking_parameters.get("vni_range", None) - gre_id_range = networking_parameters.get("gre_id_range", None) - #if (vlan_range and len(vlan_range) != 2) \ - # or (vni_range and len(vni_range) != 2) \ - # or (gre_id_range and len(gre_id_range) != 2): - # return (_("Range params must be pair.")) - if vlan_range: - check_params_range(vlan_range, 'vlan') - if vni_range: - check_params_range(vni_range, 'vni') - if gre_id_range: - check_params_range(gre_id_range, 'gre_id') - - # check logic_networks-------------------------------------------------- - subnet_name_set = [] # record all subnets's name - logic_network_name_set = [] # record all 
logic_network's name + # check logic_networks + subnet_name_set = [] # record all subnets's name + logic_network_name_set = [] # record all logic_network's name subnets_in_logic_network = {} external_logic_network_name = [] if cluster_meta.get('logic_networks', None): # get physnet_name list all_private_cluster_networks_list = _get_network_detail( - req, cluster_id, - cluster_meta.get('networks', None) - if not isinstance(cluster_meta.get('networks', None), unicode) - else eval(cluster_meta.get('networks', None))) + req, cluster_id, cluster_meta.get( + 'networks', None) if not isinstance( + cluster_meta.get( + 'networks', None), unicode) else eval( + cluster_meta.get( + 'networks', None))) if not all_private_cluster_networks_list: - LOG.info("Private network is empty in db, it lead logical network config invalid.") - physnet_name_set = [net['name'] for net in all_private_cluster_networks_list] + LOG.info( + "Private network is empty in db, it lead " + "logical network config invalid.") + physnet_name_set = [net['name'] + for net in + all_private_cluster_networks_list] logic_networks = eval(cluster_meta['logic_networks']) for logic_network in logic_networks: subnets_in_logic_network[logic_network['name']] = [] - # We force setting the physnet_name of flat logical network to 'flat'. - if logic_network.get('segmentation_type', None) == "flat": - if logic_network['physnet_name'] != "physnet1" or logic_network['type'] != "external": - LOG.info("When 'segmentation_type' is flat the 'physnet_name' and 'type' segmentation" - "must be 'physnet1'' and 'external'', but got '%s' and '%s'.We have changed" - "it to the valid value.") + # We force setting the physnet_name of flat logical + # network to 'flat'. + if logic_network.get( + 'segmentation_type', None) == "flat": + if logic_network['physnet_name'] != "physnet1" or \ + logic_network[ + 'type'] != "external": + LOG.info( + "When 'segmentation_type' is flat the " + "'physnet_name' and 'type' segmentation" + "must be 'physnet1'' and 'external'', " + "but got '%s' and '%s'.We have changed" + "it to the valid value.") logic_network['physnet_name'] = "physnet1" logic_network['type'] = "external" physnet_name_set.append("physnet1") _check_param_nonull_and_valid( logic_network, - ['name', 'type', 'physnet_name', 'segmentation_type', 'shared', 'segmentation_id'], - {'segmentation_type' : networking_parameters['segmentation_type'], - 'physnet_name' : ','.join(physnet_name_set), - 'type' : ','.join(["external", "internal"])}) + ['name', 'type', 'physnet_name', + 'segmentation_type', 'shared', 'segmentation_id'], + {'segmentation_type': networking_parameters[ + 'segmentation_type'], + 'physnet_name': ','.join(physnet_name_set), + 'type': ','.join(["external", "internal"])}) if logic_network['type'] == "external": - external_logic_network_name.append(logic_network['name']) + external_logic_network_name.append( + logic_network['name']) logic_network_name_set.append(logic_network['name']) - # By segmentation_type check segmentation_id is in range - segmentation_id = logic_network.get('segmentation_id', None) - if segmentation_id: - err = "Segmentation_id is out of private network %s of %s.Vaild range is [%s, %s]." 
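The _ip_into_int and _is_in_network_range helpers defined earlier in this checker implement the CIDR membership test with plain integer arithmetic; a short worked example using the same definitions:

# Worked example of the CIDR helpers used by the cluster parameter checks.
from functools import reduce


def _ip_into_int(ip):
    # '192.168.1.10' -> 3232235786
    return reduce(lambda x, y: (x << 8) + y, map(int, ip.split('.')))


def _is_in_network_range(ip, network):
    # network is CIDR notation, e.g. '192.168.1.0/24'
    net, prefix = network.split('/')
    mask = ~(2 ** (32 - int(prefix)) - 1)
    return (_ip_into_int(ip) & mask) == (_ip_into_int(net) & mask)

# _is_in_network_range('192.168.1.200', '192.168.1.0/24')  -> True
# _is_in_network_range('192.168.2.1', '192.168.1.0/24')    -> False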
- segmentation_type = logic_network.get('segmentation_type', None) - if 0 == cmp(segmentation_type, "vlan"): - private_vlan_range = \ - [(net['vlan_start'], net['vlan_end']) - for net in all_private_cluster_networks_list - if logic_network['physnet_name'] == net['name']] - - if private_vlan_range and \ - not private_vlan_range[0][0] or \ - not private_vlan_range[0][1]: - return (_("Private network plane %s don't config the 'vlan_start' or " - "'vlan_end' parameter.")) - - if int(segmentation_id) not in range(private_vlan_range[0][0], private_vlan_range[0][1]): - return (_(err % ("vlan_range", logic_network['physnet_name'], - private_vlan_range[0][0], private_vlan_range[0][1]))) - elif 0 == cmp(segmentation_type, "vxlan") and vni_range: - if int(segmentation_id) not in range(vni_range[0], vni_range[1]): - return (_("Segmentation_id is out of vni_range.")) - elif 0 == cmp(segmentation_type, "gre") and gre_id_range: - if int(segmentation_id) not in range(gre_id_range[0], gre_id_range[1]): - return (_("Segmentation_id is out of gre_id_range.")) - - # checkout subnets params-------------------------------------------------- + # checkout subnets params------------------------------ if logic_network.get('subnets', None): subnet_data = logic_network['subnets'] for subnet in subnet_data: @@ -325,49 +321,78 @@ class Controller(controller.BaseController): subnet, ['name', 'cidr']) subnet_name_set.append(subnet['name']) - # By cidr check floating_ranges is in range and not overlap - #---------------start----- - if subnet['gateway'] and not _is_in_network_range(subnet['gateway'], subnet['cidr']): + # By cidr check floating_ranges is in range + # and not overlap + # ---------------start----- + if subnet['gateway'] and not \ + _is_in_network_range( + subnet['gateway'], subnet['cidr']): return (_("Wrong gateway format.")) if subnet['floating_ranges']: - inter_ip = lambda x: '.'.join([str(x/(256**i)%256) for i in range(3,-1,-1)]) + inter_ip = lambda x: '.'.join( + [str(x / (256**i) % 256) for i in + range(3, -1, -1)]) floating_ranges_with_int_ip = list() sorted_floating_ranges = list() sorted_floating_ranges_with_int_ip = list() - for floating_ip in subnet['floating_ranges']: + for floating_ip in subnet[ + 'floating_ranges']: if len(floating_ip) != 2: - return (_("Floating ip must be paris.")) + return ( + _("Floating ip must " + "be paris.")) ip_start = _ip_into_int(floating_ip[0]) ip_end = _ip_into_int(floating_ip[1]) if ip_start > ip_end: - return (_("Wrong floating ip format.")) - floating_ranges_with_int_ip.append([ip_start, ip_end]) - sorted_floating_ranges_with_int_ip = sorted(floating_ranges_with_int_ip, key=lambda x : x[0]) - for ip_range in sorted_floating_ranges_with_int_ip: + return ( + _("Wrong floating ip format.")) + floating_ranges_with_int_ip.append( + [ip_start, ip_end]) + sorted_floating_ranges_with_int_ip = \ + sorted(floating_ranges_with_int_ip, + key=lambda x: x[0]) + for ip_range in \ + sorted_floating_ranges_with_int_ip: ip_start = inter_ip(ip_range[0]) ip_end = inter_ip(ip_range[1]) - sorted_floating_ranges.append([ip_start, ip_end]) + sorted_floating_ranges.append( + [ip_start, ip_end]) last_rang_ip = [] for floating in sorted_floating_ranges: - if not _is_in_network_range(floating[0], subnet['cidr']) \ - or not _is_in_network_range(floating[1], subnet['cidr']): - return (_("Floating ip or gateway is out of range cidr.")) + if not _is_in_network_range( + floating[0], + subnet['cidr']) or not \ + _is_in_network_range( + floating[1], subnet['cidr']): + return ( + _("Floating ip or 
gateway " + "is out of range cidr.")) - err_list = [err for err in last_rang_ip if _ip_into_int(floating[0]) < err] + err_list = [ + err for err in last_rang_ip if + _ip_into_int( + floating[0]) < err] if last_rang_ip and 0 < len(err_list): - return (_("Between floating ip range can not be overlap.")) - last_rang_ip.append(_ip_into_int(floating[1])) - subnets_in_logic_network[logic_network['name']].append(subnet['name']) + return ( + _("Between floating ip range " + "can not be overlap.")) + last_rang_ip.append( + _ip_into_int(floating[1])) + subnets_in_logic_network[logic_network[ + 'name']].append(subnet['name']) # check external logical network uniqueness if len(external_logic_network_name) > 1: - return (_("External logical network is uniqueness in the cluster.Got %s." % - ",".join(external_logic_network_name))) + return (_("External logical network is uniqueness " + "in the cluster.Got %s." % + ",".join(external_logic_network_name))) # check logic_network_name uniqueness - if len(logic_network_name_set) != len(set(logic_network_name_set)): - return (_("Logic network name segment is repetition.")) + if len(logic_network_name_set) != len( + set(logic_network_name_set)): + return (_("Logic network name segment " + "is repetition.")) # check subnet_name uniqueness if len(subnet_name_set) != len(set(subnet_name_set)): @@ -375,36 +400,47 @@ class Controller(controller.BaseController): cluster_meta['logic_networks'] = unicode(logic_networks) - # check routers-------------------------------------------------- + # check routers------------------------------------------------ subnet_name_set_deepcopy = copy.deepcopy(subnet_name_set) - router_name_set = [] # record all routers name + router_name_set = [] # record all routers name if cluster_meta.get('routers', None): router_data = eval(cluster_meta['routers']) - for router in router_data: + for router in router_data: _check_param_nonull_and_valid(router, ['name']) # check relevance logic_network is valid - external_logic_network_data = router.get('external_logic_network', None) + external_logic_network_data = router.get( + 'external_logic_network', None) if external_logic_network_data and \ - external_logic_network_data not in logic_network_name_set: - return (_("Logic_network %s is not valid range." % external_logic_network_data)) + external_logic_network_data not in \ + logic_network_name_set: + return (_("Logic_network %s is not valid range." % + external_logic_network_data)) router_name_set.append(router['name']) # check relevance subnets is valid for subnet in router.get('subnets', []): if subnet not in subnet_name_set: - return (_("Subnet %s is not valid range." % subnet)) + return ( + _("Subnet %s is not valid range." % + subnet)) # subnet cann't relate with two routers if subnet not in subnet_name_set_deepcopy: - return (_("The subnet can't be related with multiple routers.")) + return ( + _("The subnet can't be related with " + "multiple routers.")) subnet_name_set_deepcopy.remove(subnet) if external_logic_network_data and \ - subnets_in_logic_network[external_logic_network_data] and \ - set(subnets_in_logic_network[external_logic_network_data]). \ + subnets_in_logic_network[ + external_logic_network_data] and \ + set(subnets_in_logic_network[ + external_logic_network_data]). 
\ issubset(set(router['subnets'])): - return (_("Logic network's subnets is all related with a router, it's not allowed.")) + return ( + _("Logic network's subnets is all related" + " with a router, it's not allowed.")) # check subnet_name uniqueness if len(router_name_set) != len(set(router_name_set)): @@ -413,10 +449,13 @@ class Controller(controller.BaseController): _check_auto_scale(req, cluster_meta) check_result = _check_cluster_add_parameters(req, cluster_meta) if 0 != cmp(check_result, errmsg): - LOG.exception(_("Params check for cluster-add or cluster-update is failed!")) + LOG.exception( + _("Params check for cluster-add or cluster-update " + "is failed!")) raise HTTPBadRequest(explanation=check_result) - LOG.debug(_("Params check for cluster-add or cluster-update is done!")) + LOG.debug( + _("Params check for cluster-add or cluster-update is done!")) return f(*args, **kwargs) return wrapper @@ -448,7 +487,8 @@ class Controller(controller.BaseController): def _raise_404_if_network_deleted(self, req, network_id): network = self.get_network_meta_or_404(req, network_id) if network['deleted']: - msg = _("Network with identifier %s has been deleted.") % network_id + msg = _("Network with identifier %s has been deleted.") % \ + network_id raise HTTPNotFound(msg) return network.get('name', None) @@ -500,9 +540,10 @@ class Controller(controller.BaseController): if not cluster_name: raise ValueError('cluster name is null!') cluster_name_split = cluster_name.split('_') - for cluster_name_info in cluster_name_split : + for cluster_name_info in cluster_name_split: if not cluster_name_info.isalnum(): - raise ValueError('cluster name must be numbers or letters or underscores !') + raise ValueError( + 'cluster name must be numbers or letters or underscores !') if cluster_meta.get('nodes', None): orig_keys = list(eval(cluster_meta['nodes'])) for host_id in orig_keys: @@ -514,11 +555,15 @@ class Controller(controller.BaseController): raise HTTPForbidden(explanation=msg) if node.get('interfaces', None): interfaces = node['interfaces'] - input_host_pxe_info = [interface for interface in interfaces - if interface.get('is_deployment', None) == 1] - if not input_host_pxe_info and node.get('os_status',None) != 'active': - msg = _("The host %s has more than one dhcp server, " - "please choose one interface for deployment") % host_id + input_host_pxe_info = [ + interface for interface in interfaces if interface.get( + 'is_deployment', None) == 1] + if not input_host_pxe_info and node.get( + 'os_status', None) != 'active': + msg = _( + "The host %s has more than one dhcp server, " + "please choose one interface for deployment") % \ + host_id raise HTTPServerError(explanation=msg) print cluster_name print cluster_meta @@ -537,7 +582,7 @@ class Controller(controller.BaseController): """ self._enforce(req, 'delete_cluster') - #cluster = self.get_cluster_meta_or_404(req, id) + # cluster = self.get_cluster_meta_or_404(req, id) print "delete_cluster:%s" % id try: registry.delete_cluster_metadata(req.context, id) @@ -556,14 +601,15 @@ class Controller(controller.BaseController): request=req, content_type="text/plain") except exception.InUseByStore as e: - msg = (_("cluster %(id)s could not be deleted because it is in use: " + msg = (_("cluster %(id)s could not be deleted because " + "it is in use: " "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)}) LOG.warn(msg) raise HTTPConflict(explanation=msg, request=req, content_type="text/plain") else: - #self.notifier.info('cluster.delete', cluster) + # 
self.notifier.info('cluster.delete', cluster) return Response(body='', status=200) @utils.mutating @@ -619,26 +665,31 @@ class Controller(controller.BaseController): :retval Returns the updated cluster information as a mapping """ self._enforce(req, 'update_cluster') - if cluster_meta.has_key('nodes'): + if 'nodes' in cluster_meta: orig_keys = list(eval(cluster_meta['nodes'])) for host_id in orig_keys: self._raise_404_if_host_deleted(req, host_id) node = registry.get_host_metadata(req.context, host_id) if node['status'] == 'in-cluster': - host_cluster = registry.get_host_clusters(req.context, host_id) + host_cluster = registry.get_host_clusters( + req.context, host_id) if host_cluster[0]['cluster_id'] != id: msg = _("Forbidden to add host %s with status " "'in-cluster' in another cluster") % host_id raise HTTPForbidden(explanation=msg) if node.get('interfaces', None): interfaces = node['interfaces'] - input_host_pxe_info = [interface for interface in interfaces - if interface.get('is_deployment', None) == 1] - if not input_host_pxe_info and node.get('os_status', None) != 'active': - msg = _("The host %s has more than one dhcp server, " - "please choose one interface for deployment") % host_id + input_host_pxe_info = [ + interface for interface in interfaces if interface.get( + 'is_deployment', None) == 1] + if not input_host_pxe_info and node.get( + 'os_status', None) != 'active': + msg = _( + "The host %s has more than one dhcp server, " + "please choose one interface for deployment") % \ + host_id raise HTTPServerError(explanation=msg) - if cluster_meta.has_key('networks'): + if 'networks' in cluster_meta: orig_keys = list(eval(cluster_meta['networks'])) for network_id in orig_keys: self._raise_404_if_network_deleted(req, network_id) @@ -687,6 +738,7 @@ class Controller(controller.BaseController): return {'cluster_meta': cluster_meta} + class ProjectDeserializer(wsgi.JSONRequestDeserializer): """Handles deserialization of specific controller method requests.""" @@ -701,6 +753,7 @@ class ProjectDeserializer(wsgi.JSONRequestDeserializer): def update_cluster(self, request): return self._deserialize(request) + class ProjectSerializer(wsgi.JSONResponseSerializer): """Handles serialization of specific controller method responses.""" @@ -727,6 +780,7 @@ class ProjectSerializer(wsgi.JSONResponseSerializer): response.headers['Content-Type'] = 'application/json' response.body = self.to_json(dict(cluster=cluster_meta)) return response + def get_cluster(self, response, result): cluster_meta = result['cluster_meta'] response.status = 201 @@ -734,9 +788,9 @@ class ProjectSerializer(wsgi.JSONResponseSerializer): response.body = self.to_json(dict(cluster=cluster_meta)) return response + def create_resource(): """Projects resource factory method""" deserializer = ProjectDeserializer() serializer = ProjectSerializer() return wsgi.Resource(Controller(), deserializer, serializer) - diff --git a/code/daisy/daisy/api/v1/components.py b/code/daisy/daisy/api/v1/components.py index f235a56f..b94120c5 100755 --- a/code/daisy/daisy/api/v1/components.py +++ b/code/daisy/daisy/api/v1/components.py @@ -52,21 +52,25 @@ CONF.import_opt('container_formats', 'daisy.common.config', group='image_format') CONF.import_opt('image_property_quota', 'daisy.common.config') + class Controller(controller.BaseController): """ WSGI controller for components resource in Daisy v1 API - The components resource API is a RESTful web service for component data. 
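add_cluster and update_cluster above select the deployment (PXE) interface the same way as the new get_pxe_mac helper in api/common.py; a small illustration with a made-up host record:

# Illustration of the deployment-interface selection used by
# add_cluster / update_cluster (the host record is made up).
host = {'interfaces': [
    {'name': 'eth0', 'mac': 'fa:16:3e:00:00:01', 'is_deployment': 1},
    {'name': 'eth1', 'mac': 'fa:16:3e:00:00:02', 'is_deployment': 0},
]}

input_host_pxe_info = [i for i in host['interfaces']
                       if i.get('is_deployment', None) == 1]
pxe_macs = [i['mac'] for i in input_host_pxe_info]   # ['fa:16:3e:00:00:01']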
The API - is as follows:: + The components resource API is a RESTful web service for component data. + The API is as follows:: GET /components -- Returns a set of brief metadata about components GET /components/detail -- Returns a set of detailed metadata about components - HEAD /components/ -- Return metadata about an component with id - GET /components/ -- Return component data for component with id + HEAD /components/ -- + Return metadata about an component with id + GET /components/ -- + Return component data for component with id POST /components -- Store component data and return metadata about the newly-stored component - PUT /components/ -- Update component metadata and/or upload component + PUT /components/ -- + Update component metadata and/or upload component data for a previously-reserved component DELETE /components/ -- Delete the component with id """ @@ -132,15 +136,16 @@ class Controller(controller.BaseController): :raises HTTPBadRequest if x-component-name is missing """ self._enforce(req, 'add_component') - #component_id=component_meta["id"] - #component_owner=component_meta["owner"] + # component_id=component_meta["id"] + # component_owner=component_meta["owner"] component_name = component_meta["name"] component_description = component_meta["description"] - #print component_id - #print component_owner + # print component_id + # print component_owner print component_name print component_description - component_meta = registry.add_component_metadata(req.context, component_meta) + component_meta = registry.add_component_metadata( + req.context, component_meta) return {'component_meta': component_meta} @@ -156,7 +161,7 @@ class Controller(controller.BaseController): """ self._enforce(req, 'delete_component') - #component = self.get_component_meta_or_404(req, id) + # component = self.get_component_meta_or_404(req, id) print "delete_component:%s" % id try: registry.delete_component_metadata(req.context, id) @@ -175,14 +180,15 @@ class Controller(controller.BaseController): request=req, content_type="text/plain") except exception.InUseByStore as e: - msg = (_("component %(id)s could not be deleted because it is in use: " + msg = (_("component %(id)s could not be " + "deleted because it is in use: " "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)}) LOG.warn(msg) raise HTTPConflict(explanation=msg, request=req, content_type="text/plain") else: - #self.notifier.info('component.delete', component) + # self.notifier.info('component.delete', component) return Response(body='', status=200) @utils.mutating @@ -280,6 +286,7 @@ class Controller(controller.BaseController): return {'component_meta': component_meta} + class ComponentDeserializer(wsgi.JSONRequestDeserializer): """Handles deserialization of specific controller method requests.""" @@ -294,6 +301,7 @@ class ComponentDeserializer(wsgi.JSONRequestDeserializer): def update_component(self, request): return self._deserialize(request) + class ComponentSerializer(wsgi.JSONResponseSerializer): """Handles serialization of specific controller method responses.""" @@ -313,6 +321,7 @@ class ComponentSerializer(wsgi.JSONResponseSerializer): response.headers['Content-Type'] = 'application/json' response.body = self.to_json(dict(component=component_meta)) return response + def get_component(self, response, result): component_meta = result['component_meta'] response.status = 201 @@ -320,9 +329,9 @@ class ComponentSerializer(wsgi.JSONResponseSerializer): response.body = self.to_json(dict(component=component_meta)) return response + def 
create_resource(): """Components resource factory method""" deserializer = ComponentDeserializer() serializer = ComponentSerializer() return wsgi.Resource(Controller(), deserializer, serializer) - diff --git a/code/daisy/daisy/api/v1/config_files.py b/code/daisy/daisy/api/v1/config_files.py index e9899fdf..811f94b6 100755 --- a/code/daisy/daisy/api/v1/config_files.py +++ b/code/daisy/daisy/api/v1/config_files.py @@ -52,21 +52,28 @@ CONF.import_opt('container_formats', 'daisy.common.config', group='image_format') CONF.import_opt('image_property_quota', 'daisy.common.config') + class Controller(controller.BaseController): """ WSGI controller for config_files resource in Daisy v1 API - The config_files resource API is a RESTful web service for config_file data. The API + The config_files resource API is a RESTful web service + for config_file data. The API is as follows:: - GET /config_files -- Returns a set of brief metadata about config_files + GET /config_files -- + Returns a set of brief metadata about config_files GET /config_files/detail -- Returns a set of detailed metadata about config_files - HEAD /config_files/ -- Return metadata about an config_file with id - GET /config_files/ -- Return config_file data for config_file with id - POST /config_files -- Store config_file data and return metadata about the + HEAD /config_files/ -- + Return metadata about an config_file with id + GET /config_files/ -- + Return config_file data for config_file with id + POST /config_files -- + Store config_file data and return metadata about the newly-stored config_file - PUT /config_files/ -- Update config_file metadata and/or upload config_file + PUT /config_files/ -- + Update config_file metadata and/or upload config_file data for a previously-reserved config_file DELETE /config_files/ -- Delete the config_file with id """ @@ -132,13 +139,14 @@ class Controller(controller.BaseController): :raises HTTPBadRequest if x-config_file-name is missing """ self._enforce(req, 'add_config_file') - #config_file_id=config_file_meta["id"] + # config_file_id=config_file_meta["id"] config_file_name = config_file_meta["name"] config_file_description = config_file_meta["description"] - #print config_file_id + # print config_file_id print config_file_name print config_file_description - config_file_meta = registry.add_config_file_metadata(req.context, config_file_meta) + config_file_meta = registry.add_config_file_metadata( + req.context, config_file_meta) return {'config_file_meta': config_file_meta} @@ -171,14 +179,15 @@ class Controller(controller.BaseController): request=req, content_type="text/plain") except exception.InUseByStore as e: - msg = (_("config_file %(id)s could not be deleted because it is in use: " + msg = (_("config_file %(id)s could not be " + "deleted because it is in use: " "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)}) LOG.warn(msg) raise HTTPConflict(explanation=msg, request=req, content_type="text/plain") else: - #self.notifier.info('config_file.delete', config_file) + # self.notifier.info('config_file.delete', config_file) return Response(body='', status=200) @utils.mutating @@ -215,7 +224,8 @@ class Controller(controller.BaseController): self._enforce(req, 'get_config_files') params = self._get_query_params(req) try: - config_files = registry.get_config_files_detail(req.context, **params) + config_files = registry.get_config_files_detail( + req.context, **params) except exception.Invalid as e: raise HTTPBadRequest(explanation=e.msg, request=req) return 
dict(config_files=config_files) @@ -241,9 +251,8 @@ class Controller(controller.BaseController): request=req, content_type="text/plain") try: - config_file_meta = registry.update_config_file_metadata(req.context, - id, - config_file_meta) + config_file_meta = registry.update_config_file_metadata( + req.context, id, config_file_meta) except exception.Invalid as e: msg = (_("Failed to update config_file metadata. Got error: %s") % @@ -276,6 +285,7 @@ class Controller(controller.BaseController): return {'config_file_meta': config_file_meta} + class Config_fileDeserializer(wsgi.JSONRequestDeserializer): """Handles deserialization of specific controller method requests.""" @@ -290,6 +300,7 @@ class Config_fileDeserializer(wsgi.JSONRequestDeserializer): def update_config_file(self, request): return self._deserialize(request) + class Config_fileSerializer(wsgi.JSONResponseSerializer): """Handles serialization of specific controller method responses.""" @@ -317,9 +328,9 @@ class Config_fileSerializer(wsgi.JSONResponseSerializer): response.body = self.to_json(dict(config_file=config_file_meta)) return response + def create_resource(): """config_files resource factory method""" deserializer = Config_fileDeserializer() serializer = Config_fileSerializer() return wsgi.Resource(Controller(), deserializer, serializer) - diff --git a/code/daisy/daisy/api/v1/config_sets.py b/code/daisy/daisy/api/v1/config_sets.py index c275267c..5f28cdbf 100755 --- a/code/daisy/daisy/api/v1/config_sets.py +++ b/code/daisy/daisy/api/v1/config_sets.py @@ -53,21 +53,26 @@ CONF.import_opt('container_formats', 'daisy.common.config', group='image_format') CONF.import_opt('image_property_quota', 'daisy.common.config') + class Controller(controller.BaseController): """ WSGI controller for config_sets resource in Daisy v1 API - The config_sets resource API is a RESTful web service for config_set data. The API - is as follows:: + The config_sets resource API is a RESTful web service for config_set data. 
+ The API is as follows:: GET /config_sets -- Returns a set of brief metadata about config_sets GET /config_sets/detail -- Returns a set of detailed metadata about config_sets - HEAD /config_sets/ -- Return metadata about an config_set with id - GET /config_sets/ -- Return config_set data for config_set with id - POST /config_sets -- Store config_set data and return metadata about the + HEAD /config_sets/ -- + Return metadata about an config_set with id + GET /config_sets/ -- + Return config_set data for config_set with id + POST /config_sets -- + Store config_set data and return metadata about the newly-stored config_set - PUT /config_sets/ -- Update config_set metadata and/or upload config_set + PUT /config_sets/ -- + Update config_set metadata and/or upload config_set data for a previously-reserved config_set DELETE /config_sets/ -- Delete the config_set with id """ @@ -125,7 +130,8 @@ class Controller(controller.BaseController): def _raise_404_if_cluster_deleted(self, req, cluster_id): cluster = self.get_cluster_meta_or_404(req, cluster_id) if cluster['deleted']: - msg = _("cluster with identifier %s has been deleted.") % cluster_id + msg = _("cluster with identifier %s has been deleted.") % \ + cluster_id raise HTTPNotFound(msg) @utils.mutating @@ -139,13 +145,14 @@ class Controller(controller.BaseController): :raises HTTPBadRequest if x-config_set-name is missing """ self._enforce(req, 'add_config_set') - #config_set_id=config_set_meta["id"] + # config_set_id=config_set_meta["id"] config_set_name = config_set_meta["name"] config_set_description = config_set_meta["description"] - #print config_set_id + # print config_set_id print config_set_name print config_set_description - config_set_meta = registry.add_config_set_metadata(req.context, config_set_meta) + config_set_meta = registry.add_config_set_metadata( + req.context, config_set_meta) return {'config_set_meta': config_set_meta} @@ -171,21 +178,20 @@ class Controller(controller.BaseController): request=req, content_type="text/plain") except exception.Forbidden as e: - msg = (_("Forbidden to delete config_set: %s") % - utils.exception_to_str(e)) - LOG.warn(msg) - raise HTTPForbidden(explanation=msg, + LOG.warn(e) + raise HTTPForbidden(explanation=e, request=req, content_type="text/plain") except exception.InUseByStore as e: - msg = (_("config_set %(id)s could not be deleted because it is in use: " + msg = (_("config_set %(id)s could not be " + "deleted because it is in use: " "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)}) LOG.warn(msg) raise HTTPConflict(explanation=msg, request=req, content_type="text/plain") else: - #self.notifier.info('config_set.delete', config_set) + # self.notifier.info('config_set.delete', config_set) return Response(body='', status=200) @utils.mutating @@ -222,7 +228,8 @@ class Controller(controller.BaseController): self._enforce(req, 'get_config_sets') params = self._get_query_params(req) try: - config_sets = registry.get_config_sets_detail(req.context, **params) + config_sets = registry.get_config_sets_detail( + req.context, **params) except exception.Invalid as e: raise HTTPBadRequest(explanation=e.msg, request=req) return dict(config_sets=config_sets) @@ -248,9 +255,8 @@ class Controller(controller.BaseController): request=req, content_type="text/plain") try: - config_set_meta = registry.update_config_set_metadata(req.context, - id, - config_set_meta) + config_set_meta = registry.update_config_set_metadata( + req.context, id, config_set_meta) except exception.Invalid as e: msg = (_("Failed 
to update config_set metadata. Got error: %s") % @@ -282,48 +288,51 @@ class Controller(controller.BaseController): self.notifier.info('config_set.update', config_set_meta) return {'config_set_meta': config_set_meta} - - def _raise_404_if_role_exist(self,req,config_set_meta): - role_id_list=[] + + def _raise_404_if_role_exist(self, req, config_set_meta): + role_id_list = [] try: roles = registry.get_roles_detail(req.context) for role in roles: for role_name in eval(config_set_meta['role']): - if role['cluster_id'] == config_set_meta['cluster'] and role['name'] == role_name: + if role['cluster_id'] == config_set_meta[ + 'cluster'] and role['name'] == role_name: role_id_list.append(role['id']) break except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) + raise HTTPBadRequest(explanation=e.msg, request=req) return role_id_list @utils.mutating def cluster_config_set_update(self, req, config_set_meta): - if config_set_meta.has_key('cluster'): + if 'cluster' in config_set_meta: orig_cluster = str(config_set_meta['cluster']) self._raise_404_if_cluster_deleted(req, orig_cluster) try: - if config_set_meta.get('role',None): - role_id_list=self._raise_404_if_role_exist(req,config_set_meta) + if config_set_meta.get('role', None): + role_id_list = self._raise_404_if_role_exist( + req, config_set_meta) if len(role_id_list) == len(eval(config_set_meta['role'])): - for role_id in role_id_list: - backend=manager.configBackend('clushshell', req, role_id) - backend.push_config() + backend = manager.configBackend('clushshell', req) + backend.push_config_by_roles(role_id_list) else: msg = "the role is not exist" LOG.error(msg) raise HTTPNotFound(msg) else: roles = registry.get_roles_detail(req.context) + role_id_list = [] for role in roles: if role['cluster_id'] == config_set_meta['cluster']: - backend=manager.configBackend('clushshell', req, role['id']) - backend.push_config() - + role_id_list.append(role['id']) + backend = manager.configBackend('clushshell', req) + backend.push_config_by_roles(role_id_list) + except exception.Invalid as e: raise HTTPBadRequest(explanation=e.msg, request=req) - - config_status={"status":"config successful"} - return {'config_set':config_status} + + config_status = {"status": "config successful"} + return {'config_set': config_status} else: msg = "the cluster is not exist" LOG.error(msg) @@ -332,18 +341,22 @@ class Controller(controller.BaseController): @utils.mutating def cluster_config_set_progress(self, req, config_set_meta): role_list = [] - if config_set_meta.has_key('cluster'): + if 'cluster' in config_set_meta: orig_cluster = str(config_set_meta['cluster']) self._raise_404_if_cluster_deleted(req, orig_cluster) try: - if config_set_meta.get('role',None): - role_id_list=self._raise_404_if_role_exist(req,config_set_meta) + if config_set_meta.get('role', None): + role_id_list = self._raise_404_if_role_exist( + req, config_set_meta) if len(role_id_list) == len(eval(config_set_meta['role'])): for role_id in role_id_list: role_info = {} - role_meta=registry.get_role_metadata(req.context, role_id) - role_info['role-name']=role_meta['name'] - role_info['config_set_update_progress']=role_meta['config_set_update_progress'] + role_meta = registry.get_role_metadata( + req.context, role_id) + role_info['role-name'] = role_meta['name'] + role_info['config_set_update_progress'] = \ + role_meta[ + 'config_set_update_progress'] role_list.append(role_info) else: msg = "the role is not exist" @@ -354,19 +367,21 @@ class Controller(controller.BaseController): 
for role in roles: if role['cluster_id'] == config_set_meta['cluster']: role_info = {} - role_info['role-name']=role['name'] - role_info['config_set_update_progress']=role['config_set_update_progress'] + role_info['role-name'] = role['name'] + role_info['config_set_update_progress'] = role[ + 'config_set_update_progress'] role_list.append(role_info) except exception.Invalid as e: raise HTTPBadRequest(explanation=e.msg, request=req) return role_list - + else: msg = "the cluster is not exist" LOG.error(msg) raise HTTPNotFound(msg) + class Config_setDeserializer(wsgi.JSONRequestDeserializer): """Handles deserialization of specific controller method requests.""" @@ -387,6 +402,7 @@ class Config_setDeserializer(wsgi.JSONRequestDeserializer): def cluster_config_set_progress(self, request): return self._deserialize(request) + class Config_setSerializer(wsgi.JSONResponseSerializer): """Handles serialization of specific controller method responses.""" @@ -426,9 +442,9 @@ class Config_setSerializer(wsgi.JSONResponseSerializer): response.body = self.to_json(dict(config_set=result)) return response + def create_resource(): """config_sets resource factory method""" deserializer = Config_setDeserializer() serializer = Config_setSerializer() return wsgi.Resource(Controller(), deserializer, serializer) - diff --git a/code/daisy/daisy/api/v1/configs.py b/code/daisy/daisy/api/v1/configs.py index 15bfd303..fdc18bf8 100755 --- a/code/daisy/daisy/api/v1/configs.py +++ b/code/daisy/daisy/api/v1/configs.py @@ -52,6 +52,7 @@ CONF.import_opt('container_formats', 'daisy.common.config', group='image_format') CONF.import_opt('image_property_quota', 'daisy.common.config') + class Controller(controller.BaseController): """ WSGI controller for configs resource in Daisy v1 API @@ -120,32 +121,40 @@ class Controller(controller.BaseController): if PARAM in req.params: params[PARAM] = req.params.get(PARAM) return params + def _raise_404_if_config_set_delete(self, req, config_set_id): config_set = self.get_config_set_meta_or_404(req, config_set_id) if config_set['deleted']: - msg = _("config_set with identifier %s has been deleted.") % config_set_id + msg = _("config_set with identifier %s has been deleted.") % \ + config_set_id raise HTTPNotFound(msg) def _raise_404_if_config_file_delete(self, req, config_file_id): config_file = self.get_config_file_meta_or_404(req, config_file_id) if config_file['deleted']: - msg = _("config_file with identifier %s has been deleted.") % config_file_id + msg = _( + "config_file with identifier %s has been deleted.") % \ + config_file_id raise HTTPNotFound(msg) - def _raise_404_if_role_exist(self,req,config_meta): - role_id="" + + def _raise_404_if_role_exist(self, req, config_meta): + role_id = "" try: roles = registry.get_roles_detail(req.context) for role in roles: - if role['cluster_id'] == config_meta['cluster'] and role['name'] == config_meta['role']: - role_id=role['id'] + if role['cluster_id'] == config_meta[ + 'cluster'] and role['name'] == config_meta['role']: + role_id = role['id'] break except exception.Invalid as e: - raise HTTPBadRequest(explanation=e.msg, request=req) + raise HTTPBadRequest(explanation=e.msg, request=req) return role_id + def _raise_404_if_cluster_deleted(self, req, cluster_id): cluster = self.get_cluster_meta_or_404(req, cluster_id) if cluster['deleted']: - msg = _("cluster with identifier %s has been deleted.") % cluster_id + msg = _("cluster with identifier %s has been deleted.") % \ + cluster_id raise HTTPNotFound(msg) @utils.mutating @@ -159,19 +168,57 @@ 
class Controller(controller.BaseController): :raises HTTPBadRequest if x-config-name is missing """ self._enforce(req, 'add_config') - - if config_meta.has_key('cluster'): - orig_cluster = str(config_meta['cluster']) - self._raise_404_if_cluster_deleted(req, orig_cluster) - - if config_meta.has_key('role'): - role_id=self._raise_404_if_role_exist(req,config_meta) - if not role_id: - msg = "the role name is not exist" + + if ('role' in config_meta and + 'host_id' in config_meta): + msg = "role name and host id only have one" + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, request=req) + elif 'role' in config_meta: + # the first way to add config + # when have 'role', config_set will be ignore + if config_meta.get('cluster'): + orig_cluster = str(config_meta['cluster']) + self._raise_404_if_cluster_deleted(req, orig_cluster) + else: + msg = "cluster must be given when add config for role" LOG.error(msg) raise HTTPNotFound(msg) + if config_meta['role']: + role_id = self._raise_404_if_role_exist(req, config_meta) + if not role_id: + msg = "the role name is not exist" + LOG.error(msg) + raise HTTPNotFound(msg) + else: + msg = "the role name can't be empty" + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, request=req) + elif 'host_id' in config_meta: + # the second way to add config + # when have 'host_id', config_set will be ignore + if config_meta['host_id']: + self.get_host_meta_or_404(req, config_meta['host_id']) + else: + msg = "the host id can't be empty" + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, request=req) + elif 'config_set' in config_meta: + # the third way to add config + if config_meta['config_set']: + self.get_config_set_meta_or_404(req, + config_meta['config_set']) + else: + msg = "config set id can't be empty" + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, request=req) + else: + msg = "no way to add config" + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, request=req) - config_meta = registry.config_interface_metadata(req.context, config_meta) + config_meta = registry.config_interface_metadata( + req.context, config_meta) return config_meta @utils.mutating @@ -204,14 +251,15 @@ class Controller(controller.BaseController): request=req, content_type="text/plain") except exception.InUseByStore as e: - msg = (_("config %(id)s could not be deleted because it is in use: " + msg = (_("config %(id)s could not be " + "deleted because it is in use: " "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)}) LOG.warn(msg) raise HTTPConflict(explanation=msg, request=req, content_type="text/plain") else: - #self.notifier.info('config.delete', config) + # self.notifier.info('config.delete', config) return Response(body='', status=200) @utils.mutating @@ -253,6 +301,7 @@ class Controller(controller.BaseController): raise HTTPBadRequest(explanation=e.msg, request=req) return dict(configs=configs) + class ConfigDeserializer(wsgi.JSONRequestDeserializer): """Handles deserialization of specific controller method requests.""" @@ -263,10 +312,11 @@ class ConfigDeserializer(wsgi.JSONRequestDeserializer): def add_config(self, request): return self._deserialize(request) - + def delete_config(self, request): return self._deserialize(request) + class ConfigSerializer(wsgi.JSONResponseSerializer): """Handles serialization of specific controller method responses.""" @@ -293,9 +343,9 @@ class ConfigSerializer(wsgi.JSONResponseSerializer): response.body = self.to_json(dict(config=config_meta)) return response + def create_resource(): """configs resource 
factory method""" deserializer = ConfigDeserializer() serializer = ConfigSerializer() return wsgi.Resource(Controller(), deserializer, serializer) - diff --git a/code/daisy/daisy/api/v1/controller.py b/code/daisy/daisy/api/v1/controller.py index cc47a4d8..eadd4389 100755 --- a/code/daisy/daisy/api/v1/controller.py +++ b/code/daisy/daisy/api/v1/controller.py @@ -27,6 +27,7 @@ _ = i18n._ class BaseController(object): + def get_image_meta_or_404(self, request, image_id): """ Grabs the image metadata for an image with a supplied @@ -101,6 +102,7 @@ class BaseController(object): raise webob.exc.HTTPForbidden(msg, request=request, content_type='text/plain') + def get_component_meta_or_404(self, request, component_id): """ Grabs the component metadata for an component with a supplied @@ -175,7 +177,7 @@ class BaseController(object): raise webob.exc.HTTPForbidden(msg, request=request, content_type='text/plain') - + def get_network_meta_or_404(self, request, network_id): """ Grabs the network metadata for an network with a supplied @@ -199,7 +201,7 @@ class BaseController(object): LOG.debug(msg) raise webob.exc.HTTPForbidden(msg, request=request, - content_type='text/plain') + content_type='text/plain') def get_active_image_meta_or_error(self, request, image_id): """ @@ -242,7 +244,7 @@ class BaseController(object): raise webob.exc.HTTPBadRequest(explanation=msg, request=req, content_type='text/plain') - + def get_config_file_meta_or_404(self, request, config_file_id): """ Grabs the config_file metadata for an config_file with a supplied @@ -291,7 +293,7 @@ class BaseController(object): LOG.debug(msg) raise webob.exc.HTTPForbidden(msg, request=request, - content_type='text/plain') + content_type='text/plain') def get_config_meta_or_404(self, request, config_id): """ @@ -342,7 +344,7 @@ class BaseController(object): raise webob.exc.HTTPForbidden(msg, request=request, content_type='text/plain') - + def get_cinder_volume_meta_or_404(self, request, id): """ Grabs the config metadata for an config with a supplied @@ -366,4 +368,4 @@ class BaseController(object): LOG.debug(msg) raise webob.exc.HTTPForbidden(msg, request=request, - content_type='text/plain') \ No newline at end of file + content_type='text/plain') diff --git a/code/daisy/daisy/api/v1/disk_array.py b/code/daisy/daisy/api/v1/disk_array.py index 4efce262..692fd187 100755 --- a/code/daisy/daisy/api/v1/disk_array.py +++ b/code/daisy/daisy/api/v1/disk_array.py @@ -16,19 +16,15 @@ """ /hosts endpoint for Daisy v1 API """ -import time -import traceback import ast -import webob.exc from oslo_log import log as logging from webob.exc import HTTPBadRequest from webob.exc import HTTPForbidden -from webob.exc import HTTPServerError +from webob.exc import HTTPNotFound +from webob.exc import HTTPConflict from webob import Response -from threading import Thread - from daisy import i18n from daisy import notifier @@ -43,10 +39,6 @@ import daisy.registry.client.v1.api as registry from daisy.api.v1 import controller from daisy.api.v1 import filters -try: - import simplejson as json -except ImportError: - import json LOG = logging.getLogger(__name__) _ = i18n._ @@ -56,13 +48,15 @@ _LW = i18n._LW SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE -SERVICE_DISK_SERVICE = ('db', 'glance', 'dbbackup', 'mongodb', 'nova') -DISK_LOCATION = ('local', 'share') -CINDER_VOLUME_BACKEND_PARAMS = ('management_ips', 'data_ips','pools', +SERVICE_DISK_SERVICE = ('db', 'glance', 
'db_backup', 'mongodb', 'nova') +DISK_LOCATION = ('local', 'share', 'share_cluster') +PROTOCOL_TYPE = ('FIBER', 'ISCSI', 'CEPH') +CINDER_VOLUME_BACKEND_PARAMS = ('management_ips', 'data_ips', 'pools', 'volume_driver', 'volume_type', - 'role_id', 'user_name','user_pwd') + 'role_id', 'user_name', 'user_pwd') CINDER_VOLUME_BACKEND_DRIVER = ['KS3200_IPSAN', 'KS3200_FCSAN', - 'FUJISTU_ETERNUS'] + 'FUJITSU_ETERNUS', 'HP3PAR_FCSAN'] + class Controller(controller.BaseController): """ @@ -82,6 +76,7 @@ class Controller(controller.BaseController): data for a previously-reserved host DELETE /hosts/ -- Delete the host with id """ + def __init__(self): self.notifier = notifier.Notifier() registry.configure_registry_client() @@ -99,7 +94,7 @@ class Controller(controller.BaseController): self.policy.enforce(req.context, action, target) except exception.Forbidden: raise HTTPForbidden() - + def _get_filters(self, req): """ Return a dictionary of query param filters from the request @@ -146,39 +141,57 @@ class Controller(controller.BaseController): def _raise_404_if_service_disk_deleted(self, req, service_disk_id): service_disk = self.get_service_disk_meta_or_404(req, service_disk_id) if service_disk is None or service_disk['deleted']: - msg = _("service_disk with identifier %s has been deleted.") % service_disk_id + msg = _( + "service_disk with identifier %s has been deleted.") % \ + service_disk_id raise HTTPNotFound(msg) - - def _default_value_set(self, disk_meta): - if (not disk_meta.has_key('disk_location') or - not disk_meta['disk_location'] or - disk_meta['disk_location'] == ''): - disk_meta['disk_location'] = 'local' - if not disk_meta.has_key('lun'): - disk_meta['lun'] = 0 - if not disk_meta.has_key('size'): - disk_meta['size'] = -1 - def _unique_service_in_role(self, req, disk_meta): + def _default_value_set(self, disk_meta): + if ('disk_location' not in disk_meta or + not disk_meta['disk_location'] or + disk_meta['disk_location'] == ''): + disk_meta['disk_location'] = 'local' + if 'lun' not in disk_meta: + disk_meta['lun'] = 0 + if 'size' not in disk_meta: + disk_meta['size'] = -1 + if 'protocol_type' not in disk_meta: + disk_meta['protocol_type'] = 'ISCSI' + + def _unique_service_in_role(self, req, disk_meta): params = {'filters': {'role_id': disk_meta['role_id']}} - service_disks = registry.list_service_disk_metadata(req.context, **params) - for service_disk in service_disks: - if service_disk['service'] == disk_meta['service']: - msg = "disk service %s has existed in role %s" %(disk_meta['service'], disk_meta['role_id']) - raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") - + service_disks = registry.list_service_disk_metadata( + req.context, **params) + if disk_meta['disk_location'] == 'share_cluster': + for disk in service_disks: + if disk['service'] == disk_meta['service'] and \ + disk['disk_location'] != 'share_cluster': + id = disk['id'] + registry.delete_service_disk_metadata(req.context, id) + else: + for service_disk in service_disks: + if service_disk['disk_location'] == 'share_cluster' and \ + service_disk['service'] == disk_meta['service']: + id = service_disk['id'] + registry.delete_service_disk_metadata(req.context, id) + elif service_disk['service'] == disk_meta['service']: + msg = "disk service %s has existed in role %s" % ( + disk_meta['service'], disk_meta['role_id']) + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + def _service_disk_add_meta_valid(self, req, disk_meta): - if not 
disk_meta.has_key('role_id'): + if 'role_id' not in disk_meta: msg = "'role_id' must be given" raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") else: - self._raise_404_if_role_deleted(req,disk_meta['role_id']) + self._raise_404_if_role_deleted(req, disk_meta['role_id']) - if not disk_meta.has_key('service'): + if 'service' not in disk_meta: msg = "'service' must be given" raise HTTPBadRequest(explanation=msg, request=req, @@ -187,20 +200,22 @@ class Controller(controller.BaseController): if disk_meta['service'] not in SERVICE_DISK_SERVICE: msg = "service '%s' is not supported" % disk_meta['service'] raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") - + request=req, + content_type="text/plain") + if disk_meta['disk_location'] not in DISK_LOCATION: - msg = "disk_location %s is not supported" % disk_meta['disk_location'] - raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") - if disk_meta['disk_location'] == 'share' and not disk_meta.has_key('data_ips'): - msg = "'data_ips' must be given when disk_location is share" + msg = "disk_location %s is not supported" % disk_meta[ + 'disk_location'] raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") - + if disk_meta['disk_location'] in ['share', 'share_cluster'] \ + and 'data_ips' not in disk_meta: + msg = "'data_ips' must be given when disk_location was not local" + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + if disk_meta['lun'] < 0: msg = "'lun' should not be less than 0" raise HTTPBadRequest(explanation=msg, @@ -218,48 +233,65 @@ class Controller(controller.BaseController): raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") - - self._unique_service_in_role(req, disk_meta) + + if disk_meta.get('protocol_type', None) \ + and disk_meta['protocol_type'] not in PROTOCOL_TYPE: + msg = "protocol type %s is not supported" % disk_meta[ + 'protocol_type'] + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + + self._unique_service_in_role(req, disk_meta) def _service_disk_update_meta_valid(self, req, id, disk_meta): orig_disk_meta = self.get_service_disk_meta_or_404(req, id) - if disk_meta.has_key('role_id'): - self._raise_404_if_role_deleted(req,disk_meta['role_id']) + if 'role_id' in disk_meta: + self._raise_404_if_role_deleted(req, disk_meta['role_id']) - if disk_meta.has_key('service'): + if 'service' in disk_meta: if disk_meta['service'] not in SERVICE_DISK_SERVICE: msg = "service '%s' is not supported" % disk_meta['service'] raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") - - if disk_meta.has_key('disk_location'): + request=req, + content_type="text/plain") + + if 'disk_location' in disk_meta: if disk_meta['disk_location'] not in DISK_LOCATION: - msg = "disk_location '%s' is not supported" % disk_meta['disk_location'] + msg = "disk_location '%s' is not supported" % disk_meta[ + 'disk_location'] raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") - if (disk_meta['disk_location'] == 'share' and - not disk_meta.has_key('data_ips') and - not orig_disk_meta['data_ips']): + request=req, + content_type="text/plain") + if (disk_meta['disk_location'] == 'share' and + 'data_ips' not in disk_meta and + not orig_disk_meta['data_ips']): msg = "'data_ips' must be given when disk_location is share" raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") - - if 
disk_meta.has_key('size'): + request=req, + content_type="text/plain") + + if 'size' in disk_meta: disk_meta['size'] = ast.literal_eval(str(disk_meta['size'])) if not isinstance(disk_meta['size'], int): msg = "'size' is not integer" raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") + request=req, + content_type="text/plain") if disk_meta['size'] < -1: msg = "'size' is invalid" raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") - + + if disk_meta.get('protocol_type', None) \ + and disk_meta['protocol_type'] not in PROTOCOL_TYPE: + msg = "protocol type %s is not supported" % disk_meta[ + 'protocol_type'] + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + @utils.mutating def service_disk_add(self, req, disk_meta): """ @@ -269,19 +301,19 @@ class Controller(controller.BaseController): :raises HTTPBadRequest if x-install-cluster is missing """ - + self._enforce(req, 'service_disk_add') self._default_value_set(disk_meta) self._service_disk_add_meta_valid(req, disk_meta) - service_disk_meta = registry.add_service_disk_metadata(req.context, disk_meta) + service_disk_meta = registry.add_service_disk_metadata( + req.context, disk_meta) return {'disk_meta': service_disk_meta} - @utils.mutating def service_disk_delete(self, req, id): """ Deletes a service_disk from Daisy. - + :param req: The WSGI/Webob Request object :param image_meta: Mapping of metadata about service_disk @@ -305,7 +337,8 @@ class Controller(controller.BaseController): request=req, content_type="text/plain") except exception.InUseByStore as e: - msg = (_("service_disk %(id)s could not be deleted because it is in use: " + msg = (_("service_disk %(id)s could not be deleted " + "because it is in use: " "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)}) LOG.warn(msg) raise HTTPConflict(explanation=msg, @@ -319,9 +352,8 @@ class Controller(controller.BaseController): self._enforce(req, 'service_disk_update') self._service_disk_update_meta_valid(req, id, disk_meta) try: - service_disk_meta = registry.update_service_disk_metadata(req.context, - id, - disk_meta) + service_disk_meta = registry.update_service_disk_metadata( + req.context, id, disk_meta) except exception.Invalid as e: msg = (_("Failed to update role metadata. 
Got error: %s") % @@ -353,7 +385,6 @@ class Controller(controller.BaseController): self.notifier.info('role.update', service_disk_meta) return {'disk_meta': service_disk_meta} - @utils.mutating def service_disk_detail(self, req, id): @@ -374,53 +405,61 @@ class Controller(controller.BaseController): def service_disk_list(self, req): self._enforce(req, 'service_disk_list') params = self._get_query_params(req) - filters=params.get('filters',None) + filters = params.get('filters', None) if 'role_id' in filters: - role_id=filters['role_id'] + role_id = filters['role_id'] self._raise_404_if_role_deleted(req, role_id) try: - service_disks = registry.list_service_disk_metadata(req.context, **params) + service_disks = registry.list_service_disk_metadata( + req.context, **params) except exception.Invalid as e: raise HTTPBadRequest(explanation=e.msg, request=req) return dict(disk_meta=service_disks) - + def _cinder_volume_list(self, req, params): try: - cinder_volumes = registry.list_cinder_volume_metadata(req.context, **params) + cinder_volumes = registry.list_cinder_volume_metadata( + req.context, **params) except exception.Invalid as e: raise HTTPBadRequest(explanation=e.msg, request=req) return cinder_volumes - - def _is_cinder_volume_repeat(self, req, array_disk_info, update_id = None): - cinder_volume_id = None + + def _is_cinder_volume_repeat(self, req, array_disk_info, update_id=None): params = {'filters': {}} - + if update_id: - cinder_volume_metal = self.get_cinder_volume_meta_or_404(req, update_id) - new_management_ips = array_disk_info.get('management_ips', cinder_volume_metal['management_ips']).split(",") - new_pools = array_disk_info.get('pools', cinder_volume_metal['pools']).split(",") + cinder_volume_metal = self.get_cinder_volume_meta_or_404( + req, update_id) + new_management_ips = array_disk_info.get( + 'management_ips', cinder_volume_metal[ + 'management_ips']).split(",") + new_pools = array_disk_info.get( + 'pools', cinder_volume_metal['pools']).split(",") else: new_management_ips = array_disk_info['management_ips'].split(",") new_pools = array_disk_info['pools'].split(",") - + org_cinder_volumes = self._cinder_volume_list(req, params) for cinder_volume in org_cinder_volumes: - if (set(cinder_volume['management_ips'].split(",")) == set(new_management_ips) and - set(cinder_volume['pools'].split(",")) == set(new_pools)): + if (set(cinder_volume['management_ips'].split(",")) == set( + new_management_ips) and + set(cinder_volume['pools'].split(",")) == set(new_pools)): if cinder_volume['id'] != update_id: - msg = 'cinder_volume array disks conflict with cinder_volume %s' % cinder_volume['id'] + msg = 'cinder_volume array disks ' \ + 'conflict with cinder_volume %s' % cinder_volume[ + 'id'] raise HTTPBadRequest(explanation=msg, request=req) - + def _get_cinder_volume_backend_index(self, req, disk_array): params = {'filters': {}} cinder_volumes = self._cinder_volume_list(req, params) index = 1 while True: - backend_index = "%s-%s" %(disk_array['volume_driver'], index) + backend_index = "%s-%s" % (disk_array['volume_driver'], index) flag = True for cinder_volume in cinder_volumes: if backend_index == cinder_volume['backend_index']: - index=index+1 + index = index + 1 flag = False break if flag: @@ -437,46 +476,52 @@ class Controller(controller.BaseController): :raises HTTPBadRequest if x-install-cluster is missing """ self._enforce(req, 'cinder_volume_add') - if not disk_meta.has_key('role_id'): + if 'role_id' not in disk_meta: msg = "'role_id' must be given" raise 
HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") else: - self._raise_404_if_role_deleted(req,disk_meta['role_id']) + self._raise_404_if_role_deleted(req, disk_meta['role_id']) disk_arrays = eval(disk_meta['disk_array']) for disk_array in disk_arrays: for key in disk_array.keys(): if (key not in CINDER_VOLUME_BACKEND_PARAMS and - key != 'data_ips'): + key != 'data_ips'): msg = "'%s' must be given for cinder volume config" % key raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") - if disk_array['volume_driver'] not in CINDER_VOLUME_BACKEND_DRIVER: - msg = "volume_driver %s is not supported" % disk_array['volume_driver'] + request=req, + content_type="text/plain") + if disk_array[ + 'volume_driver'] not in CINDER_VOLUME_BACKEND_DRIVER: + msg = "volume_driver %s is not supported" % disk_array[ + 'volume_driver'] raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") - if (disk_array['volume_driver'] == 'FUJISTU_ETERNUS' and - (not disk_array.has_key('data_ips') or + request=req, + content_type="text/plain") + if (disk_array['volume_driver'] == 'FUJITSU_ETERNUS' and + ('data_ips' not in disk_array or not disk_array['data_ips'])): - msg = "data_ips must be given when using FUJISTU Disk Array" + msg = "data_ips must be given " \ + "when using FUJITSU Disk Array" raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") + request=req, + content_type="text/plain") self._is_cinder_volume_repeat(req, disk_array) disk_array['role_id'] = disk_meta['role_id'] - disk_array['backend_index'] = self._get_cinder_volume_backend_index(req, disk_array) - cinder_volumes = registry.add_cinder_volume_metadata(req.context, disk_array) + disk_array['backend_index'] = \ + self._get_cinder_volume_backend_index( + req, disk_array) + cinder_volumes = registry.add_cinder_volume_metadata( + req.context, disk_array) return {'disk_meta': cinder_volumes} - + @utils.mutating def cinder_volume_delete(self, req, id): """ Deletes a service_disk from Daisy. 
- + :param req: The WSGI/Webob Request object :param image_meta: Mapping of metadata about service_disk @@ -500,7 +545,8 @@ class Controller(controller.BaseController): request=req, content_type="text/plain") except exception.InUseByStore as e: - msg = (_("cindre volume %(id)s could not be deleted because it is in use: " + msg = (_("cindre volume %(id)s could not " + "be deleted because it is in use: " "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)}) LOG.warn(msg) raise HTTPConflict(explanation=msg, @@ -510,20 +556,21 @@ class Controller(controller.BaseController): return Response(body='', status=200) def _is_data_ips_valid(self, req, update_id, update_meta): - orgin_cinder_volume = self.get_cinder_volume_meta_or_404(req, update_id) + orgin_cinder_volume = self.get_cinder_volume_meta_or_404( + req, update_id) - new_driver = update_meta.get('volume_driver', - orgin_cinder_volume['volume_driver']) - if new_driver != 'FUJISTU_ETERNUS': + new_driver = update_meta.get('volume_driver', + orgin_cinder_volume['volume_driver']) + if new_driver != 'FUJITSU_ETERNUS': return new_data_ips = update_meta.get('data_ips', - orgin_cinder_volume['data_ips']) + orgin_cinder_volume['data_ips']) if not new_data_ips: - msg = "data_ips must be given when using FUJISTU Disk Array" + msg = "data_ips must be given when using FUJITSU Disk Array" raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") + request=req, + content_type="text/plain") @utils.mutating def cinder_volume_update(self, req, id, disk_meta): @@ -531,28 +578,29 @@ class Controller(controller.BaseController): if key not in CINDER_VOLUME_BACKEND_PARAMS: msg = "'%s' must be given for cinder volume config" % key raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") - if disk_meta.has_key('role_id'): - self._raise_404_if_role_deleted(req,disk_meta['role_id']) - if (disk_meta.has_key('volume_driver') and - disk_meta['volume_driver'] not in CINDER_VOLUME_BACKEND_DRIVER): - msg = "volume_driver %s is not supported" % disk_meta['volume_driver'] + request=req, + content_type="text/plain") + if 'role_id' in disk_meta: + self._raise_404_if_role_deleted(req, disk_meta['role_id']) + if ('volume_driver' in disk_meta and disk_meta[ + 'volume_driver'] not in CINDER_VOLUME_BACKEND_DRIVER): + msg = "volume_driver %s is not supported" % disk_meta[ + 'volume_driver'] raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") - + request=req, + content_type="text/plain") + self._is_cinder_volume_repeat(req, disk_meta, id) self._is_data_ips_valid(req, id, disk_meta) try: - cinder_volume_meta = registry.update_cinder_volume_metadata(req.context, - id, - disk_meta) + cinder_volume_meta = registry.update_cinder_volume_metadata( + req.context, id, disk_meta) except exception.Invalid as e: - msg = (_("Failed to update cinder_volume metadata. Got error: %s") % - utils.exception_to_str(e)) + msg = ( + _("Failed to update cinder_volume metadata. 
Got error: %s") % + utils.exception_to_str(e)) LOG.warn(msg) raise HTTPBadRequest(explanation=msg, request=req, @@ -580,7 +628,7 @@ class Controller(controller.BaseController): self.notifier.info('cinder_volume.update', cinder_volume_meta) return {'disk_meta': cinder_volume_meta} - + @utils.mutating def cinder_volume_detail(self, req, id): """ @@ -595,17 +643,17 @@ class Controller(controller.BaseController): self._enforce(req, 'cinder_volume_detail') cinder_volume_meta = self.get_cinder_volume_meta_or_404(req, id) return {'disk_meta': cinder_volume_meta} - + def cinder_volume_list(self, req): self._enforce(req, 'cinder_volume_list') params = self._get_query_params(req) - filters=params.get('filters',None) + filters = params.get('filters', None) if 'role_id' in filters: - role_id=filters['role_id'] + role_id = filters['role_id'] self._raise_404_if_role_deleted(req, role_id) cinder_volumes = self._cinder_volume_list(req, params) return dict(disk_meta=cinder_volumes) - + class DiskArrayDeserializer(wsgi.JSONRequestDeserializer): """Handles deserialization of specific controller method requests.""" @@ -614,19 +662,20 @@ class DiskArrayDeserializer(wsgi.JSONRequestDeserializer): result = {} result["disk_meta"] = utils.get_dict_meta(request) return result - + def service_disk_add(self, request): return self._deserialize(request) - + def service_disk_update(self, request): return self._deserialize(request) - + def cinder_volume_add(self, request): return self._deserialize(request) - + def cinder_volume_update(self, request): return self._deserialize(request) - + + class DiskArraySerializer(wsgi.JSONResponseSerializer): """Handles serialization of specific controller method responses.""" @@ -634,33 +683,30 @@ class DiskArraySerializer(wsgi.JSONResponseSerializer): self.notifier = notifier.Notifier() def service_disk_add(self, response, result): - disk_meta = result['disk_meta'] response.status = 201 response.headers['Content-Type'] = 'application/json' response.body = self.to_json(result) return response - + def service_disk_update(self, response, result): - disk_meta = result['disk_meta'] response.status = 201 response.headers['Content-Type'] = 'application/json' response.body = self.to_json(result) return response - + def cinder_volume_add(self, response, result): - disk_meta = result['disk_meta'] response.status = 201 response.headers['Content-Type'] = 'application/json' response.body = self.to_json(result) return response - + def cinder_volume_update(self, response, result): - disk_meta = result['disk_meta'] response.status = 201 response.headers['Content-Type'] = 'application/json' response.body = self.to_json(result) return response + def create_resource(): """Image members resource factory method""" deserializer = DiskArrayDeserializer() diff --git a/code/daisy/daisy/api/v1/host_template.py b/code/daisy/daisy/api/v1/host_template.py index b5a15f95..5b67098c 100755 --- a/code/daisy/daisy/api/v1/host_template.py +++ b/code/daisy/daisy/api/v1/host_template.py @@ -24,8 +24,8 @@ from webob.exc import HTTPConflict from webob.exc import HTTPForbidden from webob.exc import HTTPNotFound from webob import Response -import copy -import json + +# import json from daisy.api import policy import daisy.api.v1 @@ -41,7 +41,7 @@ import daisy.registry.client.v1.api as registry from daisy.registry.api.v1 import template import daisy.api.backends.tecs.common as tecs_cmn -import daisy.api.backends.common as daisy_cmn + try: import simplejson as json except ImportError: @@ -64,21 +64,26 @@ 
CONF.import_opt('container_formats', 'daisy.common.config', group='image_format') CONF.import_opt('image_property_quota', 'daisy.common.config') + class Controller(controller.BaseController): """ WSGI controller for Templates resource in Daisy v1 API - The HostTemplates resource API is a RESTful web Template for Template data. The API - is as follows:: + The HostTemplates resource API is a RESTful web Template for Template data. + The API is as follows:: GET /HostTemplates -- Returns a set of brief metadata about Templates GET /HostTemplates/detail -- Returns a set of detailed metadata about HostTemplates - HEAD /HostTemplates/ -- Return metadata about an Template with id - GET /HostTemplates/ -- Return Template data for Template with id - POST /HostTemplates -- Store Template data and return metadata about the + HEAD /HostTemplates/ -- + Return metadata about an Template with id + GET /HostTemplates/ -- + Return Template data for Template with id + POST /HostTemplates -- + Store Template data and return metadata about the newly-stored Template - PUT /HostTemplates/ -- Update Template metadata and/or upload Template + PUT /HostTemplates/ -- + Update Template metadata and/or upload Template data for a previously-reserved Template DELETE /HostTemplates/ -- Delete the Template with id """ @@ -136,8 +141,9 @@ class Controller(controller.BaseController): def _raise_404_if_cluster_deleted(self, req, cluster_id): cluster = self.get_cluster_meta_or_404(req, cluster_id) if cluster['deleted']: - msg = _("Cluster with identifier %s has been deleted.") % cluster_id - raise webob.exc.HTTPNotFound(msg) + msg = _("Cluster with identifier %s has been deleted.") % \ + cluster_id + raise HTTPNotFound(msg) @utils.mutating def add_template(self, req, host_template): @@ -150,9 +156,9 @@ class Controller(controller.BaseController): :raises HTTPBadRequest if x-Template-name is missing """ self._enforce(req, 'add_host_template') - template_name = host_template["name"] - - host_template = registry.add_host_template_metadata(req.context, host_template) + + host_template = registry.add_host_template_metadata( + req.context, host_template) return {'host_template': template} @@ -167,7 +173,7 @@ class Controller(controller.BaseController): :retval Returns the updated image information as a mapping """ self._enforce(req, 'update_host_template') - #orig_Template_meta = self.get_Template_meta_or_404(req, id) + # orig_Template_meta = self.get_Template_meta_or_404(req, id) ''' if orig_Template_meta['deleted']: msg = _("Forbidden to update deleted Template.") @@ -176,9 +182,8 @@ class Controller(controller.BaseController): content_type="text/plain") ''' try: - host_template = registry.update_host_template_metadata(req.context, - template_id, - host_template) + host_template = registry.update_host_template_metadata( + req.context, template_id, host_template) except exception.Invalid as e: msg = (_("Failed to update template metadata. 
Got error: %s") % @@ -210,47 +215,51 @@ class Controller(controller.BaseController): self.notifier.info('host_template.update', host_template) return {'host_template': host_template} - + def _filter_params(self, host_meta): for key in host_meta.keys(): - if key=="id" or key=="updated_at" or key=="deleted_at" or key=="created_at" or key=="deleted": + if key == "id" or key == "updated_at" or key == "deleted_at" or \ + key == "created_at" or key == "deleted": del host_meta[key] - if host_meta.has_key("memory"): + if "memory" in host_meta: del host_meta['memory'] - - if host_meta.has_key("system"): + + if "system" in host_meta: del host_meta['system'] - - if host_meta.has_key("disks"): - del host_meta['disks'] - - if host_meta.has_key("os_status"): - del host_meta['os_status'] - if host_meta.has_key("status"): - del host_meta['status'] + if "disks" in host_meta: + del host_meta['disks'] - if host_meta.has_key("messages"): - del host_meta['messages'] - - if host_meta.has_key("cpu"): + if "os_status" in host_meta: + del host_meta['os_status'] + + if "status" in host_meta: + del host_meta['status'] + + if "messages" in host_meta: + del host_meta['messages'] + + if "cpu" in host_meta: del host_meta['cpu'] - if host_meta.has_key("ipmi_addr"): + if "ipmi_addr" in host_meta: del host_meta['ipmi_addr'] - if host_meta.has_key("interfaces"): + if "interfaces" in host_meta: for interface in host_meta['interfaces']: for key in interface.keys(): - if key=="id" or key=="updated_at" or key=="deleted_at" \ - or key=="created_at" or key=="deleted" or key=="current_speed" \ - or key=="max_speed" or key=="host_id" or key=="state": + if key == "id" or key == "updated_at" or \ + key == "deleted_at" \ + or key == "created_at" or key == "deleted" or \ + key == "current_speed" \ + or key == "max_speed" or key == "host_id" or \ + key == "state": del interface[key] for assigned_network in interface['assigned_networks']: - if assigned_network.has_key("ip"): - assigned_network['ip'] = "" - return host_meta - + if "ip" in assigned_network: + assigned_network['ip'] = "" + return host_meta + @utils.mutating def get_host_template_detail(self, req, template_id): """ @@ -263,7 +272,8 @@ class Controller(controller.BaseController): """ self._enforce(req, 'get_host_template_detail') try: - host_template = registry.host_template_detail_metadata(req.context, template_id) + host_template = registry.host_template_detail_metadata( + req.context, template_id) return {'host_template': host_template} except exception.NotFound as e: msg = (_("Failed to find host template: %s") % @@ -280,30 +290,33 @@ class Controller(controller.BaseController): request=req, content_type="text/plain") except exception.InUseByStore as e: - msg = (_("host template %(id)s could not be get because it is in use: " - "%(exc)s") % {"id": template_id, "exc": utils.exception_to_str(e)}) + msg = (_("host template %(id)s could not be get " + "because it is in use: " + "%(exc)s") % {"id": template_id, + "exc": utils.exception_to_str(e)}) LOG.error(msg) raise HTTPConflict(explanation=msg, request=req, content_type="text/plain") else: - #self.notifier.info('host.delete', host) + # self.notifier.info('host.delete', host) return Response(body='', status=200) - + @utils.mutating def get_host_template_lists(self, req): self._enforce(req, 'get_template_lists') params = self._get_query_params(req) template_meta = {} try: - host_template_lists = registry.host_template_lists_metadata(req.context, **params) + host_template_lists = registry.host_template_lists_metadata( + 
req.context, **params) if host_template_lists and host_template_lists[0]: template_meta = json.loads(host_template_lists[0]['hosts']) return {'host_template': template_meta} except exception.Invalid as e: raise HTTPBadRequest(explanation=e.msg, request=req) return dict(host_template=host_template_lists) - + @utils.mutating def host_to_template(self, req, host_template): """ @@ -315,21 +328,32 @@ class Controller(controller.BaseController): """ self._enforce(req, 'host_to_template') if host_template.get('host_id', None): - origin_host_meta = self.get_host_meta_or_404(req, host_template['host_id']) + origin_host_meta = self.get_host_meta_or_404( + req, host_template['host_id']) host_meta = self._filter_params(origin_host_meta) - if host_template.get('host_template_name', None) and host_template.get('cluster_name', None): + if host_template.get( + 'host_template_name', + None) and host_template.get( + 'cluster_name', + None): host_meta['name'] = host_template['host_template_name'] - host_meta['description'] = host_template.get('description', None) - params = {'filters':{'cluster_name':host_template['cluster_name']}} - templates = registry.host_template_lists_metadata(req.context, **params) - if templates and templates[0]: + host_meta['description'] = host_template.get( + 'description', None) + params = { + 'filters': { + 'cluster_name': host_template['cluster_name']}} + templates = registry.host_template_lists_metadata( + req.context, **params) + if templates and templates[0]: had_host_template = False if templates[0]['hosts']: - templates[0]['hosts'] = json.loads(templates[0]['hosts']) + templates[0]['hosts'] = json.loads( + templates[0]['hosts']) else: templates[0]['hosts'] = [] for index in range(len(templates[0]['hosts'])): - if host_template['host_template_name'] == templates[0]['hosts'][index]['name']: + if host_template['host_template_name'] == templates[ + 0]['hosts'][index]['name']: had_host_template = True templates[0]['hosts'][index] = host_meta break @@ -337,12 +361,15 @@ class Controller(controller.BaseController): host_meta['name'] = host_template['host_template_name'] templates[0]['hosts'].append(host_meta) templates[0]['hosts'] = json.dumps(templates[0]['hosts']) - host_template = registry.update_host_template_metadata(req.context, - templates[0]['id'], - templates[0]) + host_template = registry.update_host_template_metadata( + req.context, templates[0]['id'], templates[0]) else: - param = {"cluster_name": host_template['cluster_name'], "hosts":json.dumps([host_meta])} - host_template = registry.add_host_template_metadata(req.context, param) + param = { + "cluster_name": host_template['cluster_name'], + "hosts": json.dumps( + [host_meta])} + host_template = registry.add_host_template_metadata( + req.context, param) return {'host_template': host_template} @utils.mutating @@ -350,8 +377,9 @@ class Controller(controller.BaseController): if not host_template.get('cluster_name', None): msg = "cluster name is null" raise HTTPNotFound(explanation=msg) - params = {'filters':{'cluster_name':host_template['cluster_name']}} - templates = registry.host_template_lists_metadata(req.context, **params) + params = {'filters': {'cluster_name': host_template['cluster_name']}} + templates = registry.host_template_lists_metadata( + req.context, **params) hosts_param = [] host_template_used = {} if templates and templates[0]: @@ -362,66 +390,79 @@ class Controller(controller.BaseController): break if not host_template_used: msg = "not host_template %s" % host_template['host_template_name'] - raise 
HTTPNotFound(explanation=msg, request=req, content_type="text/plain") + raise HTTPNotFound( + explanation=msg, + request=req, + content_type="text/plain") if host_template.get('host_id', None): self.get_host_meta_or_404(req, host_template['host_id']) else: - msg="host_id is not null" - raise HTTPBadRequest(explanation = msg) + msg = "host_id is not null" + raise HTTPBadRequest(explanation=msg) host_id = host_template['host_id'] - params = {'filters':{'name': host_template['cluster_name']}} + params = {'filters': {'name': host_template['cluster_name']}} clusters = registry.get_clusters_detail(req.context, **params) if clusters and clusters[0]: host_template_used['cluster'] = clusters[0]['id'] - if host_template_used.has_key('role') and host_template_used['role']: + if 'role' in host_template_used and host_template_used['role']: role_id_list = [] host_role_list = [] - if host_template_used.has_key('cluster'): + if 'cluster' in host_template_used: params = self._get_query_params(req) role_list = registry.get_roles_detail(req.context, **params) for role_name in role_list: - if role_name['cluster_id'] == host_template_used['cluster']: + if role_name['cluster_id'] == host_template_used[ + 'cluster']: host_role_list = list(host_template_used['role']) if role_name['name'] in host_role_list: role_id_list.append(role_name['id']) - host_template_used['role'] = role_id_list - if host_template_used.has_key('name'): + host_template_used['role'] = role_id_list + if 'name' in host_template_used: host_template_used.pop('name') - if host_template_used.has_key('dmi_uuid'): + if 'dmi_uuid' in host_template_used: host_template_used.pop('dmi_uuid') - if host_template_used.has_key('ipmi_user'): + if 'ipmi_user' in host_template_used: host_template_used.pop('ipmi_user') - if host_template_used.has_key('ipmi_passwd'): + if 'ipmi_passwd' in host_template_used: host_template_used.pop('ipmi_passwd') - if host_template_used.has_key('ipmi_addr'): + if 'ipmi_addr' in host_template_used: host_template_used.pop('ipmi_addr') host_template_interfaces = host_template_used.get('interfaces', None) if host_template_interfaces: - template_ether_interface = [interface for interface in host_template_interfaces if interface['type'] == "ether" ] + template_ether_interface = [ + interface for interface in host_template_interfaces if + interface['type'] == "ether"] orig_host_meta = registry.get_host_metadata(req.context, host_id) orig_host_interfaces = orig_host_meta.get('interfaces', None) - temp_orig_host_interfaces = [ interface for interface in orig_host_interfaces if interface['type'] == "ether" ] + temp_orig_host_interfaces = [ + interface for interface in orig_host_interfaces if + interface['type'] == "ether"] if len(temp_orig_host_interfaces) != len(template_ether_interface): msg = (_('host_id %s does not match the host_id host_template ' - '%s.') % (host_id, host_template['host_template_name'])) - raise HTTPBadRequest(explanation = msg) + '%s.') % (host_id, + host_template['host_template_name'])) + raise HTTPBadRequest(explanation=msg) interface_match_flag = 0 for host_template_interface in host_template_interfaces: if host_template_interface['type'] == 'ether': for orig_host_interface in orig_host_interfaces: - if orig_host_interface['pci'] == host_template_interface['pci']: + if orig_host_interface[ + 'pci'] == host_template_interface['pci']: interface_match_flag += 1 - host_template_interface['mac'] = orig_host_interface['mac'] - if host_template_interface.has_key('ip'): + host_template_interface[ + 'mac'] = 
orig_host_interface['mac'] + if 'ip' in host_template_interface: host_template_interface.pop('ip') if interface_match_flag != len(template_ether_interface): msg = (_('host_id %s does not match the host ' - 'host_template %s.') % (host_id, host_template['host_template_name'])) + 'host_template %s.') % ( + host_id, host_template['host_template_name'])) raise HTTPBadRequest(explanation=msg) host_template_used['interfaces'] = str(host_template_interfaces) - host_template = registry.update_host_metadata(req.context, host_id, host_template_used) + host_template = registry.update_host_metadata( + req.context, host_id, host_template_used) return {"host_template": host_template} - + @utils.mutating def delete_host_template(self, req, host_template): """ @@ -437,8 +478,11 @@ class Controller(controller.BaseController): if not host_template.get('cluster_name', None): msg = "cluster name is null" raise HTTPNotFound(explanation=msg) - params = {'filters':{'cluster_name':host_template['cluster_name']}} - host_templates = registry.host_template_lists_metadata(req.context, **params) + params = { + 'filters': { + 'cluster_name': host_template['cluster_name']}} + host_templates = registry.host_template_lists_metadata( + req.context, **params) template_param = [] had_host_template = False if host_templates and host_templates[0]: @@ -449,18 +493,20 @@ class Controller(controller.BaseController): had_host_template = True break if not had_host_template: - msg = "not host template name %s" %host_template['host_template_name'] + msg = "not host template name %s" % host_template[ + 'host_template_name'] raise HTTPNotFound(explanation=msg) else: host_templates[0]['hosts'] = json.dumps(template_param) - host_template = registry.update_host_template_metadata(req.context, - host_templates[0]['id'], - host_templates[0]) - return {"host_template": host_template} + host_template = registry.update_host_template_metadata( + req.context, host_templates[0]['id'], + host_templates[0]) + return {"host_template": host_template} else: - msg = "host template cluster name %s is null" %host_template['cluster_name'] - raise HTTPNotFound(explanation=msg) - + msg = "host template cluster name %s is null" % host_template[ + 'cluster_name'] + raise HTTPNotFound(explanation=msg) + except exception.NotFound as e: msg = (_("Failed to find host template to delete: %s") % utils.exception_to_str(e)) @@ -476,15 +522,18 @@ class Controller(controller.BaseController): request=req, content_type="text/plain") except exception.InUseByStore as e: - msg = (_("template %(id)s could not be deleted because it is in use: " - "%(exc)s") % {"id": template_id, "exc": utils.exception_to_str(e)}) + msg = (_("template %(id)s could not be deleted " + "because it is in use: " + "%(exc)s") % {"id": host_template['host_id'], + "exc": utils.exception_to_str(e)}) LOG.error(msg) raise HTTPConflict(explanation=msg, request=req, content_type="text/plain") else: return Response(body='', status=200) - + + class HostTemplateDeserializer(wsgi.JSONRequestDeserializer): """Handles deserialization of specific controller method requests.""" @@ -492,13 +541,12 @@ class HostTemplateDeserializer(wsgi.JSONRequestDeserializer): result = {} result["host_template"] = utils.get_template_meta(request) return result - + def add_host_template(self, request): return self._deserialize(request) def update_host_template(self, request): return self._deserialize(request) - def host_to_template(self, request): return self._deserialize(request) @@ -509,6 +557,7 @@ class 
HostTemplateDeserializer(wsgi.JSONRequestDeserializer): def delete_host_template(self, request): return self._deserialize(request) + class HostTemplateSerializer(wsgi.JSONResponseSerializer): """Handles serialization of specific controller method responses.""" @@ -528,18 +577,20 @@ class HostTemplateSerializer(wsgi.JSONResponseSerializer): response.headers['Content-Type'] = 'application/json' response.body = self.to_json(dict(host_template=host_template)) return response + def get_host_template_detail(self, response, result): host_template = result['host_template'] response.status = 201 response.headers['Content-Type'] = 'application/json' response.body = self.to_json(dict(host_template=host_template)) return response + def update_host_template(self, response, result): host_template = result['host_template'] response.status = 201 response.headers['Content-Type'] = 'application/json' response.body = self.to_json(dict(host_template=host_template)) - return response + return response def host_to_template(self, response, result): host_template = result['host_template'] @@ -560,7 +611,7 @@ class HostTemplateSerializer(wsgi.JSONResponseSerializer): response.status = 201 response.headers['Content-Type'] = 'application/json' response.body = self.to_json(dict(host_template=host_template)) - + def create_resource(): """Templates resource factory method""" diff --git a/code/daisy/daisy/api/v1/hosts.py b/code/daisy/daisy/api/v1/hosts.py index 219f24c1..94bdc37a 100755 --- a/code/daisy/daisy/api/v1/hosts.py +++ b/code/daisy/daisy/api/v1/hosts.py @@ -18,6 +18,7 @@ """ import subprocess import re + from oslo_config import cfg from oslo_log import log as logging from webob.exc import HTTPBadRequest @@ -25,8 +26,6 @@ from webob.exc import HTTPConflict from webob.exc import HTTPForbidden from webob.exc import HTTPNotFound from webob import Response -from collections import Counter -from webob.exc import HTTPServerError from daisy.api import policy import daisy.api.v1 from daisy.api.v1 import controller @@ -35,12 +34,16 @@ from daisy.common import exception from daisy.common import property_utils from daisy.common import utils from daisy.common import wsgi +from daisy.common import vcpu_pin from daisy import i18n from daisy import notifier import daisy.registry.client.v1.api as registry import threading import daisy.api.backends.common as daisy_cmn +import daisy.api.backends.tecs.common as tecs_cmn import ConfigParser +import socket +import netaddr LOG = logging.getLogger(__name__) _ = i18n._ @@ -56,9 +59,21 @@ CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format') CONF.import_opt('container_formats', 'daisy.common.config', group='image_format') CONF.import_opt('image_property_quota', 'daisy.common.config') -config = ConfigParser.ConfigParser() -config.read("/home/daisy_install/daisy.conf") -ML2_TYPE = ['ovs', 'dvs', 'ovs,sriov(macvtap)', 'ovs,sriov(direct)', 'sriov(macvtap)', 'sriov(direct)'] + +DISCOVER_DEFAULTS = { + 'listen_port': '5050', + 'ironic_url': 'http://127.0.0.1:6385/v1', +} + +ML2_TYPE = [ + 'ovs', + 'dvs', + 'ovs,sriov(macvtap)', + 'ovs,sriov(direct)', + 'sriov(macvtap)', + 'sriov(direct)'] +SUPPORT_HOST_PAGE_SIZE = ['2M', '1G'] + class Controller(controller.BaseController): """ @@ -79,7 +94,7 @@ class Controller(controller.BaseController): DELETE /nodes/ -- Delete the host with id """ support_resource_type = ['baremetal', 'server', 'docker'] - + def __init__(self): self.notifier = notifier.Notifier() registry.configure_registry_client() @@ -101,13 +116,15 @@ class 
Controller(controller.BaseController): def _raise_404_if_network_deleted(self, req, network_id): network = self.get_network_meta_or_404(req, network_id) if network is None or network['deleted']: - msg = _("Network with identifier %s has been deleted.") % network_id + msg = _("Network with identifier %s has been deleted.") % \ + network_id raise HTTPNotFound(msg) def _raise_404_if_cluster_deleted(self, req, cluster_id): cluster = self.get_cluster_meta_or_404(req, cluster_id) if cluster is None or cluster['deleted']: - msg = _("Cluster with identifier %s has been deleted.") % cluster_id + msg = _("Cluster with identifier %s has been deleted.") % \ + cluster_id raise HTTPNotFound(msg) def _raise_404_if_role_deleted(self, req, role_id): @@ -116,7 +133,6 @@ class Controller(controller.BaseController): msg = _("Cluster with identifier %s has been deleted.") % role_id raise HTTPNotFound(msg) - def _get_filters(self, req): """ Return a dictionary of query param filters from the request @@ -148,11 +164,15 @@ class Controller(controller.BaseController): if PARAM in req.params: params[PARAM] = req.params.get(PARAM) return params - - def check_bond_slaves_validity(self, bond_slaves_lists, ether_nic_names_list): + + def check_bond_slaves_validity( + self, + bond_slaves_lists, + ether_nic_names_list): ''' members in bond slaves must be in ether_nic_names_list - len(set(bond_slaves)) == 2, and can not be overlap between slaves members + len(set(bond_slaves)) == 2, and can not be overlap + between slaves members bond_slaves_lists: [[name1,name2], [name1,name2], ...] ether_nic_names_list: [name1, name2, ...] ''' @@ -160,13 +180,20 @@ class Controller(controller.BaseController): LOG.warn('bond_slaves: %s' % bond_slaves) if len(set(bond_slaves)) != 2: LOG.error('set(bond_slaves: %s' % set(bond_slaves)) - msg = (_("Bond slaves(%s) must be different nic and existed in ether nics in pairs." % bond_slaves)) + msg = ( + _( + "Bond slaves(%s) must be different nic and existed " + "in ether nics in pairs." % + bond_slaves)) LOG.error(msg) raise HTTPForbidden(msg) if not set(bond_slaves).issubset(set(ether_nic_names_list)): - msg = (_("Pay attention: illegal ether nic existed in bond slaves(%s)." % bond_slaves)) + msg = ( + _("Pay attention: illegal ether nic existed " + "in bond slaves(%s)." % bond_slaves)) LOG.error(msg) raise HTTPForbidden(msg) + def validate_ip_format(self, ip_str): ''' valid ip_str format = '10.43.178.9' @@ -177,40 +204,39 @@ class Controller(controller.BaseController): '10.43.1789', invalid format ''' valid_fromat = False - if ip_str.count('.') == 3 and \ - all(num.isdigit() and 0<=int(num)<256 for num in ip_str.rstrip().split('.')): + if ip_str.count('.') == 3 and all(num.isdigit() and 0 <= int( + num) < 256 for num in ip_str.rstrip().split('.')): valid_fromat = True - if valid_fromat == False: + if not valid_fromat: msg = (_("%s invalid ip format!") % ip_str) LOG.error(msg) raise HTTPForbidden(msg) - - def _ip_into_int(self, ip): - """ - Switch ip string to decimalism integer.. - :param ip: ip string - :return: decimalism integer - """ - return reduce(lambda x, y: (x<<8)+y, map(int, ip.split('.'))) - def _is_in_network_range(self, ip, network): - """ - Check ip is in range - :param ip: Ip will be checked, like:192.168.1.2. - :param network: Ip range,like:192.168.0.0/24. - :return: If ip in range,return True,else return False. 
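The hunk above tightens validate_ip_format and drops the hand-rolled _ip_into_int/_is_in_network_range helpers in favour of utils.is_ip_in_cidr. Below is a minimal standalone sketch of the two checks; the stdlib ipaddress module stands in for utils.is_ip_in_cidr (whose implementation is not part of this diff), and ValueError replaces the webob HTTPForbidden used by the API.

import ipaddress

def validate_ip_format(ip_str):
    # Same rule as the updated method: four dot-separated decimal fields,
    # each in the range 0..255.
    ok = ip_str.count('.') == 3 and all(
        num.isdigit() and 0 <= int(num) < 256
        for num in ip_str.rstrip().split('.'))
    if not ok:
        raise ValueError("%s invalid ip format!" % ip_str)

def is_ip_in_cidr(ip_str, cidr):
    # Stand-in for utils.is_ip_in_cidr: a library membership test replacing
    # the removed mask arithmetic of _is_in_network_range.
    return ipaddress.ip_address(u'%s' % ip_str) in \
        ipaddress.ip_network(u'%s' % cidr, strict=False)

validate_ip_format('10.43.178.9')                          # passes silently
print(is_ip_in_cidr('192.160.0.225', '192.160.0.0/23'))    # True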
- """ - network = network.split('/') - mask = ~(2**(32 - int(network[1])) - 1) - return (self._ip_into_int(ip) & mask) == (self._ip_into_int(network[0]) & mask) - - def get_cluster_networks_info(self, req, cluster_id): + def validate_mac_format(self, mac_str): + '''Validates a mac address''' + if re.match("[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$", + mac_str.lower()): + return + else: + msg = (_("%s invalid mac format!") % mac_str) + LOG.error(msg) + raise HTTPForbidden(msg) + + def get_cluster_networks_info(self, req, cluster_id=None, type=None): ''' - get_cluster_networks_info by cluster id + get_cluster_networks_info by cluster id ''' - all_networks = registry.get_all_networks(req.context) - cluster_networks = [network for network in all_networks if network['cluster_id'] == cluster_id] - return cluster_networks + params = {} + if type: + params['filters'] = {'type': type} + + all_networks = registry.get_all_networks(req.context, **params) + if cluster_id: + cluster_networks = [network for network in all_networks + if network['cluster_id'] == cluster_id] + return cluster_networks + else: + return all_networks def _check_assigned_networks(self, req, cluster_id, assigned_networks): LOG.info("assigned_networks %s " % assigned_networks) @@ -218,55 +244,68 @@ class Controller(controller.BaseController): list_of_assigned_networks = [] for assigned_network in assigned_networks: LOG.info("assigned_network %s " % assigned_network) - if not assigned_network.has_key('name') or not assigned_network['name']: - msg = "assigned networks '%s' are invalid" % (assigned_networks) + if 'name' not in assigned_network or not assigned_network['name']: + msg = "assigned networks '%s' are invalid" % ( + assigned_networks) LOG.error(msg) raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") - network_info = [network for network in cluster_networks if network['name'] == assigned_network['name']] + request=req, + content_type="text/plain") + network_info = [network for network in cluster_networks if network[ + 'name'] == assigned_network['name']] if network_info and network_info[0]: network_cidr = network_info[0]['cidr'] LOG.info("network_info %s " % network_info) - if network_info[0]['network_type'] != 'PRIVATE': + if network_info[0]['network_type'] != 'DATAPLANE': if network_cidr: - if assigned_network.has_key('ip') and assigned_network['ip']: + if 'ip' in assigned_network and assigned_network['ip']: self.validate_ip_format(assigned_network['ip']) - ip_in_cidr = self._is_in_network_range(assigned_network['ip'], network_cidr) + ip_in_cidr = utils.is_ip_in_cidr( + assigned_network['ip'], network_cidr) if not ip_in_cidr: - msg = (_("The ip '%s' for network '%s' is not in cidr range." % - (assigned_network['ip'], assigned_network['name']))) + msg = (_("The ip '%s' for network '%s'" + " is not in cidr range." 
% + (assigned_network['ip'], + assigned_network['name']))) raise HTTPBadRequest(explanation=msg) else: - msg = "error, cidr of network '%s' is empty" % (assigned_network['name']) + msg = "error, cidr of network '%s' is empty" % ( + assigned_network['name']) LOG.error(msg) raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") + request=req, + content_type="text/plain") else: - msg = "can't find network named '%s' in cluster '%s'" % (assigned_network['name'], cluster_id) + msg = "can't find network named '%s' in cluster '%s'" % ( + assigned_network['name'], cluster_id) LOG.error(msg) raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") + request=req, + content_type="text/plain") list_of_assigned_networks.append(network_info[0]) return list_of_assigned_networks def _compare_assigned_networks_of_interface(self, interface1, interface2): for network in interface1: + if network.get('segmentation_type') in ['vlan']: + continue for network_compare in interface2: - if network['cidr'] == network_compare['cidr']: + if network_compare.get('segmentation_type') in ['vlan']: + continue + if network.get('cidr', None) \ + and network_compare.get('cidr', None) \ + and network['cidr'] == network_compare['cidr']: return network['name'], network_compare['name'] return False, False def _compare_assigned_networks_between_interfaces( self, interface_num, assigned_networks_of_interfaces): for interface_id in range(interface_num): - for interface_id_compare in range(interface_id+1, interface_num): + for interface_id_compare in range(interface_id + 1, interface_num): network1_name, network2_name = self.\ - _compare_assigned_networks_of_interface\ - (assigned_networks_of_interfaces[interface_id], - assigned_networks_of_interfaces[interface_id_compare]) + _compare_assigned_networks_of_interface( + assigned_networks_of_interfaces[interface_id], + assigned_networks_of_interfaces[interface_id_compare]) if network1_name and network2_name: msg = (_('Network %s and network %s with same ' 'cidr can not be assigned to different ' @@ -275,25 +314,27 @@ class Controller(controller.BaseController): def _check_add_host_interfaces(self, req, host_meta): host_meta_interfaces = [] - if host_meta.has_key('interfaces'): + if 'interfaces' in host_meta: host_meta_interfaces = list(eval(host_meta['interfaces'])) else: return - - cluster_id = host_meta.get('cluster', None) - exist_id = self._verify_interface_among_hosts(req, host_meta) + cluster_id = host_meta.get('cluster', None) + + exist_id, os_status = self._verify_interface_among_hosts( + req, host_meta) if exist_id: + if os_status == "active": + msg = _( + 'The host %s os_status is active,' + 'forbidden ironic to add host.') % exist_id + raise HTTPBadRequest(explanation=msg) host_meta['id'] = exist_id self.update_host(req, exist_id, host_meta) - LOG.info("<<>>" % exist_id) + LOG.info( + "<<>>" % + exist_id) return {'host_meta': host_meta} - - if self._host_with_bad_pxe_info_in_params(host_meta): - if cluster_id and host_meta.get('os_status', None) != 'active': - msg = _("There is no nic for deployment, please choose " - "one interface to set it's 'is_deployment' True") - raise HTTPServerError(explanation=msg) ether_nic_names_list = list() bond_nic_names_list = list() @@ -304,12 +345,20 @@ class Controller(controller.BaseController): interface_num = 0 for interface in host_meta_interfaces: assigned_networks_of_one_interface = [] - if interface.get('type', None) != 'bond' and not interface.get('mac', None): + if interface.get( + 
'type', + None) != 'bond' and not interface.get( + 'mac', + None): msg = _('The ether interface need a non-null mac ') raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") - if interface.get('type', None) != 'bond' and not interface.get('pci', None): + if interface.get( + 'type', + None) == 'ether' and not interface.get( + 'pci', + None): msg = "The Interface need a non-null pci" LOG.error(msg) raise HTTPBadRequest(explanation=msg, @@ -317,12 +366,14 @@ class Controller(controller.BaseController): content_type="text/plain") if interface.get('name', None): - if interface.has_key('type') and interface['type'] == 'bond': + if 'type' in interface and interface['type'] == 'bond': bond_nic_names_list.append(interface['name']) if interface.get('slaves', None): bond_slaves_lists.append(interface['slaves']) else: - msg = (_("Slaves parameter can not be None when nic type was bond.")) + msg = ( + _("Slaves parameter can not be None " + "when nic type was bond.")) LOG.error(msg) raise HTTPForbidden(msg) else: # type == ether or interface without type field @@ -331,53 +382,61 @@ class Controller(controller.BaseController): msg = (_("Nic name can not be None.")) LOG.error(msg) raise HTTPForbidden(msg) - - if interface.has_key('is_deployment'): - if interface['is_deployment'] == "True" or interface['is_deployment'] == True: + + if 'is_deployment' in interface: + if interface['is_deployment'] == "True" or interface[ + 'is_deployment']: interface['is_deployment'] = 1 else: interface['is_deployment'] = 0 - if (interface.has_key('assigned_networks') and - interface['assigned_networks'] != [''] and + if ('assigned_networks' in interface and + interface['assigned_networks'] != [''] and interface['assigned_networks']): have_assigned_network = True if cluster_id: assigned_networks_of_one_interface = self.\ _check_assigned_networks(req, cluster_id, - interface['assigned_networks']) + interface[ + 'assigned_networks']) else: - msg = "cluster must be given first when network plane is allocated" + msg = "cluster must be given first when network " \ + "plane is allocated" LOG.error(msg) raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") - if (interface.has_key('ip') and interface['ip'] and - interface.has_key('netmask') and interface['netmask']): + if ('ip' in interface and interface['ip'] and + 'netmask' in interface and interface['netmask']): have_ip_netmask = True - - if interface.has_key('mac') and interface.has_key('ip'): - host_infos = registry.get_host_interface(req.context, host_meta) + + if 'mac' in interface and 'ip' in interface: + host_infos = registry.get_host_interface( + req.context, host_meta) for host_info in host_infos: - if host_info.has_key('host_id'): + if 'host_id' in host_info: host_meta["id"] = host_info['host_id'] - - if interface.has_key('vswitch_type') and interface['vswitch_type'] != '' and interface['vswitch_type'] not in ML2_TYPE: - msg = "vswitch_type %s is not supported" % interface['vswitch_type'] + + if 'vswitch_type' in interface and interface[ + 'vswitch_type'] != '' and \ + interface['vswitch_type'] not in \ + ML2_TYPE: + msg = "vswitch_type %s is not supported" % interface[ + 'vswitch_type'] raise HTTPBadRequest(explanation=msg, request=req, - content_type="text/plain") + content_type="text/plain") interface_num += 1 assigned_networks_of_intefaces.\ append(assigned_networks_of_one_interface) for interface_id in range(interface_num): - for interface_id_compare in range(interface_id+1, interface_num): + for interface_id_compare 
in range(interface_id + 1, interface_num): network1_name, network2_name = self.\ - _compare_assigned_networks_of_interface\ - (assigned_networks_of_intefaces[interface_id], - assigned_networks_of_intefaces[interface_id_compare]) + _compare_assigned_networks_of_interface( + assigned_networks_of_intefaces[interface_id], + assigned_networks_of_intefaces[interface_id_compare]) if network1_name and network2_name: msg = (_('Network %s and network %s with same ' 'cidr can not be assigned to different ' @@ -387,20 +446,41 @@ class Controller(controller.BaseController): # when assigned_network is empty, ip must be config if not have_assigned_network: if not have_ip_netmask: - msg = "ip and netmask must be given when network plane is not allocated" + msg = "ip and netmask must be given when network " \ + "plane is not allocated" LOG.error(msg) raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") - + # check bond slaves validity - self.check_bond_slaves_validity(bond_slaves_lists, ether_nic_names_list) + self.check_bond_slaves_validity( + bond_slaves_lists, ether_nic_names_list) nic_name_list = ether_nic_names_list + bond_nic_names_list if len(set(nic_name_list)) != len(nic_name_list): msg = (_("Nic name must be unique.")) LOG.error(msg) raise HTTPForbidden(msg) - + + def _check_dvs_huge(self, host_meta, orig_host_meta={}): + host_interfaces = (host_meta.get('interfaces') or + orig_host_meta.get('interfaces')) + if host_interfaces: + if not isinstance(host_interfaces, list): + host_interfaces = eval(host_interfaces) + + has_dvs = utils.get_dvs_interfaces(host_interfaces) + if has_dvs: + if (('hugepages' in host_meta and + int(host_meta['hugepages']) < 10) or + ('hugepagesize' in host_meta and + host_meta['hugepagesize'] != '1G')): + host_name = (host_meta.get('name') or + orig_host_meta.get('name')) + msg = _("hugepages should be larger than 10G " + " when dvs installed on host %s") % host_name + raise HTTPForbidden(explanation=msg) + @utils.mutating def add_host(self, req, host_meta): """ @@ -412,14 +492,16 @@ class Controller(controller.BaseController): :raises HTTPBadRequest if x-host-name is missing """ self._enforce(req, 'add_host') - # if host is update in '_verify_interface_among_hosts', no need add host continue. + # if host is update in '_verify_interface_among_hosts', no need add + # host continue. 
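The new _check_dvs_huge guard shown above ties DVS-enabled hosts to a 1G hugepage layout. Here is a rough standalone sketch of the same rule; the dvs detection is a simplified stand-in for utils.get_dvs_interfaces (not shown in this diff), and ValueError replaces HTTPForbidden.

def check_dvs_hugepage(host_meta, orig_host_meta=None):
    orig_host_meta = orig_host_meta or {}
    interfaces = host_meta.get('interfaces') or orig_host_meta.get('interfaces')
    if not interfaces:
        return
    if not isinstance(interfaces, list):
        interfaces = eval(interfaces)  # the API carries interfaces as a string literal
    # Simplified dvs detection: any interface whose vswitch_type mentions dvs.
    has_dvs = any('dvs' in (i.get('vswitch_type') or '') for i in interfaces)
    if not has_dvs:
        return
    if (('hugepages' in host_meta and int(host_meta['hugepages']) < 10) or
            ('hugepagesize' in host_meta and host_meta['hugepagesize'] != '1G')):
        name = host_meta.get('name') or orig_host_meta.get('name')
        raise ValueError("hugepages should be larger than 10G "
                         "when dvs installed on host %s" % name)

check_dvs_hugepage({'name': 'node-1', 'hugepages': '16', 'hugepagesize': '1G',
                    'interfaces': [{'name': 'eth0', 'vswitch_type': 'dvs'}]})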
cluster_id = host_meta.get('cluster', None) if cluster_id: self.get_cluster_meta_or_404(req, cluster_id) - if host_meta.has_key('role') and host_meta['role']: + + if 'role' in host_meta and host_meta['role']: role_id_list = [] - host_roles=[] - if host_meta.has_key('cluster'): + host_roles = [] + if 'cluster' in host_meta: params = self._get_query_params(req) role_list = registry.get_roles_detail(req.context, **params) for role_name in role_list: @@ -430,7 +512,8 @@ class Controller(controller.BaseController): role_id_list.append(role_name['id']) continue if len(role_id_list) != len(host_roles): - msg = "The role of params %s is not exist, please use the right name" % host_roles + msg = "The role of params %s is not exist, " \ + "please use the right name" % host_roles raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") @@ -440,32 +523,42 @@ class Controller(controller.BaseController): raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") - - + # if host is found from ssh, don't set pxe interface + if host_meta.get('os_status', None) == 'init': + self._set_pxe_interface_for_host(req, host_meta) + self._check_add_host_interfaces(req, host_meta) - if host_meta.has_key('resource_type'): + if 'resource_type' in host_meta: if host_meta['resource_type'] not in self.support_resource_type: - msg = "resource type is not supported, please use it in %s" % self.support_resource_type + msg = "resource type is not supported, please use it in %s" % \ + self.support_resource_type raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") else: host_meta['resource_type'] = 'baremetal' - - if host_meta.has_key('os_status'): - if host_meta['os_status'] not in ['init', 'installing', 'active', 'failed', 'none']: + + if 'os_status' in host_meta: + if host_meta['os_status'] not in ['init', 'installing', + 'active', 'failed', 'none']: msg = "os_status is not valid." 
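add_host above resolves the role names supplied by the caller into role ids belonging to the target cluster and rejects unknown names. A compact sketch of that lookup follows; map_role_names_to_ids is an illustrative name, the sample rows are invented, and ValueError replaces HTTPBadRequest.

def map_role_names_to_ids(role_rows, cluster_id, host_role_names):
    # Collect the ids of the named roles within the target cluster; any
    # leftover name means the caller passed a role that does not exist.
    role_ids = [row['id'] for row in role_rows
                if row['cluster_id'] == cluster_id and
                row['name'] in host_role_names]
    if len(role_ids) != len(host_role_names):
        raise ValueError("The role of params %s is not exist, "
                         "please use the right name" % host_role_names)
    return role_ids

rows = [{'id': 'r1', 'name': 'CONTROLLER_HA', 'cluster_id': 'c1'},
        {'id': 'r2', 'name': 'COMPUTER', 'cluster_id': 'c1'}]
print(map_role_names_to_ids(rows, 'c1', ['COMPUTER']))   # ['r2']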
raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") - - if host_meta.has_key('ipmi_addr') and host_meta['ipmi_addr']: - if not host_meta.has_key('ipmi_user'): + + if 'ipmi_addr' in host_meta and host_meta['ipmi_addr']: + if 'ipmi_user' not in host_meta: host_meta['ipmi_user'] = 'zteroot' - if not host_meta.has_key('ipmi_passwd'): + if 'ipmi_passwd' not in host_meta: host_meta['ipmi_passwd'] = 'superuser' + self._check_dvs_huge(host_meta) + + if host_meta.get('config_set_id'): + self.get_config_set_meta_or_404(req, + host_meta['config_set_id']) + host_meta = registry.add_host_metadata(req.context, host_meta) return {'host_meta': host_meta} @@ -505,13 +598,15 @@ class Controller(controller.BaseController): request=req, content_type="text/plain") else: - #self.notifier.info('host.delete', host) - params= {} - discover_hosts = registry.get_discover_hosts_detail(req.context, **params) + # self.notifier.info('host.delete', host) + params = {} + discover_hosts = registry.get_discover_hosts_detail( + req.context, **params) for host in discover_hosts: if host.get('host_id') == id: LOG.info("delete discover host: %s" % id) - registry.delete_discover_host_metadata(req.context, host['id']) + registry.delete_discover_host_metadata( + req.context, host['id']) return Response(body='', status=200) @utils.mutating @@ -527,6 +622,60 @@ class Controller(controller.BaseController): """ self._enforce(req, 'get_host') host_meta = self.get_host_meta_or_404(req, id) + host_vcpu_pin = vcpu_pin.allocate_cpus(host_meta) + host_meta.update(host_vcpu_pin) + if 'role' in host_meta and 'CONTROLLER_HA' in host_meta['role']: + host_cluster_name = host_meta['cluster'] + params = {'filters': {u'name': host_cluster_name}} + cluster_info = registry.get_clusters_detail(req.context, **params) + cluster_id = cluster_info[0]['id'] + + ctl_ha_nodes_min_mac =\ + tecs_cmn.get_ctl_ha_nodes_min_mac(req, cluster_id) + sorted_ha_nodes = \ + sorted(ctl_ha_nodes_min_mac.iteritems(), key=lambda d: d[1]) + sorted_ha_nodes_min_mac = \ + [min_mac[1] for min_mac in sorted_ha_nodes] + + host_min_mac = utils.get_host_min_mac(host_meta['interfaces']) + host_iqn = daisy_cmn.calc_host_iqn(host_min_mac) + host_meta['iqn'] = host_iqn + + cluster_roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id) + role_id = '' + for role in cluster_roles: + if role['name'] == 'CONTROLLER_HA': + role_id = role['id'] + break + service_disks = \ + tecs_cmn.get_service_disk_list(req, + {'filters': { + 'role_id': role_id}}) + db_share_cluster_disk = [] + service_lun_info = [] + for disk in service_disks: + if disk['service'] == 'db' and \ + disk['disk_location'] == 'share_cluster': + db_share_cluster_disk.append(disk) + if disk['disk_location'] == 'share': + tmp_disk = {} + tmp_disk[disk['service']] = disk['lun'] + service_lun_info.append(tmp_disk) + + sorted_db_share_cluster = \ + sorted(db_share_cluster_disk, key=lambda s: s['lun']) + + db_service_lun_info = {} + for (min_mac, share_disk) in \ + zip(sorted_ha_nodes_min_mac, sorted_db_share_cluster): + if host_min_mac == min_mac: + db_service_lun_info['db'] = share_disk['lun'] + break + if db_service_lun_info: + service_lun_info.append(db_service_lun_info) + if service_lun_info: + host_meta['lun'] = service_lun_info + return {'host_meta': host_meta} def detail(self, req): @@ -549,67 +698,151 @@ class Controller(controller.BaseController): params = self._get_query_params(req) try: nodes = registry.get_hosts_detail(req.context, **params) + for node in nodes: + if node.get("hwm_id"): + 
self.check_discover_state_with_hwm(req, node) + else: + self.check_discover_state_with_no_hwm(req, node) except exception.Invalid as e: raise HTTPBadRequest(explanation=e.msg, request=req) return dict(nodes=nodes) + def check_discover_state_with_hwm(self, req, node): + node['discover_state'] = None + host_meta = self.get_host_meta_or_404(req, node.get('id')) + if host_meta and host_meta.get('interfaces'): + mac_list = [ + interface['mac'] for interface in + host_meta.get('interfaces') if interface.get('mac')] + if mac_list: + min_mac = min(mac_list) + pxe_discover_host = self._get_discover_host_by_mac(req, + min_mac) + if pxe_discover_host: + if pxe_discover_host.get('ip'): + node['discover_state'] = \ + "SSH:" + pxe_discover_host.get('status') + else: + node['discover_state'] = \ + "PXE:" + pxe_discover_host.get('status') + + return node + + def check_discover_state_with_no_hwm(self, req, node): + node['discover_state'] = None + host_meta = self.get_host_meta_or_404(req, node.get('id')) + if host_meta and host_meta.get('interfaces'): + ip_list = [interface['ip'] for interface + in host_meta.get('interfaces') if interface['ip']] + if ip_list: + for ip in ip_list: + ssh_discover_host = self._get_host_by_ip(req, ip) + if ssh_discover_host: + node['discover_state'] = \ + "SSH:" + ssh_discover_host.get('status') + + return node + + def _update_hwm_host(self, req, hwm_host, hosts, hwm_ip): + hwm_host_mac = [hwm_host_interface['mac'] for hwm_host_interface + in hwm_host.get('interfaces')] + for host in hosts: + host_update_meta = dict() + host_meta = self.get_host_meta_or_404(req, host['id']) + host_mac = [host_interface['mac'] for host_interface + in host_meta.get('interfaces')] + set_same_mac = set(hwm_host_mac) & set(host_mac) + + if set_same_mac: + host_update_meta['hwm_id'] = hwm_host['id'] + host_update_meta['hwm_ip'] = hwm_ip + node = registry.update_host_metadata(req.context, host['id'], + host_update_meta) + return node + + host_add_meta = dict() + host_add_meta['name'] = str(hwm_host['id']) + host_add_meta['description'] = 'default' + host_add_meta['os_status'] = 'init' + host_add_meta['hwm_id'] = str(hwm_host['id']) + host_add_meta['hwm_ip'] = str(hwm_ip) + host_add_meta['interfaces'] = str(hwm_host['interfaces']) + node = registry.add_host_metadata(req.context, host_add_meta) + return node + + def update_hwm_host(self, req, host_meta): + self._enforce(req, 'get_hosts') + params = self._get_query_params(req) + try: + hosts = registry.get_hosts_detail(req.context, **params) + hosts_without_hwm_id = list() + hosts_hwm_id_list = list() + for host in hosts: + if host.get('hwm_id'): + hosts_hwm_id_list.append(host['hwm_id']) + else: + hosts_without_hwm_id.append(host) + + hwm_hosts = host_meta['nodes'] + hwm_ip = host_meta['hwm_ip'] + nodes = list() + for hwm_host in eval(hwm_hosts): + if hwm_host['id'] in hosts_hwm_id_list: + continue + node = self._update_hwm_host(req, hwm_host, + hosts_without_hwm_id, hwm_ip) + nodes.append(node) + return dict(nodes=nodes) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + def _compute_hugepage_memory(self, hugepages, memory, hugepagesize='1G'): hugepage_memory = 0 if hugepagesize == '2M': - hugepage_memory = 2*1024*int(hugepages) + hugepage_memory = 2 * 1024 * int(hugepages) if hugepagesize == '1G': - hugepage_memory = 1*1024*1024*int(hugepages) + hugepage_memory = 1 * 1024 * 1024 * int(hugepages) if hugepage_memory > memory: - msg = "The memory hugepages used is bigger than total memory." 
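_compute_hugepage_memory above converts the requested hugepage count into the same unit as the host's reported memory total before comparing; going by the 2*1024 and 1*1024*1024 factors, that unit appears to be kilobytes. A small worked sketch under that assumption:

def hugepage_memory_kb(hugepages, hugepagesize='1G'):
    # 2M pages take 2*1024 kB each, 1G pages take 1024*1024 kB each,
    # matching the constants in _compute_hugepage_memory.
    if hugepagesize == '2M':
        return 2 * 1024 * int(hugepages)
    if hugepagesize == '1G':
        return 1 * 1024 * 1024 * int(hugepages)
    raise ValueError('unsupported hugepagesize %s' % hugepagesize)

total_memory_kb = 64 * 1024 * 1024          # a 64 GB host, expressed in kB
used_kb = hugepage_memory_kb(16, '1G')      # 16 x 1G pages
print(used_kb, used_kb > total_memory_kb)   # 16777216 False -> fits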
- raise HTTPBadRequest(explanation=msg) - - def _host_with_no_pxe_info_in_db(self, host_interfaces): - input_host_pxe_info = self._count_host_pxe_info(host_interfaces) - if not input_host_pxe_info: - return True - - def _host_with_bad_pxe_info_in_params(self, host_meta): - input_host_pxe_info = self._count_host_pxe_info(host_meta['interfaces']) - # In default,we think there is only one pxe interface. - if not input_host_pxe_info: - LOG.info("<<>>" - % host_meta.get('name', None)) - return True - # If it not only the exception will be raise. - if len(input_host_pxe_info) > 1: - msg = ("There are more than one pxe nics among the same host," - "it isn't allowed.") + msg = "The memory hugepages used is bigger " \ + "than total memory." raise HTTPBadRequest(explanation=msg) def _count_host_pxe_info(self, interfaces): interfaces = eval(interfaces) - input_host_pxe_info = [interface - for interface in interfaces - if interface.get('is_deployment', None) == "True" or interface.get('is_deployment', None) == "true" - or interface.get('is_deployment', None) == 1] + input_host_pxe_info = [ + interface for interface in interfaces if interface.get( + 'is_deployment', + None) == "True" or interface.get( + 'is_deployment', + None) == "true" or interface.get( + 'is_deployment', + None) == 1] return input_host_pxe_info def _update_networks_phyname(self, req, interface, cluster_id): phyname_networks = {} - cluster_networks = registry.get_networks_detail(req.context, cluster_id) + cluster_networks = registry.get_networks_detail( + req.context, cluster_id) for assigned_network in list(interface['assigned_networks']): network_info_list = [network for network in cluster_networks - if assigned_network['name'] == network['name']] + if assigned_network['name'] == + network['name']] if network_info_list and network_info_list[0]: network_info = network_info_list[0] phyname_networks[network_info['id']] = \ [network_info['name'], interface['name']] else: - msg = "can't find network named '%s' in cluster '%s'" % (assigned_network['name'], cluster_id) + msg = "can't find network named '%s' in cluster '%s'" % ( + assigned_network['name'], cluster_id) LOG.error(msg) raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") + request=req, + content_type="text/plain") # by cluster id and network_name search interface table registry.update_phyname_of_network(req.context, phyname_networks) - def _verify_interface_in_same_host(self, interfaces, id = None): + def _verify_interface_in_same_host(self, interfaces, id=None): """ Verify interface in the input host. :param interface: host interface info @@ -618,33 +851,50 @@ class Controller(controller.BaseController): # verify interface among the input host interfaces = eval(interfaces) same_mac_list = [interface1['name'] - for interface1 in interfaces for interface2 in interfaces - if interface1.get('name', None) and interface1.get('mac', None) and - interface2.get('name', None) and interface2.get('mac', None) and - interface1.get('type', None) and interface2.get('type', None) and - interface1['name'] != interface2['name'] and interface1['mac'] == interface2['mac'] - and interface1['type'] != "bond" and interface2['type'] != "bond"] - # Notice:If interface with same 'mac' is illegal,we need delete code #1,and raise exception in 'if' block. 
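_count_host_pxe_info above is the helper the interface checks rely on to find the deployment (PXE) nic; the interfaces field arrives as a string literal, hence the eval. A minimal sketch with invented sample data:

def count_pxe_interfaces(interfaces_str):
    # Interfaces flagged as the deployment nic; is_deployment may arrive as
    # "True", "true" or 1 depending on the caller, as in the hunk above.
    interfaces = eval(interfaces_str)
    return [i for i in interfaces
            if i.get('is_deployment') in ("True", "true", 1)]

pxe = count_pxe_interfaces(
    "[{'name': 'eth0', 'is_deployment': 'True'}, {'name': 'eth1'}]")
print(len(pxe))   # 1 -- exactly one deployment nic is the expected case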
+ for interface1 in interfaces for interface2 in + interfaces + if interface1.get('name', None) and + interface1.get('mac', None) and + interface2.get('name', None) and + interface2.get('mac', None) and + interface1.get('type', None) and + interface2.get('type', None) and + interface1['name'] != interface2['name'] and + interface1['mac'] == interface2['mac'] and + interface1['type'] != "bond" and + interface2['type'] != "bond"] + # Notice:If interface with same 'mac' is illegal, + # we need delete code #1,and raise exception in 'if' block. # This code block is just verify for early warning. if same_mac_list: msg = "%s%s" % ("" if not id else "Host id:%s." % id, - "The nic name of interface [%s] with same mac,please check!" % + "The nic name of interface [%s] with same mac," + "please check!" % ",".join(same_mac_list)) LOG.warn(msg) # 1----------------------------------------------------------------- # if interface with same 'pci', raise exception same_pci_list = [interface1['name'] - for interface1 in interfaces for interface2 in interfaces - if interface1.get('name', None) and interface1.get('pci', None) and - interface2.get('name', None) and interface2.get('pci', None) and - interface1.get('type', None) and interface2.get('type', None) and - interface1['name'] != interface2['name'] and interface1['pci'] == interface2['pci'] - and interface1['type'] != "bond" and interface2['type'] != "bond"] + for interface1 in interfaces for interface2 in + interfaces + if interface1.get('name', None) and + interface1.get('pci', None) and + interface2.get('name', None) and + interface2.get('pci', None) and + interface1.get('type', None) and + interface2.get('type', None) and + interface1['name'] != interface2['name'] and + interface1['pci'] == interface2['pci'] and + interface1['type'] == "ether" and + interface2['type'] == "ether"] if same_pci_list: - msg = "The nic name of interface [%s] with same pci,please check!" % ",".join(same_pci_list) - raise HTTPForbidden(explanation = msg) + msg = "The nic name of interface [%s] " \ + "with same pci,please check!" % ",".join( + same_pci_list) + LOG.error(msg) + raise HTTPForbidden(msg) # 1----------------------------------------------------------------- def _verify_interface_among_hosts(self, req, host_meta): @@ -659,7 +909,8 @@ class Controller(controller.BaseController): self._verify_interface_in_same_host(host_meta['interfaces']) # host pxe interface info - input_host_pxe_info = self._count_host_pxe_info(host_meta['interfaces']) + input_host_pxe_info = self._count_host_pxe_info( + host_meta['interfaces']) # verify interface between exist host and input host in cluster list_params = { 'sort_key': u'name', @@ -673,19 +924,27 @@ class Controller(controller.BaseController): input_host_pxe_info = input_host_pxe_info[0] for exist_node in exist_nodes: id = exist_node.get('id', None) + os_status = exist_node.get('os_status', None) exist_node_info = self.get_host(req, id).get('host_meta', None) if not exist_node_info.get('interfaces', None): continue for interface in exist_node_info['interfaces']: - if interface.get('mac', None) != input_host_pxe_info.get('mac', None) or \ - interface.get('type', None) == "bond": + if interface.get( + 'mac', None) != input_host_pxe_info.get( + 'mac', None) or interface.get( + 'type', None) == "bond": continue - if exist_node.get('dmi_uuid', None) != host_meta.get('dmi_uuid', None): - msg = "The 'mac' of host interface is exist in db, but 'dmi_uuid' is different." \ - "We think you want update the host, but the host can't find." 
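The tightened check above now raises only when two ether interfaces on the same host report the same pci address (bonds are excluded). An equivalent standalone sketch with an invented helper name; ValueError would stand in for the HTTPForbidden raised by the API.

def duplicate_ether_pci_names(interfaces):
    # Names of ether interfaces that share a pci address with another
    # ether interface on the same host.
    seen = {}
    duplicated = set()
    for nic in interfaces:
        if nic.get('type') != 'ether' or not nic.get('pci') or \
                not nic.get('name'):
            continue
        if nic['pci'] in seen:
            duplicated.update([seen[nic['pci']], nic['name']])
        else:
            seen[nic['pci']] = nic['name']
    return sorted(duplicated)

dupes = duplicate_ether_pci_names([
    {'name': 'eth0', 'type': 'ether', 'pci': '0000:01:00.0'},
    {'name': 'eth1', 'type': 'ether', 'pci': '0000:01:00.0'},
    {'name': 'bond0', 'type': 'bond'},
])
print(dupes)   # ['eth0', 'eth1'] -- this host would be rejected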
+ if exist_node.get('dmi_uuid') \ + and exist_node.get('dmi_uuid') != \ + host_meta.get('dmi_uuid'): + msg = "The 'mac' of host interface is exist in " \ + "db, but 'dmi_uuid' is different.We think " \ + "you want update the host, but the host " \ + "can't find." raise HTTPForbidden(explanation=msg) - return id + return (id, os_status) + return (None, None) def _get_swap_lv_size_m(self, memory_size_m): if memory_size_m <= 4096: @@ -697,7 +956,63 @@ class Controller(controller.BaseController): else: swap_lv_size_m = 65536 return swap_lv_size_m - + + def _ready_to_discover_host(self, host_meta, orig_host_meta): + if orig_host_meta.get('interfaces', None): + macs = [interface['mac'] for interface + in orig_host_meta['interfaces'] if interface['mac']] + for mac in macs: + delete_host_discovery_info = 'pxe_os_install_clean ' + mac + subprocess.call(delete_host_discovery_info, + shell=True, + stdout=open('/dev/null', 'w'), + stderr=subprocess.STDOUT) + if ('role' not in host_meta and + 'status' in orig_host_meta and + orig_host_meta['status'] == 'with-role' and + orig_host_meta['os_status'] != 'init'): + host_meta['role'] = [] + if 'os_progress' not in host_meta: + host_meta['os_progress'] = 0 + if 'messages' not in host_meta: + host_meta['messages'] = '' + + def _set_pxe_interface_for_host(self, req, host_meta): + all_networks = self.get_cluster_networks_info(req, type='system') + template_deploy_network = [network for network in all_networks + if network['type'] == 'system' and + network['name'] == 'DEPLOYMENT'] + if not template_deploy_network: + msg = "error, can't find deployment network of system" + raise HTTPNotFound(msg) + + dhcp_cidr = template_deploy_network[0]['cidr'] + dhcp_ip_ranges = template_deploy_network[0]['ip_ranges'] + + deployment_interface_count = 0 + host_meta['interfaces'] = eval(host_meta['interfaces']) + for interface in host_meta['interfaces']: + if 'ip' in interface and interface['ip']: + ip_in_cidr = utils.is_ip_in_cidr(interface['ip'], + dhcp_cidr) + if dhcp_ip_ranges: + ip_in_ranges = utils.is_ip_in_ranges(interface['ip'], + dhcp_ip_ranges) + else: + ip_in_ranges = True + if ip_in_cidr and ip_in_ranges: + interface['is_deployment'] = 1 + deployment_interface_count += 1 + + if deployment_interface_count != 1: + if deployment_interface_count == 0: + msg = "error, can't find dhcp ip" + if deployment_interface_count > 1: + msg = "error, find more than one dhcp ip" + LOG.error(msg) + raise HTTPBadRequest(explanation=msg) + host_meta['interfaces'] = unicode(host_meta['interfaces']) + @utils.mutating def update_host(self, req, id, host_meta): """ @@ -717,255 +1032,366 @@ class Controller(controller.BaseController): raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") - if host_meta.has_key('interfaces'): + orig_mac_list = list() + if 'interfaces' in host_meta: for interface_param in eval(host_meta['interfaces']): if not interface_param.get('pci', None) and \ - interface_param.get('type', None) != 'bond': + interface_param.get('type', None) == 'ether': msg = "The Interface need a non-null pci" raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") - if interface_param.has_key('vswitch_type') and interface_param['vswitch_type'] != '' and interface_param['vswitch_type'] not in ML2_TYPE: - msg = "vswitch_type %s is not supported" % interface_param['vswitch_type'] + if 'vswitch_type' in interface_param and interface_param[ + 'vswitch_type'] != '' and \ + interface_param['vswitch_type'] not in ML2_TYPE: + msg = "vswitch_type %s is 
not supported" % interface_param[ + 'vswitch_type'] raise HTTPBadRequest(explanation=msg, request=req, - content_type="text/plain") - if orig_host_meta.get('interfaces', None): - interfaces_db = orig_host_meta['interfaces'] + content_type="text/plain") + interfaces_db = orig_host_meta.get('interfaces', None) + orig_mac_list = [interface_db['mac'] for interface_db in + interfaces_db if interface_db['mac']] + orig_pci_list = [interface_db['pci'] for interface_db in + interfaces_db if interface_db['pci']] + if interfaces_db and len(orig_pci_list): interfaces_param = eval(host_meta['interfaces']) - interfaces_db_ether = [interface_db for interface_db in - interfaces_db if interface_db.get('type', None) != 'bond'] - interfaces_param_ether = [interface_param for interface_param in - interfaces_param if interface_param.get('type', None) != 'bond'] + interfaces_db_ether = [ + interface_db for interface_db in interfaces_db if + interface_db.get( + 'type', None) != 'bond'] + interfaces_param_ether = [ + interface_param for interface_param in interfaces_param if + interface_param.get( + 'type', None) != 'bond'] if len(interfaces_param) < len(interfaces_db_ether): msg = "Forbidden to update part of interfaces" raise HTTPForbidden(explanation=msg) + # pci in subnet interface is null, + # comment it to avoid the bug. 20160508 gaoming + if '': pci_count = 0 for interface_db in interfaces_db: if interface_db.get('type', None) != 'bond': for interface_param in interfaces_param_ether: if interface_param['pci'] == interface_db['pci']: pci_count += 1 - if interface_param['mac'] != interface_db['mac']: + if interface_param[ + 'mac'] != interface_db['mac']: msg = "Forbidden to modify mac of " \ - "interface with pci %s" % interface_db['pci'] + "interface with pci %s" % \ + interface_db['pci'] raise HTTPForbidden(explanation=msg) - if interface_param['type'] != interface_db['type']: + if interface_param[ + 'type'] != interface_db['type']: msg = "Forbidden to modify type of " \ - "interface with pci %s" % interface_db['pci'] + "interface with pci %s" % \ + interface_db['pci'] raise HTTPForbidden(explanation=msg) if pci_count != len(interfaces_db_ether): msg = "Forbidden to modify pci of interface" raise HTTPForbidden(explanation=msg) - if host_meta.has_key('cluster'): + if 'cluster' in host_meta: self.get_cluster_meta_or_404(req, host_meta['cluster']) - if host_meta.has_key('cluster'): + if 'cluster' in host_meta: if orig_host_meta['status'] == 'in-cluster': host_cluster = registry.get_host_clusters(req.context, id) if host_meta['cluster'] != host_cluster[0]['cluster_id']: msg = _("Forbidden to add host %s with status " "'in-cluster' in another cluster") % id raise HTTPForbidden(explanation=msg) - - if (host_meta.has_key('resource_type') and - host_meta['resource_type'] not in self.support_resource_type): - msg = "resource type is not supported, please use it in %s" % self.support_resource_type + + if ('resource_type' in host_meta and + host_meta['resource_type'] not in self.support_resource_type): + msg = "resource type is not supported, please use it in %s" % \ + self.support_resource_type raise HTTPNotFound(msg) - if host_meta.get('os_status',None) != 'init' and orig_host_meta.get('os_status',None) == 'active': - if host_meta.get('root_disk',None) and host_meta['root_disk'] != orig_host_meta['root_disk']: - msg = _("Forbidden to update root_disk of %s when os_status is active if " - "you don't want to install os") % host_meta['name'] + if host_meta.get( + 'os_status', + None) != 'init' and orig_host_meta.get( 
+ 'os_status', + None) == 'active': + if host_meta.get('root_disk', None) and host_meta[ + 'root_disk'] != orig_host_meta['root_disk']: + msg = _( + "Forbidden to update root_disk of %s " + "when os_status is active if " + "you don't want to install os") % host_meta['name'] raise HTTPForbidden(explanation=msg) else: host_meta['root_disk'] = orig_host_meta['root_disk'] else: - if host_meta.get('root_disk',None): + if host_meta.get('root_disk', None): root_disk = host_meta['root_disk'] - elif orig_host_meta.get('root_disk',None): + elif orig_host_meta.get('root_disk', None): root_disk = str(orig_host_meta['root_disk']) else: host_meta['root_disk'] = 'sda' root_disk = host_meta['root_disk'] - if not orig_host_meta.get('disks',None): - msg = "there is no disks in %s" %orig_host_meta['id'] - raise HTTPNotFound(msg) + if not orig_host_meta.get('disks', None): + msg = "there is no disks in %s" % orig_host_meta['id'] + raise HTTPNotFound(msg) if root_disk not in orig_host_meta['disks'].keys(): msg = "There is no disk named %s" % root_disk raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") + request=req, + content_type="text/plain") - if host_meta.get('os_status',None) != 'init' and orig_host_meta.get('os_status',None) == 'active': - if host_meta.get('root_lv_size',None) and int(host_meta['root_lv_size']) != orig_host_meta['root_lv_size']: - msg = _("Forbidden to update root_lv_size of %s when os_status is active if " - "you don't want to install os") % host_meta['name'] + if host_meta.get( + 'os_status', + None) != 'init' and orig_host_meta.get( + 'os_status', + None) == 'active': + if host_meta.get( + 'root_lv_size', None) and int( + host_meta['root_lv_size']) != orig_host_meta[ + 'root_lv_size']: + msg = _( + "Forbidden to update root_lv_size of %s " + "when os_status is active if " + "you don't want to install os") % host_meta['name'] raise HTTPForbidden(explanation=msg) else: host_meta['root_lv_size'] = str(orig_host_meta['root_lv_size']) else: - if host_meta.get('root_lv_size',None): + if host_meta.get('root_lv_size', None): root_lv_size = host_meta['root_lv_size'] - elif orig_host_meta.get('root_lv_size',None): + elif orig_host_meta.get('root_lv_size', None): root_lv_size = str(orig_host_meta['root_lv_size']) else: - host_meta['root_lv_size'] = '51200' + host_meta['root_lv_size'] = '102400' root_lv_size = host_meta['root_lv_size'] - if not orig_host_meta.get('disks',None): - msg = "there is no disks in %s" %orig_host_meta['id'] - raise HTTPNotFound(msg) + if not orig_host_meta.get('disks', None): + msg = "there is no disks in %s" % orig_host_meta['id'] + raise HTTPNotFound(msg) if root_lv_size.isdigit(): - root_lv_size=int(root_lv_size) - root_disk_storage_size_b_str = str(orig_host_meta['disks']['%s' %root_disk]['size']) - root_disk_storage_size_b_int = int(root_disk_storage_size_b_str.strip().split()[0]) - root_disk_storage_size_m = root_disk_storage_size_b_int//(1024*1024) + root_lv_size = int(root_lv_size) + root_disk_storage_size_b_str = str( + orig_host_meta['disks'][ + '%s' % + root_disk]['size']) + root_disk_storage_size_b_int = int( + root_disk_storage_size_b_str.strip().split()[0]) + root_disk_storage_size_m = root_disk_storage_size_b_int // ( + 1024 * 1024) boot_partition_m = 400 redundant_partiton_m = 600 - free_root_disk_storage_size_m = root_disk_storage_size_m - boot_partition_m - redundant_partiton_m - if (root_lv_size/4)*4 > free_root_disk_storage_size_m: - msg = "root_lv_size of %s is larger than the 
free_root_disk_storage_size."%orig_host_meta['id'] + free_root_disk_storage_size_m = root_disk_storage_size_m - \ + boot_partition_m - redundant_partiton_m + if (root_lv_size / 4) * 4 > free_root_disk_storage_size_m: + msg = "root_lv_size of %s is larger " \ + "than the free_root_disk_storage_size." % \ + orig_host_meta['id'] raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") - if (root_lv_size/4)*4 < 51200: - msg = "root_lv_size of %s is too small ,it must be larger than 51200M."%orig_host_meta['id'] + if (root_lv_size / 4) * 4 < 102400: + msg = "root_lv_size of %s is too small, " \ + "it must be larger than 102400M." % orig_host_meta[ + 'id'] raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") else: - msg = (_("root_lv_size of %s is wrong,please input a number and it must be positive number") %orig_host_meta['id']) + msg = ( + _("root_lv_size of %s is wrong," + "please input a number and it must be positive number") % + orig_host_meta['id']) raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") - if host_meta.get('os_status',None) != 'init' and orig_host_meta.get('os_status',None) == 'active': - if host_meta.get('swap_lv_size',None) and int(host_meta['swap_lv_size']) != orig_host_meta['swap_lv_size']: - msg = _("Forbidden to update swap_lv_size of %s when os_status is active if " - "you don't want to install os") % host_meta['name'] + if host_meta.get( + 'os_status', + None) != 'init' and orig_host_meta.get( + 'os_status', + None) == 'active': + if host_meta.get( + 'swap_lv_size', None) and int( + host_meta['swap_lv_size']) != \ + orig_host_meta['swap_lv_size']: + msg = _( + "Forbidden to update swap_lv_size of %s " + "when os_status is active if " + "you don't want to install os") % host_meta['name'] raise HTTPForbidden(explanation=msg) else: host_meta['swap_lv_size'] = str(orig_host_meta['swap_lv_size']) else: - if host_meta.get('swap_lv_size',None): + if host_meta.get('swap_lv_size', None): swap_lv_size = host_meta['swap_lv_size'] - elif orig_host_meta.get('swap_lv_size',None): + elif orig_host_meta.get('swap_lv_size', None): swap_lv_size = str(orig_host_meta['swap_lv_size']) else: - if not orig_host_meta.get('memory',None): - msg = "there is no memory in %s" %orig_host_meta['id'] + if not orig_host_meta.get('memory', None): + msg = "there is no memory in %s" % orig_host_meta['id'] raise HTTPNotFound(msg) memory_size_b_str = str(orig_host_meta['memory']['total']) memory_size_b_int = int(memory_size_b_str.strip().split()[0]) - memory_size_m = memory_size_b_int//1024 + memory_size_m = memory_size_b_int // 1024 swap_lv_size_m = self._get_swap_lv_size_m(memory_size_m) host_meta['swap_lv_size'] = str(swap_lv_size_m) swap_lv_size = host_meta['swap_lv_size'] if swap_lv_size.isdigit(): - swap_lv_size=int(swap_lv_size) + swap_lv_size = int(swap_lv_size) disk_storage_size_b = 0 for key in orig_host_meta['disks']: + if orig_host_meta['disks'][key]['disk'].find("-fc-") \ + != -1 or orig_host_meta['disks'][key]['disk'].\ + find("-iscsi-") != -1 \ + or orig_host_meta['disks'][key]['name'].\ + find("mpath") != -1 \ + or orig_host_meta['disks'][key]['name'].\ + find("spath") != -1: + continue stroage_size_str = orig_host_meta['disks'][key]['size'] - stroage_size_b_int = int(stroage_size_str.strip().split()[0]) - disk_storage_size_b = disk_storage_size_b + stroage_size_b_int - disk_storage_size_m = disk_storage_size_b/(1024*1024) + stroage_size_b_int = int( + stroage_size_str.strip().split()[0]) + disk_storage_size_b = \ + 
disk_storage_size_b + stroage_size_b_int + disk_storage_size_m = disk_storage_size_b / (1024 * 1024) boot_partition_m = 400 redundant_partiton_m = 600 - if host_meta.get('role',None): + if host_meta.get('role', None): host_role_names = eval(host_meta['role']) - elif orig_host_meta.get('role',None): + elif orig_host_meta.get('role', None): host_role_names = orig_host_meta['role'] else: host_role_names = None if host_role_names: - roles_of_host=[] + roles_of_host = [] params = self._get_query_params(req) - role_lists = registry.get_roles_detail(req.context, **params) + role_lists = registry.get_roles_detail( + req.context, **params) for host_role_name in host_role_names: for role in role_lists: - if host_role_name == role['name'] and role['type'] == 'default': + if host_role_name == role[ + 'name'] and role['type'] == 'default': roles_of_host.append(role) db_lv_size = 0 nova_lv_size = 0 glance_lv_size = 0 for role_of_host in roles_of_host: if role_of_host['name'] == 'CONTROLLER_HA': - if role_of_host.get('glance_lv_size',None): + if role_of_host.get('glance_lv_size', None): glance_lv_size = role_of_host['glance_lv_size'] - if role_of_host.get('db_lv_size',None): + if role_of_host.get('db_lv_size', None): db_lv_size = role_of_host['db_lv_size'] if role_of_host['name'] == 'COMPUTER': nova_lv_size = role_of_host['nova_lv_size'] - free_disk_storage_size_m = disk_storage_size_m - boot_partition_m - redundant_partiton_m - \ - (root_lv_size/4)*4 - (glance_lv_size/4)*4- (nova_lv_size/4)*4- (db_lv_size/4)*4 + free_disk_storage_size_m = disk_storage_size_m - \ + boot_partition_m - \ + redundant_partiton_m - \ + (root_lv_size / 4) * 4 - (glance_lv_size / 4) * 4 - \ + (nova_lv_size / 4) * 4 - \ + (db_lv_size / 4) * 4 else: - free_disk_storage_size_m = disk_storage_size_m - boot_partition_m - \ - redundant_partiton_m - (root_lv_size/4)*4 - if (swap_lv_size/4)*4 > free_disk_storage_size_m: - msg = "the sum of swap_lv_size and glance_lv_size and nova_lv_size and db_lv_size of %s is larger " \ - "than the free_disk_storage_size."%orig_host_meta['id'] + free_disk_storage_size_m = disk_storage_size_m - \ + boot_partition_m - redundant_partiton_m - \ + (root_lv_size / 4) * 4 + if (swap_lv_size / 4) * 4 > free_disk_storage_size_m: + msg = "the sum of swap_lv_size and " \ + "glance_lv_size and nova_lv_size and " \ + "db_lv_size of %s is larger " \ + "than the free_disk_storage_size." % \ + orig_host_meta['id'] raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") - if (swap_lv_size/4)*4 < 2000: - msg = "swap_lv_size of %s is too small ,it must be larger than 2000M."%orig_host_meta['id'] + if (swap_lv_size / 4) * 4 < 2000: + msg = "swap_lv_size of %s is too small, " \ + "it must be larger than 2000M." 
% orig_host_meta[ + 'id'] raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") else: - msg = (_("swap_lv_size of %s is wrong,please input a number and it must be positive number") %orig_host_meta['id']) + msg = ( + _("swap_lv_size of %s is wrong," + "please input a number and it must be positive number") % + orig_host_meta['id']) raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") - - if host_meta.get('os_status',None) != 'init' and orig_host_meta.get('os_status',None) == 'active': - if host_meta.get('root_pwd',None) and host_meta['root_pwd'] != orig_host_meta['root_pwd']: - msg = _("Forbidden to update root_pwd of %s when os_status is active if " - "you don't want to install os") % host_meta['name'] + if host_meta.get( + 'os_status', + None) != 'init' and orig_host_meta.get( + 'os_status', + None) == 'active': + if host_meta.get('root_pwd', None) and host_meta[ + 'root_pwd'] != orig_host_meta['root_pwd']: + msg = _( + "Forbidden to update root_pwd of %s " + "when os_status is active if " + "you don't want to install os") % host_meta['name'] raise HTTPForbidden(explanation=msg) else: host_meta['root_pwd'] = orig_host_meta['root_pwd'] else: - if not host_meta.get('root_pwd',None) and not orig_host_meta.get('root_pwd',None): + if not host_meta.get( + 'root_pwd', + None) and not orig_host_meta.get( + 'root_pwd', + None): host_meta['root_pwd'] = 'ossdbg1' - if host_meta.get('os_status',None) != 'init' and orig_host_meta.get('os_status',None) == 'active': - if host_meta.get('isolcpus',None) and host_meta['isolcpus'] != orig_host_meta['isolcpus']: - msg = _("Forbidden to update isolcpus of %s when os_status is active if " - "you don't want to install os") % host_meta['name'] + if host_meta.get( + 'os_status', + None) != 'init' and orig_host_meta.get( + 'os_status', + None) == 'active': + if host_meta.get('isolcpus', None) and host_meta[ + 'isolcpus'] != orig_host_meta['isolcpus']: + msg = _( + "Forbidden to update isolcpus of %s " + "when os_status is active if " + "you don't want to install os") % host_meta['name'] raise HTTPForbidden(explanation=msg) else: host_meta['isolcpus'] = orig_host_meta['isolcpus'] else: - if host_meta.get('isolcpus',None): + if host_meta.get('isolcpus', None): isolcpus = host_meta['isolcpus'] - elif orig_host_meta.get('isolcpus',None): + elif orig_host_meta.get('isolcpus', None): isolcpus = orig_host_meta['isolcpus'] else: host_meta['isolcpus'] = None isolcpus = host_meta['isolcpus'] - if not orig_host_meta.get('cpu',None): - msg = "there is no cpu in %s" %orig_host_meta['id'] - raise HTTPNotFound(msg) + if not orig_host_meta.get('cpu', None): + msg = "there is no cpu in %s" % orig_host_meta['id'] + raise HTTPNotFound(msg) cpu_num = orig_host_meta['cpu']['total'] if isolcpus: - isolcpus_lists = [value.split('-') for value in isolcpus.split(',')] + isolcpus_lists = [value.split('-') + for value in isolcpus.split(',')] isolcpus_list = [] for value in isolcpus_lists: isolcpus_list = isolcpus_list + value for value in isolcpus_list: - if int(value)<0 or int(value)>cpu_num -1: - msg = "isolcpus number must be lager than 0 and less than %d" %(cpu_num-1) + if int(value) < 0 or int(value) > cpu_num - 1: + msg = "isolcpus number must be lager than 0 and " \ + "less than %d" % ( + cpu_num - 1) raise HTTPForbidden(explanation=msg, - request=req, - content_type="text/plain") + request=req, + content_type="text/plain") - if host_meta.has_key('role'): + clusters = registry.get_clusters_detail(req.context) + orig_cluster_name = 
orig_host_meta.get('cluster', None) + orig_cluster_id = None + for cluster in clusters: + if cluster['name'] == orig_cluster_name: + orig_cluster_id = cluster['id'] + cluster_id = host_meta.get('cluster', orig_cluster_id) + + params = self._get_query_params(req) + role_list = registry.get_roles_detail(req.context, **params) + if 'role' in host_meta: role_id_list = [] - if host_meta.has_key('cluster'): - params = self._get_query_params(req) - role_list = registry.get_roles_detail(req.context, **params) + if 'cluster' in host_meta: host_roles = list() for role_name in role_list: if role_name['cluster_id'] == host_meta['cluster']: @@ -974,152 +1400,120 @@ class Controller(controller.BaseController): if role_name['name'] == host_role: role_id_list.append(role_name['id']) continue - if len(role_id_list) != len(host_roles) and host_meta['role'] != u"[u'']": - msg = "The role of params %s is not exist, please use the right name" % host_roles + if len(role_id_list) != len( + host_roles) and host_meta['role'] != u"[u'']": + msg = "The role of params %s is not exist, " \ + "please use the right name" % host_roles raise HTTPNotFound(msg) host_meta['role'] = role_id_list else: msg = "cluster params is none" raise HTTPNotFound(msg) - if host_meta.has_key('interfaces'): - if self._host_with_bad_pxe_info_in_params(host_meta): - msg = _('The parameter interfaces of %s is wrong, there is no interface for pxe.') % id - #raise HTTPBadRequest(explanation=msg) - else: - host_meta_interfaces = list(eval(host_meta['interfaces'])) - ether_nic_names_list = list() - bond_nic_names_list = list() - bond_slaves_lists = list() - interface_num = 0 - assigned_networks_of_interfaces = [] - for interface in host_meta_interfaces: - if interface.get('name', None): - if interface.has_key('type') and interface['type'] == 'bond': - bond_nic_names_list.append(interface['name']) - slave_list = [] - if interface.get('slaves', None): - bond_slaves_lists.append(interface['slaves']) - elif interface.get('slave1', None) and interface.get('slave2', None): - slave_list.append(interface['slave1']) - slave_list.append(interface['slave2']) - bond_slaves_lists.append(slave_list) - else: - msg = (_("Slaves parameter can not be None when nic type was bond.")) - LOG.error(msg) - raise HTTPForbidden(msg) - else: # type == ether or interface without type field - ether_nic_names_list.append(interface['name']) - else: - msg = (_("Nic name can not be None.")) - LOG.error(msg) - raise HTTPForbidden(msg) - if interface.has_key('is_deployment'): - if interface['is_deployment'] == "True" or interface['is_deployment'] == True: - interface['is_deployment'] = 1 + if 'interfaces' in host_meta: + host_meta_interfaces = list(eval(host_meta['interfaces'])) + ether_nic_names_list = list() + bond_nic_names_list = list() + bond_slaves_lists = list() + interface_num = 0 + assigned_networks_of_interfaces = [] + for interface in host_meta_interfaces: + if interface.get('name', None): + if 'type' in interface and interface['type'] == 'bond': + bond_nic_names_list.append(interface['name']) + slave_list = [] + if interface.get('slaves', None): + bond_slaves_lists.append(interface['slaves']) + elif interface.get('slave1', None) and \ + interface.get('slave2', None): + slave_list.append(interface['slave1']) + slave_list.append(interface['slave2']) + bond_slaves_lists.append(slave_list) else: - interface['is_deployment'] = 0 - - if (interface.has_key('assigned_networks') and - interface['assigned_networks'] != [''] and - interface['assigned_networks']): - clusters = 
registry.get_clusters_detail(req.context) - orig_cluster_name = orig_host_meta.get('cluster', None) - orig_cluster_id = None - for cluster in clusters: - if cluster['name'] == orig_cluster_name: - orig_cluster_id = cluster['id'] - cluster_id = host_meta.get('cluster', orig_cluster_id) - if cluster_id: - LOG.info("interface['assigned_networks']: %s" % interface['assigned_networks']) - assigned_networks_of_one_interface = self.\ - _check_assigned_networks(req, - cluster_id, - interface['assigned_' - 'networks']) - self._update_networks_phyname(req, interface, cluster_id) - host_meta['cluster'] = cluster_id - else: - msg = "cluster must be given first when network plane is allocated" + msg = ( + _("Slaves parameter can not be " + "None when nic type was bond.")) LOG.error(msg) - raise HTTPBadRequest(explanation=msg, - request=req, - content_type="text/plain") - assigned_networks_of_interfaces.\ - append(assigned_networks_of_one_interface) - else: - assigned_networks_of_interfaces.\ - append([]) - interface_num += 1 - self._compare_assigned_networks_between_interfaces\ - (interface_num, assigned_networks_of_interfaces) - - # check bond slaves validity - self.check_bond_slaves_validity(bond_slaves_lists, ether_nic_names_list) - nic_name_list = ether_nic_names_list + bond_nic_names_list - if len(set(nic_name_list)) != len(nic_name_list): - msg = (_("Nic name must be unique.")) + raise HTTPForbidden(msg) + else: # type == ether or interface without type field + ether_nic_names_list.append(interface['name']) + else: + msg = (_("Nic name can not be None.")) LOG.error(msg) raise HTTPForbidden(msg) - else: - if host_meta.has_key('cluster'): - host_interfaces = orig_host_meta.get('interfaces', None) - if host_interfaces: - if host_meta.has_key('os_status'): - if host_meta['os_status'] != 'active': - if self._host_with_no_pxe_info_in_db(str(host_interfaces)): - msg = _("The host has more than one dhcp " - "server, please choose one interface " - "for deployment") - raise HTTPServerError(explanation=msg) + if 'is_deployment' in interface: + if interface['is_deployment'] == "True" or interface[ + 'is_deployment']: + interface['is_deployment'] = 1 else: - if orig_host_meta.get('os_status', None) != 'active': - if self._host_with_no_pxe_info_in_db(str(host_interfaces)): - msg = _("There is no nic for deployment, " - "please choose one interface to set " - "it's 'is_deployment' True") - raise HTTPServerError(explanation=msg) + interface['is_deployment'] = 0 - if host_meta.has_key('os_status'): - if host_meta['os_status'] not in ['init', 'installing', 'active', 'failed', 'none']: + if ('assigned_networks' in interface and + interface['assigned_networks'] != [''] and + interface['assigned_networks']): + if cluster_id: + LOG.info( + "interface['assigned_networks']: %s" % + interface['assigned_networks']) + assigned_networks_of_one_interface = self.\ + _check_assigned_networks(req, + cluster_id, + interface['assigned_' + 'networks']) + self._update_networks_phyname( + req, interface, cluster_id) + host_meta['cluster'] = cluster_id + else: + msg = "cluster must be given first " \ + "when network plane is allocated" + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + assigned_networks_of_interfaces.\ + append(assigned_networks_of_one_interface) + else: + assigned_networks_of_interfaces.\ + append([]) + interface_num += 1 + self._compare_assigned_networks_between_interfaces( + interface_num, assigned_networks_of_interfaces) + + # check bond slaves validity + 
self.check_bond_slaves_validity( + bond_slaves_lists, ether_nic_names_list) + nic_name_list = ether_nic_names_list + bond_nic_names_list + if len(set(nic_name_list)) != len(nic_name_list): + msg = (_("Nic name must be unique.")) + LOG.error(msg) + raise HTTPForbidden(msg) + + if 'os_status' in host_meta: + if host_meta['os_status'] not in \ + ['init', 'installing', 'active', 'failed', 'none']: msg = "os_status is not valid." raise HTTPNotFound(msg) - if host_meta['os_status'] == 'init': - if orig_host_meta.get('interfaces', None): - macs = [interface['mac'] for interface in orig_host_meta['interfaces'] - if interface['mac']] - for mac in macs: - delete_host_discovery_info = 'pxe_os_install_clean ' + mac - subprocess.call(delete_host_discovery_info, - shell=True, - stdout=open('/dev/null', 'w'), - stderr=subprocess.STDOUT) - if (not host_meta.has_key('role') and - orig_host_meta.has_key('status') and - orig_host_meta['status'] == 'with-role' and - orig_host_meta['os_status'] != 'init'): - host_meta['role'] = [] - if not host_meta.has_key('os_progress'): - host_meta['os_progress'] = 0 - if not host_meta.has_key('messages'): - host_meta['messages'] = '' - - if ((host_meta.has_key('ipmi_addr') and host_meta['ipmi_addr']) - or orig_host_meta['ipmi_addr']): - if not host_meta.has_key('ipmi_user') and not orig_host_meta['ipmi_user']: + + if (('ipmi_addr' in host_meta and host_meta['ipmi_addr']) or + orig_host_meta['ipmi_addr']): + if 'ipmi_user' not in host_meta and not\ + orig_host_meta['ipmi_user']: host_meta['ipmi_user'] = 'zteroot' - if not host_meta.has_key('ipmi_passwd') and not orig_host_meta['ipmi_passwd']: + if 'ipmi_passwd' not in host_meta and not \ + orig_host_meta['ipmi_passwd']: host_meta['ipmi_passwd'] = 'superuser' - if host_meta.get('os_status',None) != 'init' and orig_host_meta.get('os_status',None) == 'active': - if host_meta.get('hugepages',None) and int(host_meta['hugepages']) != orig_host_meta['hugepages']: - msg = _("Forbidden to update hugepages of %s when os_status is active if " + if host_meta.get('os_status', None) != 'init' and \ + orig_host_meta.get('os_status', None) == 'active': + if host_meta.get( + 'hugepages', None) and int( + host_meta['hugepages']) != orig_host_meta['hugepages']: + msg = _("Forbidden to update hugepages of %s" + " when os_status is active if " "you don't want to install os") % host_meta['name'] raise HTTPForbidden(explanation=msg) - else: - host_meta['hugepages'] = str(orig_host_meta['hugepages']) else: - if host_meta.has_key('hugepages'): + if 'hugepages' in host_meta: if not orig_host_meta.get('memory', {}).get('total', None): msg = "The host %s has no memory" % id raise HTTPNotFound(explanation=msg) @@ -1127,75 +1521,112 @@ class Controller(controller.BaseController): if host_meta['hugepages'] is None: host_meta['hugepages'] = 0 if int(host_meta['hugepages']) < 0: - msg = "The parameter hugepages must be zero or positive integer." + msg = "The parameter hugepages must be zero or " \ + "positive integer." 
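# Editor's sketch (not part of the patch): the body of _compute_hugepage_memory
# is not visible in this hunk; judging only from the call sites below, which pass
# (hugepages, total memory in kB[, hugepagesize]), a plausible reading is a guard
# that the requested huge pages fit into the host's physical memory.  The '1G'
# default and the kB arithmetic are assumptions, not taken from the patch.
def compute_hugepage_memory(hugepages, memory_kb, hugepagesize='1G'):
    # one 2M page occupies 2 * 1024 kB, one 1G page occupies 1024 * 1024 kB
    page_kb = 2 * 1024 if hugepagesize.upper() == '2M' else 1024 * 1024
    if int(hugepages) * page_kb > int(memory_kb):
        raise ValueError("%s hugepages of %s exceed the host memory of %s kB"
                         % (hugepages, hugepagesize, memory_kb))

# e.g. compute_hugepage_memory(4, 32753568) passes,
#      compute_hugepage_memory(64, 32753568) raises.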
raise HTTPBadRequest(explanation=msg) - if not host_meta.has_key('hugepagesize') and \ + if 'hugepagesize' not in host_meta and \ orig_host_meta.get('hugepagesize', None): self._compute_hugepage_memory(host_meta['hugepages'], - int(memory.strip().split(' ')[0]), - orig_host_meta['hugepagesize']) - if not host_meta.has_key('hugepagesize') and \ + int(memory.strip().split( + ' ')[0]), + orig_host_meta[ + 'hugepagesize']) + if 'hugepagesize' not in host_meta and \ not orig_host_meta.get('hugepagesize', None): - self._compute_hugepage_memory(host_meta['hugepages'], - int(memory.strip().split(' ')[0])) + self._compute_hugepage_memory( + host_meta['hugepages'], int( + memory.strip().split(' ')[0])) - if host_meta.get('os_status',None) != 'init' and orig_host_meta.get('os_status',None) == 'active': - if host_meta.get('hugepagesize',None) and host_meta['hugepagesize'] != orig_host_meta['hugepagesize']: - msg = _("Forbidden to update hugepagesize of %s when os_status is active if " - "you don't want to install os") % host_meta['name'] + if host_meta.get('os_status', None) != 'init' and \ + orig_host_meta.get('os_status', None) == 'active': + if host_meta.get('hugepagesize', None) and \ + host_meta['hugepagesize'] != \ + orig_host_meta['hugepagesize']: + msg = _( + "Forbidden to update hugepagesize of %s" + " when os_status is active if you don't " + "want to install os") % host_meta['name'] raise HTTPForbidden(explanation=msg) - else: - host_meta['hugepagesize'] = orig_host_meta['hugepagesize'] else: - if host_meta.has_key('hugepagesize'): + if 'hugepagesize' in host_meta: if not orig_host_meta.get('memory', {}).get('total', None): msg = "The host %s has no memory" % id raise HTTPNotFound(explanation=msg) memory = orig_host_meta.get('memory', {}).get('total', None) if host_meta['hugepagesize'] is None: host_meta['hugepagesize'] = '1G' - elif host_meta['hugepagesize'] != '2m' and \ - host_meta['hugepagesize'] != '2M' and \ - host_meta['hugepagesize'] != '1g' and \ - host_meta['hugepagesize'] != '1G': - msg = "The value 0f parameter hugepagesize is not supported." - raise HTTPBadRequest(explanation=msg) - if host_meta['hugepagesize'] == '2m': - host_meta['hugepagesize'] = '2M' - if host_meta['hugepagesize'] == '1g': - host_meta['hugepagesize'] = '1G' + else: + host_meta['hugepagesize'].upper() + if host_meta['hugepagesize'] not in SUPPORT_HOST_PAGE_SIZE: + msg = "The value 0f parameter hugepagesize " \ + "is not supported." + raise HTTPBadRequest(explanation=msg) if host_meta['hugepagesize'] == '2M' and \ - int(host_meta['hugepagesize'][0])*1024 > \ - int(memory.strip().split(' ')[0]): + int(host_meta['hugepagesize'][0]) * 1024 > \ + int(memory.strip().split(' ')[0]): msg = "The host %s forbid to use hugepage because it's " \ "memory is too small" % id raise HTTPForbidden(explanation=msg) if host_meta['hugepagesize'] == '1G' and \ - int(host_meta['hugepagesize'][0])*1024*1024 > \ - int(memory.strip().split(' ')[0]): + int(host_meta['hugepagesize'][0]) * 1024 * 1024 > \ + int(memory.strip().split(' ')[0]): msg = "The hugepagesize is too big, you can choose 2M " \ "for a try." 
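# Editor's sketch (not part of the patch): the hugepagesize handling above,
# condensed into a standalone helper.  SUPPORT_HOST_PAGE_SIZE is assumed to be
# ['2M', '1G'], as the surrounding checks imply, and memory_total is the host's
# reported total-memory string, e.g. "32753568 kB".  Because str.upper() returns
# a new string, the sketch reassigns the normalized value; as written above,
# lowercase input such as '2m' would not pass the membership test.
SUPPORT_HOST_PAGE_SIZE = ['2M', '1G']

def validate_hugepagesize(hugepagesize, memory_total):
    memory_kb = int(memory_total.strip().split(' ')[0])
    hugepagesize = (hugepagesize or '1G').upper()
    if hugepagesize not in SUPPORT_HOST_PAGE_SIZE:
        raise ValueError("hugepagesize %s is not supported" % hugepagesize)
    # a single page must at least fit: 2M -> 2 * 1024 kB, 1G -> 1024 * 1024 kB
    page_kb = 2 * 1024 if hugepagesize == '2M' else 1024 * 1024
    if page_kb > memory_kb:
        raise ValueError("memory of %s kB is too small for %s hugepages"
                         % (memory_kb, hugepagesize))
    return hugepagesize

# e.g. validate_hugepagesize('2m', '32753568 kB') -> '2M'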
raise HTTPBadRequest(explanation=msg) - if host_meta.has_key('hugepages'): - self._compute_hugepage_memory(host_meta['hugepages'], - int(memory.strip().split(' ')[0]), - host_meta['hugepagesize']) - if not host_meta.has_key('hugepages') and orig_host_meta.get('hugepages', None): + if 'hugepages' in host_meta: + self._compute_hugepage_memory(host_meta['hugepages'], int( + memory.strip().split(' ')[0]), + host_meta['hugepagesize']) + if 'hugepages' not in host_meta and \ + orig_host_meta.get('hugepages', None): self._compute_hugepage_memory(orig_host_meta['hugepages'], - int(memory.strip().split(' ')[0]), - host_meta['hugepagesize']) + int( + memory.strip().split(' ')[0]), + host_meta['hugepagesize']) - if host_meta.get('os_status',None) != 'init' and orig_host_meta.get('os_status',None) == 'active': - if host_meta.get('os_version',None) and host_meta['os_version'] != orig_host_meta['os_version_file']: - msg = _("Forbidden to update os_version of %s when os_status is active if " + self._check_dvs_huge(host_meta, orig_host_meta) + + if host_meta.get('os_status', None) != 'init' \ + and orig_host_meta.get('os_status', None) == 'active': + if host_meta.get('os_version', None) and host_meta['os_version'] \ + != orig_host_meta['os_version_file']: + msg = _("Forbidden to update os_version of %s " + "when os_status is active if " "you don't want to install os") % host_meta['name'] raise HTTPForbidden(explanation=msg) + if (host_meta.get('config_set_id') and + host_meta['config_set_id'] != + orig_host_meta.get('config_set_id')): + self.get_config_set_meta_or_404(req, + host_meta['config_set_id']) + + if host_meta.get('os_status', None) == 'init': + self._ready_to_discover_host(host_meta, orig_host_meta) + try: - host_meta = registry.update_host_metadata(req.context, - id, + if host_meta.get('cluster', None): + host_detail = self.get_host_meta_or_404(req, id) + pxe_macs = [interface['mac'] for interface in host_detail[ + 'interfaces'] if interface['is_deployment']] + if not pxe_macs: + self.add_ssh_host_to_cluster_and_assigned_network( + req, host_meta['cluster'], id) + + host_meta = registry.update_host_metadata(req.context, id, host_meta) + if orig_mac_list: + orig_min_mac = min(orig_mac_list) + discover_host = self._get_discover_host_by_mac(req, + orig_min_mac) + if discover_host: + discover_host_params = { + "mac": orig_min_mac, + "status": "DISCOVERY_SUCCESSFUL" + } + self.update_pxe_host(req, discover_host['id'], + discover_host_params) except exception.Invalid as e: msg = (_("Failed to update host metadata. Got error: %s") % utils.exception_to_str(e)) @@ -1226,149 +1657,386 @@ class Controller(controller.BaseController): self.notifier.info('host.update', host_meta) return {'host_meta': host_meta} - - + + def _checker_the_ip_or_hostname_valid(self, ip_str): + try: + socket.gethostbyname_ex(ip_str) + return True + except Exception: + if netaddr.IPAddress(ip_str).version == 6: + return True + else: + return False + + def check_vlan_nic_and_join_vlan_network( + self, req, cluster_id, host_list, networks): + father_vlan_list = [] + for host_id in host_list: + host_meta_detail = self.get_host_meta_or_404(req, host_id) + if 'interfaces' in host_meta_detail: + interfac_list = host_meta_detail.get('interfaces', None) + for interface_info in interfac_list: + host_ip = interface_info.get('ip', None) + if interface_info['type'] == 'vlan' and host_ip: + check_ip_if_valid =\ + self._checker_the_ip_or_hostname_valid(host_ip) + if not check_ip_if_valid: + msg = "Error:The %s is not the right ip!" 
% host_ip + LOG.error(msg) + raise exception.Forbidden(msg) + nic_name = interface_info['name'].split('.')[0] + vlan_id = interface_info['name'].split('.')[1] + for network in networks: + if vlan_id == network['vlan_id']: + if network['network_type'] not in \ + ['DATAPLANE', 'EXTERNAL']: + father_vlan_list.append( + {nic_name: {'name': network['name'], + 'ip': host_ip}}) + interface_info['assigned_networks'].\ + append({'name': network['name'], + 'ip': host_ip}) + LOG.info( + "add the nic %s of the host %s to" + " assigned_network %s" % + (interface_info['name'], host_id, + interface_info['assigned_networks'])) + return father_vlan_list + + def check_bond_or_ether_nic_and_join_network( + self, req, cluster_id, host_list, networks, father_vlan_list): + for host_id in host_list: + host_info = self.get_host_meta_or_404(req, host_id) + if 'interfaces' in host_info: + update_host_interface = 0 + interfac_meta_list = host_info.get('interfaces', None) + for interface_info in interfac_meta_list: + update_flag = 0 + host_info_ip = interface_info.get('ip', None) + if interface_info['type'] != 'vlan': + nic_name = interface_info['name'] + for nic in father_vlan_list: + if nic.keys()[0] == nic_name: + update_flag = 1 + update_host_interface = 1 + interface_info['assigned_networks']\ + .append(nic.values()[0]) + if update_flag: + continue + if host_info_ip: + check_ip_if_valid =\ + self._checker_the_ip_or_hostname_valid( + host_info_ip) + if not check_ip_if_valid: + msg = "Error:The %s is not the right ip!"\ + % host_info_ip + LOG.error(msg) + raise exception.Forbidden(msg) + for network in networks: + if network.get('cidr', None): + ip_in_cidr = utils.is_ip_in_cidr( + host_info_ip, network['cidr']) + if ip_in_cidr: + vlan_id = network['vlan_id'] + if not vlan_id: + update_host_interface = 1 + if network['network_type'] not in\ + ['DATAPLANE', 'EXTERNAL']: + interface_info[ + 'assigned_networks']\ + .append({'name': + network['name'], + 'ip': + host_info_ip}) + LOG.info( + "add the nic %s " + "of the host" + " %s to assigned_network" + " %s" % + (nic_name, host_id, + interface_info[ + 'assigned_networks'])) + else: + msg = ( + "the nic %s of ip %s is " + "in the %s cidr " + "range,but the network vlan " + "id is %s " % + (nic_name, host_info_ip, + network['name'], vlan_id)) + LOG.error(msg) + raise exception.Forbidden(msg) + if update_host_interface: + host_meta = {} + host_meta['cluster'] = cluster_id + host_meta['interfaces'] = str(interfac_meta_list) + host_meta = registry.update_host_metadata(req.context, + host_id, + host_meta) + LOG.info("add the host %s join the cluster %s and" + " assigned_network successful" % + (host_id, cluster_id)) + + def add_ssh_host_to_cluster_and_assigned_network( + self, req, cluster_id, host_id): + if cluster_id: + host_list = [] + father_vlan_list = [] + # cluster_meta = {} + discover_successful = 0 + host_info = self.get_host_meta_or_404(req, host_id) + host_status = host_info.get('status', None) + if host_status != 'init': + interfac_meta_list = host_info.get('interfaces', None) + for interface_info in interfac_meta_list: + assigned_networks = interface_info.get( + 'assigned_networks', None) + if assigned_networks: + discover_successful = 1 + if not discover_successful: + host_list.append(host_id) + + if host_list: + # cluster_meta['nodes']=str(host_list) + # LOG.info("add ssh host %s to cluster %s" % + # (host_list, cluster_id)) + # cluster_meta = registry.update_cluster_metadata(req.context, + # cluster_id, + # cluster_meta) + params = {'filters': {'cluster_id': 
cluster_id}} + networks = registry.get_networks_detail(req.context, + cluster_id, **params) + father_vlan_list = self.check_vlan_nic_and_join_vlan_network( + req, cluster_id, host_list, networks) + self.check_bond_or_ether_nic_and_join_network( + req, cluster_id, host_list, networks, father_vlan_list) + def update_progress_to_db(self, req, update_info, discover_host_meta): - discover= {} + discover = {} discover['status'] = update_info['status'] discover['message'] = update_info['message'] if update_info.get('host_id'): discover['host_id'] = update_info['host_id'] LOG.info("discover:%s", discover) - registry.update_discover_host_metadata(req.context, discover_host_meta['id'], discover) - - def thread_bin(self,req,discover_host_meta): + registry.update_discover_host_metadata(req.context, + discover_host_meta['id'], + discover) + + def thread_bin(self, req, cluster_id, discover_host_meta): cmd = 'mkdir -p /var/log/daisy/discover_host/' daisy_cmn.subprocess_call(cmd) if not discover_host_meta['passwd']: - msg = "the passwd of ip %s is none."%discover_host_meta['ip'] + msg = "the passwd of ip %s is none." % discover_host_meta['ip'] LOG.error(msg) raise HTTPForbidden(msg) - var_log_path = "/var/log/daisy/discover_host/%s_discovery_host.log" % discover_host_meta['ip'] + var_log_path = "/var/log/daisy/discover_host/%s_discovery_host.log" \ + % discover_host_meta['ip'] with open(var_log_path, "w+") as fp: try: trustme_result = subprocess.check_output( - '/var/lib/daisy/tecs/trustme.sh %s %s' % (discover_host_meta['ip'],discover_host_meta['passwd']), + '/var/lib/daisy/tecs/trustme.sh %s %s' % + (discover_host_meta['ip'], discover_host_meta['passwd']), shell=True, stderr=subprocess.STDOUT) - if 'Permission denied' in trustme_result: #when passwd was wrong + if 'Permission denied' in trustme_result: + # when passwd was wrong update_info = {} update_info['status'] = 'DISCOVERY_FAILED' - update_info['message'] = "Passwd was wrong, do trustme.sh %s failed!" % discover_host_meta['ip'] - self.update_progress_to_db(req, update_info, discover_host_meta) - msg = (_("Do trustme.sh %s failed!" % discover_host_meta['ip'])) + update_info['message'] = "Passwd was wrong, do" \ + "trustme.sh %s failed!"\ + % discover_host_meta['ip'] + self.update_progress_to_db(req, update_info, + discover_host_meta) + msg = (_("Do trustme.sh %s failed!" % + discover_host_meta['ip'])) LOG.warn(_(msg)) fp.write(msg) - elif 'is unreachable' in trustme_result: #when host ip was unreachable + elif 'is unreachable' in trustme_result: + # when host ip was unreachable update_info = {} update_info['status'] = 'DISCOVERY_FAILED' - update_info['message'] = "Host ip was unreachable, do trustme.sh %s failed!" % discover_host_meta['ip'] - self.update_progress_to_db(req,update_info, discover_host_meta) - msg = (_("Do trustme.sh %s failed!" % discover_host_meta['ip'])) + update_info['message'] = "Host ip was unreachable," \ + " do trustme.sh %s failed!" %\ + discover_host_meta['ip'] + self.update_progress_to_db(req, update_info, + discover_host_meta) + msg = (_("Do trustme.sh %s failed!" % + discover_host_meta['ip'])) LOG.warn(_(msg)) except subprocess.CalledProcessError as e: update_info = {} update_info['status'] = 'DISCOVERY_FAILED' - msg = "discover host for %s failed! raise CalledProcessError when execute trustme.sh." % discover_host_meta['ip'] + msg = "discover host for %s failed! raise CalledProcessError" \ + " when execute trustme.sh." 
% discover_host_meta['ip'] update_info['message'] = msg - self.update_progress_to_db(req,update_info, discover_host_meta) + self.update_progress_to_db( + req, update_info, discover_host_meta) LOG.error(_(msg)) fp.write(e.output.strip()) return except: update_info = {} update_info['status'] = 'DISCOVERY_FAILED' - update_info['message'] = "discover host for %s failed!" % discover_host_meta['ip'] - self.update_progress_to_db(req,update_info, discover_host_meta) - LOG.error(_("discover host for %s failed!" % discover_host_meta['ip'])) - fp.write("discover host for %s failed!" % discover_host_meta['ip']) + update_info['message'] = "discover host for %s failed!" \ + % discover_host_meta['ip'] + self.update_progress_to_db( + req, update_info, discover_host_meta) + LOG.error(_("discover host for %s failed!" + % discover_host_meta['ip'])) + fp.write("discover host for %s failed!" + % discover_host_meta['ip']) return try: - cmd = 'clush -S -b -w %s "rm -rf /home/daisy/discover_host"' % (discover_host_meta['ip'],) - daisy_cmn.subprocess_call(cmd,fp) - cmd = 'clush -S -w %s "mkdir -p /home/daisy/discover_host"' % (discover_host_meta['ip'],) - daisy_cmn.subprocess_call(cmd,fp) - cmd = 'clush -S -w %s "chmod 777 /home/daisy/discover_host"' % (discover_host_meta['ip'],) - daisy_cmn.subprocess_call(cmd,fp) + cmd = 'clush -S -b -w %s "rm -rf /home/daisy/discover_host"'\ + % (discover_host_meta['ip'],) + daisy_cmn.subprocess_call(cmd, fp) + cmd = 'clush -S -w %s "mkdir -p /home/daisy/discover_host"'\ + % (discover_host_meta['ip'],) + daisy_cmn.subprocess_call(cmd, fp) + cmd = 'clush -S -w %s "chmod 777 /home/daisy/discover_host"'\ + % (discover_host_meta['ip'],) + daisy_cmn.subprocess_call(cmd, fp) except subprocess.CalledProcessError as e: update_info = {} update_info['status'] = 'DISCOVERY_FAILED' - msg = "raise CalledProcessError when execute cmd for host %s." % discover_host_meta['ip'] + msg = "raise CalledProcessError when execute cmd for host %s."\ + % discover_host_meta['ip'] update_info['message'] = msg - self.update_progress_to_db(req,update_info, discover_host_meta) + self.update_progress_to_db( + req, update_info, discover_host_meta) LOG.error(_(msg)) fp.write(e.output.strip()) return try: - scp_sh_and_rpm_result = subprocess.check_output( - 'clush -S -w %s -c /var/lib/daisy/tecs/getnodeinfo.sh /var/lib/daisy/tecs/jq-1.3-2.el7.x86_64.rpm --dest=/home/daisy/discover_host' % (discover_host_meta['ip'],), + subprocess.check_output( + 'clush -S -w %s -c /var/lib/daisy/tecs/getnodeinfo.sh' + ' /var/lib/daisy/tecs/jq-1.3-2.el7.x86_64.rpm ' + '--dest=/home/daisy/discover_host' % + (discover_host_meta['ip'],), shell=True, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: update_info = {} update_info['status'] = 'DISCOVERY_FAILED' - update_info['message'] = "scp getnodeinfo.sh and jq-1.3-2.el7.x86_64.rpm for %s failed!" % discover_host_meta['ip'] - self.update_progress_to_db(req, update_info, discover_host_meta) - LOG.error(_("scp getnodeinfo.sh and jq-1.3-2.el7.x86_64.rpm for %s failed!" % discover_host_meta['ip'])) + update_info['message'] = "scp getnodeinfo.sh and " \ + "jq-1.3-2.el7.x86_64.rpm for %s" \ + " failed!" % discover_host_meta['ip'] + self.update_progress_to_db(req, update_info, + discover_host_meta) + LOG.error(_("scp getnodeinfo.sh and" + " jq-1.3-2.el7.x86_64.rpm for %s failed!" 
+ % discover_host_meta['ip'])) fp.write(e.output.strip()) return - + try: - rpm_install_result = subprocess.check_output( - 'clush -S -w %s rpm -ivh --force /home/daisy/discover_host/jq-1.3-2.el7.x86_64.rpm' % (discover_host_meta['ip'],), + subprocess.check_output( + 'clush -S -w %s rpm -ivh --force ' + '/home/daisy/discover_host/jq-1.3-2.el7.x86_64.rpm' + % (discover_host_meta['ip'],), shell=True, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: update_info = {} update_info['status'] = 'DISCOVERY_FAILED' - update_info['message'] = "install jq-1.3-2.el7.x86_64.rpm for %s failed!" % discover_host_meta['ip'] - self.update_progress_to_db(req, update_info, discover_host_meta) - LOG.error(_("install jq-1.3-2.el7.x86_64.rpm for %s failed!" % discover_host_meta['ip'])) + update_info['message'] = \ + "install jq-1.3-2.el7.x86_64.rpm for %s failed!"\ + % discover_host_meta['ip'] + self.update_progress_to_db(req, update_info, + discover_host_meta) + LOG.error(_("install jq-1.3-2.el7.x86_64.rpm for %s failed!" + % discover_host_meta['ip'])) fp.write(e.output.strip()) return - + try: exc_result = subprocess.check_output( - 'clush -S -w %s /home/daisy/discover_host/getnodeinfo.sh' % (discover_host_meta['ip'],), + 'clush -S -w %s /home/daisy/discover_host/getnodeinfo.sh' + % (discover_host_meta['ip'],), shell=True, stderr=subprocess.STDOUT) - if 'Failed connect to' in exc_result: #when openstack-ironic-discoverd.service has problem + if 'Failed connect to' in exc_result: + # when openstack-ironic-discoverd.service has problem update_info = {} update_info['status'] = 'DISCOVERY_FAILED' - update_info['message'] = "Do getnodeinfo.sh %s failed!" % discover_host_meta['ip'] - self.update_progress_to_db(req, update_info, discover_host_meta) - msg = (_("Do trustme.sh %s failed!" % discover_host_meta['ip'])) + update_info['message'] = "Do getnodeinfo.sh %s failed!" \ + % discover_host_meta['ip'] + self.update_progress_to_db(req, update_info, + discover_host_meta) + msg = (_("Do trustme.sh %s failed!" % + discover_host_meta['ip'])) LOG.warn(_(msg)) fp.write(msg) else: - update_info = {} - update_info['status'] = 'DISCOVERY_SUCCESSFUL' - update_info['message'] = "discover host for %s successfully!" % discover_host_meta['ip'] mac_info = re.search(r'"mac": ([^,\n]*)', exc_result) mac = eval(mac_info.group(1)) filters = {'mac': mac} - host_interfaces = registry.get_all_host_interfaces(req.context, filters) + update_info = {} + host_interfaces =\ + registry.get_all_host_interfaces(req.context, filters) if host_interfaces: + update_info['status'] = 'DISCOVERY_SUCCESSFUL' + update_info['message'] =\ + "discover host for %s successfully!" %\ + discover_host_meta['ip'] update_info['host_id'] = host_interfaces[0]['host_id'] - LOG.info("update_info['host_id']:%s", update_info['host_id']) - self.update_progress_to_db(req,update_info, discover_host_meta) - LOG.info(_("discover host for %s successfully!" % discover_host_meta['ip'])) - fp.write(exc_result) - + LOG.info("update_info['host_id']:%s", + update_info['host_id']) + self.update_progress_to_db(req, update_info, + discover_host_meta) + LOG.info(_("discover host for %s successfully!" + % discover_host_meta['ip'])) + fp.write(exc_result) + else: + update_info['status'] = 'DISCOVERY_FAILED' + update_info['message'] = \ + "discover host for %s failed!please view" \ + " the daisy api log" % discover_host_meta['ip'] + self.update_progress_to_db(req, update_info, + discover_host_meta) + LOG.error(_("discover host for %s failed!" 
% + discover_host_meta['ip'])) + fp.write(exc_result) + return except subprocess.CalledProcessError as e: update_info = {} update_info['status'] = 'DISCOVERY_FAILED' - update_info['message'] = "discover host for %s failed!" % discover_host_meta['ip'] - self.update_progress_to_db(req,update_info, discover_host_meta) - LOG.error(_("discover host for %s failed!" % discover_host_meta['ip'])) + update_info['message'] = "discover host for %s failed!" %\ + discover_host_meta['ip'] + self.update_progress_to_db( + req, update_info, discover_host_meta) + LOG.error(_("discover host for %s failed!" % + discover_host_meta['ip'])) fp.write(e.output.strip()) return - + + discover_host_info = \ + registry.get_discover_host_metadata(req.context, + discover_host_meta['id']) + if not discover_host_info['host_id']: + update_info = {} + update_info['status'] = 'DISCOVERY_FAILED' + update_info['message'] = "discover host for %s failed!" \ + % discover_host_info['ip'] + self.update_progress_to_db( + req, update_info, discover_host_info) + msg = (_("discover host for %s failed!" % + discover_host_info['ip'])) + LOG.error(msg) + return + else: + self.add_ssh_host_to_cluster_and_assigned_network( + req, cluster_id, discover_host_info['host_id']) @utils.mutating def discover_host_bin(self, req, host_meta): - params={} - discover_host_meta_list=registry.get_discover_hosts_detail(req.context, **params) + params = {} + cluster_id = host_meta.get('cluster_id', None) + if cluster_id: + params = {'cluster_id': cluster_id} + discover_host_meta_list =\ + registry.get_discover_hosts_detail(req.context, **params) filters = {} - host_interfaces = registry.get_all_host_interfaces(req.context, filters) + host_interfaces = \ + registry.get_all_host_interfaces(req.context, filters) existed_host_ip = [host['ip'] for host in host_interfaces] LOG.info('existed_host_ip**: %s', existed_host_ip) @@ -1378,41 +2046,69 @@ class Controller(controller.BaseController): update_info['status'] = 'DISCOVERING' update_info['message'] = 'DISCOVERING' update_info['host_id'] = 'None' - self.update_progress_to_db(req, update_info, discover_host) + self.update_progress_to_db(req, update_info, discover_host) threads = [] for discover_host_meta in discover_host_meta_list: if discover_host_meta['ip'] in existed_host_ip: update_info = {} update_info['status'] = 'DISCOVERY_SUCCESSFUL' - update_info['message'] = "discover host for %s successfully!" % discover_host_meta['ip'] - host_id_list = [host['host_id'] for host in host_interfaces if discover_host_meta['ip'] == host['ip']] + update_info['message'] = "discover host for %s successfully!" \ + % discover_host_meta['ip'] + host_id_list = \ + [host['host_id'] for host in host_interfaces + if discover_host_meta['ip'] == host['ip']] update_info['host_id'] = host_id_list[0] - self.update_progress_to_db(req,update_info, discover_host_meta) + self.update_progress_to_db( + req, update_info, discover_host_meta) continue if discover_host_meta['status'] != 'DISCOVERY_SUCCESSFUL': - t = threading.Thread(target=self.thread_bin,args=(req,discover_host_meta)) + t = threading.Thread( + target=self.thread_bin, args=( + req, cluster_id, discover_host_meta)) t.setDaemon(True) t.start() threads.append(t) - LOG.info(_("all host discovery threads have started, please waiting....")) + LOG.info(_("all host discovery threads have started, " + "please waiting....")) try: for t in threads: t.join() except: LOG.warn(_("Join discover host thread %s failed!" 
% t)) - + @utils.mutating def discover_host(self, req, host_meta): - daisy_management_ip=config.get("DEFAULT", "daisy_management_ip") + cluster_id = host_meta.get('cluster_id', None) + if cluster_id: + self.get_cluster_meta_or_404(req, cluster_id) + + config = ConfigParser.ConfigParser() + config.read("/home/daisy_install/daisy.conf") + daisy_management_ip = config.get("DEFAULT", "daisy_management_ip") if daisy_management_ip: - cmd = 'dhcp_linenumber=`grep -n "dhcp_ip=" /var/lib/daisy/tecs/getnodeinfo.sh|cut -d ":" -f 1` && sed -i "${dhcp_linenumber}c dhcp_ip=\'%s\'" /var/lib/daisy/tecs/getnodeinfo.sh'% (daisy_management_ip,) + cmd = 'dhcp_linenumber=`grep -n "dhcp_ip="' \ + ' /var/lib/daisy/tecs/getnodeinfo.sh|cut -d ":" -f 1` && ' \ + 'sed -i "${dhcp_linenumber}c dhcp_ip=\'%s\'" ' \ + '/var/lib/daisy/tecs/getnodeinfo.sh' % (daisy_management_ip,) daisy_cmn.subprocess_call(cmd) - - discovery_host_thread = threading.Thread(target=self.discover_host_bin,args=(req, host_meta)) + + config_discoverd = ConfigParser.ConfigParser( + defaults=DISCOVER_DEFAULTS) + config_discoverd.read("/etc/ironic-discoverd/discoverd.conf") + listen_port = config_discoverd.get("discoverd", "listen_port") + if listen_port: + cmd = 'port_linenumber=`grep -n "listen_port="' \ + ' /var/lib/daisy/tecs/getnodeinfo.sh|cut -d ":" -f 1` && ' \ + 'sed -i "${port_linenumber}c listen_port=\'%s\'" ' \ + '/var/lib/daisy/tecs/getnodeinfo.sh' % (listen_port,) + daisy_cmn.subprocess_call(cmd) + + discovery_host_thread = threading.Thread( + target=self.discover_host_bin, args=(req, host_meta)) discovery_host_thread.start() - return {"status":"begin discover host"} - + return {"status": "begin discover host"} + @utils.mutating def add_discover_host(self, req, host_meta): """ @@ -1437,25 +2133,30 @@ class Controller(controller.BaseController): if host and host['status'] != 'DISCOVERY_SUCCESSFUL': host_info = {} host_info['ip'] = host_meta.get('ip', host.get('ip')) - host_info['passwd'] = host_meta.get('passwd', host.get('passwd')) - host_info['user'] = host_meta.get('user', host.get('user')) + host_info['passwd'] =\ + host_meta.get('passwd', host.get('passwd')) + host_info['user'] = \ + host_meta.get('user', host.get('user')) host_info['status'] = 'init' - host_info['message'] = 'None' - host_meta = registry.update_discover_host_metadata(req.context, - host['id'], - host_info) + host_info['message'] = 'None' + host_meta = \ + registry.update_discover_host_metadata(req.context, + host['id'], + host_info) return {'host_meta': host_meta} else: - msg = (_("ip %s already existed and this host has been discovered successfully. " % host_meta['ip'])) + msg = (_("ip %s already existed and this host has " + "been discovered successfully. " + % host_meta['ip'])) LOG.error(msg) raise HTTPForbidden(explanation=msg, - request=req, - content_type="text/plain") + request=req, + content_type="text/plain") self.validate_ip_format(host_meta['ip']) - + if not host_meta.get('user', None): host_meta['user'] = 'root' - + if not host_meta.get('passwd', None): msg = "PASSWD parameter can not be None." 
raise HTTPBadRequest(explanation=msg, @@ -1465,11 +2166,12 @@ class Controller(controller.BaseController): host_meta['status'] = 'init' try: - discover_host_info = registry.add_discover_host_metadata(req.context, host_meta) + discover_host_info = \ + registry.add_discover_host_metadata(req.context, host_meta) except exception.Invalid as e: raise HTTPBadRequest(explanation=e.msg, request=req) return {'host_meta': discover_host_info} - + @utils.mutating def delete_discover_host(self, req, id): """ @@ -1505,9 +2207,9 @@ class Controller(controller.BaseController): request=req, content_type="text/plain") else: - #self.notifier.info('host.delete', host) + # self.notifier.info('host.delete', host) return Response(body='', status=200) - + def detail_discover_host(self, req): """ Returns detailed information for all available nodes @@ -1532,31 +2234,32 @@ class Controller(controller.BaseController): except exception.Invalid as e: raise HTTPBadRequest(explanation=e.msg, request=req) - return dict(nodes=nodes) - + return dict(nodes=nodes) + def update_discover_host(self, req, id, host_meta): ''' ''' self._enforce(req, 'update_discover_host') - params = {'id': id} orig_host_meta = registry.get_discover_host_metadata(req.context, id) if host_meta.get('ip', None): discover_hosts_ip = self._get_discover_host_ip(req) if host_meta['ip'] in discover_hosts_ip: host_status = host_meta.get('status', orig_host_meta['status']) if host_status == 'DISCOVERY_SUCCESSFUL': - msg = (_("Host with ip %s already has been discovered successfully, can not change host ip to %s " % (orig_host_meta['ip'], host_meta['ip']))) + msg = (_("Host with ip %s already has been discovered " + "successfully, can not change host ip to %s " % + (orig_host_meta['ip'], host_meta['ip']))) LOG.error(msg) raise HTTPForbidden(explanation=msg, - request=req, - content_type="text/plain") + request=req, + content_type="text/plain") self.validate_ip_format(host_meta['ip']) - if orig_host_meta['ip'] != host_meta['ip']: + if orig_host_meta['ip'] != host_meta.get('ip', None): host_meta['status'] = 'init' try: host_meta = registry.update_discover_host_metadata(req.context, - id, - host_meta) + id, + host_meta) except exception.Invalid as e: msg = (_("Failed to update host metadata. Got error: %s") % @@ -1590,17 +2293,19 @@ class Controller(controller.BaseController): return {'host_meta': host_meta} def _get_discover_host_ip(self, req): - params= {} + params = {} hosts_ip = list() - discover_hosts = registry.get_discover_hosts_detail(req.context, **params) + discover_hosts =\ + registry.get_discover_hosts_detail(req.context, **params) for host in discover_hosts: if host.get('ip', None): hosts_ip.append(host['ip']) return hosts_ip def _get_host_by_ip(self, req, host_ip): - params= {} - discover_hosts = registry.get_discover_hosts_detail(req.context, **params) + params = {} + discover_hosts = \ + registry.get_discover_hosts_detail(req.context, **params) LOG.info("%s" % discover_hosts) for host in discover_hosts: if host.get('ip') == host_ip: @@ -1611,7 +2316,8 @@ class Controller(controller.BaseController): ''' ''' try: - host_meta = registry.get_discover_host_metadata(req.context, discover_host_id) + host_meta = registry.get_discover_host_metadata(req.context, + discover_host_id) except exception.Invalid as e: msg = (_("Failed to update host metadata. 
Got error: %s") % utils.exception_to_str(e)) @@ -1643,6 +2349,113 @@ class Controller(controller.BaseController): return {'host_meta': host_meta} + def _get_discover_host_mac(self, req): + params = dict() + hosts_mac = list() + discover_hosts =\ + registry.get_discover_hosts_detail(req.context, **params) + for host in discover_hosts: + if host.get('mac'): + hosts_mac.append(host['mac']) + return hosts_mac + + def _get_discover_host_by_mac(self, req, host_mac): + params = dict() + discover_hosts = \ + registry.get_discover_hosts_detail(req.context, **params) + LOG.info("%s" % discover_hosts) + for host in discover_hosts: + if host.get('mac') == host_mac: + return host + return + + @utils.mutating + def add_pxe_host(self, req, host_meta): + """ + Adds a new pxe host to Daisy + + :param req: The WSGI/Webob Request object + :param host_meta: Mapping of metadata about host + + :raises HTTPBadRequest if x-host-name is missing + """ + self._enforce(req, 'add_pxe_host') + LOG.warn("host_meta: %s" % host_meta) + if not host_meta.get('mac'): + msg = "MAC parameter can not be None." + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + + self.validate_mac_format(host_meta['mac']) + pxe_hosts_mac = self._get_discover_host_mac(req) + if host_meta['mac'] in pxe_hosts_mac: + host = self._get_discover_host_by_mac(req, host_meta['mac']) + host_meta = registry.update_discover_host_metadata( + req.context, host['id'], host_meta) + return {'host_meta': host_meta} + + if not host_meta.get('status', None): + host_meta['status'] = 'None' + + try: + pxe_host_info = \ + registry.add_discover_host_metadata(req.context, host_meta) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return {'host_meta': pxe_host_info} + + @utils.mutating + def update_pxe_host(self, req, id, host_meta): + """ + Update a new pxe host to Daisy + """ + self._enforce(req, 'update_pxe_host') + if not host_meta.get('mac'): + msg = "MAC parameter can not be None." + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + + self.validate_mac_format(host_meta['mac']) + orig_host_meta = registry.get_discover_host_metadata(req.context, id) + try: + if host_meta['mac'] == orig_host_meta['mac']: + host_meta = registry.update_discover_host_metadata( + req.context, id, host_meta) + + except exception.Invalid as e: + msg = (_("Failed to update discover host metadata. 
" + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + except exception.NotFound as e: + msg = (_("Failed to find discover host to update: %s") % + utils.exception_to_str(e)) + LOG.error(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to update discover host: %s") % + utils.exception_to_str(e)) + LOG.error(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except (exception.Conflict, exception.Duplicate) as e: + LOG.error(utils.exception_to_str(e)) + raise HTTPConflict(body=_('Host operation conflicts'), + request=req, + content_type='text/plain') + else: + self.notifier.info('host.update', host_meta) + + return {'host_meta': host_meta} + + class HostDeserializer(wsgi.JSONRequestDeserializer): """Handles deserialization of specific controller method requests.""" @@ -1659,13 +2472,23 @@ class HostDeserializer(wsgi.JSONRequestDeserializer): def discover_host(self, request): return self._deserialize(request) - + + def update_hwm_host(self, request): + return self._deserialize(request) + def add_discover_host(self, request): return self._deserialize(request) - + def update_discover_host(self, request): return self._deserialize(request) + def add_pxe_host(self, request): + return self._deserialize(request) + + def update_pxe_host(self, request): + return self._deserialize(request) + + class HostSerializer(wsgi.JSONResponseSerializer): """Handles serialization of specific controller method responses.""" @@ -1692,37 +2515,50 @@ class HostSerializer(wsgi.JSONResponseSerializer): response.headers['Content-Type'] = 'application/json' response.body = self.to_json(dict(host=host_meta)) return response - + def discover_host(self, response, result): host_meta = result response.status = 201 response.headers['Content-Type'] = 'application/json' response.body = self.to_json(dict(host_meta)) return response - + def add_discover_host(self, response, result): host_meta = result['host_meta'] response.status = 201 response.headers['Content-Type'] = 'application/json' response.body = self.to_json(dict(host=host_meta)) return response - + def update_discover_host(self, response, result): host_meta = result['host_meta'] response.status = 201 response.headers['Content-Type'] = 'application/json' response.body = self.to_json(dict(host=host_meta)) - + def get_discover_host_detail(self, response, result): host_meta = result['host_meta'] response.status = 201 response.headers['Content-Type'] = 'application/json' response.body = self.to_json(dict(host=host_meta)) return response - + + def add_pxe_host(self, response, result): + host_meta = result['host_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(host=host_meta)) + return response + + def update_pxe_host(self, response, result): + host_meta = result['host_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(host=host_meta)) + + def create_resource(): """Hosts resource factory method""" deserializer = HostDeserializer() serializer = HostSerializer() return wsgi.Resource(Controller(), deserializer, serializer) - diff --git a/code/daisy/daisy/api/v1/hwms.py b/code/daisy/daisy/api/v1/hwms.py new file mode 100755 index 00000000..57b8e153 --- /dev/null +++ b/code/daisy/daisy/api/v1/hwms.py @@ -0,0 
+1,347 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +/Hwm endpoint for Daisy v1 API +""" + +from oslo_config import cfg +from oslo_log import log as logging +import webob.exc +from webob.exc import HTTPBadRequest +from webob.exc import HTTPConflict +from webob.exc import HTTPForbidden +from webob.exc import HTTPNotFound +from webob import Response + +from daisy.api import policy +import daisy.api.v1 +from daisy.api.v1 import controller +from daisy.api.v1 import filters +from daisy.common import exception +from daisy.common import property_utils +from daisy.common import utils +from daisy.common import wsgi +from daisy import i18n +from daisy import notifier +import daisy.registry.client.v1.api as registry +from daisy.registry.api.v1 import hwms + +import daisy.api.backends.tecs.common as tecs_cmn + +daisy_tecs_path = tecs_cmn.daisy_tecs_path + + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW + +SUPPORTED_PARAMS = hwms.SUPPORTED_PARAMS +SUPPORTED_FILTERS = hwms.SUPPORTED_FILTERS +ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE +CONF = cfg.CONF +CONF.import_opt('disk_formats', 'daisy.common.config', + group='image_format') +CONF.import_opt('container_formats', 'daisy.common.config', + group='image_format') +CONF.import_opt('image_property_quota', 'daisy.common.config') + + +class Controller(controller.BaseController): + """ + WSGI controller for hwms resource in Daisy v1 API + + The Templates resource API is a RESTful web Template for Template data. 
+ The API is as follows:: + + GET /Templates -- Returns a set of brief metadata about Templates + GET /Templates/detail -- Returns a set of detailed metadata about + Templates + HEAD /Templates/ -- Return metadata about an Template with id + GET /Templates/ -- Return Template data for Template with id + POST /Templates -- Store Template data and return metadata about the + newly-stored Template + PUT /Templates/ -- Update Template metadata and/or upload Template + data for a previously-reserved Template + DELETE /Templates/ -- Delete the Template with id + """ + def __init__(self): + self.notifier = notifier.Notifier() + registry.configure_registry_client() + self.policy = policy.Enforcer() + if property_utils.is_property_protection_enabled(): + self.prop_enforcer = property_utils.PropertyRules(self.policy) + else: + self.prop_enforcer = None + + def _enforce(self, req, action, target=None): + """Authorize an action against our policies""" + if target is None: + target = {} + try: + self.policy.enforce(req.context, action, target) + except exception.Forbidden: + raise HTTPForbidden() + + def _get_filters(self, req): + """ + Return a dictionary of query param filters from the request + + :param req: the Request object coming from the wsgi layer + :retval a dict of key/value filters + """ + query_filters = {} + for param in req.params: + if param in SUPPORTED_FILTERS: + query_filters[param] = req.params.get(param) + if not filters.validate(param, query_filters[param]): + raise HTTPBadRequest(_('Bad value passed to filter ' + '%(filter)s got %(val)s') + % {'filter': param, + 'val': query_filters[param]}) + return query_filters + + def _get_query_params(self, req): + """ + Extracts necessary query params from request. + + :param req: the WSGI Request object + :retval dict of parameters that can be used by registry client + """ + params = {'filters': self._get_filters(req)} + + for PARAM in SUPPORTED_PARAMS: + if PARAM in req.params: + params[PARAM] = req.params.get(PARAM) + return params + + def _raise_404_if_cluster_deleted(self, req, cluster_id): + cluster = self.get_cluster_meta_or_404(req, cluster_id) + if cluster['deleted']: + msg = _("Cluster with identifier %s has been deleted.") % \ + cluster_id + raise webob.exc.HTTPNotFound(msg) + + def get_clusters_hwm_ip(self, req): + params = self._get_query_params(req) + clusters_hwm_ip = list() + clusters = registry.get_clusters_detail(req.context, **params) + for cluster in clusters: + clusters_hwm_ip.append(cluster.get('hwm_ip')) + return clusters_hwm_ip + + @utils.mutating + def add_hwm(self, req, hwm): + """ + Adds a new hwm to Daisy. + + :param req: The WSGI/Webob Request object + :param image_meta: Mapping of metadata about Template + + :raises HTTPBadRequest if x-Template-name is missing + """ + self._enforce(req, 'add_template') + hwm = registry.add_hwm_metadata(req.context, hwm) + + return {'hwm': hwm} + + @utils.mutating + def update_hwm(self, req, id, hwm): + """ + Updates an existing hwm with the registry. + + :param request: The WSGI/Webob Request object + :param id: The opaque image identifier + + :retval Returns the updated image information as a mapping + """ + self._enforce(req, 'update_hwm') + hwm_meta = registry.hwm_detail_metadata(req.context, id) + hwm_ip = hwm_meta['hwm_ip'] + clusters_hwm_ip = self.get_clusters_hwm_ip(req) + if hwm_ip in clusters_hwm_ip: + msg = (_("Hwm %s has already used in cluster, " + "it can not be update. 
" % hwm_ip)) + LOG.error(msg) + raise HTTPForbidden(explanation=msg, request=req, + content_type="text/plain") + try: + hwm = registry.update_hwm_metadata(req.context, id, hwm) + except exception.Invalid as e: + msg = (_("Failed to update hwm metadata. Got error: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + except exception.NotFound as e: + msg = (_("Failed to find hwm to update: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to update hwm: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except (exception.Conflict, exception.Duplicate) as e: + LOG.warn(utils.exception_to_str(e)) + raise HTTPConflict(body=_('hwm operation conflicts'), + request=req, + content_type='text/plain') + else: + self.notifier.info('hwm.update', hwm) + + return {'hwm': hwm} + + @utils.mutating + def delete_hwm(self, req, id): + """ + delete a existing hwm template with the registry. + + :param request: The WSGI/Webob Request object + :param id: The opaque image identifier + + :retval Returns the updated image information as a mapping + """ + self._enforce(req, 'delete_hwm') + hwm_meta = registry.hwm_detail_metadata(req.context, id) + hwm_ip = hwm_meta['hwm_ip'] + clusters_hwm_ip = self.get_clusters_hwm_ip(req) + if hwm_ip in clusters_hwm_ip: + msg = (_("Hwm %s has already used in cluster, " + "it can not be deleted. " % hwm_ip)) + LOG.error(msg) + raise HTTPForbidden(explanation=msg, request=req, + content_type="text/plain") + try: + registry.delete_hwm_metadata(req.context, id) + except exception.NotFound as e: + msg = (_("Failed to find hwm to delete: %s") % + utils.exception_to_str(e)) + LOG.error(msg) + raise HTTPNotFound(explanation=msg, request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to delete hwm: %s") % + utils.exception_to_str(e)) + LOG.error(msg) + raise HTTPForbidden(explanation=msg, request=req, + content_type="text/plain") + except exception.InUseByStore as e: + msg = (_( + "hwm %(id)s could not be deleted because it is in " + "use:%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)}) + LOG.error(msg) + raise HTTPConflict(explanation=msg, request=req, + content_type="text/plain") + else: + return Response(body='', status=200) + + @utils.mutating + def detail(self, req, id): + """ + delete a existing hwm with the registry. 
+ :param request: The WSGI/Webob Request object + :param id: The opaque image identifie + :retval Returns the updated image information as a mapping + """ + self._enforce(req, 'detail') + context = req.context + try: + hwm_meta = registry.hwm_detail_metadata(context, id) + except exception.NotFound: + msg = "Hwm with identifier %s not found" % id + LOG.debug(msg) + raise webob.exc.HTTPNotFound( + msg, request=req, content_type='text/plain') + except exception.Forbidden: + msg = "Forbidden hwm access" + LOG.debug(msg) + raise webob.exc.HTTPForbidden(msg, + request=req, + content_type='text/plain') + return {'hwm': hwm_meta} + + @utils.mutating + def list(self, req): + self._enforce(req, 'list') + params = self._get_query_params(req) + try: + hwm_list = registry.hwm_list_metadata(req.context, **params) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return dict(hwm=hwm_list) + + +class HwmDeserializer(wsgi.JSONRequestDeserializer): + """Handles deserialization of specific controller method requests.""" + def _deserialize(self, request): + result = {} + result["hwm"] = utils.get_hwm_meta(request) + return result + + def add_hwm(self, request): + return self._deserialize(request) + + def update_hwm(self, request): + return self._deserialize(request) + + +class HwmSerializer(wsgi.JSONResponseSerializer): + """Handles serialization of specific controller method responses.""" + def __init__(self): + self.notifier = notifier.Notifier() + + def add_hwm(self, response, result): + hwm = result['hwm'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(hwm=hwm)) + return response + + def delete_hwm(self, response, result): + hwm = result['hwm'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(hwm=hwm)) + return response + + def get_detail(self, response, result): + hwm = result['hwm'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(hwm=hwm)) + return response + + def update_hwm(self, response, result): + hwm = result['hwm'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(hwm=hwm)) + return response + + +def create_resource(): + """Templates resource factory method""" + deserializer = HwmDeserializer() + serializer = HwmSerializer() + return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/code/daisy/daisy/api/v1/images.py b/code/daisy/daisy/api/v1/images.py index 0a9001c3..4a235a76 100755 --- a/code/daisy/daisy/api/v1/images.py +++ b/code/daisy/daisy/api/v1/images.py @@ -905,8 +905,9 @@ class Controller(controller.BaseController): # Once an image is 'active' only an admin can # modify certain core metadata keys for key in ACTIVE_IMMUTABLE: - if (orig_status == 'active' and image_meta.get(key) is not None - and image_meta.get(key) != orig_image_meta.get(key)): + if (orig_status == 'active' and + image_meta.get(key) is not None and + image_meta.get(key) != orig_image_meta.get(key)): msg = _("Forbidden to modify '%s' of active image.") % key raise HTTPForbidden(explanation=msg, request=req, diff --git a/code/daisy/daisy/api/v1/install.py b/code/daisy/daisy/api/v1/install.py index 49768bba..de259a31 100755 --- a/code/daisy/daisy/api/v1/install.py +++ b/code/daisy/daisy/api/v1/install.py @@ -17,13 +17,11 @@ /hosts endpoint for Daisy v1 API """ import time -import traceback import 
webob.exc from oslo_log import log as logging from webob.exc import HTTPBadRequest from webob.exc import HTTPForbidden -from webob.exc import HTTPServerError from threading import Thread @@ -44,10 +42,6 @@ import daisy.api.backends.common as daisy_cmn from daisy.api.backends import driver from daisy.api.backends import os as os_handle -try: - import simplejson as json -except ImportError: - import json LOG = logging.getLogger(__name__) _ = i18n._ @@ -67,31 +61,40 @@ BACKENDS_UNINSTALL_ORDER = [] def get_deployment_backends(req, cluster_id, backends_order): - cluster_roles = daisy_cmn.get_cluster_roles_detail(req,cluster_id) - cluster_backends = set([role['deployment_backend'] for role in cluster_roles if daisy_cmn.get_hosts_of_role(req, role['id'])]) - ordered_backends = [backend for backend in backends_order if backend in cluster_backends] - other_backends = [backend for backend in cluster_backends if backend not in backends_order] - deployment_backends =ordered_backends + other_backends - return deployment_backends - + cluster_roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id) + cluster_backends = set([role['deployment_backend'] + for role in cluster_roles if + daisy_cmn.get_hosts_of_role(req, role['id'])]) + ordered_backends = [ + backend for backend in backends_order if backend in cluster_backends] + other_backends = [ + backend for backend in cluster_backends if + backend not in backends_order] + deployment_backends = ordered_backends + other_backends + return deployment_backends + + class InstallTask(object): """ Class for install OS and TECS. """ """ Definition for install states.""" + def __init__(self, req, cluster_id): self.req = req self.cluster_id = cluster_id def _backends_install(self): - backends = get_deployment_backends(self.req, self.cluster_id, BACKENDS_INSTALL_ORDER) + backends = get_deployment_backends( + self.req, self.cluster_id, BACKENDS_INSTALL_ORDER) if not backends: LOG.info(_("No backends need to install.")) return for backend in backends: backend_driver = driver.load_deployment_dirver(backend) backend_driver.install(self.req, self.cluster_id) - # this will be raise raise all the exceptions of the thread to log file + # this will be raise raise all the exceptions of the thread to log file + def run(self): try: self._run() @@ -102,43 +105,57 @@ class InstallTask(object): """ Exectue os installation with sync mode. :return: - """ + """ # get hosts config which need to install OS - all_hosts_need_os = os_handle.get_cluster_hosts_config(self.req, self.cluster_id) + all_hosts_need_os = os_handle.get_cluster_hosts_config( + self.req, self.cluster_id) if all_hosts_need_os: - hosts_with_role_need_os = [host_detail for host_detail in all_hosts_need_os if host_detail['status'] == 'with-role'] - hosts_without_role_need_os = [host_detail for host_detail in all_hosts_need_os if host_detail['status'] != 'with-role'] + hosts_with_role_need_os = [ + host_detail for host_detail in all_hosts_need_os if + host_detail['status'] == 'with-role'] + hosts_without_role_need_os = [ + host_detail for host_detail in all_hosts_need_os if + host_detail['status'] != 'with-role'] else: - LOG.info(_("No host need to install os, begin to install " - "backends for cluster %s." % self.cluster_id)) + LOG.info(_("No host need to install os, begin to install " + "backends for cluster %s." 
% self.cluster_id)) self._backends_install() return run_once_flag = True - # if no hosts with role need os, install backend applications immediately + # if no hosts with role need os, install backend applications + # immediately if not hosts_with_role_need_os: run_once_flag = False role_hosts_need_os = [] - LOG.info(_("All of hosts with role is 'active', begin to install " - "backend applications for cluster %s first." % self.cluster_id)) + LOG.info(_("All of hosts with role is 'active', begin to install " + "backend applications for cluster %s first." % + self.cluster_id)) self._backends_install() else: - role_hosts_need_os = [host_detail['id'] for host_detail in hosts_with_role_need_os] + role_hosts_need_os = [host_detail['id'] + for host_detail in hosts_with_role_need_os] # hosts with role put the head of the list - order_hosts_need_os = hosts_with_role_need_os + hosts_without_role_need_os + order_hosts_need_os = hosts_with_role_need_os + \ + hosts_without_role_need_os while order_hosts_need_os: os_install = os_handle.OSInstall(self.req, self.cluster_id) - #all os will be installed batch by batch with max_parallel_os_number which was set in daisy-api.conf - (order_hosts_need_os,role_hosts_need_os) = os_install.install_os(order_hosts_need_os,role_hosts_need_os) - # after a batch of os install over, judge if all role hosts install os completely, + # all os will be installed batch by batch with + # max_parallel_os_number which was set in daisy-api.conf + (order_hosts_need_os, role_hosts_need_os) = os_install.install_os( + order_hosts_need_os, role_hosts_need_os) + # after a batch of os install over, judge if all + # role hosts install os completely, # if role_hosts_need_os is empty, install TECS immediately if run_once_flag and not role_hosts_need_os: run_once_flag = False - #wait to reboot os after new os installed + # wait to reboot os after new os installed time.sleep(10) LOG.info(_("All hosts with role install successfully, " - "begin to install backend applications for cluster %s." % self.cluster_id)) + "begin to install backend applications " + "for cluster %s." 
% + self.cluster_id)) self._backends_install() @@ -160,6 +177,7 @@ class Controller(controller.BaseController): data for a previously-reserved host DELETE /hosts/ -- Delete the host with id """ + def __init__(self): self.notifier = notifier.Notifier() registry.configure_registry_client() @@ -177,13 +195,14 @@ class Controller(controller.BaseController): self.policy.enforce(req.context, action, target) except exception.Forbidden: raise HTTPForbidden() - + def _raise_404_if_cluster_deleted(self, req, cluster_id): cluster = self.get_cluster_meta_or_404(req, cluster_id) if cluster['deleted']: - msg = _("Cluster with identifier %s has been deleted.") % cluster_id + msg = _("Cluster with identifier %s has been deleted.") % \ + cluster_id raise webob.exc.HTTPNotFound(msg) - + def _get_filters(self, req): """ Return a dictionary of query param filters from the request @@ -225,19 +244,24 @@ class Controller(controller.BaseController): :raises HTTPBadRequest if x-install-cluster is missing """ + if 'deployment_interface' in install_meta: + os_handle.pxe_server_build(req, install_meta) + return {"status": "pxe is installed"} + cluster_id = install_meta['cluster_id'] self._enforce(req, 'install_cluster') self._raise_404_if_cluster_deleted(req, cluster_id) - if install_meta.get("deployment_interface", None): - os_handle.pxe_server_build(req, install_meta) - return {"status": "pxe is installed"} - - # if have hosts need to install os, TECS installataion executed in InstallTask + daisy_cmn.set_role_status_and_progress( + req, cluster_id, 'install', + {'messages': 'Waiting for TECS installation', 'progress': '0'}, + 'tecs') + # if have hosts need to install os, + # TECS installataion executed in InstallTask os_install_obj = InstallTask(req, cluster_id) os_install_thread = Thread(target=os_install_obj.run) os_install_thread.start() - return {"status":"begin install"} + return {"status": "begin install"} @utils.mutating def uninstall_cluster(self, req, cluster_id): @@ -251,20 +275,24 @@ class Controller(controller.BaseController): self._enforce(req, 'uninstall_cluster') self._raise_404_if_cluster_deleted(req, cluster_id) - backends = get_deployment_backends(req, cluster_id, BACKENDS_UNINSTALL_ORDER) + backends = get_deployment_backends( + req, cluster_id, BACKENDS_UNINSTALL_ORDER) for backend in backends: backend_driver = driver.load_deployment_dirver(backend) - uninstall_thread = Thread(target=backend_driver.uninstall, args=(req, cluster_id)) + uninstall_thread = Thread( + target=backend_driver.uninstall, args=( + req, cluster_id)) uninstall_thread.start() - return {"status":"begin uninstall"} - + return {"status": "begin uninstall"} + @utils.mutating def uninstall_progress(self, req, cluster_id): self._enforce(req, 'uninstall_progress') self._raise_404_if_cluster_deleted(req, cluster_id) - + all_nodes = {} - backends = get_deployment_backends(req, cluster_id, BACKENDS_UNINSTALL_ORDER) + backends = get_deployment_backends( + req, cluster_id, BACKENDS_UNINSTALL_ORDER) if not backends: LOG.info(_("No backends need to uninstall.")) return all_nodes @@ -274,7 +302,6 @@ class Controller(controller.BaseController): all_nodes.update(nodes_process) return all_nodes - @utils.mutating def update_cluster(self, req, cluster_id): """ @@ -287,29 +314,36 @@ class Controller(controller.BaseController): self._enforce(req, 'update_cluster') self._raise_404_if_cluster_deleted(req, cluster_id) - backends = get_deployment_backends(req, cluster_id, BACKENDS_UPGRADE_ORDER) + backends = get_deployment_backends( + req, cluster_id, 
BACKENDS_UPGRADE_ORDER) if not backends: LOG.info(_("No backends need to update.")) - return {"status":""} + return {"status": ""} + daisy_cmn.set_role_status_and_progress( + req, cluster_id, 'upgrade', + {'messages': 'Waiting for TECS upgrading', 'progress': '0'}, + 'tecs') for backend in backends: - backend_driver = driver.load_deployment_dirver(backend) - update_thread = Thread(target=backend_driver.upgrade, args=(req, cluster_id)) + backend_driver = driver.load_deployment_dirver(backend) + update_thread = Thread(target=backend_driver.upgrade, + args=(req, cluster_id)) update_thread.start() - return {"status":"begin update"} + return {"status": "begin update"} @utils.mutating def update_progress(self, req, cluster_id): self._enforce(req, 'update_progress') self._raise_404_if_cluster_deleted(req, cluster_id) - backends = get_deployment_backends(req, cluster_id, BACKENDS_UPGRADE_ORDER) + backends = get_deployment_backends( + req, cluster_id, BACKENDS_UPGRADE_ORDER) all_nodes = {} for backend in backends: backend_driver = driver.load_deployment_dirver(backend) nodes_process = backend_driver.upgrade_progress(req, cluster_id) all_nodes.update(nodes_process) return all_nodes - + @utils.mutating def export_db(self, req, install_meta): """ @@ -324,7 +358,8 @@ class Controller(controller.BaseController): self._raise_404_if_cluster_deleted(req, cluster_id) all_config_files = {} - backends = get_deployment_backends(req, cluster_id, BACKENDS_INSTALL_ORDER) + backends = get_deployment_backends( + req, cluster_id, BACKENDS_INSTALL_ORDER) if not backends: LOG.info(_("No backends need to export.")) return all_config_files @@ -345,17 +380,18 @@ class Controller(controller.BaseController): """ self._enforce(req, 'update_disk_array') self._raise_404_if_cluster_deleted(req, cluster_id) - + tecs_backend_name = 'tecs' - backends = get_deployment_backends(req, cluster_id, BACKENDS_UNINSTALL_ORDER) + backends = get_deployment_backends( + req, cluster_id, BACKENDS_UNINSTALL_ORDER) if tecs_backend_name not in backends: message = "No tecs backend" LOG.info(_(message)) else: backend_driver = driver.load_deployment_dirver(tecs_backend_name) message = backend_driver.update_disk_array(req, cluster_id) - return {'status':message} - + return {'status': message} + class InstallDeserializer(wsgi.JSONRequestDeserializer): """Handles deserialization of specific controller method requests.""" @@ -367,13 +403,14 @@ class InstallDeserializer(wsgi.JSONRequestDeserializer): def install_cluster(self, request): return self._deserialize(request) - + def export_db(self, request): return self._deserialize(request) def update_disk_array(self, request): return {} + class InstallSerializer(wsgi.JSONResponseSerializer): """Handles serialization of specific controller method responses.""" @@ -385,7 +422,7 @@ class InstallSerializer(wsgi.JSONResponseSerializer): response.headers['Content-Type'] = 'application/json' response.body = self.to_json(result) return response - + def export_db(self, response, result): response.status = 201 response.headers['Content-Type'] = 'application/json' @@ -397,7 +434,8 @@ class InstallSerializer(wsgi.JSONResponseSerializer): response.headers['Content-Type'] = 'application/json' response.body = self.to_json(result) return response - + + def create_resource(): """Image members resource factory method""" deserializer = InstallDeserializer() diff --git a/code/daisy/daisy/api/v1/members.py b/code/daisy/daisy/api/v1/members.py index ef038e09..80d18080 100755 --- a/code/daisy/daisy/api/v1/members.py +++ 
b/code/daisy/daisy/api/v1/members.py @@ -53,36 +53,38 @@ class Controller(controller.BaseController): def _raise_404_if_project_deleted(self, req, cluster_id): project = self.get_cluster_meta_or_404(req, cluster_id) if project['deleted']: - msg = _("Cluster with identifier %s has been deleted.") % cluster_id + msg = _("Cluster with identifier %s has been deleted.") % \ + cluster_id raise webob.exc.HTTPNotFound(msg) - # def get_cluster_hosts(self, req, cluster_id, host_id=None): - # """ - # Return a list of dictionaries indicating the members of the - # image, i.e., those tenants the image is shared with. +# def get_cluster_hosts(self, req, cluster_id, host_id=None): +# """ +# Return a list of dictionaries indicating the members of the +# image, i.e., those tenants the image is shared with. # - # :param req: the Request object coming from the wsgi layer - # :param image_id: The opaque image identifier - # :retval The response body is a mapping of the following form:: +# :param req: the Request object coming from the wsgi layer +# :param image_id: The opaque image identifier +# :retval The response body is a mapping of the following form:: - # {'members': [ - # {'host_id': , ...}, ... - # ]} - # """ - # self._enforce(req, 'get_cluster_hosts') - # self._raise_404_if_project_deleted(req, cluster_id) +# {'members': [ +# {'host_id': , ...}, ... +# ]} +# """ +# self._enforce(req, 'get_cluster_hosts') +# self._raise_404_if_project_deleted(req, cluster_id) # - # try: - # members = registry.get_cluster_hosts(req.context, cluster_id, host_id) - # except exception.NotFound: - # msg = _("Project with identifier %s not found") % cluster_id - # LOG.warn(msg) - # raise webob.exc.HTTPNotFound(msg) - # except exception.Forbidden: - # msg = _("Unauthorized project access") - # LOG.warn(msg) - # raise webob.exc.HTTPForbidden(msg) - # return dict(members=members) +# try: +# members = registry.get_cluster_hosts( +# req.context, cluster_id, host_id) +# except exception.NotFound: +# msg = _("Project with identifier %s not found") % cluster_id +# LOG.warn(msg) +# raise webob.exc.HTTPNotFound(msg) +# except exception.Forbidden: +# msg = _("Unauthorized project access") +# LOG.warn(msg) +# raise webob.exc.HTTPForbidden(msg) +# return dict(members=members) @utils.mutating def delete(self, req, image_id, id): @@ -104,7 +106,7 @@ class Controller(controller.BaseController): raise webob.exc.HTTPNotFound(explanation=e.msg) return webob.exc.HTTPNoContent() - + @utils.mutating def add_cluster_host(self, req, cluster_id, host_id, body=None): """ @@ -113,7 +115,7 @@ class Controller(controller.BaseController): self._enforce(req, 'add_cluster_host') self._raise_404_if_project_deleted(req, cluster_id) self._raise_404_if_host_deleted(req, host_id) - + try: registry.add_cluster_host(req.context, cluster_id, host_id) except exception.Invalid as e: @@ -127,7 +129,7 @@ class Controller(controller.BaseController): raise webob.exc.HTTPNotFound(explanation=e.msg) return webob.exc.HTTPNoContent() - + @utils.mutating def delete_cluster_host(self, req, cluster_id, host_id): """ @@ -147,7 +149,7 @@ class Controller(controller.BaseController): raise webob.exc.HTTPNotFound(explanation=e.msg) return webob.exc.HTTPNoContent() - + def default(self, req, image_id, id, body=None): """This will cover the missing 'show' and 'create' actions""" raise webob.exc.HTTPMethodNotAllowed() diff --git a/code/daisy/daisy/api/v1/networks.py b/code/daisy/daisy/api/v1/networks.py index 40473473..b22d1a3e 100755 --- a/code/daisy/daisy/api/v1/networks.py +++ 
b/code/daisy/daisy/api/v1/networks.py @@ -36,6 +36,7 @@ from daisy.common import wsgi from daisy import i18n from daisy import notifier import daisy.registry.client.v1.api as registry +from functools import reduce LOG = logging.getLogger(__name__) _ = i18n._ @@ -52,9 +53,16 @@ CONF.import_opt('container_formats', 'daisy.common.config', group='image_format') CONF.import_opt('image_property_quota', 'daisy.common.config') -SUPPORT_NETWORK_TYPE = ('PUBLIC', 'PRIVATE', 'STORAGE', 'MANAGEMENT', 'EXTERNAL', 'DEPLOYMENT', 'VXLAN') -SUPPORT_NETWORK_TEMPLATE_TYPE = ('custom', 'template', 'default') -SUPPORT_ML2_TYPE = ('ovs', 'sriov(direct)', 'sriov(macvtap)', +SUPPORT_NETWORK_TYPE = ( + 'PUBLICAPI', + 'DATAPLANE', + 'STORAGE', + 'MANAGEMENT', + 'EXTERNAL', + 'DEPLOYMENT', + 'HEARTBEAT') +SUPPORT_NETWORK_TEMPLATE_TYPE = ('custom', 'template', 'default', 'system') +SUPPORT_ML2_TYPE = ('ovs', 'sriov(direct)', 'sriov(macvtap)', 'ovs,sriov(direct)', 'ovs,sriov(macvtap)') SUPPORT_NETWORK_CAPABILITY = ('high', 'low') @@ -99,12 +107,15 @@ class Controller(controller.BaseController): def _raise_404_if_network_deleted(self, req, network_id): network = self.get_network_meta_or_404(req, network_id) if network['deleted']: - msg = _("Network with identifier %s has been deleted.") % network_id + msg = _("Network with identifier %s has been deleted.") % \ + network_id raise HTTPNotFound(msg) + def _raise_404_if_cluster_delete(self, req, cluster_id): cluster_id = self.get_cluster_meta_or_404(req, cluster_id) if cluster_id['deleted']: - msg = _("cluster_id with identifier %s has been deleted.") % cluster_id + msg = _("cluster_id with identifier %s has been deleted.") % \ + cluster_id raise HTTPNotFound(msg) def _get_network_name_by_cluster_id(self, context, cluster_id): @@ -114,7 +125,6 @@ class Controller(controller.BaseController): network_name_list.append(network['name']) return network_name_list - def _get_filters(self, req): """ Return a dictionary of query param filters from the request @@ -146,7 +156,7 @@ class Controller(controller.BaseController): if PARAM in req.params: params[PARAM] = req.params.get(PARAM) return params - + def validate_ip_format(self, ip_str): ''' valid ip_str format = '10.43.178.9' @@ -157,21 +167,21 @@ class Controller(controller.BaseController): '10.43.1789', invalid format ''' valid_fromat = False - if ip_str.count('.') == 3 and \ - all(num.isdigit() and 0<=int(num)<256 for num in ip_str.rstrip().split('.')): + if ip_str.count('.') == 3 and all(num.isdigit() and 0 <= int( + num) < 256 for num in ip_str.rstrip().split('.')): valid_fromat = True - if valid_fromat == False: + if not valid_fromat: msg = (_("%s invalid ip format!") % ip_str) LOG.warn(msg) raise HTTPForbidden(msg) - + def _ip_into_int(self, ip): """ Switch ip string to decimalism integer.. 
:param ip: ip string :return: decimalism integer """ - return reduce(lambda x, y: (x<<8)+y, map(int, ip.split('.'))) + return reduce(lambda x, y: (x << 8) + y, map(int, ip.split('.'))) def _is_in_network_range(self, ip, network): """ @@ -182,9 +192,13 @@ class Controller(controller.BaseController): """ network = network.split('/') mask = ~(2**(32 - int(network[1])) - 1) - return (self._ip_into_int(ip) & mask) == (self._ip_into_int(network[0]) & mask) + return ( + self._ip_into_int(ip) & mask) == ( + self._ip_into_int( + network[0]) & mask) - def _verify_uniqueness_of_network_name(self, req, network_list, network_meta, is_update = False): + def _verify_uniqueness_of_network_name( + self, req, network_list, network_meta, is_update=False): """ Network name is match case and uniqueness in cluster. :param req: @@ -192,50 +206,137 @@ class Controller(controller.BaseController): :param network_meta: network plane need be verified :return: """ - if not network_list or not network_meta or not network_meta.get('name', None): - msg = _("Input params invalid for verifying uniqueness of network name.") + if not network_list or not network_meta or not network_meta.get( + 'name', None): + msg = _("Input params invalid for verifying uniqueness of " + "network name.") raise HTTPBadRequest(msg, request=req, content_type="text/plain") network_name = network_meta['name'] for network in network_list['networks']: - if (is_update and - network_name == network['name'] and - network_meta['id'] == network['id']): + if (is_update and + network_name == network['name'] and + network_meta['id'] == network['id']): return # network name don't match case network_name_list = [network['name'].lower() for network in - network_list['networks'] if network.get('name', None)] + network_list['networks'] if + network.get('name', None)] if network_name.lower() in network_name_list: - msg = _("Name of network isn't match case and %s already exits in the cluster." % network_name) + msg = _( + "Name of network isn't match case and %s already exits " + "in the cluster." % + network_name) raise HTTPConflict(msg, request=req, content_type="text/plain") if not is_update: - # Input networks type can't be same with db record which is all ready exit, + # Input networks type can't be same with db record + # which is all ready exit, # except PRIVATE network. network_type_exist_list = \ - [network['network_type'] for network in network_list['networks'] - if network.get('network_type', None) and network['network_type'] != "PRIVATE" - and network['network_type'] != "STORAGE"] - if network_meta.get("network_type", None) in network_type_exist_list: - msg = _("The %s network plane %s must be only, except PRIVATE network." % - (network_meta['network_type'], network_name)) + [network['network_type'] for network in + network_list['networks'] + if network.get('network_type', None) and + network['network_type'] != "DATAPLANE" and + network['network_type'] != "STORAGE" and + network['network_type'] != "HEARTBEAT"] + if network_meta.get( + "network_type", + None) in network_type_exist_list: + msg = _( + "The %s network plane %s must be unique, " + "except DATAPLANE/STORAGE/HEARTBEAT network." 
% + (network_meta['network_type'], network_name)) raise HTTPConflict(msg, request=req, content_type="text/plain") - def _valid_vlan_range(self, req, network_meta): - if ((network_meta.has_key('vlan_start') and not network_meta.has_key('vlan_end')) or - (not network_meta.has_key('vlan_start') and network_meta.has_key('vlan_end'))): - raise HTTPBadRequest(explanation="vlan-start and vlan-end must be appeared at the same time", request=req) - if network_meta.has_key('vlan_start'): - if not (int(network_meta['vlan_start']) >= 1 and + def _valid_network_range(self, req, network_meta): + if (('vlan_start' in network_meta and 'vlan_end' not in + network_meta) or ( + 'vlan_start' not in network_meta and + 'vlan_end' in network_meta)): + msg = "vlan-start and vlan-end must be appeared "\ + "at the same time" + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, request=req) + if 'vlan_start' in network_meta: + if not (int(network_meta['vlan_start']) >= 1 and int(network_meta['vlan_start']) <= 4094): - raise HTTPBadRequest(explanation="vlan-start must be a integer in '1~4096'", request=req) - if network_meta.has_key('vlan_end'): - if not (int(network_meta['vlan_end']) >= 1 and + msg = "vlan_start must be a integer in 1~4096" + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, request=req) + if 'vlan_end' in network_meta: + if not (int(network_meta['vlan_end']) >= 1 and int(network_meta['vlan_end']) <= 4094): - raise HTTPBadRequest(explanation="vlan-end must be a integer in '1~4096'", request=req) + msg = "vlan_end must be a integer in 1~4096" + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, request=req) if int(network_meta['vlan_start']) > int(network_meta['vlan_end']): - raise HTTPBadRequest(explanation="vlan-start must be less than vlan-end", request=req) + msg = "vlan_start must be less than vlan_end" + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, request=req) + + if (('vni_start' in network_meta and 'vni_end' not in + network_meta) or ( + 'vni_start' not in network_meta and + 'vni_end' in network_meta)): + + msg = "vni_start and vni_end must be appeared at the same time" + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, request=req) + if 'vni_start' in network_meta: + if not (int(network_meta['vni_start']) >= 1 and + int(network_meta['vni_start']) <= 16777216): + msg = "vni_start must be a integer in 1~16777216" + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, request=req) + if 'vni_end' in network_meta: + if not (int(network_meta['vni_end']) >= 1 and + int(network_meta['vni_end']) <= 16777216): + msg = "vni_end must be a integer in 1~16777216" + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, request=req) + if int(network_meta['vni_start']) > int(network_meta['vni_end']): + msg = "vni_start must be less than vni_end" + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, request=req) + + if (('gre_id_start' in network_meta and 'gre_id_end' not in + network_meta) or ( + 'gre_id_start' not in network_meta and + 'gre_id_end' in network_meta)): + msg = "gre_id_start and gre_id_end must"\ + "be appeared at the same time" + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, request=req) + if 'gre_id_start' in network_meta: + if not (int(network_meta['gre_id_start']) >= 1 and + int(network_meta['gre_id_start']) <= 4094): + msg = "gre_id_start must be a integer in 1~4094" + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, request=req) + if 'gre_id_end' in network_meta: + if not (int(network_meta['gre_id_end']) >= 1 and + 
int(network_meta['gre_id_end']) <= 4094): + msg = "gre_id_end must be a integer in 1~4094" + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, request=req) + if int(network_meta['gre_id_start']) >\ + int(network_meta['gre_id_end']): + msg = "gre_id_start must be less than gre_id_end" + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, request=req) + + def _verify_heartbeat_network(self, req, network_list, network_meta): + heartbeat_networks = [ + network for network in network_list['networks'] if network.get( + 'network_type', + None) and network['network_type'] == "HEARTBEAT"] + if len(heartbeat_networks) >= 2: + raise HTTPBadRequest( + explanation="HEARTBEAT network plane number must be " + "less than two", + request=req) @utils.mutating def add_network(self, req, network_meta): @@ -248,46 +349,62 @@ class Controller(controller.BaseController): :raises HTTPBadRequest if x-host-name is missing """ self._enforce(req, 'add_network') - cluster_id = network_meta.get('cluster_id',None) + cluster_id = network_meta.get('cluster_id', None) if cluster_id: self._raise_404_if_cluster_delete(req, cluster_id) network_list = self.detail(req, cluster_id) - self._verify_uniqueness_of_network_name(req, network_list, network_meta) + self._verify_uniqueness_of_network_name( + req, network_list, network_meta) + if 'network_type' in network_meta and network_meta[ + 'network_type'] == "HEARTBEAT": + self._verify_heartbeat_network(req, network_list, network_meta) # else: # if network_meta.get('type',None) != "template": - # raise HTTPBadRequest(explanation="cluster id must be given", request=req) - network_name=network_meta.get('name',None) + # raise HTTPBadRequest(explanation="cluster id must be given", + # request=req) + network_name = network_meta.get('name', None) network_name_split = network_name.split('_') - for network_name_info in network_name_split : - if not network_name_info.isalnum(): - raise ValueError('network name must be numbers or letters or underscores !') - if not network_meta.has_key('network_type'): - raise HTTPBadRequest(explanation="network-type must be given", request=req) - if network_meta['network_type'] not in SUPPORT_NETWORK_TYPE: - raise HTTPBadRequest(explanation="unsupported network-type", request=req) - - - if (network_meta.has_key('type') and - network_meta['type'] not in SUPPORT_NETWORK_TEMPLATE_TYPE): - raise HTTPBadRequest(explanation="unsupported type", request=req) - - if (network_meta.has_key('capability') and - network_meta['capability'] not in SUPPORT_NETWORK_CAPABILITY): - raise HTTPBadRequest(explanation="unsupported capability type", request=req) - - self._valid_vlan_range(req, network_meta) + for network_name_info in network_name_split: + if not network_name_info.isalnum(): + raise ValueError( + 'network name must be numbers or letters or underscores !') + if 'network_type' not in network_meta: + raise HTTPBadRequest( + explanation="network-type must be given", + request=req) + if network_meta['network_type'] not in SUPPORT_NETWORK_TYPE: + raise HTTPBadRequest( + explanation="unsupported network-type", + request=req) - if network_meta.get('ip_ranges', None): + if ('type' in network_meta and + network_meta['type'] not in SUPPORT_NETWORK_TEMPLATE_TYPE): + raise HTTPBadRequest(explanation="unsupported type", request=req) + + if ('capability' in network_meta and + network_meta['capability'] not in SUPPORT_NETWORK_CAPABILITY): + raise HTTPBadRequest( + explanation="unsupported capability type", + request=req) + + self._valid_network_range(req, network_meta) + + 
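+            # NOTE (assumed format, not stated elsewhere in this patch):
+            # 'ip_ranges' is a string whose eval() yields a list of dicts
+            # such as [{'start': '192.168.1.10', 'end': '192.168.1.20'}];
+            # the checks below require each range to lie inside 'cidr'
+            # and the ranges not to overlap.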
if network_meta.get('ip_ranges', None) and \ + eval(network_meta['ip_ranges']): cidr = None - if not network_meta.has_key('cidr'): - msg = (_("When ip range was specified, the CIDR parameter can not be empty.")) + if 'cidr' not in network_meta: + msg = ( + _("When ip range was specified, the CIDR parameter " + "can not be empty.")) LOG.warn(msg) raise HTTPForbidden(msg) else: cidr = network_meta['cidr'] cidr_division = cidr.split('/') - if len(cidr_division) != 2 or ( cidr_division[1] \ - and int(cidr_division[1]) > 32 or int(cidr_division[1]) < 0): + if len(cidr_division) != 2 or ( + cidr_division[1] and int( + cidr_division[1]) > 32 or int( + cidr_division[1]) < 0): msg = (_("Wrong CIDR format.")) LOG.warn(msg) raise HTTPForbidden(msg) @@ -299,39 +416,50 @@ class Controller(controller.BaseController): sorted_int_ip_ranges_list = list() for ip_pair in ip_ranges: if ['start', 'end'] != ip_pair.keys(): - msg = (_("IP range was not start with 'start:' or end with 'end:'.")) + msg = ( + _("IP range was not start with 'start:' or " + "end with 'end:'.")) LOG.warn(msg) raise HTTPForbidden(msg) - ip_start = ip_pair['start'] + ip_start = ip_pair['start'] ip_end = ip_pair['end'] - self.validate_ip_format(ip_start) #check ip format + self.validate_ip_format(ip_start) # check ip format self.validate_ip_format(ip_end) - + if not self._is_in_network_range(ip_start, cidr): - msg = (_("IP address %s was not in the range of CIDR %s." % (ip_start, cidr))) + msg = ( + _("IP address %s was not in the range " + "of CIDR %s." % (ip_start, cidr))) LOG.warn(msg) raise HTTPForbidden(msg) - + if not self._is_in_network_range(ip_end, cidr): - msg = (_("IP address %s was not in the range of CIDR %s." % (ip_end, cidr))) + msg = ( + _("IP address %s was not in the range " + "of CIDR %s." % (ip_end, cidr))) LOG.warn(msg) raise HTTPForbidden(msg) - - #transform ip format to int when the string format is valid + + # transform ip format to int when the string format is + # valid int_ip_start = self._ip_into_int(ip_start) int_ip_end = self._ip_into_int(ip_end) - + if int_ip_start > int_ip_end: msg = (_("Wrong ip range format.")) LOG.warn(msg) raise HTTPForbidden(msg) int_ip_ranges_list.append([int_ip_start, int_ip_end]) - sorted_int_ip_ranges_list = sorted(int_ip_ranges_list, key=lambda x : x[0]) - + sorted_int_ip_ranges_list = sorted( + int_ip_ranges_list, key=lambda x: x[0]) + for int_ip_range in sorted_int_ip_ranges_list: - if last_ip_range_end and last_ip_range_end >= int_ip_range[0]: + if last_ip_range_end and last_ip_range_end >= int_ip_range[ + 0]: msg = (_("Between ip ranges can not be overlap.")) - LOG.warn(msg) # such as "[10, 15], [12, 16]", last_ip_range_end >= int_ip_range[0], this ip ranges were overlap + # such as "[10, 15], [12, 16]", last_ip_range_end >= + # int_ip_range[0], this ip ranges were overlap + LOG.warn(msg) raise HTTPForbidden(msg) else: last_ip_range_end = int_ip_range[1] @@ -353,16 +481,33 @@ class Controller(controller.BaseController): 'have the same cidr')) raise HTTPBadRequest(explanation=msg) - if network_meta.get('gateway', None) and network_meta.get('cidr', None): + if network_meta.get( + 'gateway', + None) and network_meta.get( + 'cidr', + None): gateway = network_meta['gateway'] cidr = network_meta['cidr'] - + self.validate_ip_format(gateway) return_flag = self._is_in_network_range(gateway, cidr) if not return_flag: - msg = (_('The gateway %s was not in the same segment with the cidr %s of management network.' 
% (gateway, cidr))) + msg = ( + _( + 'The gateway %s was not in the same segment ' + 'with the cidr %s of management network.' % + (gateway, cidr))) raise HTTPBadRequest(explanation=msg) + if network_meta.get('cluster_id') and network_meta.get('gateway'): + networks = registry.get_networks_detail(req.context, cluster_id) + gateways = [network['gateway'] for network in networks + if network['name'] != network_meta['name'] and + network['gateway']] + if gateways: + msg = (_('More than one gateway found in cluster.')) + LOG.error(msg) + raise HTTPConflict(explanation=msg) network_meta = registry.add_network_metadata(req.context, network_meta) return {'network_meta': network_meta} @@ -377,14 +522,16 @@ class Controller(controller.BaseController): :raises HTTPBadRequest if x-host-name is missing """ self._enforce(req, 'delete_network') - #self._raise_404_if_cluster_deleted(req, cluster_id) - #self._raise_404_if_network_deleted(req, network_id) + # self._raise_404_if_cluster_deleted(req, cluster_id) + # self._raise_404_if_network_deleted(req, network_id) network = self.get_network_meta_or_404(req, network_id) if network['deleted']: - msg = _("Network with identifier %s has been deleted.") % network_id + msg = _("Network with identifier %s has been deleted.") % \ + network_id raise HTTPNotFound(msg) if network['type'] != 'custom': - msg = _("Type of network was not custom, can not delete this network.") + msg = _("Type of network was not custom, can not " + "delete this network.") raise HTTPForbidden(msg) try: registry.delete_network_metadata(req.context, network_id) @@ -403,14 +550,15 @@ class Controller(controller.BaseController): request=req, content_type="text/plain") except exception.InUseByStore as e: - msg = (_("Network %(id)s could not be deleted because it is in use: " + msg = (_("Network %(id)s could not be deleted " + "because it is in use: " "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)}) LOG.warn(msg) raise HTTPConflict(explanation=msg, request=req, content_type="text/plain") else: - #self.notifier.info('host.delete', host) + # self.notifier.info('host.delete', host) return Response(body='', status=200) @utils.mutating @@ -436,10 +584,19 @@ class Controller(controller.BaseController): """ self._enforce(req, 'get_all_network') params = self._get_query_params(req) + filters = params.get('filters') + if filters and filters.get('type'): + if filters['type'] not in SUPPORT_NETWORK_TEMPLATE_TYPE: + msg = "type '%s' is not support." % filters['type'] + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, request=req) + try: - networks = registry.get_all_networks(req.context,**params) + networks = registry.get_all_networks(req.context, **params) except Exception: - raise HTTPBadRequest(explanation="Get all networks failed.", request=req) + raise HTTPBadRequest( + explanation="Get all networks failed.", + request=req) return dict(networks=networks) def detail(self, req, id): @@ -458,15 +615,15 @@ class Controller(controller.BaseController): 'deleted_at': |,}, ... 
]} """ - cluster_id = self._raise_404_if_cluster_delete(req, id) + self._raise_404_if_cluster_delete(req, id) self._enforce(req, 'get_networks') params = self._get_query_params(req) try: - networks = registry.get_networks_detail(req.context, id,**params) + networks = registry.get_networks_detail(req.context, id, **params) except exception.Invalid as e: raise HTTPBadRequest(explanation=e.msg, request=req) return dict(networks=networks) - + @utils.mutating def update_network(self, req, network_id, network_meta): """ @@ -477,14 +634,16 @@ class Controller(controller.BaseController): :retval Returns the updated image information as a mapping """ - if network_meta.has_key('name'): - network_name=network_meta.get('name',None) + if 'name' in network_meta: + network_name = network_meta.get('name', None) network_name_split = network_name.split('_') - for network_name_info in network_name_split : - if not network_name_info.isalnum(): - raise ValueError('network name must be numbers or letters or underscores !') + for network_name_info in network_name_split: + if not network_name_info.isalnum(): + raise ValueError( + 'network name must be numbers or ' + 'letters or underscores !') self._enforce(req, 'update_network') - #orig_cluster_meta = self.get_cluster_meta_or_404(req, cluster_id) + # orig_cluster_meta = self.get_cluster_meta_or_404(req, cluster_id) orig_network_meta = self.get_network_meta_or_404(req, network_id) # Do not allow any updates on a deleted network. if orig_network_meta['deleted']: @@ -492,23 +651,27 @@ class Controller(controller.BaseController): raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") - if (network_meta.has_key('network_type') and - network_meta['network_type'] not in SUPPORT_NETWORK_TYPE): - raise HTTPBadRequest(explanation="unsupported network-type", request=req) - if (network_meta.has_key('type') and - network_meta['type'] not in SUPPORT_NETWORK_TEMPLATE_TYPE): - raise HTTPBadRequest(explanation="unsupported type", request=req) - if (network_meta.has_key('type') and - network_meta['type'] == 'template'): - raise HTTPBadRequest(explanation="network template type is not allowed to update", request=req) - - - - if (network_meta.has_key('capability') and - network_meta['capability'] not in SUPPORT_NETWORK_CAPABILITY): - raise HTTPBadRequest(explanation="unsupported capability type", request=req) + if ('network_type' in network_meta and + network_meta['network_type'] not in SUPPORT_NETWORK_TYPE): + raise HTTPBadRequest( + explanation="unsupported network-type", + request=req) + if ('type' in network_meta and + network_meta['type'] not in SUPPORT_NETWORK_TEMPLATE_TYPE): + raise HTTPBadRequest(explanation="unsupported type", request=req) + if ('type' in network_meta and + network_meta['type'] == 'template'): + raise HTTPBadRequest( + explanation="network template type is not allowed to update", + request=req) - self._valid_vlan_range(req, network_meta) + if ('capability' in network_meta and + network_meta['capability'] not in SUPPORT_NETWORK_CAPABILITY): + raise HTTPBadRequest( + explanation="unsupported capability type", + request=req) + + self._valid_network_range(req, network_meta) network_name = network_meta.get('name', None) cluster_id = orig_network_meta['cluster_id'] @@ -516,17 +679,20 @@ class Controller(controller.BaseController): network_updated = copy.deepcopy(network_meta) network_updated['id'] = network_id network_type = network_meta.get('network_type', None) - network_updated['network_type'] = \ - orig_network_meta['network_type'] if 
not network_type else network_type + network_updated['network_type'] = orig_network_meta[ + 'network_type'] if not network_type else network_type network_list = self.detail(req, cluster_id) - self._verify_uniqueness_of_network_name(req, network_list, network_updated, True) - + self._verify_uniqueness_of_network_name( + req, network_list, network_updated, True) + cidr = network_meta.get('cidr', orig_network_meta['cidr']) vlan_id = network_meta.get('vlan_id', orig_network_meta['vlan_id']) if cidr: cidr_division = cidr.split('/') - if len(cidr_division) != 2 or ( cidr_division[1] \ - and int(cidr_division[1]) > 32 or int(cidr_division[1]) < 0): + if len(cidr_division) != 2 or ( + cidr_division[1] and int( + cidr_division[1]) > 32 or int( + cidr_division[1]) < 0): msg = (_("Wrong CIDR format.")) LOG.warn(msg) raise HTTPForbidden(msg) @@ -549,9 +715,12 @@ class Controller(controller.BaseController): 'have the same cidr')) raise HTTPBadRequest(explanation=msg) - if network_meta.get('ip_ranges', None): + if network_meta.get('ip_ranges', None) and \ + eval(network_meta['ip_ranges']): if not cidr: - msg = (_("When ip range was specified, the CIDR parameter can not be empty.")) + msg = ( + _("When ip range was specified, " + "the CIDR parameter can not be empty.")) LOG.warn(msg) raise HTTPForbidden(msg) ip_ranges = eval(network_meta['ip_ranges']) @@ -560,53 +729,81 @@ class Controller(controller.BaseController): sorted_int_ip_ranges_list = list() for ip_pair in ip_ranges: if ['start', 'end'] != ip_pair.keys(): - msg = (_("IP range was not start with 'start:' or end with 'end:'.")) + msg = ( + _("IP range was not start with 'start:' " + "or end with 'end:'.")) LOG.warn(msg) raise HTTPForbidden(msg) - ip_start = ip_pair['start'] + ip_start = ip_pair['start'] ip_end = ip_pair['end'] - self.validate_ip_format(ip_start) #check ip format + self.validate_ip_format(ip_start) # check ip format self.validate_ip_format(ip_end) - + if not self._is_in_network_range(ip_start, cidr): - msg = (_("IP address %s was not in the range of CIDR %s." % (ip_start, cidr))) + msg = ( + _("IP address %s was not in the " + "range of CIDR %s." % (ip_start, cidr))) LOG.warn(msg) raise HTTPForbidden(msg) - + if not self._is_in_network_range(ip_end, cidr): - msg = (_("IP address %s was not in the range of CIDR %s." % (ip_end, cidr))) + msg = ( + _("IP address %s was not in the " + "range of CIDR %s." 
% (ip_end, cidr))) LOG.warn(msg) raise HTTPForbidden(msg) - - #transform ip format to int when the string format is valid + + # transform ip format to int when the string format is valid int_ip_start = self._ip_into_int(ip_start) int_ip_end = self._ip_into_int(ip_end) - + if int_ip_start > int_ip_end: msg = (_("Wrong ip range format.")) LOG.warn(msg) raise HTTPForbidden(msg) int_ip_ranges_list.append([int_ip_start, int_ip_end]) - sorted_int_ip_ranges_list = sorted(int_ip_ranges_list, key=lambda x : x[0]) - LOG.warn("sorted_int_ip_ranges_list: "% sorted_int_ip_ranges_list) - #check ip ranges overlap + sorted_int_ip_ranges_list = sorted( + int_ip_ranges_list, key=lambda x: x[0]) + LOG.warn("sorted_int_ip_ranges_list: " % sorted_int_ip_ranges_list) + # check ip ranges overlap for int_ip_range in sorted_int_ip_ranges_list: if last_ip_range_end and last_ip_range_end >= int_ip_range[0]: msg = (_("Between ip ranges can not be overlap.")) - LOG.warn(msg) # such as "[10, 15], [12, 16]", last_ip_range_end >= int_ip_range[0], this ip ranges were overlap + # such as "[10, 15], [12, 16]", last_ip_range_end >= + # int_ip_range[0], this ip ranges were overlap + LOG.warn(msg) raise HTTPForbidden(msg) else: last_ip_range_end = int_ip_range[1] - - if network_meta.get('gateway', orig_network_meta['gateway']) and network_meta.get('cidr', orig_network_meta['cidr']): + + if network_meta.get( + 'gateway', + orig_network_meta['gateway']) and network_meta.get( + 'cidr', + orig_network_meta['cidr']): gateway = network_meta.get('gateway', orig_network_meta['gateway']) cidr = network_meta.get('cidr', orig_network_meta['cidr']) self.validate_ip_format(gateway) return_flag = self._is_in_network_range(gateway, cidr) if not return_flag: - msg = (_('The gateway %s was not in the same segment with the cidr %s of management network.' % (gateway, cidr))) + msg = ( + _( + 'The gateway %s was not in the same ' + 'segment with the cidr %s of management network.' 
% + (gateway, cidr))) raise HTTPBadRequest(explanation=msg) + # allow one gateway in one cluster + if network_meta.get('cluster_id') and (network_meta.get('gateway')): + networks = registry.get_networks_detail(req.context, cluster_id) + gateways = [network['gateway'] for network in networks + if network['name'] != orig_network_meta['name'] and + network['gateway']] + if gateways: + msg = (_('More than one gateway found in cluster.')) + LOG.error(msg) + raise HTTPConflict(explanation=msg) + try: network_meta = registry.update_network_metadata(req.context, network_id, @@ -626,12 +823,8 @@ class Controller(controller.BaseController): request=req, content_type="text/plain") except exception.Forbidden as e: - msg = (_("Forbidden to update network: %s") % - utils.exception_to_str(e)) - LOG.warn(msg) - raise HTTPForbidden(explanation=msg, - request=req, - content_type="text/plain") + LOG.warn(e) + raise HTTPForbidden(e) except (exception.Conflict, exception.Duplicate) as e: LOG.warn(utils.exception_to_str(e)) raise HTTPConflict(body=_('Network operation conflicts'), @@ -642,6 +835,7 @@ class Controller(controller.BaseController): return {'network_meta': network_meta} + class HostDeserializer(wsgi.JSONRequestDeserializer): """Handles deserialization of specific controller method requests.""" @@ -656,6 +850,7 @@ class HostDeserializer(wsgi.JSONRequestDeserializer): def update_network(self, request): return self._deserialize(request) + class HostSerializer(wsgi.JSONResponseSerializer): """Handles serialization of specific controller method responses.""" @@ -683,9 +878,9 @@ class HostSerializer(wsgi.JSONResponseSerializer): response.body = self.to_json(dict(network=network_meta)) return response + def create_resource(): """Hosts resource factory method""" deserializer = HostDeserializer() serializer = HostSerializer() return wsgi.Resource(Controller(), deserializer, serializer) - diff --git a/code/daisy/daisy/api/v1/roles.py b/code/daisy/daisy/api/v1/roles.py index 1c47c2bf..c665c397 100755 --- a/code/daisy/daisy/api/v1/roles.py +++ b/code/daisy/daisy/api/v1/roles.py @@ -46,8 +46,16 @@ SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE SUPPORTED_DEPLOYMENT_BACKENDS = ('tecs', 'zenic', 'proton') -SUPPORTED_ROLE = ('CONTROLLER_LB', 'CONTROLLER_HA', 'COMPUTER', 'ZENIC_CTL', 'ZENIC_NFM', - 'ZENIC_MDB', 'PROTON', 'CHILD_CELL_1_COMPUTER', 'CONTROLLER_CHILD_CELL_1') +SUPPORTED_ROLE = ( + 'CONTROLLER_LB', + 'CONTROLLER_HA', + 'COMPUTER', + 'ZENIC_CTL', + 'ZENIC_NFM', + 'ZENIC_MDB', + 'PROTON', + 'CHILD_CELL_1_COMPUTER', + 'CONTROLLER_CHILD_CELL_1') SUPPORT_DISK_LOCATION = ('local', 'share') CONF = cfg.CONF @@ -56,6 +64,7 @@ CONF.import_opt('container_formats', 'daisy.common.config', group='image_format') CONF.import_opt('image_property_quota', 'daisy.common.config') + class Controller(controller.BaseController): """ WSGI controller for roles resource in Daisy v1 API @@ -130,86 +139,97 @@ class Controller(controller.BaseController): if host['deleted']: msg = _("Node with identifier %s has been deleted.") % host_id raise HTTPNotFound(msg) + def _raise_404_if_service_deleted(self, req, service_id): service = self.get_service_meta_or_404(req, service_id) if service['deleted']: - msg = _("Service with identifier %s has been deleted.") % service_id + msg = _("Service with identifier %s has been deleted.") % \ + service_id raise HTTPNotFound(msg) + def _raise_404_if_config_set_deleted(self, req, config_set_id): 
config_set = self.get_config_set_meta_or_404(req, config_set_id) if config_set['deleted']: - msg = _("Config_Set with identifier %s has been deleted.") % config_set_id + msg = _("Config_Set with identifier %s has been deleted.") % \ + config_set_id raise HTTPNotFound(msg) + def _raise_404_if_cluster_deleted(self, req, cluster_id): cluster = self.get_cluster_meta_or_404(req, cluster_id) if cluster['deleted']: - msg = _("cluster with identifier %s has been deleted.") % cluster_id + msg = _("cluster with identifier %s has been deleted.") % \ + cluster_id raise HTTPNotFound(msg) def _get_service_name_list(self, req, role_service_id_list): service_name_list = [] for service_id in role_service_id_list: - service_meta = registry.get_service_metadata(req.context, service_id) + service_meta = registry.get_service_metadata( + req.context, service_id) service_name_list.append(service_meta['name']) return service_name_list - + def _get_host_disk_except_os_disk_by_info(self, host_info): ''' type(host_info): host_disk_except_os_disk_lists: disk_size , type = int ''' - #import pdb;pdb.set_trace() + # import pdb;pdb.set_trace() host_disk_except_os_disk_lists = 0 - os_disk_m = host_info.get('root_lv_size', 51200) + os_disk_m = host_info.get('root_lv_size', 102400) swap_size_m = host_info.get('swap_lv_size', None) if swap_size_m: - swap_size_m = (swap_size_m / 4)*4 + swap_size_m = (swap_size_m / 4) * 4 else: swap_size_m = 0 boot_partition_m = 400 redundant_partiton_m = 600 if not os_disk_m: - os_disk_m = 51200 - #host_disk = 1024 + os_disk_m = 102400 + # host_disk = 1024 host_disks = host_info.get('disks', None) host_disk_size_m = 0 if host_disks: - for key, value in host_disks.items(): + for key, value in host_disks.items(): disk_size_b = str(value.get('size', None)) disk_size_b_str = disk_size_b.strip().split()[0] if disk_size_b_str: disk_size_b_int = int(disk_size_b_str) - disk_size_m = disk_size_b_int//(1024*1024) + disk_size_m = disk_size_b_int // (1024 * 1024) host_disk_size_m = host_disk_size_m + disk_size_m - host_disk_except_os_disk_lists = host_disk_size_m - os_disk_m - swap_size_m - boot_partition_m - redundant_partiton_m - LOG.warn('----start----host_disk_except_os_disk_lists: %s -----end--' % host_disk_except_os_disk_lists) + host_disk_except_os_disk_lists = host_disk_size_m - os_disk_m - \ + swap_size_m - boot_partition_m - redundant_partiton_m + LOG.warn( + '----start----host_disk_except_os_disk_lists: %s -----end--' % + host_disk_except_os_disk_lists) return host_disk_except_os_disk_lists - + def _check_host_validity(self, **paras): ''' - paras['db_lv_size'], paras['glance_lv_size'] , paras['disk_size'] + paras['db_lv_size'], paras['glance_lv_size'] , paras['disk_size'] ''' disk_size = paras.get('disk_size', None) - LOG.warn('--------disk_size:----- %s'% disk_size) + LOG.warn('--------disk_size:----- %s' % disk_size) if disk_size: disk_size_m = int(disk_size) else: disk_size_m = 0 - if disk_size_m == 0: #Host hard disk size was 0, think that the host does not need to install the system - return #Don't need to ckeck the validity of hard disk size - + if disk_size_m == 0: # Host hard disk size was 0, + # think that the host does not need to install the system + return # Don't need to ckeck the validity of hard disk size + db_lv_size_m = paras.get('db_lv_size', 300) if db_lv_size_m: db_lv_size_m = int(db_lv_size_m) else: db_lv_size_m = 0 - + glance_lv_size_m = paras.get('glance_lv_size', 17100) if glance_lv_size_m: glance_lv_size_m = int(glance_lv_size_m) else: glance_lv_size_m = 0 - + 
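+        # NOTE (assumption): the *_lv_size values here are treated as MB and
+        # are rounded down to multiples of 4 below before being compared
+        # against the available disk size.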
nova_lv_size_m = paras.get('nova_lv_size', 0) if nova_lv_size_m: nova_lv_size_m = int(nova_lv_size_m) @@ -217,11 +237,13 @@ class Controller(controller.BaseController): nova_lv_size_m = 0 if nova_lv_size_m == -1: nova_lv_size_m = 0 - glance_lv_size_m = (glance_lv_size_m/4)*4 - db_lv_size_m = (db_lv_size_m/4)*4 - nova_lv_size_m = (nova_lv_size_m/4)*4 + glance_lv_size_m = (glance_lv_size_m / 4) * 4 + db_lv_size_m = (db_lv_size_m / 4) * 4 + nova_lv_size_m = (nova_lv_size_m / 4) * 4 if glance_lv_size_m + db_lv_size_m + nova_lv_size_m > disk_size_m: - msg = _("There isn't enough disk space to specify database or glance or nova disk, please specify database or glance or nova disk size again") + msg = _("There isn't enough disk space to specify database or " + "glance or nova disk, please specify database or " + "glance or nova disk size again") LOG.debug(msg) raise HTTPForbidden(msg) @@ -236,7 +258,12 @@ class Controller(controller.BaseController): def _check_config_set_id_exist(self, req, config_set_id): self._raise_404_if_config_set_deleted(req, config_set_id) - def _check_glance_lv_value(self, req, glance_lv_value, role_name, service_name_list): + def _check_glance_lv_value( + self, + req, + glance_lv_value, + role_name, + service_name_list): if int(glance_lv_value) < 0 and int(glance_lv_value) != -1: msg = _("glance_lv_size can't be negative except -1.") raise HTTPForbidden(explanation=msg, @@ -250,150 +277,170 @@ class Controller(controller.BaseController): content_type="text/plain") def _check_db_lv_size(self, req, db_lv_size, service_name_list): - if int(db_lv_size) < 0 and int(db_lv_size) != -1 : + if int(db_lv_size) < 0 and int(db_lv_size) != -1: msg = _("The size of database disk can't be negative except -1.") LOG.debug(msg) raise HTTPForbidden(msg) - #Only the role with database service can be formulated the size of a database. - if 'mariadb' not in service_name_list and 'mongodb' not in service_name_list: + # Only the role with database service can be formulated the size of + # a database. 
+ if 'mariadb' not in service_name_list and 'mongodb' not in \ + service_name_list: msg = _('The role without database service is unable ' 'to specify the size of the database!') LOG.debug(msg) raise HTTPForbidden(explanation=msg, - request=req, - content_type="text/plain") + request=req, + content_type="text/plain") def _check_nova_lv_size(self, req, nova_lv_size, role_name): if role_name != "COMPUTER": msg = _("The role is not COMPUTER, it can't set logic " "volume disk for nova.") raise HTTPForbidden(explanation=msg, - request=req, - content_type="text/plain") + request=req, + content_type="text/plain") try: if int(nova_lv_size) < 0 and int(nova_lv_size) != -1: msg = _("The nova_lv_size must be -1 or [0, N).") raise HTTPForbidden(explanation=msg, - request=req, - content_type="text/plain") + request=req, + content_type="text/plain") except: msg = _("The nova_lv_size must be -1 or [0, N).") raise HTTPForbidden(explanation=msg, - request=req, - content_type="text/plain") + request=req, + content_type="text/plain") def _check_all_lv_size(self, req, db_lv_size, glance_lv_size, nova_lv_size, host_id_list, cluster_id, argws): if db_lv_size or glance_lv_size or nova_lv_size: for host_id in host_id_list: - host_disk_db_glance_nova_size = self.get_host_disk_db_glance_nova_size(req, host_id, cluster_id) - if host_disk_db_glance_nova_size['db_lv_size'] and db_lv_size and \ - int(db_lv_size) < int(host_disk_db_glance_nova_size['db_lv_size']): - argws['db_lv_size'] = host_disk_db_glance_nova_size['db_lv_size'] + host_disk_db_glance_nova_size = \ + self.get_host_disk_db_glance_nova_size( + req, host_id, cluster_id) + if host_disk_db_glance_nova_size['db_lv_size'] and \ + db_lv_size and int( + db_lv_size) < int(host_disk_db_glance_nova_size[ + 'db_lv_size']): + argws['db_lv_size'] = host_disk_db_glance_nova_size[ + 'db_lv_size'] else: argws['db_lv_size'] = db_lv_size - if host_disk_db_glance_nova_size['glance_lv_size'] and glance_lv_size and \ - int(glance_lv_size) < int(host_disk_db_glance_nova_size['glance_lv_size']): - argws['glance_lv_size'] = host_disk_db_glance_nova_size['glance_lv_size'] + if host_disk_db_glance_nova_size['glance_lv_size'] and \ + glance_lv_size and int( + glance_lv_size) < int(host_disk_db_glance_nova_size[ + 'glance_lv_size']): + argws['glance_lv_size'] = host_disk_db_glance_nova_size[ + 'glance_lv_size'] else: argws['glance_lv_size'] = glance_lv_size - if host_disk_db_glance_nova_size['nova_lv_size'] and nova_lv_size and \ - int(nova_lv_size) < int(host_disk_db_glance_nova_size['nova_lv_size']): - argws['nova_lv_size'] = host_disk_db_glance_nova_size['nova_lv_size'] + if host_disk_db_glance_nova_size['nova_lv_size'] and \ + nova_lv_size and int( + nova_lv_size) < int(host_disk_db_glance_nova_size[ + 'nova_lv_size']): + argws['nova_lv_size'] = host_disk_db_glance_nova_size[ + 'nova_lv_size'] else: argws['nova_lv_size'] = nova_lv_size argws['disk_size'] = host_disk_db_glance_nova_size['disk_size'] - LOG.warn('--------host(%s) check_host_validity argws:----- %s'% (host_id, argws)) + LOG.warn( + '--------host(%s) check_host_validity argws:----- %s' % + (host_id, argws)) self._check_host_validity(**argws) def _check_deployment_backend(self, req, deployment_backend): if deployment_backend not in SUPPORTED_DEPLOYMENT_BACKENDS: - msg = "deployment backend '%s' is not supported." % deployment_backend + msg = "deployment backend '%s' is not supported." 
% \ + deployment_backend raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") - + def _check_role_type_in_update_role(self, req, role_type, orig_role_meta): if orig_role_meta['type'].lower() != role_type.lower(): msg = _("Role type can not be updated to other type.") LOG.debug(msg) raise HTTPForbidden(msg) - - def _check_cluster_id_in_role_update(self, req, role_cluster, orig_role_meta): + + def _check_cluster_id_in_role_update( + self, req, role_cluster, orig_role_meta): if orig_role_meta['type'].lower() == 'template': msg = _("The template role does not belong to any cluster.") LOG.debug(msg) raise HTTPForbidden(msg) orig_role_cluster = orig_role_meta['cluster_id'] - if orig_role_cluster != role_cluster: #Can not change the cluster which the role belongs to + if orig_role_cluster != role_cluster: # Can not change the cluster + # which the role belongs to msg = _("Can't update the cluster of the role.") LOG.debug(msg) raise HTTPForbidden(msg) else: self._raise_404_if_cluster_deleted(req, role_cluster) - + def _check_role_name_in_role_update(self, req, role_meta, orig_role_meta): role_name = role_meta['name'] cluster_id = role_meta.get('cluster_id', orig_role_meta['cluster_id']) if cluster_id: self.check_cluster_role_name_repetition(req, role_name, cluster_id) - else: #role type was template, cluster id was None + else: # role type was template, cluster id was None self.check_template_role_name_repetition(req, role_name) - - def _check_all_lv_size_of_nodes_with_role_in_role_update(self, req, role_meta, orig_role_meta, - role_host_id_list): - #check host with this role at the same time + + def _check_all_lv_size_of_nodes_with_role_in_role_update( + self, req, role_meta, orig_role_meta, role_host_id_list): + # check host with this role at the same time cluster_id = role_meta.get('cluster_id', None) - if not cluster_id: #role with cluster + if not cluster_id: # role with cluster cluster_id = orig_role_meta['cluster_id'] - if not cluster_id: #without cluster id, raise Error + if not cluster_id: # without cluster id, raise Error msg = _("The cluster_id parameter can not be None!") LOG.debug(msg) raise HTTPForbidden(msg) argws = dict() - if role_meta.has_key('db_lv_size'): + if 'db_lv_size' in role_meta: db_lv_size = role_meta['db_lv_size'] - else: #The db_lv_size has been specified before. + else: # The db_lv_size has been specified before. 
db_lv_size = orig_role_meta.get('db_lv_size') - if role_meta.has_key('glance_lv_size'): + if 'glance_lv_size' in role_meta: glance_lv_size = role_meta['glance_lv_size'] else: glance_lv_size = orig_role_meta.get('glance_lv_size') - if role_meta.has_key('nova_lv_size'): + if 'nova_lv_size' in role_meta: nova_lv_size = role_meta['nova_lv_size'] else: nova_lv_size = orig_role_meta.get('nova_lv_size') - if role_meta.has_key('nodes'): + if 'nodes' in role_meta: host_id_list = list(eval(role_meta['nodes'])) + role_host_id_list else: host_id_list = role_host_id_list self._check_all_lv_size(req, db_lv_size, glance_lv_size, nova_lv_size, host_id_list, cluster_id, argws) - + def _check_ntp_server(self, req, role_name): if role_name != 'CONTROLLER_HA': msg = 'The role %s need no ntp_server' % role_name raise HTTPForbidden(explanation=msg) - def _check_role_type_in_role_add(self, req, role_meta): - #role_type == None or not template, cluster id must not be None + # role_type == None or not template, cluster id must not be None role_type = role_meta['type'] - if role_type.lower() != 'template': + if role_type.lower() != 'template': role_cluster_id = role_meta.get('cluster_id', None) - if not role_cluster_id: #add role without cluster id parameter, raise error - msg = _("The cluster_id parameter can not be None if role was not a template type.") + if not role_cluster_id: # add role without cluster id parameter, + # raise error + msg = _( + "The cluster_id parameter can not be None " + "if role was not a template type.") LOG.debug(msg) raise HTTPForbidden(msg) - else: #role_type == template, cluster id is not necessary - if role_meta.has_key('cluster_id'): + else: # role_type == template, cluster id is not necessary + if 'cluster_id' in role_meta: msg = _("Tht template role cannot be added to any cluster.") LOG.debug(msg) raise HTTPForbidden(msg) - + def _check_all_lv_size_with_role_in_role_add(self, req, role_meta): cluster_id = role_meta.get('cluster_id', None) - if not cluster_id: #without cluster id, raise Error + if not cluster_id: # without cluster id, raise Error msg = _("The cluster_id parameter can not be None!") LOG.debug(msg) raise HTTPForbidden(msg) @@ -403,87 +450,104 @@ class Controller(controller.BaseController): nova_lv_size = role_meta.get('nova_lv_size', 0) host_id_list = list(eval(role_meta['nodes'])) self._check_all_lv_size(req, db_lv_size, glance_lv_size, - nova_lv_size, host_id_list, cluster_id, argws) + nova_lv_size, host_id_list, cluster_id, argws) def get_host_disk_db_glance_nova_size(self, req, host_id, cluster_id): ''' - return : + return : host_disk_db_glance_nova_size['disk_size'] = 1024000 host_disk_db_glance_nova_size['db_lv_size'] = 1011 host_disk_db_glance_nova_size['glance_lv_size'] = 1011 host_disk_db_glance_nova_size['nova_lv_size'] = 1011 ''' - #import pdb;pdb.set_trace() + # import pdb;pdb.set_trace() host_disk_db_glance_nova_size = dict() db_lv_size = list() glance_lv_size = list() - nova_lv_size= list() - disk_size = list() - + nova_lv_size = list() + # disk_size = list() + host_info = self.get_host_meta_or_404(req, host_id) if host_info: - if host_info.has_key('deleted') and host_info['deleted']: - msg = _("Node with identifier %s has been deleted.") % host_info['id'] + if 'deleted' in host_info and host_info['deleted']: + msg = _("Node with identifier %s has been deleted.") % \ + host_info[ + 'id'] LOG.debug(msg) raise HTTPNotFound(msg) - #get host disk infomation + # get host disk infomation host_disk = self._get_host_disk_except_os_disk_by_info(host_info) 
host_disk_db_glance_nova_size['disk_size'] = host_disk - #get role_host db/galnce/nova infomation + # get role_host db/galnce/nova infomation cluster_info = self.get_cluster_meta_or_404(req, cluster_id) - if host_info.has_key('cluster'): #host with cluster + if 'cluster' in host_info: # host with cluster if host_info['cluster'] != cluster_info['name']: - #type(host_info['cluster']) = list, type(cluster_info['name']) = str - msg = _("Role and hosts belong to different cluster.") + # type(host_info['cluster']) = list, + # type(cluster_info['name']) = str + msg = _("Role and hosts belong to different cluster.") LOG.debug(msg) raise HTTPNotFound(msg) else: all_roles = registry.get_roles_detail(req.context) - cluster_roles = [role for role in all_roles if role['cluster_id'] == cluster_id] - #roles infomation saved in cluster_roles - if host_info.has_key('role') and host_info['role']: #host with role + cluster_roles = [ + role for role in all_roles if role['cluster_id'] == + cluster_id] + # roles infomation saved in cluster_roles + if 'role' in host_info and host_info[ + 'role']: # host with role for role in cluster_roles: - if role['name'] in host_info['role'] and cluster_roles: + if role['name'] in host_info[ + 'role'] and cluster_roles: db_lv_size.append(role.get('db_lv_size', None)) - glance_lv_size.append(role.get('glance_lv_size', None)) - nova_lv_size.append(role.get('nova_lv_size', None)) - - if db_lv_size: + glance_lv_size.append( + role.get('glance_lv_size', None)) + nova_lv_size.append( + role.get('nova_lv_size', None)) + + if db_lv_size: host_disk_db_glance_nova_size['db_lv_size'] = max(db_lv_size) - else: #host without cluster + else: # host without cluster host_disk_db_glance_nova_size['db_lv_size'] = 0 - if glance_lv_size: - host_disk_db_glance_nova_size['glance_lv_size'] = max(glance_lv_size) + if glance_lv_size: + host_disk_db_glance_nova_size[ + 'glance_lv_size'] = max(glance_lv_size) else: host_disk_db_glance_nova_size['glance_lv_size'] = 0 - if nova_lv_size: + if nova_lv_size: host_disk_db_glance_nova_size['nova_lv_size'] = max(nova_lv_size) else: host_disk_db_glance_nova_size['nova_lv_size'] = 0 - LOG.warn('--------host(%s)disk_db_glance_nova_size:----- %s'% (host_id, host_disk_db_glance_nova_size)) + LOG.warn('--------host(%s)disk_db_glance_nova_size:----- %s' % + (host_id, host_disk_db_glance_nova_size)) return host_disk_db_glance_nova_size - + def check_cluster_role_name_repetition(self, req, role_name, cluster_id): all_roles = registry.get_roles_detail(req.context) - cluster_roles = [role for role in all_roles if role['cluster_id'] == cluster_id] + cluster_roles = [role for role in all_roles if role[ + 'cluster_id'] == cluster_id] cluster_roles_name = [role['name'].lower() for role in cluster_roles] - if role_name.lower() in cluster_roles_name: - msg = _("The role %s has already been in the cluster %s!" % (role_name, cluster_id)) + if role_name.lower() in cluster_roles_name: + msg = _( + "The role %s has already been in the cluster %s!" % + (role_name, cluster_id)) LOG.debug(msg) raise HTTPForbidden(msg) - + def check_template_role_name_repetition(self, req, role_name): all_roles = registry.get_roles_detail(req.context) - template_roles = [role for role in all_roles if role['cluster_id'] == None] + template_roles = [ + role for role in all_roles if role['cluster_id'] is None] template_roles_name = [role['name'].lower() for role in template_roles] if role_name.lower() in template_roles_name: - msg = _("The role %s has already been in the the template role." 
% role_name) + msg = _( + "The role %s has already been in the the template role." % + role_name) LOG.debug(msg) raise HTTPForbidden(msg) - + def _check_disk_parameters(self, req, role_meta): - if (role_meta.has_key('disk_location') and - role_meta['disk_location'] not in SUPPORT_DISK_LOCATION): + if ('disk_location' in role_meta and + role_meta['disk_location'] not in SUPPORT_DISK_LOCATION): msg = _("value of disk_location is not supported.") raise HTTPForbidden(msg) @@ -496,69 +560,82 @@ class Controller(controller.BaseController): role_service_id_list, role_host_id_list): role_name = orig_role_meta['name'] if role_meta.get('type', None): - self._check_role_type_in_update_role(req, role_meta['type'], orig_role_meta) - if role_meta.has_key('ntp_server'): + self._check_role_type_in_update_role( + req, role_meta['type'], orig_role_meta) + if 'ntp_server' in role_meta: self._check_ntp_server(req, role_name) - if role_meta.has_key('nodes'): + if 'nodes' in role_meta: self._check_nodes_exist(req, list(eval(role_meta['nodes']))) - if role_meta.has_key('services'): + if 'services' in role_meta: self._check_services_exist(req, list(eval(role_meta['services']))) role_service_id_list.extend(list(eval(role_meta['services']))) - if role_meta.has_key('config_set_id'): - self._check_config_set_id_exist(req, str(role_meta['config_set_id'])) - if role_meta.has_key('cluster_id'): - self._check_cluster_id_in_role_update(req, str(role_meta['cluster_id']), orig_role_meta) - if role_meta.has_key('name'): - self._check_role_name_in_role_update(req, role_meta, orig_role_meta) - service_name_list = self._get_service_name_list(req, role_service_id_list) - glance_lv_value = role_meta.get('glance_lv_size', orig_role_meta['glance_lv_size']) + if 'config_set_id' in role_meta: + self._check_config_set_id_exist( + req, str(role_meta['config_set_id'])) + if 'cluster_id' in role_meta: + self._check_cluster_id_in_role_update( + req, str(role_meta['cluster_id']), orig_role_meta) + if 'name' in role_meta: + self._check_role_name_in_role_update( + req, role_meta, orig_role_meta) + service_name_list = self._get_service_name_list( + req, role_service_id_list) + glance_lv_value = role_meta.get( + 'glance_lv_size', orig_role_meta['glance_lv_size']) if glance_lv_value: - self._check_glance_lv_value(req, glance_lv_value, role_name, service_name_list) + self._check_glance_lv_value( + req, glance_lv_value, role_name, service_name_list) if role_meta.get('db_lv_size', None) and role_meta['db_lv_size']: - self._check_db_lv_size(req, role_meta['db_lv_size'], service_name_list) + self._check_db_lv_size( + req, role_meta['db_lv_size'], service_name_list) if role_meta.get('nova_lv_size', None): self._check_nova_lv_size(req, role_meta['nova_lv_size'], role_name) - if role_meta.has_key('nodes') or role_host_id_list: - self._check_all_lv_size_of_nodes_with_role_in_role_update(req, role_meta, orig_role_meta, - role_host_id_list) + if 'nodes' in role_meta or role_host_id_list: + self._check_all_lv_size_of_nodes_with_role_in_role_update( + req, role_meta, orig_role_meta, role_host_id_list) self._check_disk_parameters(req, role_meta) - if role_meta.has_key('deployment_backend'): - self._check_deployment_backend(req, role_meta['deployment_backend']) + if 'deployment_backend' in role_meta: + self._check_deployment_backend( + req, role_meta['deployment_backend']) if role_meta.get('role_type', None): self._check_type_role_reasonable(req, role_meta) - def _check_role_add_parameters(self, req, role_meta, role_service_id_list): - role_type = 
role_meta.get('type', None) role_name = role_meta.get('name', None) if role_meta.get('type', None): self._check_role_type_in_role_add(req, role_meta) - if role_meta.has_key('nodes'): + if 'nodes' in role_meta: self._check_nodes_exist(req, list(eval(role_meta['nodes']))) - if role_meta.has_key('services'): + if 'services' in role_meta: self._check_services_exist(req, list(eval(role_meta['services']))) role_service_id_list.extend(list(eval(role_meta['services']))) - if role_meta.has_key('config_set_id'): - self._check_config_set_id_exist(req, str(role_meta['config_set_id'])) - if role_meta.has_key('cluster_id'): + if 'config_set_id' in role_meta: + self._check_config_set_id_exist( + req, str(role_meta['config_set_id'])) + if 'cluster_id' in role_meta: orig_cluster = str(role_meta['cluster_id']) self._raise_404_if_cluster_deleted(req, orig_cluster) - self.check_cluster_role_name_repetition(req, role_name, orig_cluster) + self.check_cluster_role_name_repetition( + req, role_name, orig_cluster) else: self.check_template_role_name_repetition(req, role_name) - service_name_list = self._get_service_name_list(req, role_service_id_list) + service_name_list = self._get_service_name_list( + req, role_service_id_list) glance_lv_value = role_meta.get('glance_lv_size', None) if glance_lv_value: - self._check_glance_lv_value(req, glance_lv_value, role_name, service_name_list) + self._check_glance_lv_value( + req, glance_lv_value, role_name, service_name_list) if role_meta.get('db_lv_size', None) and role_meta['db_lv_size']: - self._check_db_lv_size(req, role_meta['db_lv_size'], service_name_list) + self._check_db_lv_size( + req, role_meta['db_lv_size'], service_name_list) if role_meta.get('nova_lv_size', None): self._check_nova_lv_size(req, role_meta['nova_lv_size'], role_name) - if role_meta.has_key('nodes'): + if 'nodes' in role_meta: self._check_all_lv_size_with_role_in_role_add(req, role_meta) self._check_disk_parameters(req, role_meta) - if role_meta.has_key('deployment_backend'): - self._check_deployment_backend(req, role_meta['deployment_backend']) + if 'deployment_backend' in role_meta: + self._check_deployment_backend( + req, role_meta['deployment_backend']) else: role_meta['deployment_backend'] = 'tecs' if role_meta.get('role_type', None): @@ -591,7 +668,7 @@ class Controller(controller.BaseController): def delete_role(self, req, id): """ Deletes a role from Daisy. 
- + :param req: The WSGI/Webob Request object :param image_meta: Mapping of metadata about role @@ -599,7 +676,7 @@ class Controller(controller.BaseController): """ self._enforce(req, 'delete_role') - #role = self.get_role_meta_or_404(req, id) + # role = self.get_role_meta_or_404(req, id) print "delete_role:%s" % id try: registry.delete_role_metadata(req.context, id) @@ -625,7 +702,7 @@ class Controller(controller.BaseController): request=req, content_type="text/plain") else: - #self.notifier.info('role.delete', role) + # self.notifier.info('role.delete', role) return Response(body='', status=200) @utils.mutating @@ -661,11 +738,11 @@ class Controller(controller.BaseController): """ self._enforce(req, 'get_roles') params = self._get_query_params(req) - filters=params.get('filters',None) + filters = params.get('filters', None) if 'cluster_id' in filters: - cluster_id=filters['cluster_id'] + cluster_id = filters['cluster_id'] self._raise_404_if_cluster_deleted(req, cluster_id) - + try: roles = registry.get_roles_detail(req.context, **params) except exception.Invalid as e: @@ -684,13 +761,28 @@ class Controller(controller.BaseController): """ orig_role_meta = self.get_role_meta_or_404(req, id) role_service_list = registry.get_role_services(req.context, id) - role_service_id_list = [ role_service['service_id'] for role_service in role_service_list ] + role_service_id_list = [role_service['service_id'] + for role_service in role_service_list] role_host_info_list = registry.get_role_host_metadata(req.context, id) - role_host_id_list = [role_host['host_id'] for role_host in role_host_info_list] - self._check_role_update_parameters(req, role_meta, orig_role_meta, role_service_id_list, role_host_id_list) + role_host_id_list = [role_host['host_id'] + for role_host in role_host_info_list] + self._check_role_update_parameters( + req, + role_meta, + orig_role_meta, + role_service_id_list, + role_host_id_list) + + if orig_role_meta['role_type'] == "CONTROLLER_HA": + cluster_meta = {} + cluster_meta['public_vip'] = role_meta.get( + 'public_vip') or role_meta.get('vip') + if cluster_meta['public_vip']: + cluster_meta = registry.update_cluster_metadata( + req.context, orig_role_meta['cluster_id'], cluster_meta) self._enforce(req, 'modify_image') - #orig_role_meta = self.get_role_meta_or_404(req, id) + # orig_role_meta = self.get_role_meta_or_404(req, id) # Do not allow any updates on a deleted image. 
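The roles.py hunks above are dominated by two mechanical changes: every Python-2-only dict.has_key() call is replaced with the "in" operator, and long statements, comments and messages are wrapped to satisfy PEP 8 line-length checks (inline comments also gain a space after "#"). The update_role hunk additionally carries one behavioral change: for a CONTROLLER_HA role it now copies public_vip (or vip) from the role update into the cluster metadata via registry.update_cluster_metadata. A minimal sketch of the membership idiom, using a hypothetical role_meta payload purely for illustration:

    # Hypothetical payload, mirroring the role_meta dicts validated above.
    role_meta = {'name': 'CONTROLLER_HA', 'nodes': "['host-1', 'host-2']"}

    # Python 2 only -- removed throughout this patch:
    #     if role_meta.has_key('nodes'): ...
    # Python 2/3 compatible membership test used instead:
    if 'nodes' in role_meta:
        host_id_list = list(eval(role_meta['nodes']))
        print(host_id_list)  # ['host-1', 'host-2']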
# Fix for LP Bug #1060930 @@ -735,6 +827,7 @@ class Controller(controller.BaseController): return {'role_meta': role_meta} + class RoleDeserializer(wsgi.JSONRequestDeserializer): """Handles deserialization of specific controller method requests.""" @@ -749,6 +842,7 @@ class RoleDeserializer(wsgi.JSONRequestDeserializer): def update_role(self, request): return self._deserialize(request) + class RoleSerializer(wsgi.JSONResponseSerializer): """Handles serialization of specific controller method responses.""" @@ -768,6 +862,7 @@ class RoleSerializer(wsgi.JSONResponseSerializer): response.headers['Content-Type'] = 'application/json' response.body = self.to_json(dict(role=role_meta)) return response + def get_role(self, response, result): role_meta = result['role_meta'] response.status = 201 @@ -775,6 +870,7 @@ class RoleSerializer(wsgi.JSONResponseSerializer): response.body = self.to_json(dict(role=role_meta)) return response + def create_resource(): """Roles resource factory method""" deserializer = RoleDeserializer() diff --git a/code/daisy/daisy/api/v1/router.py b/code/daisy/daisy/api/v1/router.py index bfcbbdce..da6c2450 100755 --- a/code/daisy/daisy/api/v1/router.py +++ b/code/daisy/daisy/api/v1/router.py @@ -14,7 +14,7 @@ # under the License. -#from daisy.api.v1 import images +# from daisy.api.v1 import images from daisy.api.v1 import hosts from daisy.api.v1 import clusters from daisy.api.v1 import template @@ -29,14 +29,17 @@ from daisy.api.v1 import networks from daisy.api.v1 import install from daisy.api.v1 import disk_array from daisy.api.v1 import host_template +from daisy.api.v1 import hwms from daisy.common import wsgi +from daisy.api.v1 import backup_restore + class API(wsgi.Router): """WSGI router for Glance v1 API requests.""" def __init__(self, mapper): - reject_method_resource = wsgi.Resource(wsgi.RejectMethodController()) + wsgi.Resource(wsgi.RejectMethodController()) '''images_resource = images.create_resource() @@ -126,7 +129,6 @@ class API(wsgi.Router): controller=members_resource, action="index_shared_images")''' - hosts_resource = hosts.create_resource() mapper.connect("/nodes", @@ -145,11 +147,17 @@ class API(wsgi.Router): controller=hosts_resource, action='detail', conditions={'method': ['GET']}) - + mapper.connect("/nodes/{id}", controller=hosts_resource, action='get_host', conditions={'method': ['GET']}) + + mapper.connect("/hwm_nodes", + controller=hosts_resource, + action='update_hwm_host', + conditions={'method': ['POST']}) + mapper.connect("/discover_host/", controller=hosts_resource, action='discover_host', @@ -159,17 +167,17 @@ class API(wsgi.Router): controller=hosts_resource, action='add_discover_host', conditions={'method': ['POST']}) - + mapper.connect("/discover/nodes/{id}", controller=hosts_resource, action='delete_discover_host', conditions={'method': ['DELETE']}) - + mapper.connect("/discover/nodes", controller=hosts_resource, action='detail_discover_host', conditions={'method': ['GET']}) - + mapper.connect("/discover/nodes/{id}", controller=hosts_resource, action='update_discover_host', @@ -179,9 +187,43 @@ class API(wsgi.Router): controller=hosts_resource, action='get_discover_host_detail', conditions={'method': ['GET']}) - + + mapper.connect("/pxe_discover/nodes", + controller=hosts_resource, + action='add_pxe_host', + conditions={'method': ['POST']}) + + mapper.connect("/pxe_discover/nodes/{id}", + controller=hosts_resource, + action='update_pxe_host', + conditions={'method': ['PUT']}) + + hwms_resource = hwms.create_resource() + + 
mapper.connect("/hwm", + controller=hwms_resource, + action='add_hwm', + conditions={'method': ['POST']}) + mapper.connect("/hwm/{id}", + controller=hwms_resource, + action='delete_hwm', + conditions={'method': ['DELETE']}) + mapper.connect("/hwm/{id}", + controller=hwms_resource, + action='update_hwm', + conditions={'method': ['PUT']}) + mapper.connect("/hwm", + controller=hwms_resource, + action='list', + conditions={'method': ['GET']}) + + mapper.connect("/hwm/{id}", + controller=hwms_resource, + action='detail', + conditions={'method': ['GET']}) + clusters_resource = clusters.create_resource() - + mapper.connect("/clusters", controller=clusters_resource, action='add_cluster', @@ -193,56 +235,54 @@ class API(wsgi.Router): mapper.connect("/clusters/{id}", controller=clusters_resource, action='update_cluster', - conditions={'method': ['PUT']}) + conditions={'method': ['PUT']}) mapper.connect("/clusters", controller=clusters_resource, action='detail', conditions={'method': ['GET']}) - + mapper.connect("/clusters/{id}", controller=clusters_resource, action='get_cluster', conditions={'method': ['GET']}) - - + mapper.connect("/clusters/{id}", controller=clusters_resource, action='update_cluster', - conditions={'method': ['PUT']}) + conditions={'method': ['PUT']}) template_resource = template.create_resource() mapper.connect("/template", controller=template_resource, action='add_template', conditions={'method': ['POST']}) - + mapper.connect("/template/{template_id}", controller=template_resource, action='update_template', conditions={'method': ['PUT']}) - - + mapper.connect("/template/{template_id}", controller=template_resource, action='delete_template', conditions={'method': ['DELETE']}) - + mapper.connect("/template/lists", controller=template_resource, action='get_template_lists', conditions={'method': ['GET']}) - + mapper.connect("/template/{template_id}", controller=template_resource, action='get_template_detail', conditions={'method': ['GET']}) - + mapper.connect("/export_db_to_json", controller=template_resource, action='export_db_to_json', conditions={'method': ['POST']}) - + mapper.connect("/import_json_to_template", controller=template_resource, action='import_json_to_template', @@ -253,7 +293,6 @@ class API(wsgi.Router): action='import_template_to_db', conditions={'method': ['POST']}) - host_template_resource = host_template.create_resource() mapper.connect("/host_template", controller=host_template_resource, @@ -262,7 +301,7 @@ class API(wsgi.Router): mapper.connect("/host_template/{template_id}", controller=host_template_resource, action='update_host_template', - conditions={'method': ['PUT']}) + conditions={'method': ['PUT']}) mapper.connect("/host_template", controller=host_template_resource, action='delete_host_template', @@ -270,11 +309,11 @@ class API(wsgi.Router): mapper.connect("/host_template/lists", controller=host_template_resource, action='get_host_template_lists', - conditions={'method': ['GET']}) + conditions={'method': ['GET']}) mapper.connect("/host_template/{template_id}", controller=host_template_resource, action='get_host_template_detail', - conditions={'method': ['GET']}) + conditions={'method': ['GET']}) mapper.connect("/host_to_template", controller=host_template_resource, action='host_to_template', @@ -283,7 +322,7 @@ class API(wsgi.Router): controller=host_template_resource, action='template_to_host', conditions={'method': ['PUT']}) - + components_resource = components.create_resource() mapper.connect("/components", controller=components_resource, @@ 
-296,16 +335,16 @@ class API(wsgi.Router): mapper.connect("/components/detail", controller=components_resource, action='detail', - conditions={'method': ['GET']}) + conditions={'method': ['GET']}) mapper.connect("/components/{id}", controller=components_resource, action='get_component', - conditions={'method': ['GET']}) + conditions={'method': ['GET']}) mapper.connect("/components/{id}", controller=components_resource, action='update_component', - conditions={'method': ['PUT']}) - + conditions={'method': ['PUT']}) + services_resource = services.create_resource() mapper.connect("/services", controller=services_resource, @@ -318,15 +357,15 @@ class API(wsgi.Router): mapper.connect("/services/detail", controller=services_resource, action='detail', - conditions={'method': ['GET']}) + conditions={'method': ['GET']}) mapper.connect("/services/{id}", controller=services_resource, action='get_service', - conditions={'method': ['GET']}) + conditions={'method': ['GET']}) mapper.connect("/services/{id}", controller=services_resource, action='update_service', - conditions={'method': ['PUT']}) + conditions={'method': ['PUT']}) roles_resource = roles.create_resource() mapper.connect("/roles", @@ -340,15 +379,15 @@ class API(wsgi.Router): mapper.connect("/roles/detail", controller=roles_resource, action='detail', - conditions={'method': ['GET']}) + conditions={'method': ['GET']}) mapper.connect("/roles/{id}", controller=roles_resource, action='get_role', - conditions={'method': ['GET']}) + conditions={'method': ['GET']}) mapper.connect("/roles/{id}", controller=roles_resource, action='update_role', - conditions={'method': ['PUT']}) + conditions={'method': ['PUT']}) members_resource = members.create_resource() mapper.connect("/clusters/{cluster_id}/nodes/{host_id}", @@ -359,102 +398,102 @@ class API(wsgi.Router): controller=members_resource, action="delete_cluster_host", conditions={'method': ['DELETE']}) - # mapper.connect("/clusters/{cluster_id}/nodes/{host_id}", - # controller=members_resource, - # action="get_cluster_hosts", - # conditions={'method': ['GET']}) - # mapper.connect("/clusters/{cluster_id}/nodes", - # controller=members_resource, - # action="get_cluster_hosts", - # conditions={'method': ['GET']}) - # mapper.connect("/multi_clusters/nodes/{host_id}", - # controller=members_resource, - # action="get_host_clusters", - # conditions={'method': ['GET']}) +# mapper.connect("/clusters/{cluster_id}/nodes/{host_id}", +# controller=members_resource, +# action="get_cluster_hosts", +# conditions={'method': ['GET']}) +# mapper.connect("/clusters/{cluster_id}/nodes", +# controller=members_resource, +# action="get_cluster_hosts", +# conditions={'method': ['GET']}) +# mapper.connect("/multi_clusters/nodes/{host_id}", +# controller=members_resource, +# action="get_host_clusters", +# conditions={'method': ['GET']}) config_files_resource = config_files.create_resource() mapper.connect("/config_files", - controller=config_files_resource, - action="add_config_file", - conditions={'method': ['POST']}) - + controller=config_files_resource, + action="add_config_file", + conditions={'method': ['POST']}) + mapper.connect("/config_files/{id}", - controller=config_files_resource, - action="delete_config_file", - conditions={'method': ['DELETE']}) - + controller=config_files_resource, + action="delete_config_file", + conditions={'method': ['DELETE']}) + mapper.connect("/config_files/{id}", - controller=config_files_resource, - action="update_config_file", - conditions={'method': ['PUT']}) - + 
controller=config_files_resource, + action="update_config_file", + conditions={'method': ['PUT']}) + mapper.connect("/config_files/detail", - controller=config_files_resource, - action="detail", - conditions={'method': ['GET']}) - + controller=config_files_resource, + action="detail", + conditions={'method': ['GET']}) + mapper.connect("/config_files/{id}", - controller=config_files_resource, - action="get_config_file", - conditions=dict(method=["GET"])) + controller=config_files_resource, + action="get_config_file", + conditions=dict(method=["GET"])) config_sets_resource = config_sets.create_resource() mapper.connect("/config_sets", - controller=config_sets_resource, - action="add_config_set", - conditions={'method': ['POST']}) - + controller=config_sets_resource, + action="add_config_set", + conditions={'method': ['POST']}) + mapper.connect("/config_sets/{id}", - controller=config_sets_resource, - action="delete_config_set", - conditions={'method': ['DELETE']}) - + controller=config_sets_resource, + action="delete_config_set", + conditions={'method': ['DELETE']}) + mapper.connect("/config_sets/{id}", - controller=config_sets_resource, - action="update_config_set", - conditions={'method': ['PUT']}) - + controller=config_sets_resource, + action="update_config_set", + conditions={'method': ['PUT']}) + mapper.connect("/config_sets/detail", - controller=config_sets_resource, - action="detail", - conditions={'method': ['GET']}) - + controller=config_sets_resource, + action="detail", + conditions={'method': ['GET']}) + mapper.connect("/config_sets/{id}", - controller=config_sets_resource, - action="get_config_set", - conditions=dict(method=["GET"])) + controller=config_sets_resource, + action="get_config_set", + conditions=dict(method=["GET"])) mapper.connect("/cluster_config_set_update", - controller=config_sets_resource, - action="cluster_config_set_update", - conditions={'method': ['POST']}) - + controller=config_sets_resource, + action="cluster_config_set_update", + conditions={'method': ['POST']}) + mapper.connect("/cluster_config_set_progress", - controller=config_sets_resource, - action="cluster_config_set_progress", - conditions={'method': ['POST']}) + controller=config_sets_resource, + action="cluster_config_set_progress", + conditions={'method': ['POST']}) configs_resource = configs.create_resource() mapper.connect("/configs", - controller=configs_resource, - action="add_config", - conditions={'method': ['POST']}) - + controller=configs_resource, + action="add_config", + conditions={'method': ['POST']}) + mapper.connect("/configs_delete", - controller=configs_resource, - action="delete_config", - conditions={'method': ['DELETE']}) - + controller=configs_resource, + action="delete_config", + conditions={'method': ['DELETE']}) + mapper.connect("/configs/detail", - controller=configs_resource, - action="detail", - conditions={'method': ['GET']}) - + controller=configs_resource, + action="detail", + conditions={'method': ['GET']}) + mapper.connect("/configs/{id}", - controller=configs_resource, - action="get_config", - conditions=dict(method=["GET"])) + controller=configs_resource, + action="get_config", + conditions=dict(method=["GET"])) networks_resource = networks.create_resource() @@ -474,16 +513,16 @@ class API(wsgi.Router): controller=networks_resource, action='detail', conditions={'method': ['GET']}) - + mapper.connect("/networks/{id}", controller=networks_resource, action='get_network', conditions={'method': ['GET']}) mapper.connect("/networks", - controller=networks_resource, - 
action='get_all_network', - conditions={'method': ['GET']}) + controller=networks_resource, + action='get_all_network', + conditions={'method': ['GET']}) install_resource = install.create_resource() @@ -491,12 +530,12 @@ class API(wsgi.Router): controller=install_resource, action='install_cluster', conditions={'method': ['POST']}) - + mapper.connect("/export_db", controller=install_resource, action='export_db', conditions={'method': ['POST']}) - + mapper.connect("/uninstall/{cluster_id}", controller=install_resource, action='uninstall_cluster', @@ -510,23 +549,23 @@ class API(wsgi.Router): controller=install_resource, action='update_cluster', conditions={'method': ['POST']}) - + mapper.connect("/update/{cluster_id}", controller=install_resource, action='update_progress', conditions={'method': ['GET']}) - + mapper.connect("/disk_array/{cluster_id}", controller=install_resource, action='update_disk_array', conditions={'method': ['POST']}) - - #mapper.connect("/update/{cluster_id}/versions/{versions_id}", + + # mapper.connect("/update/{cluster_id}/versions/{versions_id}", # controller=update_resource, # action='update_cluster_version', # conditions={'method': ['POST']}) - - array_resource = disk_array.create_resource() + + array_resource = disk_array.create_resource() mapper.connect("/service_disk", controller=array_resource, action='service_disk_add', @@ -547,7 +586,7 @@ class API(wsgi.Router): controller=array_resource, action='service_disk_detail', conditions={'method': ['GET']}) - + mapper.connect("/cinder_volume", controller=array_resource, action='cinder_volume_add', @@ -568,7 +607,23 @@ class API(wsgi.Router): controller=array_resource, action='cinder_volume_detail', conditions={'method': ['GET']}) - + + backup_restore_resource = backup_restore.create_resource() + + mapper.connect("/backup", + controller=backup_restore_resource, + action='backup', + conditions={'method': ['POST']}) + mapper.connect("/restore", + controller=backup_restore_resource, + action='restore', + conditions={'method': ['POST']}) + mapper.connect("/backup_file_version", + controller=backup_restore_resource, + action='get_backup_file_version', + conditions={'method': ['POST']}) + mapper.connect("/version", + controller=backup_restore_resource, + action='version', + conditions={'method': ['POST']}) super(API, self).__init__(mapper) - - diff --git a/code/daisy/daisy/api/v1/services.py b/code/daisy/daisy/api/v1/services.py index b9f55d61..9a0ed3d7 100755 --- a/code/daisy/daisy/api/v1/services.py +++ b/code/daisy/daisy/api/v1/services.py @@ -52,12 +52,13 @@ CONF.import_opt('container_formats', 'daisy.common.config', group='image_format') CONF.import_opt('image_property_quota', 'daisy.common.config') + class Controller(controller.BaseController): """ WSGI controller for services resource in Daisy v1 API - The services resource API is a RESTful web service for service data. The API - is as follows:: + The services resource API is a RESTful web service for service data. 
+ The API is as follows:: GET /services -- Returns a set of brief metadata about services GET /services/detail -- Returns a set of detailed metadata about @@ -124,7 +125,8 @@ class Controller(controller.BaseController): def _raise_404_if_component_deleted(self, req, component_id): component = self.get_component_meta_or_404(req, component_id) if component['deleted']: - msg = _("Component with identifier %s has been deleted.") % component_id + msg = _("Component with identifier %s has been deleted.") % \ + component_id raise HTTPNotFound(msg) @utils.mutating @@ -141,7 +143,7 @@ class Controller(controller.BaseController): service_name = service_meta["name"] service_description = service_meta["description"] - if service_meta.has_key('component_id'): + if 'component_id' in service_meta: orig_component_id = str(service_meta['component_id']) self._raise_404_if_component_deleted(req, orig_component_id) @@ -163,7 +165,7 @@ class Controller(controller.BaseController): """ self._enforce(req, 'delete_service') - #service = self.get_service_meta_or_404(req, id) + # service = self.get_service_meta_or_404(req, id) print "delete_service:%s" % id try: registry.delete_service_metadata(req.context, id) @@ -182,14 +184,15 @@ class Controller(controller.BaseController): request=req, content_type="text/plain") except exception.InUseByStore as e: - msg = (_("service %(id)s could not be deleted because it is in use: " + msg = (_("service %(id)s could not be deleted " + "because it is in use: " "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)}) LOG.warn(msg) raise HTTPConflict(explanation=msg, request=req, content_type="text/plain") else: - #self.notifier.info('service.delete', service) + # self.notifier.info('service.delete', service) return Response(body='', status=200) @utils.mutating @@ -287,6 +290,7 @@ class Controller(controller.BaseController): return {'service_meta': service_meta} + class ServiceDeserializer(wsgi.JSONRequestDeserializer): """Handles deserialization of specific controller method requests.""" @@ -301,6 +305,7 @@ class ServiceDeserializer(wsgi.JSONRequestDeserializer): def update_service(self, request): return self._deserialize(request) + class ServiceSerializer(wsgi.JSONResponseSerializer): """Handles serialization of specific controller method responses.""" @@ -320,6 +325,7 @@ class ServiceSerializer(wsgi.JSONResponseSerializer): response.headers['Content-Type'] = 'application/json' response.body = self.to_json(dict(service=service_meta)) return response + def get_service(self, response, result): service_meta = result['service_meta'] response.status = 201 @@ -327,6 +333,7 @@ class ServiceSerializer(wsgi.JSONResponseSerializer): response.body = self.to_json(dict(service=service_meta)) return response + def create_resource(): """Services resource factory method""" deserializer = ServiceDeserializer() diff --git a/code/daisy/daisy/api/v1/template.py b/code/daisy/daisy/api/v1/template.py index ba491ab7..86e674f9 100755 --- a/code/daisy/daisy/api/v1/template.py +++ b/code/daisy/daisy/api/v1/template.py @@ -42,10 +42,6 @@ from daisy.registry.api.v1 import template import daisy.api.backends.tecs.common as tecs_cmn import daisy.api.backends.common as daisy_cmn -try: - import simplejson as json -except ImportError: - import json daisy_tecs_path = tecs_cmn.daisy_tecs_path @@ -64,12 +60,13 @@ CONF.import_opt('container_formats', 'daisy.common.config', group='image_format') CONF.import_opt('image_property_quota', 'daisy.common.config') + class Controller(controller.BaseController): """ WSGI 
controller for Templates resource in Daisy v1 API - The Templates resource API is a RESTful web Template for Template data. The API - is as follows:: + The Templates resource API is a RESTful web Template for Template data. + The API is as follows:: GET /Templates -- Returns a set of brief metadata about Templates GET /Templates/detail -- Returns a set of detailed metadata about @@ -136,8 +133,9 @@ class Controller(controller.BaseController): def _raise_404_if_cluster_deleted(self, req, cluster_id): cluster = self.get_cluster_meta_or_404(req, cluster_id) if cluster['deleted']: - msg = _("Cluster with identifier %s has been deleted.") % cluster_id - raise webob.exc.HTTPNotFound(msg) + msg = _("Cluster with identifier %s has been deleted.") % \ + cluster_id + raise HTTPNotFound(msg) @utils.mutating def add_template(self, req, template): @@ -150,8 +148,7 @@ class Controller(controller.BaseController): :raises HTTPBadRequest if x-Template-name is missing """ self._enforce(req, 'add_template') - template_name = template["name"] - + template = registry.add_template_metadata(req.context, template) return {'template': template} @@ -169,8 +166,8 @@ class Controller(controller.BaseController): self._enforce(req, 'update_template') try: template = registry.update_template_metadata(req.context, - template_id, - template) + template_id, + template) except exception.Invalid as e: msg = (_("Failed to update template metadata. Got error: %s") % @@ -202,6 +199,7 @@ class Controller(controller.BaseController): self.notifier.info('template.update', template) return {'template': template} + @utils.mutating def delete_template(self, req, template_id): """ @@ -230,23 +228,25 @@ class Controller(controller.BaseController): request=req, content_type="text/plain") except exception.InUseByStore as e: - msg = (_("template %(id)s could not be deleted because it is in use: " - "%(exc)s") % {"id": template_id, "exc": utils.exception_to_str(e)}) + msg = (_("template %(id)s could not be deleted " + "because it is in use: " + "%(exc)s") % {"id": template_id, + "exc": utils.exception_to_str(e)}) LOG.error(msg) raise HTTPConflict(explanation=msg, request=req, content_type="text/plain") else: return Response(body='', status=200) - - def _del_general_params(self,param): + + def _del_general_params(self, param): del param['created_at'] del param['updated_at'] del param['deleted'] del param['deleted_at'] del param['id'] - - def _del_cluster_params(self,cluster): + + def _del_cluster_params(self, cluster): del cluster['networks'] del cluster['vlan_start'] del cluster['vlan_end'] @@ -259,7 +259,27 @@ class Controller(controller.BaseController): del cluster['segmentation_type'] del cluster['base_mac'] del cluster['name'] - + + def _get_cinder_volumes(self, req, role): + cinder_volume_params = {'filters': {'role_id': role['id']}} + cinder_volumes = registry.list_cinder_volume_metadata( + req.context, **cinder_volume_params) + for cinder_volume in cinder_volumes: + if cinder_volume.get('role_id', None): + cinder_volume['role_id'] = role['name'] + self._del_general_params(cinder_volume) + return cinder_volumes + + def _get_services_disk(self, req, role): + params = {'filters': {'role_id': role['id']}} + services_disk = registry.list_service_disk_metadata( + req.context, **params) + for service_disk in services_disk: + if service_disk.get('role_id', None): + service_disk['role_id'] = role['name'] + self._del_general_params(service_disk) + return services_disk + @utils.mutating def export_db_to_json(self, req, template): """ @@ -267,40 
+287,45 @@ class Controller(controller.BaseController): :param req: The WSGI/Webob Request object :raises HTTPBadRequest if x-Template-cluster is missing """ - cluster_name = template.get('cluster_name',None) - type = template.get('type',None) - description = template.get('description',None) - template_name = template.get('template_name',None) + cluster_name = template.get('cluster_name', None) + type = template.get('type', None) + description = template.get('description', None) + template_name = template.get('template_name', None) self._enforce(req, 'export_db_to_json') cinder_volume_list = [] + service_disk_list = [] template_content = {} template_json = {} template_id = "" if not type or type == "tecs": try: - params = {'filters': {'name':cluster_name}} + params = {'filters': {'name': cluster_name}} clusters = registry.get_clusters_detail(req.context, **params) if clusters: cluster_id = clusters[0]['id'] else: - msg = "the cluster %s is not exist"%cluster_name + msg = "the cluster %s is not exist" % cluster_name LOG.error(msg) - raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") - - params = {'filters': {'cluster_id':cluster_id}} - cluster = registry.get_cluster_metadata(req.context, cluster_id) + raise HTTPForbidden( + explanation=msg, + request=req, + content_type="text/plain") + + params = {'filters': {'cluster_id': cluster_id}} + cluster = registry.get_cluster_metadata( + req.context, cluster_id) roles = registry.get_roles_detail(req.context, **params) - networks = registry.get_networks_detail(req.context, cluster_id,**params) + networks = registry.get_networks_detail( + req.context, cluster_id, **params) for role in roles: - cinder_volume_params = {'filters': {'role_id':role['id']}} - cinder_volumes = registry.list_cinder_volume_metadata(req.context, **cinder_volume_params) - for cinder_volume in cinder_volumes: - if cinder_volume.get('role_id',None): - cinder_volume['role_id'] = role['name'] - self._del_general_params(cinder_volume) - cinder_volume_list.append(cinder_volume) - if role.get('config_set_id',None): - config_set = registry.get_config_set_metadata(req.context, role['config_set_id']) + cinder_volumes = self._get_cinder_volumes(req, role) + cinder_volume_list += cinder_volumes + services_disk = self._get_services_disk(req, role) + service_disk_list += services_disk + + if role.get('config_set_id', None): + config_set = registry.get_config_set_metadata( + req.context, role['config_set_id']) role['config_set_id'] = config_set['name'] del role['cluster_id'] del role['status'] @@ -309,16 +334,17 @@ class Controller(controller.BaseController): del role['config_set_update_progress'] self._del_general_params(role) for network in networks: - network_detail = registry.get_network_metadata(req.context, network['id']) - if network_detail.get('ip_ranges',None): + network_detail = registry.get_network_metadata( + req.context, network['id']) + if network_detail.get('ip_ranges', None): network['ip_ranges'] = network_detail['ip_ranges'] del network['cluster_id'] self._del_general_params(network) - if cluster.get('routers',None): + if cluster.get('routers', None): for router in cluster['routers']: del router['cluster_id'] self._del_general_params(router) - if cluster.get('logic_networks',None): + if cluster.get('logic_networks', None): for logic_network in cluster['logic_networks']: for subnet in logic_network['subnets']: del subnet['logic_network_id'] @@ -326,7 +352,7 @@ class Controller(controller.BaseController): self._del_general_params(subnet) del 
logic_network['cluster_id'] self._del_general_params(logic_network) - if cluster.get('nodes',None): + if cluster.get('nodes', None): del cluster['nodes'] self._del_general_params(cluster) self._del_cluster_params(cluster) @@ -334,140 +360,226 @@ class Controller(controller.BaseController): template_content['roles'] = roles template_content['networks'] = networks template_content['cinder_volumes'] = cinder_volume_list + template_content['services_disk'] = service_disk_list template_json['content'] = json.dumps(template_content) template_json['type'] = 'tecs' template_json['name'] = template_name template_json['description'] = description - - template_host_params = {'cluster_name':cluster_name} - template_hosts = registry.host_template_lists_metadata(req.context, **template_host_params) + + template_host_params = {'cluster_name': cluster_name} + template_hosts = registry.host_template_lists_metadata( + req.context, **template_host_params) if template_hosts: template_json['hosts'] = template_hosts[0]['hosts'] else: template_json['hosts'] = "[]" - template_params = {'filters': {'name':template_name}} - template_list = registry.template_lists_metadata(req.context, **template_params) + template_params = {'filters': {'name': template_name}} + template_list = registry.template_lists_metadata( + req.context, **template_params) if template_list: - update_template = registry.update_template_metadata(req.context, template_list[0]['id'], template_json) + registry.update_template_metadata( + req.context, template_list[0]['id'], template_json) template_id = template_list[0]['id'] else: - add_template = registry.add_template_metadata(req.context, template_json) + add_template = registry.add_template_metadata( + req.context, template_json) template_id = add_template['id'] - + if template_id: - template_detail = registry.template_detail_metadata(req.context, template_id) + template_detail = registry.template_detail_metadata( + req.context, template_id) self._del_general_params(template_detail) - template_detail['content'] = json.loads(template_detail['content']) + template_detail['content'] = json.loads( + template_detail['content']) if template_detail['hosts']: - template_detail['hosts'] = json.loads(template_detail['hosts']) - - tecs_json = daisy_tecs_path + "%s.json"%template_name + template_detail['hosts'] = json.loads( + template_detail['hosts']) + + tecs_json = daisy_tecs_path + "%s.json" % template_name cmd = 'rm -rf %s' % (tecs_json,) daisy_cmn.subprocess_call(cmd) with open(tecs_json, "w+") as fp: - fp.write(json.dumps(template_detail)) + json.dump(template_detail, fp, indent=2) + except exception.Invalid as e: raise HTTPBadRequest(explanation=e.msg, request=req) - - return {"template":template_detail} - + + return {"template": template_detail} + @utils.mutating def import_json_to_template(self, req, template): template_id = "" - template = json.loads(template.get('template',None)) + template = json.loads(template.get('template', None)) template_cluster = copy.deepcopy(template) - template_name = template_cluster.get('name',None) - template_params = {'filters': {'name':template_name}} + template_name = template_cluster.get('name', None) + template_params = {'filters': {'name': template_name}} try: - if template_cluster.get('content',None): - template_cluster['content'] = json.dumps(template_cluster['content']) - if template_cluster.get('hosts',None): - template_cluster['hosts'] = json.dumps(template_cluster['hosts']) + if template_cluster.get('content', None): + template_cluster['content'] = 
json.dumps( + template_cluster['content']) + if template_cluster.get('hosts', None): + template_cluster['hosts'] = json.dumps( + template_cluster['hosts']) else: - template_cluster['hosts'] = "[]" - - template_list = registry.template_lists_metadata(req.context, **template_params) + template_cluster['hosts'] = "[]" + + template_list = registry.template_lists_metadata( + req.context, **template_params) if template_list: - update_template_cluster = registry.update_template_metadata(req.context, template_list[0]['id'], template_cluster) + registry.update_template_metadata( + req.context, template_list[0]['id'], template_cluster) template_id = template_list[0]['id'] else: - add_template_cluster = registry.add_template_metadata(req.context, template_cluster) + add_template_cluster = registry.add_template_metadata( + req.context, template_cluster) template_id = add_template_cluster['id'] - + if template_id: - template_detail = registry.template_detail_metadata(req.context, template_id) + template_detail = registry.template_detail_metadata( + req.context, template_id) del template_detail['deleted'] del template_detail['deleted_at'] - + except exception.Invalid as e: raise HTTPBadRequest(explanation=e.msg, request=req) - - return {"template":template_detail} - + + return {"template": template_detail} + + def _import_cinder_volumes_to_db(self, req, + template_cinder_volumes, roles): + for template_cinder_volume in template_cinder_volumes: + has_template_role = False + for role in roles: + if template_cinder_volume['role_id'] == role['name']: + has_template_role = True + template_cinder_volume['role_id'] = role['id'] + break + if has_template_role: + registry.add_cinder_volume_metadata(req.context, + template_cinder_volume) + else: + msg = "can't find role %s in new cluster when\ + import cinder_volumes from template"\ + % template_cinder_volume['role_id'] + raise HTTPBadRequest(explanation=msg, request=req) + + def _import_services_disk_to_db(self, req, + template_services_disk, roles): + for template_service_disk in template_services_disk: + has_template_role = False + for role in roles: + if template_service_disk['role_id'] == role['name']: + has_template_role = True + template_service_disk['role_id'] = role['id'] + break + if has_template_role: + registry.add_service_disk_metadata(req.context, + template_service_disk) + else: + msg = "can't find role %s in new cluster when\ + import service_disks from template"\ + % template_service_disk['role_id'] + raise HTTPBadRequest(explanation=msg, request=req) + @utils.mutating def import_template_to_db(self, req, template): cluster_id = "" template_cluster = {} cluster_meta = {} template_meta = copy.deepcopy(template) - template_name = template_meta.get('name',None) - cluster_name = template_meta.get('cluster',None) - template_params = {'filters': {'name':template_name}} - template_list = registry.template_lists_metadata(req.context, **template_params) + template_name = template_meta.get('name', None) + cluster_name = template_meta.get('cluster', None) + template_params = {'filters': {'name': template_name}} + template_list = registry.template_lists_metadata( + req.context, **template_params) if template_list: template_cluster = template_list[0] else: msg = "the template %s is not exist" % template_name LOG.error(msg) - raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") - + raise HTTPForbidden( + explanation=msg, + request=req, + content_type="text/plain") + try: template_content = json.loads(template_cluster['content']) 
template_content_cluster = template_content['cluster'] template_content_cluster['name'] = cluster_name - template_content_cluster['networking_parameters'] = str(template_content_cluster['networking_parameters']) - template_content_cluster['logic_networks'] = str(template_content_cluster['logic_networks']) - template_content_cluster['logic_networks'] = template_content_cluster['logic_networks'].replace("\'true\'","True") - template_content_cluster['routers'] = str(template_content_cluster['routers']) - + template_content_cluster['networking_parameters'] = str( + template_content_cluster['networking_parameters']) + template_content_cluster['logic_networks'] = str( + template_content_cluster['logic_networks']) + template_content_cluster['logic_networks'] = \ + template_content_cluster[ + 'logic_networks'].replace("\'true\'", "True") + template_content_cluster['routers'] = str( + template_content_cluster['routers']) + if template_cluster['hosts']: template_hosts = json.loads(template_cluster['hosts']) - template_host_params = {'cluster_name':cluster_name} - template_host_list = registry.host_template_lists_metadata(req.context, **template_host_params) + template_host_params = {'cluster_name': cluster_name} + template_host_list = registry.host_template_lists_metadata( + req.context, **template_host_params) if template_host_list: - update_template_meta = {"cluster_name": cluster_name, "hosts":json.dumps(template_hosts)} - registry.update_host_template_metadata(req.context, template_host_list[0]['id'], update_template_meta) + update_template_meta = { + "cluster_name": cluster_name, + "hosts": json.dumps(template_hosts)} + registry.update_host_template_metadata( + req.context, template_host_list[0]['id'], + update_template_meta) else: - template_meta = {"cluster_name": cluster_name, "hosts":json.dumps(template_hosts)} - registry.add_host_template_metadata(req.context, template_meta) - - cluster_params = {'filters': {'name':cluster_name}} - clusters = registry.get_clusters_detail(req.context, **cluster_params) + template_meta = { + "cluster_name": cluster_name, + "hosts": json.dumps(template_hosts)} + registry.add_host_template_metadata( + req.context, template_meta) + + cluster_params = {'filters': {'name': cluster_name}} + clusters = registry.get_clusters_detail( + req.context, **cluster_params) if clusters: msg = "the cluster %s is exist" % clusters[0]['name'] LOG.error(msg) - raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") + raise HTTPForbidden( + explanation=msg, + request=req, + content_type="text/plain") else: - cluster_meta = registry.add_cluster_metadata(req.context, template_content['cluster']) + if template_content_cluster.get('auto_scale', None) == 1: + params = {'filters': ''} + clusters_list = registry.get_clusters_detail( + req.context, **params) + for cluster in clusters_list: + if cluster.get('auto_scale', None) == 1: + template_content_cluster['auto_scale'] = 0 + break + cluster_meta = registry.add_cluster_metadata( + req.context, template_content['cluster']) cluster_id = cluster_meta['id'] - - params = {'filters':{}} - networks = registry.get_networks_detail(req.context, cluster_id,**params) + + params = {'filters': {}} + networks = registry.get_networks_detail( + req.context, cluster_id, **params) template_content_networks = template_content['networks'] for template_content_network in template_content_networks: - template_content_network['ip_ranges'] = str(template_content_network['ip_ranges']) + template_content_network['ip_ranges'] = str( + 
template_content_network['ip_ranges']) network_exist = 'false' for network in networks: if template_content_network['name'] == network['name']: - update_network_meta = registry.update_network_metadata(req.context, network['id'], template_content_network) + registry.update_network_metadata( + req.context, network['id'], + template_content_network) network_exist = 'true' if network_exist == 'false': template_content_network['cluster_id'] = cluster_id - add_network_meta = registry.add_network_metadata(req.context, template_content_network) - - params = {'filters': {'cluster_id':cluster_id}} + registry.add_network_metadata( + req.context, template_content_network) + + params = {'filters': {'cluster_id': cluster_id}} roles = registry.get_roles_detail(req.context, **params) template_content_roles = template_content['roles'] for template_content_role in template_content_roles: @@ -475,34 +587,25 @@ class Controller(controller.BaseController): del template_content_role['config_set_id'] for role in roles: if template_content_role['name'] == role['name']: - update_role_meta = registry.update_role_metadata(req.context, role['id'], template_content_role) + registry.update_role_metadata( + req.context, role['id'], template_content_role) role_exist = 'true' - + if role_exist == 'false': template_content_role['cluster_id'] = cluster_id - add_role_meta = registry.add_role_metadata(req.context, template_content_role) - - cinder_volumes = registry.list_cinder_volume_metadata(req.context, **params) - template_content_cinder_volumes = template_content['cinder_volumes'] - for template_content_cinder_volume in template_content_cinder_volumes: - cinder_volume_exist = 'false' - roles = registry.get_roles_detail(req.context, **params) - for role in roles: - if template_content_cinder_volume['role_id'] == role['name']: - template_content_cinder_volume['role_id'] = role['id'] - - for cinder_volume in cinder_volumes: - if template_content_cinder_volume['role_id'] == cinder_volume['role_id']: - update_cinder_volume_meta = registry.update_cinder_volume_metadata(req.context, cinder_volume['id'], template_content_cinder_volume) - cinder_volume_exist = 'true' - - if cinder_volume_exist == 'false': - add_cinder_volumes = registry.add_cinder_volume_metadata(req.context, template_content_cinder_volume) - + registry.add_role_metadata( + req.context, template_content_role) + + self._import_cinder_volumes_to_db( + req, template_content['cinder_volumes'], roles) + self._import_services_disk_to_db(req, + template_content['services_disk'], + roles) + except exception.Invalid as e: raise HTTPBadRequest(explanation=e.msg, request=req) - return {"template":cluster_meta} - + return {"template": cluster_meta} + @utils.mutating def get_template_detail(self, req, template_id): """ @@ -513,7 +616,8 @@ class Controller(controller.BaseController): """ self._enforce(req, 'get_template_detail') try: - template = registry.template_detail_metadata(req.context, template_id) + template = registry.template_detail_metadata( + req.context, template_id) return {'template': template} except exception.NotFound as e: msg = (_("Failed to find template: %s") % @@ -531,97 +635,104 @@ class Controller(controller.BaseController): content_type="text/plain") except exception.InUseByStore as e: msg = (_("template %(id)s could not be get because it is in use: " - "%(exc)s") % {"id": template_id, "exc": utils.exception_to_str(e)}) + "%(exc)s") % {"id": template_id, + "exc": utils.exception_to_str(e)}) LOG.error(msg) raise HTTPConflict(explanation=msg, 
request=req, content_type="text/plain") else: return Response(body='', status=200) - + @utils.mutating def get_template_lists(self, req): self._enforce(req, 'get_template_lists') params = self._get_query_params(req) try: - template_lists = registry.template_lists_metadata(req.context, **params) + template_lists = registry.template_lists_metadata( + req.context, **params) except exception.Invalid as e: raise HTTPBadRequest(explanation=e.msg, request=req) return dict(template=template_lists) - + + class TemplateDeserializer(wsgi.JSONRequestDeserializer): """Handles deserialization of specific controller method requests.""" - + def _deserialize(self, request): result = {} result["template"] = utils.get_template_meta(request) return result - + def add_template(self, request): return self._deserialize(request) - + def update_template(self, request): return self._deserialize(request) - + def export_db_to_json(self, request): return self._deserialize(request) - + def import_json_to_template(self, request): return self._deserialize(request) - + def import_template_to_db(self, request): return self._deserialize(request) + class TemplateSerializer(wsgi.JSONResponseSerializer): """Handles serialization of specific controller method responses.""" - + def __init__(self): self.notifier = notifier.Notifier() - + def add_template(self, response, result): template = result['template'] response.status = 201 response.headers['Content-Type'] = 'application/json' response.body = self.to_json(dict(template=template)) return response - + def delete_template(self, response, result): template = result['template'] response.status = 201 response.headers['Content-Type'] = 'application/json' response.body = self.to_json(dict(template=template)) return response + def get_template_detail(self, response, result): template = result['template'] response.status = 201 response.headers['Content-Type'] = 'application/json' response.body = self.to_json(dict(template=template)) return response + def update_template(self, response, result): template = result['template'] response.status = 201 response.headers['Content-Type'] = 'application/json' response.body = self.to_json(dict(template=template)) - return response - + return response + def export_db_to_json(self, response, result): response.status = 201 response.headers['Content-Type'] = 'application/json' response.body = self.to_json(result) return response - + def import_json_to_template(self, response, result): response.status = 201 response.headers['Content-Type'] = 'application/json' response.body = self.to_json(result) return response - + def import_template_to_db(self, response, result): response.status = 201 response.headers['Content-Type'] = 'application/json' response.body = self.to_json(result) return response - + + def create_resource(): """Templates resource factory method""" deserializer = TemplateDeserializer() diff --git a/code/daisy/daisy/api/v2/image_actions.py b/code/daisy/daisy/api/v2/image_actions.py index a982a449..9c6e1fba 100755 --- a/code/daisy/daisy/api/v2/image_actions.py +++ b/code/daisy/daisy/api/v2/image_actions.py @@ -32,6 +32,7 @@ _LI = i18n._LI class ImageActionsController(object): + def __init__(self, db_api=None, policy_enforcer=None, notifier=None, store_api=None): self.db_api = db_api or daisy.db.get_api() @@ -39,7 +40,7 @@ class ImageActionsController(object): self.notifier = notifier or daisy.notifier.Notifier() self.store_api = store_api or glance_store self.gateway = daisy.gateway.Gateway(self.db_api, self.store_api, - self.notifier, 
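For context on the create_resource() factory that closes the templates module above: in glance-derived code it normally just wires the controller to the two translators. A hedged sketch, assuming the usual wsgi.Resource(controller, deserializer, serializer) convention (the full body is not shown in this hunk):

def create_resource():
    """Templates resource factory method"""
    deserializer = TemplateDeserializer()
    serializer = TemplateSerializer()
    return wsgi.Resource(Controller(), deserializer, serializer)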
self.policy) + self.notifier, self.policy) @utils.mutating def deactivate(self, req, image_id): diff --git a/code/daisy/daisy/api/v2/image_data.py b/code/daisy/daisy/api/v2/image_data.py index c0e62020..4adb5885 100755 --- a/code/daisy/daisy/api/v2/image_data.py +++ b/code/daisy/daisy/api/v2/image_data.py @@ -33,6 +33,7 @@ _LE = i18n._LE class ImageDataController(object): + def __init__(self, db_api=None, store_api=None, policy_enforcer=None, notifier=None, gateway=None): @@ -42,7 +43,7 @@ class ImageDataController(object): policy = policy_enforcer or daisy.api.policy.Enforcer() notifier = notifier or daisy.notifier.Notifier() gateway = daisy.gateway.Gateway(db_api, store_api, - notifier, policy) + notifier, policy) self.gateway = gateway def _restore(self, image_repo, image): diff --git a/code/daisy/daisy/api/v2/image_members.py b/code/daisy/daisy/api/v2/image_members.py index 1d2d615c..1e15a1b8 100755 --- a/code/daisy/daisy/api/v2/image_members.py +++ b/code/daisy/daisy/api/v2/image_members.py @@ -38,6 +38,7 @@ _ = i18n._ class ImageMembersController(object): + def __init__(self, db_api=None, policy_enforcer=None, notifier=None, store_api=None): self.db_api = db_api or daisy.db.get_api() @@ -45,7 +46,7 @@ class ImageMembersController(object): self.notifier = notifier or daisy.notifier.Notifier() self.store_api = store_api or glance_store self.gateway = daisy.gateway.Gateway(self.db_api, self.store_api, - self.notifier, self.policy) + self.notifier, self.policy) @utils.mutating def create(self, req, image_id, member_id): @@ -250,6 +251,7 @@ class RequestDeserializer(wsgi.JSONRequestDeserializer): class ResponseSerializer(wsgi.JSONResponseSerializer): + def __init__(self, schema=None): super(ResponseSerializer, self).__init__() self.schema = schema or get_schema() diff --git a/code/daisy/daisy/api/v2/image_tags.py b/code/daisy/daisy/api/v2/image_tags.py index 745884ef..c549dd6a 100755 --- a/code/daisy/daisy/api/v2/image_tags.py +++ b/code/daisy/daisy/api/v2/image_tags.py @@ -31,6 +31,7 @@ _ = i18n._ class Controller(object): + def __init__(self, db_api=None, policy_enforcer=None, notifier=None, store_api=None): self.db_api = db_api or daisy.db.get_api() @@ -38,7 +39,7 @@ class Controller(object): self.notifier = notifier or daisy.notifier.Notifier() self.store_api = store_api or glance_store self.gateway = daisy.gateway.Gateway(self.db_api, self.store_api, - self.notifier, self.policy) + self.notifier, self.policy) @utils.mutating def update(self, req, image_id, tag_value): @@ -85,6 +86,7 @@ class Controller(object): class ResponseSerializer(wsgi.JSONResponseSerializer): + def update(self, response, result): response.status_int = 204 diff --git a/code/daisy/daisy/api/v2/images.py b/code/daisy/daisy/api/v2/images.py index 6151ec03..736f090e 100755 --- a/code/daisy/daisy/api/v2/images.py +++ b/code/daisy/daisy/api/v2/images.py @@ -46,6 +46,7 @@ CONF.import_opt('container_formats', 'daisy.common.config', class ImagesController(object): + def __init__(self, db_api=None, policy_enforcer=None, notifier=None, store_api=None): self.db_api = db_api or daisy.db.get_api() @@ -53,7 +54,7 @@ class ImagesController(object): self.notifier = notifier or daisy.notifier.Notifier() self.store_api = store_api or glance_store self.gateway = daisy.gateway.Gateway(self.db_api, self.store_api, - self.notifier, self.policy) + self.notifier, self.policy) @utils.mutating def create(self, req, image, extra_properties, tags): @@ -188,8 +189,8 @@ class ImagesController(object): self._do_add_locations(image, path[1], 
value) else: if ((hasattr(image, path_root) or - path_root in image.extra_properties) - and json_schema_version == 4): + path_root in image.extra_properties) and + json_schema_version == 4): msg = _("Property %s already present.") raise webob.exc.HTTPConflict(msg % path_root) if hasattr(image, path_root): @@ -681,6 +682,7 @@ class RequestDeserializer(wsgi.JSONRequestDeserializer): class ResponseSerializer(wsgi.JSONResponseSerializer): + def __init__(self, schema=None): super(ResponseSerializer, self).__init__() self.schema = schema or get_schema() diff --git a/code/daisy/daisy/api/v2/metadef_namespaces.py b/code/daisy/daisy/api/v2/metadef_namespaces.py index b95980e5..28f754ed 100755 --- a/code/daisy/daisy/api/v2/metadef_namespaces.py +++ b/code/daisy/daisy/api/v2/metadef_namespaces.py @@ -48,13 +48,14 @@ CONF = cfg.CONF class NamespaceController(object): + def __init__(self, db_api=None, policy_enforcer=None, notifier=None): self.db_api = db_api or daisy.db.get_api() self.policy = policy_enforcer or policy.Enforcer() self.notifier = notifier or daisy.notifier.Notifier() self.gateway = daisy.gateway.Gateway(db_api=self.db_api, - notifier=self.notifier, - policy_enforcer=self.policy) + notifier=self.notifier, + policy_enforcer=self.policy) self.ns_schema_link = '/v2/schemas/metadefs/namespace' self.obj_schema_link = '/v2/schemas/metadefs/object' self.tag_schema_link = '/v2/schemas/metadefs/tag' @@ -486,6 +487,7 @@ class RequestDeserializer(wsgi.JSONRequestDeserializer): class ResponseSerializer(wsgi.JSONResponseSerializer): + def __init__(self, schema=None): super(ResponseSerializer, self).__init__() self.schema = schema @@ -781,20 +783,20 @@ def get_collection_schema(): def get_namespace_href(namespace): - base_href = '/v2/metadefs/namespaces/%s' % namespace.namespace - return base_href + base_href = '/v2/metadefs/namespaces/%s' % namespace.namespace + return base_href def get_object_href(namespace_name, metadef_object): - base_href = ('/v2/metadefs/namespaces/%s/objects/%s' % - (namespace_name, metadef_object.name)) - return base_href + base_href = ('/v2/metadefs/namespaces/%s/objects/%s' % + (namespace_name, metadef_object.name)) + return base_href def get_tag_href(namespace_name, metadef_tag): - base_href = ('/v2/metadefs/namespaces/%s/tags/%s' % - (namespace_name, metadef_tag.name)) - return base_href + base_href = ('/v2/metadefs/namespaces/%s/tags/%s' % + (namespace_name, metadef_tag.name)) + return base_href def create_resource(): diff --git a/code/daisy/daisy/api/v2/metadef_objects.py b/code/daisy/daisy/api/v2/metadef_objects.py index aed3d8c1..4bde3545 100755 --- a/code/daisy/daisy/api/v2/metadef_objects.py +++ b/code/daisy/daisy/api/v2/metadef_objects.py @@ -42,13 +42,14 @@ CONF = cfg.CONF class MetadefObjectsController(object): + def __init__(self, db_api=None, policy_enforcer=None, notifier=None): self.db_api = db_api or daisy.db.get_api() self.policy = policy_enforcer or policy.Enforcer() self.notifier = notifier or daisy.notifier.Notifier() self.gateway = daisy.gateway.Gateway(db_api=self.db_api, - notifier=self.notifier, - policy_enforcer=self.policy) + notifier=self.notifier, + policy_enforcer=self.policy) self.obj_schema_link = '/v2/schemas/metadefs/object' def create(self, req, metadata_object, namespace): @@ -294,6 +295,7 @@ class RequestDeserializer(wsgi.JSONRequestDeserializer): class ResponseSerializer(wsgi.JSONResponseSerializer): + def __init__(self, schema=None): super(ResponseSerializer, self).__init__() self.schema = schema or get_schema() @@ -324,9 +326,9 @@ 
class ResponseSerializer(wsgi.JSONResponseSerializer): def get_object_href(namespace_name, metadef_object): - base_href = ('/v2/metadefs/namespaces/%s/objects/%s' % - (namespace_name, metadef_object.name)) - return base_href + base_href = ('/v2/metadefs/namespaces/%s/objects/%s' % + (namespace_name, metadef_object.name)) + return base_href def create_resource(): diff --git a/code/daisy/daisy/api/v2/metadef_properties.py b/code/daisy/daisy/api/v2/metadef_properties.py index 19883abf..96617f3e 100755 --- a/code/daisy/daisy/api/v2/metadef_properties.py +++ b/code/daisy/daisy/api/v2/metadef_properties.py @@ -40,13 +40,14 @@ _LI = i18n._LI class NamespacePropertiesController(object): + def __init__(self, db_api=None, policy_enforcer=None, notifier=None): self.db_api = db_api or daisy.db.get_api() self.policy = policy_enforcer or policy.Enforcer() self.notifier = notifier or daisy.notifier.Notifier() self.gateway = daisy.gateway.Gateway(db_api=self.db_api, - notifier=self.notifier, - policy_enforcer=self.policy) + notifier=self.notifier, + policy_enforcer=self.policy) def _to_dict(self, model_property_type): # Convert the model PropertyTypes dict to a JSON encoding @@ -213,6 +214,7 @@ class RequestDeserializer(wsgi.JSONRequestDeserializer): class ResponseSerializer(wsgi.JSONResponseSerializer): + def __init__(self, schema=None): super(ResponseSerializer, self).__init__() self.schema = schema @@ -288,7 +290,7 @@ def get_collection_schema(): # individual property schema inside property collections namespace_properties_schema.required.remove('name') return daisy.schema.DictCollectionSchema('properties', - namespace_properties_schema) + namespace_properties_schema) def create_resource(): diff --git a/code/daisy/daisy/api/v2/metadef_resource_types.py b/code/daisy/daisy/api/v2/metadef_resource_types.py index 983ea5ea..daed9077 100755 --- a/code/daisy/daisy/api/v2/metadef_resource_types.py +++ b/code/daisy/daisy/api/v2/metadef_resource_types.py @@ -40,13 +40,14 @@ _LI = i18n._LI class ResourceTypeController(object): + def __init__(self, db_api=None, policy_enforcer=None, notifier=None): self.db_api = db_api or daisy.db.get_api() self.policy = policy_enforcer or policy.Enforcer() self.notifier = notifier or daisy.notifier.Notifier() self.gateway = daisy.gateway.Gateway(db_api=self.db_api, - notifier=self.notifier, - policy_enforcer=self.policy) + notifier=self.notifier, + policy_enforcer=self.policy) def index(self, req): try: @@ -167,6 +168,7 @@ class RequestDeserializer(wsgi.JSONRequestDeserializer): class ResponseSerializer(wsgi.JSONResponseSerializer): + def __init__(self, schema=None): super(ResponseSerializer, self).__init__() self.schema = schema @@ -253,7 +255,7 @@ def get_schema(): def get_collection_schema(): resource_type_schema = get_schema() return daisy.schema.CollectionSchema('resource_type_associations', - resource_type_schema) + resource_type_schema) def create_resource(): diff --git a/code/daisy/daisy/api/v2/metadef_tags.py b/code/daisy/daisy/api/v2/metadef_tags.py index 49db0d9b..f642a2e6 100755 --- a/code/daisy/daisy/api/v2/metadef_tags.py +++ b/code/daisy/daisy/api/v2/metadef_tags.py @@ -46,8 +46,8 @@ class TagsController(object): self.policy = policy_enforcer or policy.Enforcer() self.notifier = notifier or daisy.notifier.Notifier() self.gateway = daisy.gateway.Gateway(db_api=self.db_api, - notifier=self.notifier, - policy_enforcer=self.policy) + notifier=self.notifier, + policy_enforcer=self.policy) self.tag_schema_link = '/v2/schemas/metadefs/tag' def create(self, req, namespace, 
tag_name): diff --git a/code/daisy/daisy/api/v2/tasks.py b/code/daisy/daisy/api/v2/tasks.py index 9b770062..ab78e1ef 100755 --- a/code/daisy/daisy/api/v2/tasks.py +++ b/code/daisy/daisy/api/v2/tasks.py @@ -53,7 +53,7 @@ class TasksController(object): self.notifier = notifier or daisy.notifier.Notifier() self.store_api = store_api or glance_store self.gateway = daisy.gateway.Gateway(self.db_api, self.store_api, - self.notifier, self.policy) + self.notifier, self.policy) def create(self, req, task): task_factory = self.gateway.get_task_factory(req.context) @@ -229,8 +229,8 @@ class ResponseSerializer(wsgi.JSONResponseSerializer): def __init__(self, task_schema=None, partial_task_schema=None): super(ResponseSerializer, self).__init__() self.task_schema = task_schema or get_task_schema() - self.partial_task_schema = (partial_task_schema - or _get_partial_task_schema()) + self.partial_task_schema = (partial_task_schema or + _get_partial_task_schema()) def _inject_location_header(self, response, task): location = self._get_task_location(task) diff --git a/code/daisy/daisy/cmd/api.py b/code/daisy/daisy/cmd/api.py index fd6a8746..4a8a2622 100755 --- a/code/daisy/daisy/cmd/api.py +++ b/code/daisy/daisy/cmd/api.py @@ -27,6 +27,17 @@ import sys import eventlet from daisy.common import utils +import glance_store +from oslo_config import cfg +from oslo_log import log as logging +import osprofiler.notifier +import osprofiler.web + +from daisy.common import config +from daisy.common import exception +from daisy.common import wsgi +from daisy import notifier +from daisy.openstack.common import systemd # Monkey patch socket, time, select, threads eventlet.patcher.monkey_patch(all=False, socket=True, time=True, @@ -40,17 +51,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): sys.path.insert(0, possible_topdir) -import glance_store -from oslo_config import cfg -from oslo_log import log as logging -import osprofiler.notifier -import osprofiler.web - -from daisy.common import config -from daisy.common import exception -from daisy.common import wsgi -from daisy import notifier -from daisy.openstack.common import systemd CONF = cfg.CONF CONF.import_group("profiler", "daisy.common.wsgi") diff --git a/code/daisy/daisy/cmd/cache_cleaner.py b/code/daisy/daisy/cmd/cache_cleaner.py index dce0d99e..42a6249f 100755 --- a/code/daisy/daisy/cmd/cache_cleaner.py +++ b/code/daisy/daisy/cmd/cache_cleaner.py @@ -31,8 +31,9 @@ period, we automatically sweep it up. import os import sys - from oslo_log import log as logging +from daisy.common import config +from daisy.image_cache import cleaner # If ../glance/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... @@ -42,8 +43,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): sys.path.insert(0, possible_topdir) -from daisy.common import config -from daisy.image_cache import cleaner CONF = config.CONF logging.register_options(CONF) diff --git a/code/daisy/daisy/cmd/cache_manage.py b/code/daisy/daisy/cmd/cache_manage.py index 8f3eb7ed..a7aa32e1 100755 --- a/code/daisy/daisy/cmd/cache_manage.py +++ b/code/daisy/daisy/cmd/cache_manage.py @@ -19,16 +19,16 @@ A simple cache management utility for daisy. 
""" from __future__ import print_function - import functools import optparse import os import sys import time - from oslo_utils import timeutils - from daisy.common import utils +from daisy.common import exception +import daisy.image_cache.client +from daisy.version import version_info as version # If ../glance/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... @@ -38,10 +38,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): sys.path.insert(0, possible_topdir) -from daisy.common import exception -import daisy.image_cache.client -from daisy.version import version_info as version - SUCCESS = 0 FAILURE = 1 diff --git a/code/daisy/daisy/cmd/cache_prefetcher.py b/code/daisy/daisy/cmd/cache_prefetcher.py index a8aad985..713c4593 100755 --- a/code/daisy/daisy/cmd/cache_prefetcher.py +++ b/code/daisy/daisy/cmd/cache_prefetcher.py @@ -24,6 +24,11 @@ images to be pretched. import os import sys +import glance_store +from oslo_log import log as logging + +from daisy.common import config +from daisy.image_cache import prefetcher # If ../glance/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... @@ -33,11 +38,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): sys.path.insert(0, possible_topdir) -import glance_store -from oslo_log import log as logging - -from daisy.common import config -from daisy.image_cache import prefetcher CONF = config.CONF logging.register_options(CONF) diff --git a/code/daisy/daisy/cmd/cache_pruner.py b/code/daisy/daisy/cmd/cache_pruner.py index b2457323..c8ac5d3c 100755 --- a/code/daisy/daisy/cmd/cache_pruner.py +++ b/code/daisy/daisy/cmd/cache_pruner.py @@ -25,6 +25,8 @@ import os import sys from oslo_log import log as logging +from daisy.common import config +from daisy.image_cache import pruner # If ../glance/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... @@ -34,8 +36,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): sys.path.insert(0, possible_topdir) -from daisy.common import config -from daisy.image_cache import pruner CONF = config.CONF logging.register_options(CONF) diff --git a/code/daisy/daisy/cmd/control.py b/code/daisy/daisy/cmd/control.py index e4c8bed6..324f52ae 100755 --- a/code/daisy/daisy/cmd/control.py +++ b/code/daisy/daisy/cmd/control.py @@ -30,6 +30,12 @@ import subprocess import sys import tempfile import time +from oslo_config import cfg +from oslo_utils import units +# NOTE(jokke): simplified transition to py3, behaves like py2 xrange +from six.moves import range +from daisy.common import config +from daisy import i18n # If ../glance/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... 
@@ -39,13 +45,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): sys.path.insert(0, possible_topdir) -from oslo_config import cfg -from oslo_utils import units -# NOTE(jokke): simplified transition to py3, behaves like py2 xrange -from six.moves import range - -from daisy.common import config -from daisy import i18n _ = i18n._ diff --git a/code/daisy/daisy/cmd/manage.py b/code/daisy/daisy/cmd/manage.py index f630fb9d..654e9ef8 100755 --- a/code/daisy/daisy/cmd/manage.py +++ b/code/daisy/daisy/cmd/manage.py @@ -29,15 +29,6 @@ from __future__ import print_function import os import sys - -# If ../glance/__init__.py exists, add ../ to Python search path, so that -# it will override what happens to be installed in /usr/(local/)lib/python... -possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), - os.pardir, - os.pardir)) -if os.path.exists(os.path.join(possible_topdir, 'daisy', '__init__.py')): - sys.path.insert(0, possible_topdir) - from oslo_config import cfg from oslo_db.sqlalchemy import migration from oslo_log import log as logging @@ -52,6 +43,14 @@ from daisy.db.sqlalchemy import api as db_api from daisy.db.sqlalchemy import metadata from daisy import i18n +# If ../glance/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... +possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'daisy', '__init__.py')): + sys.path.insert(0, possible_topdir) + CONF = cfg.CONF LOG = logging.getLogger(__name__) diff --git a/code/daisy/daisy/cmd/orchestration.py b/code/daisy/daisy/cmd/orchestration.py index 3cdd9a67..8e52a6a9 100755 --- a/code/daisy/daisy/cmd/orchestration.py +++ b/code/daisy/daisy/cmd/orchestration.py @@ -23,8 +23,14 @@ Reference implementation server for Daisy orchestration import os import sys - import eventlet +from oslo_config import cfg +from oslo_log import log as logging +from daisy.common import exception +from daisy.common import config +from daisy.openstack.common import loopingcall +from daisy.orchestration.manager import OrchestrationManager +import six # Monkey patch socket and time eventlet.patcher.monkey_patch(all=False, socket=True, time=True, thread=True) @@ -37,23 +43,12 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'daisy', '__init__.py')): sys.path.insert(0, possible_topdir) -from oslo_config import cfg -from oslo_log import log as logging -import osprofiler.notifier -import osprofiler.web -from daisy.common import exception -from daisy.common import config -from daisy.common import utils -from daisy.common import wsgi -from daisy import notifier -from daisy.openstack.common import systemd -from daisy.openstack.common import loopingcall -from daisy.orchestration.manager import OrchestrationManager CONF = cfg.CONF scale_opts = [ cfg.StrOpt('auto_scale_interval', default=60, - help='Number of seconds between two checkings to compute auto scale status'), + help='Number of seconds between two ' + 'checkings to compute auto scale status'), ] CONF.register_opts(scale_opts, group='orchestration') logging.register_options(CONF) @@ -62,10 +57,11 @@ logging.register_options(CONF) def fail(returncode, e): sys.stderr.write("ERROR: %s\n" % six.text_type(e)) + def main(): try: 
config.parse_args() - logging.setup(CONF,'daisy') + logging.setup(CONF, 'daisy') timer = loopingcall.FixedIntervalLoopingCall( OrchestrationManager.find_auto_scale_cluster) timer.start(float(CONF.orchestration.auto_scale_interval)).wait() diff --git a/code/daisy/daisy/cmd/registry.py b/code/daisy/daisy/cmd/registry.py index 4cfec19d..200a6221 100755 --- a/code/daisy/daisy/cmd/registry.py +++ b/code/daisy/daisy/cmd/registry.py @@ -25,6 +25,16 @@ import os import sys import eventlet +from oslo_config import cfg +from oslo_log import log as logging +import osprofiler.notifier +import osprofiler.web + +from daisy.common import config +from daisy.common import utils +from daisy.common import wsgi +from daisy import notifier +from daisy.openstack.common import systemd # Monkey patch socket and time eventlet.patcher.monkey_patch(all=False, socket=True, time=True, thread=True) @@ -37,16 +47,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'daisy', '__init__.py')): sys.path.insert(0, possible_topdir) -from oslo_config import cfg -from oslo_log import log as logging -import osprofiler.notifier -import osprofiler.web - -from daisy.common import config -from daisy.common import utils -from daisy.common import wsgi -from daisy import notifier -from daisy.openstack.common import systemd CONF = cfg.CONF CONF.import_group("profiler", "daisy.common.wsgi") diff --git a/code/daisy/daisy/cmd/scrubber.py b/code/daisy/daisy/cmd/scrubber.py index d27f6fa3..009e1c05 100755 --- a/code/daisy/daisy/cmd/scrubber.py +++ b/code/daisy/daisy/cmd/scrubber.py @@ -21,6 +21,13 @@ Glance Scrub Service import os import sys +import glance_store +from oslo_config import cfg +from oslo_log import log as logging + +from daisy.common import config +from daisy.openstack.common import systemd +from daisy import scrubber # If ../glance/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... 
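Each of the cmd/*.py entry points touched above repeats the same bootstrap idiom: compute the top of the source checkout and, when running from a source tree, put it ahead of anything installed system-wide. Shown standalone here for clarity; the patch itself only relocates the project imports relative to this block:

import os
import sys

# If ../glance/__init__.py exists, add ../ to the Python search path so the
# source tree overrides whatever is installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
                                                os.pardir,
                                                os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
    sys.path.insert(0, possible_topdir)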
@@ -30,14 +37,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): sys.path.insert(0, possible_topdir) -import glance_store -from oslo_config import cfg -from oslo_log import log as logging - -from daisy.common import config -from daisy.openstack.common import systemd -from daisy import scrubber - CONF = cfg.CONF logging.register_options(CONF) diff --git a/code/daisy/daisy/cmd/search.py b/code/daisy/daisy/cmd/search.py index 552551f6..62b6679d 100755 --- a/code/daisy/daisy/cmd/search.py +++ b/code/daisy/daisy/cmd/search.py @@ -27,6 +27,15 @@ import sys import eventlet from daisy.common import utils +from oslo.config import cfg +from oslo_log import log as logging +import osprofiler.notifier +import osprofiler.web + +from daisy.common import config +from daisy.common import exception +from daisy.common import wsgi +from daisy import notifier # Monkey patch socket, time, select, threads eventlet.patcher.monkey_patch(socket=True, time=True, select=True, @@ -40,15 +49,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): sys.path.insert(0, possible_topdir) -from oslo.config import cfg -from oslo_log import log as logging -import osprofiler.notifier -import osprofiler.web - -from daisy.common import config -from daisy.common import exception -from daisy.common import wsgi -from daisy import notifier CONF = cfg.CONF CONF.import_group("profiler", "daisy.common.wsgi") diff --git a/code/daisy/daisy/common/artifacts/declarative.py b/code/daisy/daisy/common/artifacts/declarative.py index 4d07527d..5ae3e66e 100755 --- a/code/daisy/daisy/common/artifacts/declarative.py +++ b/code/daisy/daisy/common/artifacts/declarative.py @@ -399,6 +399,7 @@ class PropertyDefinition(AttributeDefinition): class RelationDefinition(AttributeDefinition): """A base class for Attributes defining cross-artifact relations""" + def __init__(self, internal=False, **kwargs): self.internal = internal kwargs.setdefault('mutable', False) @@ -482,6 +483,7 @@ class ArtifactPropertyDescriptor(object): class ArtifactAttributes(object): """A container class storing description of Artifact Type attributes""" + def __init__(self): self.properties = {} self.dependencies = {} diff --git a/code/daisy/daisy/common/artifacts/definitions.py b/code/daisy/daisy/common/artifacts/definitions.py index d74d57a1..2abc8847 100755 --- a/code/daisy/daisy/common/artifacts/definitions.py +++ b/code/daisy/daisy/common/artifacts/definitions.py @@ -121,7 +121,7 @@ class SemVerString(String): super(SemVerString, self).__init__(validators=[(validate, - "Invalid semver string")], + "Invalid semver string")], **kwargs) @@ -436,7 +436,7 @@ class ArtifactReference(declarative.RelationDefinition): if artifact.type_name not in type_names: return False if (type_version is not None and - artifact.type_version != type_version): + artifact.type_version != type_version): return False return True @@ -477,6 +477,7 @@ class ArtifactReferenceList(declarative.ListAttributeDefinition, class Blob(object): """A Binary object being part of the Artifact""" + def __init__(self, size=0, locations=None, checksum=None, item_key=None): """Initializes a new Binary Object for an Artifact diff --git a/code/daisy/daisy/common/artifacts/loader.py b/code/daisy/daisy/common/artifacts/loader.py index 62154e91..aefe762e 100755 --- a/code/daisy/daisy/common/artifacts/loader.py +++ 
b/code/daisy/daisy/common/artifacts/loader.py @@ -48,6 +48,7 @@ CONF.register_opts(plugins_opts) class ArtifactsPluginLoader(object): + def __init__(self, namespace): self.mgr = enabled.EnabledExtensionManager( check_func=self._gen_check_func(), diff --git a/code/daisy/daisy/common/auth.py b/code/daisy/daisy/common/auth.py index b09ee2d5..69c175ac 100755 --- a/code/daisy/daisy/common/auth.py +++ b/code/daisy/daisy/common/auth.py @@ -44,6 +44,7 @@ _ = i18n._ class BaseStrategy(object): + def __init__(self): self.auth_token = None # TODO(sirp): Should expose selecting public/internal/admin URL. @@ -62,6 +63,7 @@ class BaseStrategy(object): class NoAuthStrategy(BaseStrategy): + def authenticate(self): pass diff --git a/code/daisy/daisy/common/exception.py b/code/daisy/daisy/common/exception.py index 4464d07d..27425936 100755 --- a/code/daisy/daisy/common/exception.py +++ b/code/daisy/daisy/common/exception.py @@ -27,6 +27,7 @@ _FATAL_EXCEPTION_FORMAT_ERRORS = False class RedirectException(Exception): + def __init__(self, url): self.url = urlparse.urlparse(url) @@ -336,13 +337,16 @@ class TaskException(DaisyException): class BadTaskConfiguration(DaisyException): message = _("Task was not configured properly") + class InstallException(DaisyException): message = _("Cluster installtation raise exception") + class InstallTimeoutException(DaisyException): message = _( "Time out, during install TECS components to cluster %(cluster_id)s") + class TaskNotFound(TaskException, NotFound): message = _("Task with the given id %(task_id)s was not found") @@ -566,23 +570,32 @@ class InvalidJsonPatchPath(JsonPatchException): class InvalidNetworkConfig(DaisyException): pass + class InvalidIP(DaisyException): pass - + + class OSInstallFailed(DaisyException): message = _("os installtation failed.") - + + class IMPIOprationFailed(DaisyException): message = _("ipmi command failed.") - + + class ThreadBinException(DaisyException): + def __init__(self, *args): super(ThreadBinException, self).__init__(*args) - + + class SubprocessCmdFailed(DaisyException): message = _("suprocess command failed.") - + + class DeleteConstrainted(DaisyException): message = _("delete is not allowed.") +class TrustMeFailed(DaisyException): + message = _("Trust me script failed.") diff --git a/code/daisy/daisy/common/rpc.py b/code/daisy/daisy/common/rpc.py index 3d3cd6b8..e8ad3262 100755 --- a/code/daisy/daisy/common/rpc.py +++ b/code/daisy/daisy/common/rpc.py @@ -251,8 +251,8 @@ class RPCClient(client.BaseClient): # checking if content contains the '_error' key, # verify if it is an instance of dict - since the # RPC call may have returned something different. - if self.raise_exc and (isinstance(content, dict) - and '_error' in content): + if self.raise_exc and (isinstance(content, dict) and + '_error' in content): error = content['_error'] try: exc_cls = imp.import_class(error['cls']) diff --git a/code/daisy/daisy/common/scripts/image_import/main.py b/code/daisy/daisy/common/scripts/image_import/main.py index 718b88dd..b77ae5a7 100755 --- a/code/daisy/daisy/common/scripts/image_import/main.py +++ b/code/daisy/daisy/common/scripts/image_import/main.py @@ -12,11 +12,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
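The exception.py hunk above adds several DaisyException subclasses that only carry a message template (InstallException, InstallTimeoutException, TrustMeFailed, and so on). A hedged usage sketch, assuming DaisyException follows the glance-style pattern of interpolating keyword arguments into the class-level message:

from daisy.common import exception

try:
    raise exception.InstallTimeoutException(cluster_id='cluster-001')
except exception.InstallTimeoutException as e:
    # Expected text (under the assumed formatting behaviour):
    # "Time out, during install TECS components to cluster cluster-001"
    print(e)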
- -__all__ = [ - 'run', -] - from oslo_concurrency import lockutils from oslo_log import log as logging from oslo_utils import excutils @@ -28,6 +23,9 @@ from daisy.common.scripts import utils as script_utils from daisy.common import store_utils from daisy.common import utils as common_utils from daisy import i18n +__all__ = [ + 'run', +] LOG = logging.getLogger(__name__) @@ -150,7 +148,7 @@ def set_image_data(image, uri, task_id): data_iter = None try: LOG.info(_LI("Task %(task_id)s: Got image data uri %(data_uri)s to be " - "imported") % {"data_uri": uri, "task_id": task_id}) + "imported") % {"data_uri": uri, "task_id": task_id}) data_iter = script_utils.get_image_data_iter(uri) image.set_data(data_iter) except Exception as e: diff --git a/code/daisy/daisy/common/scripts/utils.py b/code/daisy/daisy/common/scripts/utils.py index fa991dc6..295e37d2 100755 --- a/code/daisy/daisy/common/scripts/utils.py +++ b/code/daisy/daisy/common/scripts/utils.py @@ -12,7 +12,12 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +import urllib2 +from oslo_log import log as logging + +from daisy.common import exception +from daisy import i18n __all__ = [ 'get_task', 'unpack_task_input', @@ -22,14 +27,6 @@ __all__ = [ ] -import urllib2 - -from oslo_log import log as logging - -from daisy.common import exception -from daisy import i18n - - LOG = logging.getLogger(__name__) _ = i18n._ _LE = i18n._LE @@ -100,7 +97,7 @@ def validate_location_uri(location): "source of image data.") # NOTE: raise Exception and let the encompassing block save # the error msg in the task.message. - raise StandardError(msg) + raise Exception(msg) else: # TODO(nikhil): add other supported uris diff --git a/code/daisy/daisy/common/semver_db.py b/code/daisy/daisy/common/semver_db.py index 56972c98..769b91dd 100755 --- a/code/daisy/daisy/common/semver_db.py +++ b/code/daisy/daisy/common/semver_db.py @@ -25,6 +25,7 @@ _ = i18n._ class DBVersion(object): + def __init__(self, components_long, prerelease, build): """ Creates a DBVersion object out of 3 component fields. 
This initializer @@ -54,8 +55,8 @@ class DBVersion(object): other.version == self.version) def __ne__(self, other): - return (not isinstance(other, DBVersion) - or self.version != other.version) + return (not isinstance(other, DBVersion) or + self.version != other.version) def __composite_values__(self): long_version = _version_to_long(self.version) diff --git a/code/daisy/daisy/common/swift_store_utils.py b/code/daisy/daisy/common/swift_store_utils.py index e1215482..72e46b68 100755 --- a/code/daisy/daisy/common/swift_store_utils.py +++ b/code/daisy/daisy/common/swift_store_utils.py @@ -63,6 +63,7 @@ def is_multiple_swift_store_accounts_enabled(): class SwiftParams(object): + def __init__(self): if is_multiple_swift_store_accounts_enabled(): self.params = self._load_config() @@ -71,8 +72,8 @@ class SwiftParams(object): def _form_default_params(self): default = {} - if (CONF.swift_store_user and CONF.swift_store_key - and CONF.swift_store_auth_address): + if (CONF.swift_store_user and CONF.swift_store_key and + CONF.swift_store_auth_address): default['user'] = CONF.swift_store_user default['key'] = CONF.swift_store_key default['auth_address'] = CONF.swift_store_auth_address diff --git a/code/daisy/daisy/common/utils.py b/code/daisy/daisy/common/utils.py index a44badef..2a055b80 100755 --- a/code/daisy/daisy/common/utils.py +++ b/code/daisy/daisy/common/utils.py @@ -21,6 +21,7 @@ System-level utilities and helper functions. """ import errno +from functools import reduce try: from eventlet import sleep @@ -46,9 +47,11 @@ from oslo_utils import netutils from oslo_utils import strutils import six from webob import exc +import ConfigParser from daisy.common import exception from daisy import i18n +from ironicclient import client as ironic_client CONF = cfg.CONF @@ -73,6 +76,11 @@ IMAGE_META_HEADERS = ['x-image-meta-location', 'x-image-meta-size', DAISY_TEST_SOCKET_FD_STR = 'DAISY_TEST_SOCKET_FD' +DISCOVER_DEFAULTS = { + 'listen_port': '5050', + 'ironic_url': 'http://127.0.0.1:6385/v1', +} + def chunkreadable(iter, chunk_size=65536): """ @@ -135,6 +143,7 @@ MAX_COOP_READER_BUFFER_SIZE = 134217728 # 128M seems like a sane buffer limit class CooperativeReader(object): + """ An eventlet thread friendly class for reading in image data. @@ -144,6 +153,7 @@ class CooperativeReader(object): starvation, ie allows all threads to be scheduled periodically rather than having the same thread be continuously active. """ + def __init__(self, fd): """ :param fd: Underlying image file object @@ -223,10 +233,12 @@ class CooperativeReader(object): class LimitingReader(object): + """ Reader designed to fail when reading image data past the configured allowable amount. 
""" + def __init__(self, data, limit): """ :param data: Underlying image data object @@ -330,72 +342,91 @@ def get_image_meta_from_headers(response): result[key] = strutils.bool_from_string(result[key]) return result + def get_host_meta(response): result = {} - for key,value in response.json.items(): + for key, value in response.json.items(): result[key] = value return result + +def get_hwm_meta(response): + result = {} + for key, value in response.json.items(): + result[key] = value + return result + + def get_cluster_meta(response): result = {} - for key,value in response.json.items(): + for key, value in response.json.items(): result[key] = value return result + def get_component_meta(response): result = {} - for key,value in response.json.items(): + for key, value in response.json.items(): result[key] = value return result + def get_service_meta(response): result = {} - for key,value in response.json.items(): + for key, value in response.json.items(): result[key] = value return result + def get_template_meta(response): result = {} - for key,value in response.json.items(): + for key, value in response.json.items(): result[key] = value return result + def get_role_meta(response): result = {} - for key,value in response.json.items(): + for key, value in response.json.items(): result[key] = value return result + def get_config_file_meta(response): result = {} - for key,value in response.json.items(): + for key, value in response.json.items(): result[key] = value return result + def get_config_set_meta(response): result = {} - for key,value in response.json.items(): + for key, value in response.json.items(): result[key] = value return result + def get_config_meta(response): result = {} - for key,value in response.json.items(): - result[key] = value - return result - -def get_network_meta(response): - result = {} - for key,value in response.json.items(): + for key, value in response.json.items(): result[key] = value return result -def get_dict_meta(response): + +def get_network_meta(response): result = {} - for key,value in response.json.items(): + for key, value in response.json.items(): result[key] = value return result + +def get_dict_meta(response): + result = {} + for key, value in response.json.items(): + result[key] = value + return result + + def create_mashup_dict(image_meta): """ Returns a dictionary-like mashup of the image core properties @@ -434,6 +465,7 @@ def safe_remove(path): class PrettyTable(object): + """Creates an ASCII art table for use in bin/glance Example: @@ -442,6 +474,7 @@ class PrettyTable(object): --- ----------------- ------------ ----- 122 image 22 0 """ + def __init__(self): self.columns = [] @@ -506,8 +539,9 @@ def get_terminal_size(): try: height_width = struct.unpack('hh', fcntl.ioctl(sys.stderr.fileno(), - termios.TIOCGWINSZ, - struct.pack('HH', 0, 0))) + termios.TIOCGWINSZ, + struct.pack( + 'HH', 0, 0))) except Exception: pass @@ -802,3 +836,254 @@ def get_search_plugins(): ext_manager = stevedore.extension.ExtensionManager( namespace, invoke_on_load=True) return ext_manager.extensions + + +def get_host_min_mac(host_interfaces): + if not isinstance(host_interfaces, list): + host_interfaces = eval(host_interfaces) + macs = [interface['mac'] for interface in host_interfaces + if interface['type'] == 'ether' and interface['mac']] + min_mac = min(macs) + return min_mac + + +def ip_into_int(ip): + """ + Switch ip string to decimalism integer.. 
+ :param ip: ip string + :return: decimalism integer + """ + return reduce(lambda x, y: (x << 8) + y, map(int, ip.split('.'))) + + +def is_ip_in_cidr(ip, cidr): + """ + Check ip is in cidr + :param ip: Ip will be checked, like:192.168.1.2. + :param cidr: Ip range,like:192.168.0.0/24. + :return: If ip in cidr, return True, else return False. + """ + network = cidr.split('/') + mask = ~(2**(32 - int(network[1])) - 1) + return (ip_into_int(ip) & mask) == (ip_into_int(network[0]) & mask) + + +def is_ip_in_ranges(ip, ip_ranges): + """ + Check ip is in range + : ip: Ip will be checked, like:192.168.1.2. + : ip_ranges : Ip ranges, like: + [{'start':'192.168.0.10', 'end':'192.168.0.20'} + {'start':'192.168.0.50', 'end':'192.168.0.60'}] + :return: If ip in ip_ranges, return True, else return False. + """ + for ip_range in ip_ranges: + start_ip_int = ip_into_int(ip_range['start']) + end_ip_int = ip_into_int(ip_range['end']) + ip_int = ip_into_int(ip) + if ip_int >= start_ip_int and ip_int <= end_ip_int: + return True + + return False + + +def get_ironicclient(): # pragma: no cover + """Get Ironic client instance.""" + config_discoverd = ConfigParser.ConfigParser(defaults=DISCOVER_DEFAULTS) + config_discoverd.read("/etc/ironic-discoverd/discoverd.conf") + ironic_url = config_discoverd.get("discoverd", "ironic_url") + args = {'os_auth_token': 'fake', + 'ironic_url': ironic_url} + return ironic_client.get_client(1, **args) + + +def get_host_hw_info(host_interface): + host_hw_config = {} + ironicclient = get_ironicclient() + if host_interface: + min_mac = get_host_min_mac(host_interface) + try: + host_obj = ironicclient.physical_node.get(min_mac) + host_hw_config = dict([(f, getattr(host_obj, f, '')) + for f in ['system', 'memory', 'cpu', + 'disks', 'interfaces', + 'pci', 'devices']]) + except Exception: + LOG.exception(_LE("Unable to find ironic data %s") + % Exception) + return host_hw_config + + +def get_dvs_interfaces(host_interfaces): + dvs_interfaces = [] + if not isinstance(host_interfaces, list): + host_interfaces = eval(host_interfaces) + for interface in host_interfaces: + if not isinstance(interface, dict): + interface = eval(interface) + if ('vswitch_type' in interface and + interface['vswitch_type'] == 'dvs'): + dvs_interfaces.append(interface) + + return dvs_interfaces + + +def get_clc_pci_info(pci_info): + clc_pci = [] + flag1 = 'Intel Corporation Coleto Creek PCIe Endpoint' + flag2 = '8086:0435' + for pci in pci_info: + if flag1 in pci or flag2 in pci: + clc_pci.append(pci.split()[0]) + return clc_pci + + +def cpu_str_to_list(spec): + """Parse a CPU set specification. + + :param spec: cpu set string eg "1-4,^3,6" + + Each element in the list is either a single + CPU number, a range of CPU numbers, or a + caret followed by a CPU number to be excluded + from a previous range. 
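The new IP helpers in common/utils.py work on the 32-bit integer form of an address. A quick worked example of ip_into_int(), is_ip_in_cidr() and is_ip_in_ranges() with illustrative values:

assert ip_into_int('192.168.0.5') == 3232235525            # 192<<24 | 168<<16 | 0<<8 | 5
assert is_ip_in_cidr('192.168.0.5', '192.168.0.0/24')       # same /24 network
assert not is_ip_in_cidr('192.168.1.5', '192.168.0.0/24')   # differs after masking
assert is_ip_in_ranges('192.168.0.15',
                       [{'start': '192.168.0.10', 'end': '192.168.0.20'}])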
+ + :returns: a set of CPU indexes + """ + + cpusets = [] + if not spec: + return cpusets + + cpuset_ids = set() + cpuset_reject_ids = set() + for rule in spec.split(','): + rule = rule.strip() + # Handle multi ',' + if len(rule) < 1: + continue + # Note the count limit in the .split() call + range_parts = rule.split('-', 1) + if len(range_parts) > 1: + # So, this was a range; start by converting the parts to ints + try: + start, end = [int(p.strip()) for p in range_parts] + except ValueError: + raise exception.Invalid(_("Invalid range expression %r") + % rule) + # Make sure it's a valid range + if start > end: + raise exception.Invalid(_("Invalid range expression %r") + % rule) + # Add available CPU ids to set + cpuset_ids |= set(range(start, end + 1)) + elif rule[0] == '^': + # Not a range, the rule is an exclusion rule; convert to int + try: + cpuset_reject_ids.add(int(rule[1:].strip())) + except ValueError: + raise exception.Invalid(_("Invalid exclusion " + "expression %r") % rule) + else: + # OK, a single CPU to include; convert to int + try: + cpuset_ids.add(int(rule)) + except ValueError: + raise exception.Invalid(_("Invalid inclusion " + "expression %r") % rule) + + # Use sets to handle the exclusion rules for us + cpuset_ids -= cpuset_reject_ids + cpusets = list(cpuset_ids) + cpusets.sort() + return cpusets + + +def cpu_list_to_str(cpu_list): + """Parse a CPU list to string. + + :param cpu_list: eg "[1,2,3,4,6,7]" + + :returns: a string of CPU ranges, eg 1-4,6,7 + """ + spec = '' + if not cpu_list: + return spec + + cpu_list.sort() + count = 0 + group_cpus = [] + tmp_cpus = [] + for cpu in cpu_list: + if count == 0: + init = cpu + tmp_cpus.append(cpu) + else: + if cpu == (init + count): + tmp_cpus.append(cpu) + else: + group_cpus.append(tmp_cpus) + tmp_cpus = [] + count = 0 + init = cpu + tmp_cpus.append(cpu) + count += 1 + + group_cpus.append(tmp_cpus) + + for group in group_cpus: + if len(group) > 2: + group_spec = ("%s-%s" % (group[0], group[0]+len(group)-1)) + else: + group_str = [str(num) for num in group] + group_spec = ','.join(group_str) + if spec: + spec += ',' + group_spec + else: + spec = group_spec + + return spec + + +def simple_subprocess_call(cmd): + return_code = subprocess.call(cmd, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + return return_code + + +def translate_quotation_marks_for_shell(orig_str): + translated_str = '' + quotation_marks = '"' + quotation_marks_count = orig_str.count(quotation_marks) + if quotation_marks_count > 0: + replace_marks = '\\"' + translated_str = orig_str.replace(quotation_marks, replace_marks) + else: + translated_str = orig_str + return translated_str + + +def get_numa_node_cpus(host_cpu): + numa = {} + if 'numa_node0' in host_cpu: + numa['numa_node0'] = cpu_str_to_list(host_cpu['numa_node0']) + if 'numa_node1' in host_cpu: + numa['numa_node1'] = cpu_str_to_list(host_cpu['numa_node1']) + return numa + + +def get_numa_node_from_cpus(numa, str_cpus): + numa_nodes = [] + + cpu_list = cpu_str_to_list(str_cpus) + for cpu in cpu_list: + if cpu in numa['numa_node0']: + numa_nodes.append(0) + if cpu in numa['numa_node1']: + numa_nodes.append(1) + + numa_nodes = list(set(numa_nodes)) + numa_nodes.sort() + return numa_nodes diff --git a/code/daisy/daisy/common/vcpu_pin.py b/code/daisy/daisy/common/vcpu_pin.py new file mode 100755 index 00000000..381218c9 --- /dev/null +++ b/code/daisy/daisy/common/vcpu_pin.py @@ -0,0 +1,392 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the 
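cpu_str_to_list() and cpu_list_to_str() above convert between the "1-4,^3,6" CPU-set notation and a plain list of core indexes. A worked example matching their docstrings:

assert cpu_str_to_list("1-4,^3,6") == [1, 2, 4, 6]      # range 1-4, exclude 3, include 6
assert cpu_list_to_str([1, 2, 3, 4, 6, 7]) == "1-4,6,7"
# Runs of three or more consecutive CPUs collapse to "start-end";
# shorter runs stay comma-separated.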
National Aeronautics and Space Administration. +# Copyright 2014 SoftLayer Technologies, Inc. +# Copyright 2015 Mirantis, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +System-level utilities and helper functions. +""" +from oslo_config import cfg +from oslo_log import log as logging +from webob import exc + +from daisy.common import utils +from daisy import i18n + +CONF = cfg.CONF + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE + + +def get_total_cpus_for_numa(numa_cpus): + all_cpus = [] + for value in numa_cpus.values(): + all_cpus.extend(value) + return all_cpus + + +def get_default_os_num(host_roles_name): + if (('CONTROLLER_LB' in host_roles_name or + 'CONTROLLER_HA' in host_roles_name) and + 'COMPUTER' in host_roles_name): + # host with role of CONTOLLER and COMPUTER, + # isolate 4 cpu cores default for OS and TECS + os_cpu_num = 4 + elif 'COMPUTER' in host_roles_name: + # host with role of COMPUTER only, + # isolate 2 cpu cores default for OS and TECS + os_cpu_num = 2 + elif ('CONTROLLER_LB' in host_roles_name or + 'CONTROLLER_HA' in host_roles_name): + # host with role of CONTOLLER only, + # don't isolate cpu for OS and TECS + os_cpu_num = 0 + else: + os_cpu_num = 0 + + return os_cpu_num + + +def pci_get_cpu_sets(numa_cpus, pci_info, device_numa_node): + high_pci_cpu_set = {} + msg = '' + return_code = 0 + status = {'rc': 0, 'msg': ''} + + if not numa_cpus: + msg = "The architecture of CPU does not supported" + LOG.info(msg) + return_code = 0 + status['rc'] = return_code + status['msg'] = msg + return (status, high_pci_cpu_set) + + # get Intel Corporation Coleto Creek PCIe Endpoint + clc_pci_lines = utils.get_clc_pci_info(pci_info) + if not clc_pci_lines: + msg = "No CLC card in system" + LOG.info(msg) + return_code = 0 + status['rc'] = return_code + status['msg'] = msg + return (status, high_pci_cpu_set) + + high_pci_cpusets = [] + for clc_pci_line in clc_pci_lines: + numa_node = device_numa_node['0000:' + clc_pci_line] + numa_key = 'numa_node' + str(numa_node) + if numa_key not in numa_cpus: + msg = "Can't find numa_node '%s' for CLC" % numa_node + return_code = 1 + status['rc'] = return_code + status['msg'] = msg + return (status, high_pci_cpu_set) + high_pci_cpusets += numa_cpus[numa_key] + + high_pci_cpu_set['high'] = list(set(high_pci_cpusets)) + total_cpus = get_total_cpus_for_numa(numa_cpus) + high_pci_cpu_set['low'] =\ + list(set(total_cpus) - set(high_pci_cpu_set['high'])) + LOG.debug("high_pci_cpu_set:%s" % high_pci_cpu_set) + + return (status, high_pci_cpu_set) + + +# if numa codes are not same, return -1 +def get_numa_by_nic(nic_info, device_numa_node): + numa = [] + try: + for nic in nic_info: + numa.append(device_numa_node[nic['bus']]) + + numa = list(set(numa)) + numa_info = (-1 if len(numa) > 1 else numa[0]) + except Exception: + numa_info = -1 + + return numa_info + + +def dvs_get_cpu_sets(dic_numas, nic_info, device_numa_node, cpu_num=4): + dvs_cpu_set = [] + total_cpus = [] + high_cpu_set = [] + low_cpu_set = [] 
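pci_get_cpu_sets() above splits a host's CPUs into a "high" set (the NUMA node that owns the Coleto Creek/CLC card) and a "low" set (everything else), reporting errors through an {'rc': ..., 'msg': ...} status dict. A worked illustration on a hypothetical two-node topology; the PCI address, device string and CPU numbers are made up:

numa_cpus = {'numa_node0': [0, 1, 2, 3], 'numa_node1': [4, 5, 6, 7]}
pci_info = ['81:00.0 Intel Corporation Coleto Creek PCIe Endpoint']
device_numa_node = {'0000:81:00.0': 1}        # the CLC card sits on NUMA node 1

status, cpu_set = pci_get_cpu_sets(numa_cpus, pci_info, device_numa_node)
# status  -> {'rc': 0, 'msg': ''}
# cpu_set -> {'high': [4, 5, 6, 7], 'low': [0, 1, 2, 3]}  (ordering may vary: built from sets)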
+ cpu_set = {} + msg = '' + return_code = 0 + status = {} + + if not dic_numas: + msg = "The architecture of CPU not supported" + LOG.info(msg) + return_code = 1 + status['rc'] = return_code + status['msg'] = msg + return (status, cpu_set) + + numa_node = get_numa_by_nic(nic_info, device_numa_node) + if numa_node < 0: + msg = 'Invalid numa node:%s' % numa_node + LOG.info(msg) + return_code = 3 + status['rc'] = return_code + status['msg'] = msg + return (status, cpu_set) + + numa_key = "numa_node%s" % numa_node + if numa_key not in dic_numas: + msg = "Can't find numa node '%s' for DVS" % numa_node + return_code = 4 + status['rc'] = return_code + status['msg'] = msg + return (status, cpu_set) + + if len(dic_numas[numa_key]) < (cpu_num + 1): + msg = "CPU on %s is not enough" % numa_key + LOG.info(msg) + return_code = 5 + status['rc'] = return_code + status['msg'] = msg + return (status, cpu_set) + + total_cpus = get_total_cpus_for_numa(dic_numas) + LOG.debug("total_cpu:%s" % total_cpus) + + # sort + dic_numas[numa_key] = sorted(dic_numas[numa_key], reverse=True) + for i in dic_numas[numa_key][0:cpu_num]: + dvs_cpu_set.append(i) + + high_cpu_set = dic_numas[numa_key] + low_cpu_set =\ + list(set(total_cpus).difference(set(dic_numas[numa_key]))) + LOG.debug("cpu used by dvs:%s" % dvs_cpu_set) + LOG.debug("low_cpu_set:%s" % low_cpu_set) + LOG.debug("high_cpu_set:%s" % high_cpu_set) + + cpu_set['dvs'] = dvs_cpu_set + cpu_set['high'] = high_cpu_set + cpu_set['low'] = low_cpu_set + LOG.debug("cpu_set:%s" % cpu_set) + + msg = 'Success' + status['rc'] = return_code + status['msg'] = msg + LOG.debug("status:%s" % status) + + return (status, cpu_set) + + +def get_dvs_cpusets(numa_cpus, host_detail, host_hw_info): + dvs_nics_name = [] + dvs_interfaces = utils.get_dvs_interfaces(host_detail['interfaces']) + for dvs_interface in dvs_interfaces: + if dvs_interface['type'] == 'ether': + dvs_nics_name.append(dvs_interface['name']) + if dvs_interface['type'] == 'bond': + if dvs_interface.get('slaves', None): + dvs_nics_name.extend(dvs_interface['slaves']) + elif dvs_interface.get('slave1', None) and \ + dvs_interface.get('slave2', None): + slave_list = [] + slave_list.append(dvs_interface['slave1']) + slave_list.append(dvs_interface['slave2']) + dvs_nics_name.extend(slave_list) + + dvs_cpusets = {} + if dvs_nics_name: + nics_info = [{'name': nic_name, 'bus': interface['pci']} + for nic_name in dvs_nics_name + for interface in host_hw_info['interfaces'].values() + if nic_name == interface['name']] + dvs_cpu_num = 4 + device_numa = {} + for device in host_hw_info['devices'].values(): + device_numa.update(device) + LOG.info("DVS netcard info: '%s'" % nics_info) + (status, dvs_cpusets) = \ + dvs_get_cpu_sets(numa_cpus, + nics_info, + device_numa, + dvs_cpu_num) + if status['rc'] != 0: + msg = "Get dvs cpu sets for host '%s' failed,\ + detail error is '%s'"\ + % (host_detail['name'], status['msg']) + LOG.error(msg) + raise exc.HTTPBadRequest(explanation=msg) + + return dvs_cpusets + + +def get_pci_cpusets(numa_cpus, host_hw_info): + device_numa = {} + for device in host_hw_info['devices'].values(): + device_numa.update(device) + + (status, pci_cpusets) = pci_get_cpu_sets(numa_cpus, + host_hw_info['pci'].values(), + device_numa) + if status['rc'] != 0: + LOG.error(status['msg']) + raise exc.HTTPBadRequest(explanation=status['msg']) + + return pci_cpusets + + +def allocate_os_cpus(roles_name, pci_cpusets, dvs_cpusets): + os_cpus = [] + if not dvs_cpusets and not pci_cpusets: + return os_cpus + + os_cpu_num = 
get_default_os_num(roles_name) + if os_cpu_num == 0: + return os_cpus + + os_available_cpuset = [] + if ((pci_cpusets and pci_cpusets.get('high')) and + (not dvs_cpusets or not dvs_cpusets.get('high'))): + # if only pci exist, get OS cores from pci lowset + cpus_low = pci_cpusets.get('low', []) + cpus_high = pci_cpusets.get('high', []) + + if dvs_cpusets and dvs_cpusets.get('high'): + # if only dvs exist, get OS cores from dvs lowset. + # if pci and dvs exist at the same time, + # get OS cores from lowset from dvs lowset. + cpus_low = list(set(dvs_cpusets.get('low', [])) - + set(dvs_cpusets.get('dvs', []))) + + cpus_high = list(set(dvs_cpusets.get('high', [])) - + set(dvs_cpusets.get('dvs', []))) + + cpus_low.sort() + cpus_high.sort() + os_available_cpuset = cpus_low + cpus_high + if not os_available_cpuset: + return os_cpus + + if (len(os_available_cpuset) < os_cpu_num): + msg = 'cpus are not enough' + LOG.error(msg) + raise exc.HTTPBadRequest(explanation=msg) + + # cpu core 0 must give OS + cpu0 = 0 + if cpu0 in os_available_cpuset: + os_available_cpuset.remove(cpu0) + os_available_cpuset = [cpu0] + os_available_cpuset + + os_cpus = os_available_cpuset[:os_cpu_num] + return os_cpus + + +# when config role 'COMPUTER', allocate cpus for CLC +def allocate_clc_cpus(host_detail): + pci_cpu_sets = {} + if 'COMPUTER' not in host_detail.get('role', []): + return pci_cpu_sets + + host_interfaces = host_detail.get('interfaces') + if host_interfaces: + host_hw_info = utils.get_host_hw_info(host_interfaces) + else: + return pci_cpu_sets + + host_id = host_detail.get('id') + clc_pci = utils.get_clc_pci_info(host_hw_info['pci'].values()) + if not clc_pci: + return pci_cpu_sets + else: + LOG.info("CLC card pci number: '%s'" % clc_pci) + numa_cpus = utils.get_numa_node_cpus(host_hw_info.get('cpu', {})) + if not numa_cpus or not numa_cpus['numa_node0']: + msg = "No NUMA CPU found from of host '%s'" % host_id + LOG.info(msg) + return pci_cpu_sets + LOG.info("Get CLC cpusets of host '%s'" % host_id) + pci_cpu_sets = get_pci_cpusets(numa_cpus, host_hw_info) + if not pci_cpu_sets or not pci_cpu_sets.get('high'): + msg = "Can't get CLC cpusets of host '%s'" % host_id + LOG.error(msg) + raise exc.HTTPBadRequest(explanation=msg) + + return pci_cpu_sets + + +# when config DVS on network plane mapping, allocate cpus for dvs +def allocate_dvs_cpus(host_detail): + dvs_cpu_sets = {} + host_interfaces = host_detail.get('interfaces') + if not host_interfaces: + return dvs_cpu_sets + + dvs_interfaces = utils.get_dvs_interfaces(host_interfaces) + if not dvs_interfaces: + return dvs_cpu_sets + + host_id = host_detail.get('id') + host_hw_info = utils.get_host_hw_info(host_interfaces) + numa_cpus = utils.get_numa_node_cpus(host_hw_info.get('cpu', {})) + if not numa_cpus or not numa_cpus['numa_node0']: + msg = "No NUMA CPU found from of host '%s'" % host_id + LOG.info(msg) + return dvs_cpu_sets + + LOG.info("Get dvs cpusets of host '%s'" % host_id) + dvs_cpu_sets = get_dvs_cpusets(numa_cpus, + host_detail, + host_hw_info) + if not dvs_cpu_sets or not dvs_cpu_sets.get('high'): + msg = "Can't get dvs high cpusets of host '%s'" % host_id + LOG.error(msg) + raise exc.HTTPBadRequest(explanation=msg) + return dvs_cpu_sets + + +def allocate_cpus(host_detail): + host_cpu_sets = {'dvs_high_cpuset': '', + 'pci_high_cpuset': '', + 'suggest_dvs_cpus': '', + 'suggest_os_cpus': ''} + + dvs_cpusets = allocate_dvs_cpus(host_detail) + pci_cpusets = allocate_clc_cpus(host_detail) + + # no CLC and no DVS + if (not pci_cpusets and not 
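allocate_os_cpus() above reserves cores for the host OS based on the role mix (4 cores for a combined controller plus compute node, 2 for compute-only, none for controller-only), prefers cores from the "low" set, and always puts core 0 first. A small worked example with made-up DVS CPU sets and no CLC card:

dvs_cpusets = {'high': [4, 5, 6, 7], 'low': [0, 1, 2, 3], 'dvs': [6, 7]}

allocate_os_cpus(['COMPUTER'], {}, dvs_cpusets)
# -> [0, 1]          compute-only: 2 cores, CPU 0 always included

allocate_os_cpus(['CONTROLLER_HA', 'COMPUTER'], {}, dvs_cpusets)
# -> [0, 1, 2, 3]    controller + compute: 4 cores

allocate_os_cpus(['CONTROLLER_HA'], {}, dvs_cpusets)
# -> []              controller-only: nothing isolated for the OS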
dvs_cpusets): + return host_cpu_sets + + host_roles_name = host_detail.get('role', []) + os_cpus = allocate_os_cpus(host_roles_name, + pci_cpusets, + dvs_cpusets) + + host_cpu_sets['dvs_high_cpuset'] =\ + utils.cpu_list_to_str(dvs_cpusets.get('high', [])) + host_cpu_sets['pci_high_cpuset'] =\ + utils.cpu_list_to_str(pci_cpusets.get('high', [])) + host_cpu_sets['suggest_dvs_cpus'] =\ + utils.cpu_list_to_str(dvs_cpusets.get('dvs', [])) + host_cpu_sets['suggest_os_cpus'] = utils.cpu_list_to_str(os_cpus) + + LOG.info("NUMA CPU usage for host %s: %s" + % (host_detail['id'], host_cpu_sets)) + + return host_cpu_sets diff --git a/code/daisy/daisy/common/wsgi.py b/code/daisy/daisy/common/wsgi.py index 6aed981e..024a5d3d 100755 --- a/code/daisy/daisy/common/wsgi.py +++ b/code/daisy/daisy/common/wsgi.py @@ -244,6 +244,7 @@ class Server(object): This class requires initialize_glance_store set to True if glance store needs to be initialized. """ + def __init__(self, threads=1000, initialize_glance_store=False): os.umask(0o27) # ensure files are created with the correct privileges self._logger = logging.getLogger("eventlet.wsgi.server") @@ -638,6 +639,7 @@ class APIMapper(routes.Mapper): class RejectMethodController(object): + def reject(self, req, allowed_methods, *args, **kwargs): LOG.debug("The method %s is not allowed for this resource" % req.environ['REQUEST_METHOD']) diff --git a/code/daisy/daisy/common/wsme_utils.py b/code/daisy/daisy/common/wsme_utils.py index 82f4cbd0..6d0d36e5 100755 --- a/code/daisy/daisy/common/wsme_utils.py +++ b/code/daisy/daisy/common/wsme_utils.py @@ -40,7 +40,7 @@ class WSMEModelTransformer(object): for name in names: value = getattr(db_entity, name, None) if value is not None: - if type(value) == datetime: + if isinstance(value, datetime): iso_datetime_value = timeutils.isotime(value) values.update({name: iso_datetime_value}) else: diff --git a/code/daisy/daisy/contrib/plugins/image_artifact/v2/image.py b/code/daisy/daisy/contrib/plugins/image_artifact/v2/image.py index 056790c8..0f1523b8 100755 --- a/code/daisy/daisy/contrib/plugins/image_artifact/v2/image.py +++ b/code/daisy/daisy/contrib/plugins/image_artifact/v2/image.py @@ -52,8 +52,8 @@ class ImageAsAnArtifact(v1_1.ImageAsAnArtifact): if service['name'] == 'glance') try: client = daisyclient.Client(version=2, - endpoint=glance_endpoint, - token=context.auth_token) + endpoint=glance_endpoint, + token=context.auth_token) legacy_image = client.images.get(self.legacy_image_id) except Exception: raise exception.InvalidArtifactPropertyValue( diff --git a/code/daisy/daisy/db/registry/api.py b/code/daisy/daisy/db/registry/api.py index 80dcbaac..c5aa39ba 100755 --- a/code/daisy/daisy/db/registry/api.py +++ b/code/daisy/daisy/db/registry/api.py @@ -607,4 +607,3 @@ def artifact_publish(client, artifact_id, return client.artifact_publish(artifact_id=artifact_id, type_name=type_name, type_version=type_version) - diff --git a/code/daisy/daisy/db/simple/api.py b/code/daisy/daisy/db/simple/api.py index 717b0af7..fa28c73c 100755 --- a/code/daisy/daisy/db/simple/api.py +++ b/code/daisy/daisy/db/simple/api.py @@ -272,8 +272,8 @@ def _filter_images(images, filters, context, elif visibility == 'private': if image['is_public']: continue - if not (has_ownership or (context.is_admin - and not admin_as_user)): + if not (has_ownership or (context.is_admin and not + admin_as_user)): continue elif visibility == 'shared': if not is_member: @@ -387,8 +387,8 @@ def _image_get(context, image_id, force_show_deleted=False, status=None): 
LOG.warn(_LW('Could not find image %s') % image_id) raise exception.NotFound() - if image['deleted'] and not (force_show_deleted - or context.can_see_deleted): + if image['deleted'] and not (force_show_deleted or + context.can_see_deleted): LOG.warn(_LW('Unable to get deleted image')) raise exception.NotFound() @@ -625,7 +625,7 @@ def _image_locations_delete_all(context, image_id, delete_time=None): delete_time=delete_time) for i, loc in enumerate(DATA['locations']): - if image_id == loc['image_id'] and loc['deleted'] == False: + if image_id == loc['image_id'] and loc['deleted'] is False: del DATA['locations'][i] diff --git a/code/daisy/daisy/db/sqlalchemy/api.py b/code/daisy/daisy/db/sqlalchemy/api.py index 8bbf01c9..1567b358 100755 --- a/code/daisy/daisy/db/sqlalchemy/api.py +++ b/code/daisy/daisy/db/sqlalchemy/api.py @@ -39,6 +39,8 @@ import sqlalchemy import sqlalchemy.orm as sa_orm import sqlalchemy.sql as sa_sql import types +import socket +import netaddr from daisy import artifacts as ga from daisy.common import exception @@ -166,7 +168,7 @@ def image_destroy(context, image_id): _image_tag_delete_all(context, image_id, delete_time, session) return _normalize_locations(context, image_ref) - + def _normalize_locations(context, image, force_show_deleted=False): """ Generate suitable dictionary list for locations field of image. @@ -249,7 +251,7 @@ def _image_get(context, image_id, session=None, force_show_deleted=False): raise exception.Forbidden(msg) return image - + def _check_host_id(host_id): """ check if the given host id is valid before executing operations. For @@ -262,13 +264,17 @@ def _check_host_id(host_id): if (host_id and len(host_id) > models.Host.id.property.columns[0].type.length): raise exception.NotFound() - -def _ip_checker_re(ip_str): - pattern = r"\b(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b" - if re.match(pattern, ip_str): + +def _checker_the_ip_or_hostname_valid(ip_str): + try: + socket.gethostbyname_ex(ip_str) return True - else: - return False + except Exception: + if netaddr.IPAddress(ip_str).version == 6: + return True + else: + return False + def ip_into_int(ip): """ @@ -276,12 +282,15 @@ def ip_into_int(ip): :param ip: ip string :return: decimalism integer """ - return reduce(lambda x,y:(x<<8)+y, map(int, ip.split('.'))) - + return reduce(lambda x, y: (x << 8) + y, map(int, ip.split('.'))) + + def inter_into_ip(num): - inter_ip = lambda x: '.'.join([str(x/(256**i)%256) for i in range(3,-1,-1)]) + inter_ip = lambda x: '.'.join( + [str(x / (256 ** i) % 256) for i in range(3, -1, -1)]) return inter_ip(num) - + + def is_in_cidr_range(ip, network): """ Check ip is in range @@ -292,63 +301,120 @@ def is_in_cidr_range(ip, network): network = network.split('/') mask = ~(2**(32 - int(network[1])) - 1) return (ip_into_int(ip) & mask) == (ip_into_int(network[0]) & mask) - + + +def is_in_ip_range(ip, ip_range): + """ + Check ip is in ip range + :param ip: Ip will be checked, like:192.168.1.2. + :param network: Ip range,like: + {u'start': u'192.168.1.20', u'end': u'192.168.1.21'}. + :return: If ip in range,return True,else return False. 
+ """ + if ip_range.get('start') and ip_range.get('end'): + integer_start_ip = ip_into_int(ip_range['start']) + integer_end_ip = ip_into_int(ip_range['end']) + ip_int = ip_into_int(ip) + return True if integer_start_ip <= ip_int <= integer_end_ip else False + + def cidr_convert_ip_ranges(cidr): str_ip_mask = cidr.split('/')[1] ip_addr = cidr.split('/')[0] - ip_inst=ip_into_int(ip_addr) + ip_inst = ip_into_int(ip_addr) mask = ~(2**(32 - int(str_ip_mask)) - 1) ip_addr_min = inter_into_ip(ip_inst & (mask & 0xffffffff)) ip_addr_max = inter_into_ip(ip_inst | (~mask & 0xffffffff)) - if ip_addr_min.split('.')[3]=='0': - ip_addr_min=ip_addr_min.split('.')[0]+'.'+ip_addr_min.split('.')[1]+'.'+ip_addr_min.split('.')[2]+'.1' - return [ip_addr_min,ip_addr_max] - -def get_ip_with_equal_cidr(cluster_id,network_plane_name,session): - equal_cidr_network_plane_id_list=[] - available_ip_list=[] + if ip_addr_min.split('.')[3] == '0': + ip_addr_min = ip_addr_min.split('.')[0] + '.' + \ + ip_addr_min.split('.')[1] + '.' + ip_addr_min.split('.')[2] + '.1' + return [ip_addr_min, ip_addr_max] - sql_network_plane_cidr="select networks.cidr from networks where networks.name='"+network_plane_name+"' and networks.cluster_id='"+cluster_id+"' and networks.deleted=0" - query_network_plane_cidr = session.execute(sql_network_plane_cidr).fetchone() - network_cidr=query_network_plane_cidr.values().pop() +def get_ip_with_equal_cidr(cluster_id,network_plane_name,session, exclude_ips=[]): + equal_cidr_network_plane_id_list = [] + available_ip_list = copy.deepcopy(exclude_ips) + + sql_network_plane_cidr = "select networks.cidr from networks \ + where networks.name='" + network_plane_name + \ + "' and networks.cluster_id='" + cluster_id + \ + "' and networks.deleted=0" + query_network_plane_cidr = \ + session.execute(sql_network_plane_cidr).fetchone() + network_cidr = query_network_plane_cidr.values().pop() if not network_cidr: - msg = "Error:The CIDR is blank of %s!"%network_plane_name + msg = "Error:The CIDR is blank of %s!" 
% network_plane_name LOG.error(msg) raise exception.Forbidden(msg) - str_network_cidr=','.join(cidr_convert_ip_ranges(network_cidr)) - - sql_all_network_plane_info="select networks.id,networks.cidr from networks where networks.cluster_id='"+cluster_id+"' and networks.deleted=0" - query_all_network_plane_info = session.execute(sql_all_network_plane_info).fetchall() + str_network_cidr = ','.join(cidr_convert_ip_ranges(network_cidr)) + + sql_all_network_plane_info = "select networks.id,networks.cidr,\ + networks.name from networks where \ + networks.cluster_id='" + cluster_id + \ + "' and networks.deleted=0" + query_all_network_plane_info = \ + session.execute(sql_all_network_plane_info).fetchall() for network_plane_tmp in query_all_network_plane_info: - query_network_plane_tmp_info=network_plane_tmp.values() + query_network_plane_tmp_info = network_plane_tmp.values() cidr = query_network_plane_tmp_info[1] if not cidr: continue - ip_ranges_cidr=cidr_convert_ip_ranges(cidr) - str_query_network_plane_cidr=','.join(ip_ranges_cidr) - if str_network_cidr==str_query_network_plane_cidr: - equal_cidr_network_plane_id_list.append(query_network_plane_tmp_info[0]) - + ip_ranges_cidr = cidr_convert_ip_ranges(cidr) + str_query_network_plane_cidr = ','.join(ip_ranges_cidr) + if str_network_cidr == str_query_network_plane_cidr: + equal_cidr_network_plane_id_list.append( + query_network_plane_tmp_info[0]) + if query_network_plane_tmp_info[2] == 'MANAGEMENT': + roles_info_sql = "select roles.db_vip,roles.glance_vip,\ + roles.vip from roles where \ + roles.cluster_id='" + cluster_id + \ + "' and roles.deleted=0" + roles_vip = session.execute(roles_info_sql).fetchall() + available_ip_list.extend([vip for role_vip in + roles_vip for vip in + role_vip if vip]) + for network_id in equal_cidr_network_plane_id_list: - sql_ip="select assigned_networks.ip from assigned_networks where assigned_networks.deleted=0 and assigned_networks.network_id='"+network_id+"' order by assigned_networks.ip" + sql_ip = "select assigned_networks.ip from assigned_networks \ + where assigned_networks.deleted=0 and \ + assigned_networks.network_id='" + network_id + \ + "' order by assigned_networks.ip" query_ip_list = session.execute(sql_ip).fetchall() for tmp_ip in query_ip_list: - ip_pop=tmp_ip.values().pop() + ip_pop = tmp_ip.values().pop() available_ip_list.append(ip_pop) - return available_ip_list - + return list(set(available_ip_list)) + + +# for example: +# merged_by_cidr_vlan['(10,23)'] = [management_network_plane] +# merged_by_cidr_vlan['(9,24)'] = [ +# deployment_network_plane,storage_network_plane] +# merged_networks=[{'name':'MAGEMENT','ip':"10.43.177.2"},{'name':'DEPLOYMENT,STORAGE','ip':""}] def merge_networks_for_unifiers(cluster_id, assigned_networks): merged_by_cidr_vlan = {} session = get_session() for network_plane in assigned_networks: network_plane_name = network_plane['name'] - network_plane_ip = network_plane.get('ip') - sql_network_plane_info="select networks.vlan_id,networks.cidr from networks where networks.name='"+network_plane_name+"' and networks.cluster_id='"+cluster_id+"' and networks.deleted=0" - query_network_plane_info = session.execute(sql_network_plane_info).fetchone() + # network_plane_ip = network_plane.get('ip') + sql_network_plane_info = "select networks.vlan_id,networks.cidr \ + from networks where networks.name='" + \ + network_plane_name + \ + "' and networks.cluster_id='" + \ + cluster_id + "' and networks.deleted=0" + query_network_plane_info = \ + 
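# --- Editor's sketch (not part of the patch above); the helpers in this hunk
# (ip_into_int, inter_into_ip, cidr_convert_ip_ranges, is_in_cidr_range) all
# treat an IPv4 address as a 32-bit integer.  The same arithmetic, written as
# a standalone Python 3 example with the editor's own names:
from functools import reduce

def ip_to_int(ip):
    return reduce(lambda acc, octet: (acc << 8) + octet, map(int, ip.split('.')))

def int_to_ip(num):
    return '.'.join(str((num >> (8 * i)) & 0xff) for i in range(3, -1, -1))

def cidr_to_range(cidr):
    """'192.168.1.0/24' -> ('192.168.1.1', '192.168.1.255'); a .0 minimum is
    bumped to .1, matching the convention of cidr_convert_ip_ranges()."""
    addr, bits = cidr.split('/')
    mask = ~(2 ** (32 - int(bits)) - 1) & 0xffffffff
    base = ip_to_int(addr) & mask
    low, high = base, base | (~mask & 0xffffffff)
    if low % 256 == 0:
        low += 1
    return int_to_ip(low), int_to_ip(high)
# ip_to_int('10.0.0.1')           ->  167772161
# cidr_to_range('192.168.1.0/24') ->  ('192.168.1.1', '192.168.1.255')
# --- end of editor's sketch ---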
session.execute(sql_network_plane_info).fetchone() vlan_id = query_network_plane_info.values()[0] if not vlan_id: vlan_id = '' cidr = query_network_plane_info.values()[1] + + if cidr: + cidr_split = cidr.split('/') + mask = ~(2**(32 - int(cidr_split[1])) - 1) + ip_validvalue = ip_into_int(cidr_split[0]) & mask + ip_str = inter_into_ip(ip_validvalue) + cidr = ip_str + '/' + cidr_split[1] + index = (vlan_id, cidr) if merged_by_cidr_vlan.has_key(index): merged_by_cidr_vlan[index].append(network_plane) @@ -363,111 +429,151 @@ def merge_networks_for_unifiers(cluster_id, assigned_networks): networks_name.append(network['name']) if not networks_ip: networks_ip = network.get('ip') - merged_networks.append({'name':','.join(networks_name),'ip':networks_ip}) - + merged_networks.append({'name': ','.join(networks_name), + 'ip': networks_ip}) + return merged_networks - -def check_ip_exist_and_in_cidr_range(cluster_id,network_plane_name,network_plane_ip,occupied_network_ips,session): - equal_cidr_network_plane_id=[] - - check_ip_if_valid=_ip_checker_re(network_plane_ip) + + +def check_ip_exist_and_in_cidr_range(cluster_id, network_plane_name, + network_plane_ip, + occupied_network_ips, session): + # equal_cidr_network_plane_id = [] + + check_ip_if_valid = _checker_the_ip_or_hostname_valid(network_plane_ip) if not check_ip_if_valid: - msg = "Error:The %s is not the right ip!"%network_plane_ip + msg = "Error:The %s is not the right ip!" % network_plane_ip LOG.error(msg) raise exception.Forbidden(msg) - - sql_network_plane_cidr="select networks.cidr from networks where networks.name='"+network_plane_name+"' and networks.cluster_id='"+cluster_id+"' and networks.deleted=0" - query_network_plane_cidr = session.execute(sql_network_plane_cidr).fetchone() - network_cidr=query_network_plane_cidr.values().pop() - - check_ip_if_in_cidr=is_in_cidr_range(network_plane_ip, network_cidr) - if not check_ip_if_in_cidr: - msg = "Error:The ip %s is not in cidr %s range!"%(network_plane_ip,network_cidr) - raise exception.Forbidden(msg) - available_ip_list=get_ip_with_equal_cidr(cluster_id,network_plane_name,session) + sql_network_plane_cidr = \ + "select networks.cidr from networks \ + where networks.name='" + network_plane_name + \ + "' and networks.cluster_id='" + cluster_id + \ + "' and networks.deleted=0" + query_network_plane_cidr = \ + session.execute(sql_network_plane_cidr).fetchone() + network_cidr = query_network_plane_cidr.values().pop() + + check_ip_if_in_cidr = is_in_cidr_range(network_plane_ip, network_cidr) + if not check_ip_if_in_cidr: + msg = "Error:The ip %s is not in cidr %s range!" \ + % (network_plane_ip, network_cidr) + raise exception.Forbidden(msg) + + available_ip_list = \ + get_ip_with_equal_cidr(cluster_id, network_plane_name, session) # allow different networks with same ip in the same interface - if (network_plane_ip in available_ip_list and - network_plane_ip not in occupied_network_ips): - msg = "Error:The IP %s is already exist."%network_plane_ip + if (network_plane_ip in available_ip_list or + network_plane_ip in occupied_network_ips): + msg = "Error:The IP %s already exist." 
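# --- Editor's sketch (not part of the patch above); merge_networks_for_unifiers()
# groups the network planes assigned to one interface by (vlan_id, normalized
# CIDR) so that planes sharing a subnet end up with a single IP.  A standalone
# approximation in which the per-plane SQL lookup is replaced by a plain dict
# (plane_meta) that is assumed to hold already-normalized CIDRs:
def merge_planes(assigned, plane_meta):
    """assigned:   [{'name': 'MANAGEMENT', 'ip': '10.43.177.2'}, ...]
    plane_meta: {'MANAGEMENT': ('', '10.43.176.0/22'), ...}  # (vlan_id, cidr)"""
    groups = {}
    for plane in assigned:
        vlan_id, cidr = plane_meta[plane['name']]
        groups.setdefault((vlan_id, cidr), []).append(plane)
    merged = []
    for planes in groups.values():
        names = [p['name'] for p in planes]
        ip = next((p.get('ip') for p in planes if p.get('ip')), None)
        merged.append({'name': ','.join(names), 'ip': ip})
    return merged
# merge_planes([{'name': 'DEPLOYMENT', 'ip': None}, {'name': 'STORAGE', 'ip': None}],
#              {'DEPLOYMENT': ('', '192.168.1.0/24'), 'STORAGE': ('', '192.168.1.0/24')})
# -> [{'name': 'DEPLOYMENT,STORAGE', 'ip': None}]
# --- end of editor's sketch ---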
% network_plane_ip LOG.error(msg) raise exception.Forbidden(msg) + def check_ip_ranges(ip_ranges_one,available_ip_list): ip_range = copy.deepcopy(ip_ranges_one.values()) - ip_ranges_end=ip_range.pop() - ip_ranges_start=ip_range.pop() - inter_num=ip_into_int(ip_ranges_start) - ip_ranges_end_inter=ip_into_int(ip_ranges_end) + ip_ranges_end = ip_range.pop() + ip_ranges_start = ip_range.pop() + inter_num = ip_into_int(ip_ranges_start) + ip_ranges_end_inter = ip_into_int(ip_ranges_end) while True: - inter_tmp=inter_num - ip_tmp=inter_into_ip(inter_tmp) + inter_tmp = inter_num + ip_tmp = inter_into_ip(inter_tmp) if ip_tmp not in available_ip_list: if inter_tmp > ip_ranges_end_inter: - msg = "warning:The IP address assigned by IP ranges is already insufficient." + msg = "warning:The IP address assigned \ + by IP ranges is already insufficient." LOG.warn(msg) break else: - return [True,ip_tmp] + return [True, ip_tmp] else: - inter_num=inter_tmp+1 - - return [False,None] + inter_num = inter_tmp + 1 -def change_host_name(values, mangement_ip): - if mangement_ip: - values['name'] = "host-" + mangement_ip.replace('.','-') - -def according_to_cidr_distribution_ip(cluster_id,network_plane_name,session): - ip_ranges_cidr=[] - distribution_ip="" + return [False, None] - - sql_network_plane_info="select networks.id,networks.cidr,networks.network_type from networks where networks.name='"+network_plane_name+"' and networks.cluster_id='"+cluster_id+"' and networks.deleted=0" - query_network_plane_info = session.execute(sql_network_plane_info).fetchone() - network_id=query_network_plane_info.values()[0] - network_cidr=query_network_plane_info.values()[1] - network_type=query_network_plane_info.values()[2] - if network_type not in ['PRIVATE','EXTERNAL','VXLAN']: - available_ip_list=get_ip_with_equal_cidr(cluster_id,network_plane_name,session) - sql_ip_ranges="select ip_ranges.start,end from ip_ranges where network_id='"+network_id +"'" +def change_host_name(values, mangement_ip,host_ref): + if mangement_ip and host_ref.os_status != "active": + values['name'] = "host-" + mangement_ip.replace('.', '-') + + +def compare_same_cidr_ip(x, y): + return eval(x[0].split('.').pop()) - eval(y[0].split('.').pop()) + + +def according_to_cidr_distribution_ip(cluster_id, network_plane_name, + session, exclude_ips=[]): + ip_ranges_cidr = [] + distribution_ip = "" + + sql_network_plane_info = "select networks.id,networks.cidr,\ + networks.network_type from networks \ + where networks.name='" + network_plane_name + \ + "' and networks.cluster_id='" + cluster_id + \ + "' and networks.deleted=0" + query_network_plane_info = \ + session.execute(sql_network_plane_info).fetchone() + network_id = query_network_plane_info.values()[0] + network_cidr = query_network_plane_info.values()[1] + network_type = query_network_plane_info.values()[2] + + if network_type not in ['DATAPLANE','EXTERNAL']: + available_ip_list = get_ip_with_equal_cidr( + cluster_id, network_plane_name, session, exclude_ips) + sql_ip_ranges = "select ip_ranges.start,end from \ + ip_ranges where network_id='" + network_id + \ + "' and ip_ranges.deleted=0" query_ip_ranges = session.execute(sql_ip_ranges).fetchall() + query_ip_ranges = sorted(query_ip_ranges, cmp=compare_same_cidr_ip) if query_ip_ranges: for ip_ranges_one in query_ip_ranges: - check_ip_exist_list=check_ip_ranges(ip_ranges_one,available_ip_list) + check_ip_exist_list = \ + check_ip_ranges(ip_ranges_one, available_ip_list) if check_ip_exist_list[0]: - distribution_ip=check_ip_exist_list[1] - break - else: - 
continue + distribution_ip = check_ip_exist_list[1] + return distribution_ip + msg = "Error:The IP address assigned by \ + ip ranges is already insufficient." + LOG.error(msg) + raise exception.Forbidden(msg) else: - ip_ranges_cidr=cidr_convert_ip_ranges(network_cidr) - ip_min_inter=ip_into_int(ip_ranges_cidr[0]) - ip_max_inter=ip_into_int(ip_ranges_cidr[1]) + ip_ranges_cidr = cidr_convert_ip_ranges(network_cidr) + ip_min_inter = ip_into_int(ip_ranges_cidr[0]) + ip_max_inter = ip_into_int(ip_ranges_cidr[1]) while True: - distribution_ip=inter_into_ip(ip_min_inter+1) + distribution_ip = inter_into_ip(ip_min_inter + 1) if distribution_ip not in available_ip_list: - distribution_ip_inter=ip_into_int(distribution_ip) + distribution_ip_inter = ip_into_int(distribution_ip) if distribution_ip_inter < ip_max_inter: break else: - msg = "Error:The IP address assigned by CIDR is already insufficient." + msg = "Error:The IP address assigned by \ + CIDR is already insufficient." LOG.error(msg) - break + raise exception.Forbidden(msg) else: - ip_min_inter=ip_min_inter+1 + ip_min_inter = ip_min_inter + 1 return distribution_ip -def add_assigned_networks_data(context,network,cluster_id,host_interface_ref,network_plane_names,network_plane_ip,session): + +def add_assigned_networks_data(context, network, cluster_id, + host_interface_ref, network_plane_names, + network_plane_ip, session): for network_plane_name in network_plane_names: - sql_network_plane_id="select networks.id,networks.network_type from networks where networks.name='"+network_plane_name+"' and networks.cluster_id='"+cluster_id+"' and networks.deleted=0" - query_network_plane_id = session.execute(sql_network_plane_id).fetchone() - network_id=query_network_plane_id.values()[0] - network_type=query_network_plane_id.values()[1] - + sql_network_plane_id = "select networks.id,networks.network_type \ + from networks where networks.name='" + \ + network_plane_name + \ + "' and networks.cluster_id='" + \ + cluster_id + "' and networks.deleted=0" + query_network_plane_id = \ + session.execute(sql_network_plane_id).fetchone() + network_id = query_network_plane_id.values()[0] + network_type = query_network_plane_id.values()[1] + assigned_network = dict() - assigned_network['ip']=network_plane_ip + assigned_network['ip'] = network_plane_ip assigned_networks_ref = models.AssignedNetworks() assigned_network['network_id'] = network_id if host_interface_ref.type == 'bond': @@ -475,27 +581,78 @@ def add_assigned_networks_data(context,network,cluster_id,host_interface_ref,net else: assigned_network['mac'] = network['mac'] assigned_network['interface_id'] = host_interface_ref.id - if network_type == 'VXLAN' or network_type == 'PRIVATE': - assigned_network['vswitch_type'] = network.get('vswitch_type', 'ovs') - assigned_networks_ref.update(assigned_network) + if network_type == 'VXLAN' or network_type == 'DATAPLANE': + assigned_network['vswitch_type'] = network.get('vswitch_type', + 'ovs') + assigned_networks_ref.update(assigned_network) _update_values(assigned_networks_ref, assigned_network) assigned_networks_ref.save(session=session) -def _according_interface_to_add_network_alias(context,interface_assigned_networks,values): + +def assign_float_ip(context, cluster_id, role_id, network_name, session): + assigned_ips = [] + vip = {} + query = session.query(models.Role).filter_by(id=role_id).\ + filter_by(deleted=False) + query_role = query.one() + if query_role.name == 'CONTROLLER_HA': + if not query_role.vip: + vip['vip'] = according_to_cidr_distribution_ip( + 
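# --- Editor's sketch (not part of the patch above); the allocation loop above
# (check_ip_ranges / according_to_cidr_distribution_ip) walks an ip_ranges
# entry, or the whole CIDR, from its start address upward and hands out the
# first address not already in use.  The same idea, standalone, using the
# stdlib ipaddress module instead of the patch's hand-rolled integer math:
import ipaddress

def next_free_ip(start, end, used):
    """start/end: dotted-quad strings; used: iterable of dotted-quad strings."""
    used_set = {ipaddress.IPv4Address(u) for u in used}
    cur, last = ipaddress.IPv4Address(start), ipaddress.IPv4Address(end)
    while cur <= last:
        if cur not in used_set:
            return str(cur)
        cur += 1                     # try the next address in the range
    raise RuntimeError("IP range %s-%s is exhausted" % (start, end))
# next_free_ip('192.168.1.20', '192.168.1.22', used=['192.168.1.20'])
# -> '192.168.1.21'
# --- end of editor's sketch ---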
cluster_id, network_name, session, assigned_ips) + assigned_ips.append(vip['vip']) + + sql_cluster_info = "select clusters.public_vip from clusters \ + where id='" + cluster_id +\ + "' and clusters.deleted=0" + query_cluster_info = session.execute(sql_cluster_info).fetchone() + cluster_public_vip = query_cluster_info.values().pop() + if not cluster_public_vip: + cluster_values = dict() + cluster_ref = _cluster_get(context, cluster_id, + session=session) + cluster_values['public_vip'] = vip['vip'] + cluster_ref.update(cluster_values) + _update_values(cluster_ref, cluster_values) + cluster_ref.save(session=session) + if not query_role.glance_vip: + vip['glance_vip'] = according_to_cidr_distribution_ip( + cluster_id, network_name, session, assigned_ips) + assigned_ips.append(vip['glance_vip']) + + if not query_role.db_vip: + vip['db_vip'] = according_to_cidr_distribution_ip( + cluster_id, network_name, session, assigned_ips) + assigned_ips.append(vip['db_vip']) + elif query_role.name == 'CONTROLLER_LB': + if not query_role.vip: + vip['vip'] = according_to_cidr_distribution_ip( + cluster_id, network_name, session) + if vip: + query.update(vip, synchronize_session='fetch') + + +def _according_interface_to_add_network_alias(context, + interface_assigned_networks, + values): network_cidrs = [] session = get_session() - network_query = session.query(models.Network).filter_by(deleted=False).filter_by(cluster_id=values['cluster']).all() + network_query = \ + session.query(models.Network).filter_by( + deleted=False).filter_by(cluster_id=values['cluster']).all() for network_info in network_query: for network_name in interface_assigned_networks: - if network_name==network_info['name']: + if network_name == network_info['name']: network_cidrs.append(network_info['cidr']) if len(set(network_cidrs)) == 1 and len(network_cidrs) > 1: for sub_network_query in network_query: if sub_network_query.name in interface_assigned_networks: alias_name = '_'.join(interface_assigned_networks) - query_network = session.query(models.Network).filter_by(deleted=False).filter_by(id=sub_network_query.id) + query_network = \ + session.query(models.Network).filter_by( + deleted=False).filter_by(id=sub_network_query.id) query_network.update({"alias": alias_name}) + @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) @utils.no_4byte_params @@ -512,9 +669,9 @@ def _host_update(context, values, host_id): role_values = dict() host_interfaces_values = dict() host_cluster_values = dict() - assigned_networks_ip=[] - management_ip="" - + # assigned_networks_ip = [] + # management_ip = "" + session = get_session() with session.begin(): if host_id: @@ -531,9 +688,11 @@ def _host_update(context, values, host_id): host_cluster_values['updated_at'] = timeutils.utcnow() if host_id: - if values.has_key("os_version") and utils.is_uuid_like(values['os_version']): + if values.has_key("os_version") and \ + utils.is_uuid_like(values['os_version']): host_ref.os_version_id = values['os_version'] - elif(values.has_key("os_version") and not utils.is_uuid_like(values['os_version'])): + elif(values.has_key("os_version") and not + utils.is_uuid_like(values['os_version'])): host_ref.os_version_file = values['os_version'] if values.has_key('cluster'): delete_host_cluster(context, host_id, session) @@ -541,7 +700,7 @@ def _host_update(context, values, host_id): host_cluster_values['cluster_id'] = values['cluster'] if host_ref.status == 'init': values['status'] = "in-cluster" - cluster_host_ref.update(host_cluster_values) 
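# --- Editor's sketch (not part of the patch above); assign_float_ip() fills in
# the floating addresses a controller role still lacks on the MANAGEMENT plane:
# CONTROLLER_HA needs vip, glance_vip and db_vip (and the cluster's public_vip
# defaults to the freshly allocated HA vip), while CONTROLLER_LB only needs
# vip.  A standalone model with the database rows replaced by dicts and the
# real allocator replaced by a caller-supplied function:
def assign_role_vips(role, cluster, allocate_ip):
    """role/cluster: dicts; allocate_ip(already_assigned) -> new IP string."""
    assigned = []

    def take():
        ip = allocate_ip(assigned)
        assigned.append(ip)
        return ip

    if role['name'] == 'CONTROLLER_HA':
        if not role.get('vip'):
            role['vip'] = take()
            if not cluster.get('public_vip'):      # cluster VIP defaults to the HA vip
                cluster['public_vip'] = role['vip']
        if not role.get('glance_vip'):
            role['glance_vip'] = take()
        if not role.get('db_vip'):
            role['db_vip'] = take()
    elif role['name'] == 'CONTROLLER_LB':
        if not role.get('vip'):
            role['vip'] = take()
    return role, cluster
# assign_role_vips({'name': 'CONTROLLER_HA'}, {},
#                  allocate_ip=lambda used: '10.43.177.%d' % (10 + len(used)))
# -> role gets vip/glance_vip/db_vip of 10.43.177.10/.11/.12,
#    cluster gets public_vip 10.43.177.10
# --- end of editor's sketch ---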
+ cluster_host_ref.update(host_cluster_values) _update_values(cluster_host_ref, host_cluster_values) cluster_host_ref.save(session=session) if values.has_key('role'): @@ -551,74 +710,107 @@ def _host_update(context, values, host_id): host_role_ref = models.HostRole() role_values['host_id'] = host_ref.id role_values['role_id'] = role_info - host_role_ref.update(role_values) + host_role_ref.update(role_values) _update_values(host_role_ref, role_values) host_role_ref.save(session=session) + if values.get('cluster'): + assign_float_ip(context, values['cluster'], + role_info, 'MANAGEMENT', session) values['status'] = "with-role" else: delete_host_role(context, host_id, session) - if (values.has_key('cluster') or - host_ref.status == 'with-role' or - host_ref.status == 'in-cluster'): + if (values.has_key('cluster') or + host_ref.status == 'with-role' or + host_ref.status == 'in-cluster'): values['status'] = "in-cluster" else: values['status'] = "init" if values.has_key('interfaces'): - host_interfaces=get_host_interface(context, host_id, None, session) + host_interfaces = \ + get_host_interface(context, host_id, None, session) if host_interfaces: for host_interface_info in host_interfaces: - delete_assigned_networks(context, host_interface_info.id, session) + delete_assigned_networks( + context, host_interface_info.id, session) delete_host_interface(context, host_id, session) orig_keys = list(eval(values['interfaces'])) for host_interface_info in orig_keys: - if host_interface_info.has_key('assigned_networks'): - _according_interface_to_add_network_alias(context, host_interface_info['assigned_networks'],values) + if (host_interface_info.has_key('assigned_networks') and + host_interface_info['assigned_networks']): + _according_interface_to_add_network_alias( + context, host_interface_info[ + 'assigned_networks'], values) for network in orig_keys: host_interfaces_values = network.copy() + if len(network.get('name', '')) > 15: + msg = 'The length of interface name:%s \ + is larger than 15.' 
% network['name'] + LOG.error(msg) + raise exception.Forbidden(msg) if network.has_key('slaves'): if len(network['slaves']) == 1: - host_interfaces_values['slave1'] = network['slaves'][0] + host_interfaces_values['slave1'] = \ + network['slaves'][0] elif len(network['slaves']) == 2: - host_interfaces_values['slave1'] = network['slaves'][0] - host_interfaces_values['slave2'] = network['slaves'][1] + host_interfaces_values['slave1'] = \ + network['slaves'][0] + host_interfaces_values['slave2'] = \ + network['slaves'][1] del host_interfaces_values['slaves'] if host_interfaces_values.has_key('assigned_networks'): del host_interfaces_values['assigned_networks'] if host_interfaces_values.has_key('is_deployment'): - if host_interfaces_values['is_deployment']=="True" or host_interfaces_values['is_deployment'] == True or host_interfaces_values['is_deployment'] == "true": - host_interfaces_values['is_deployment']=1 + if host_interfaces_values[ + 'is_deployment'] == "True" or\ + host_interfaces_values['is_deployment'] == True or\ + host_interfaces_values['is_deployment'] == "true": + host_interfaces_values['is_deployment'] = 1 else: - host_interfaces_values['is_deployment']=0 - if host_interfaces_values.has_key('id'): del host_interfaces_values['id'] + host_interfaces_values['is_deployment'] = 0 + if host_interfaces_values.has_key('id'): + del host_interfaces_values['id'] host_interface_ref = models.HostInterface() host_interface_ref.update(host_interfaces_values) host_interface_ref.host_id = host_id - _update_values(host_interface_ref, host_interfaces_values) + _update_values(host_interface_ref, host_interfaces_values) host_interface_ref.save(session=session) if values.has_key('cluster'): if network.has_key('assigned_networks'): occupied_network_ips = [] - merged_assigned_networks = merge_networks_for_unifiers(values['cluster'], - network['assigned_networks']) + merged_assigned_networks = \ + merge_networks_for_unifiers( + values['cluster'], + network['assigned_networks']) for networks_plane in merged_assigned_networks: - network_plane_names = networks_plane['name'].split(',') + network_plane_names = \ + networks_plane['name'].split(',') network_plane_ip = networks_plane.get('ip') if network_plane_ip: - occupied_network_ips.append(network_plane_ip) - check_ip_exist_and_in_cidr_range(values['cluster'], - network_plane_names[0], - network_plane_ip, - occupied_network_ips, - session) + check_ip_exist_and_in_cidr_range( + values['cluster'], + network_plane_names[0], + network_plane_ip, + occupied_network_ips, + session) + occupied_network_ips.append( + network_plane_ip) else: - network_plane_ip = according_to_cidr_distribution_ip(values['cluster'], network_plane_names[0], - session) + network_plane_ip = \ + according_to_cidr_distribution_ip( + values['cluster'], + network_plane_names[0], + session) if 'MANAGEMENT' in network_plane_names: - change_host_name(values, network_plane_ip) - add_assigned_networks_data(context,network,values['cluster'],host_interface_ref,network_plane_names,network_plane_ip,session) + change_host_name(values, network_plane_ip, + host_ref) + # management_ip = network_plane_ip + add_assigned_networks_data( + context, network, values['cluster'], + host_interface_ref, network_plane_names, + network_plane_ip, session) query = session.query(models.Host).filter_by(id=host_id) keys = values.keys() @@ -628,11 +820,8 @@ def _host_update(context, values, host_id): updated = query.update(values, synchronize_session='fetch') if not updated: - msg = (_('cannot transition from %(current)s to ' 
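# --- Editor's sketch (not part of the patch above); the interface loop above
# normalizes each incoming interface dict before saving it: names longer than
# 15 characters (the usual Linux interface-name limit) are rejected, bond
# slaves are split into slave1/slave2, and is_deployment is coerced to 0/1.
# A standalone version of that normalization, with the editor's own names:
def normalize_interface(raw):
    iface = dict(raw)
    if len(iface.get('name', '')) > 15:
        raise ValueError("interface name %r is longer than 15 characters"
                         % iface['name'])
    slaves = iface.pop('slaves', [])
    if len(slaves) >= 1:
        iface['slave1'] = slaves[0]
    if len(slaves) >= 2:
        iface['slave2'] = slaves[1]
    if 'is_deployment' in iface:           # accept True, "True" or "true"
        iface['is_deployment'] = \
            1 if str(iface['is_deployment']).lower() == 'true' else 0
    iface.pop('assigned_networks', None)   # stored separately, not on this row
    iface.pop('id', None)
    return iface
# normalize_interface({'name': 'bond0', 'slaves': ['eth0', 'eth1'],
#                      'is_deployment': 'True'})
# -> {'name': 'bond0', 'slave1': 'eth0', 'slave2': 'eth1', 'is_deployment': 1}
# --- end of editor's sketch ---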
- '%(next)s in update (wanted ' - 'from_state=%(from)s)') % - {'current': current, 'next': new_status, - 'from': from_state}) + msg = (_('update host_id %(host_id)s failed') % + {'host_id': host_id}) raise exception.Conflict(msg) host_ref = _host_get(context, host_id, session=session) @@ -649,27 +838,32 @@ def _host_update(context, values, host_id): raise exception.Duplicate("Node ID %s already exists!" % values['id']) - if values.has_key('cluster'): + if values.has_key('cluster'): host_cluster_values['host_id'] = host_ref.id host_cluster_values['cluster_id'] = values['cluster'] - cluster_host_ref.update(host_cluster_values) + cluster_host_ref.update(host_cluster_values) _update_values(cluster_host_ref, host_cluster_values) - cluster_host_ref.save(session=session) - + cluster_host_ref.save(session=session) + if values.has_key('role'): for role_info in values['role']: host_role_ref = models.HostRole() role_values['host_id'] = host_ref.id role_values['role_id'] = role_info - host_role_ref.update(role_values) + host_role_ref.update(role_values) _update_values(host_role_ref, role_values) host_role_ref.save(session=session) - - if values.has_key("os_version") and utils.is_uuid_like(values['os_version']): + if values.get('cluster'): + assign_float_ip( + context, values['cluster'], role_info, + 'MANAGEMENT', session) + if values.has_key("os_version") and \ + utils.is_uuid_like(values['os_version']): host_ref.os_version_id = values['os_version'] - elif(values.has_key("os_version") and not utils.is_uuid_like(values['os_version'])): + elif(values.has_key("os_version") and not + utils.is_uuid_like(values['os_version'])): host_ref.os_version_file = values['os_version'] - + if values.has_key('interfaces'): orig_keys = list(eval(values['interfaces'])) for network in orig_keys: @@ -677,43 +871,62 @@ def _host_update(context, values, host_id): host_interfaces_values = network.copy() if network.has_key('slaves'): if len(network['slaves']) == 1: - host_interfaces_values['slave1'] = network['slaves'][0] + host_interfaces_values['slave1'] = \ + network['slaves'][0] elif len(network['slaves']) == 2: - host_interfaces_values['slave1'] = network['slaves'][0] - host_interfaces_values['slave2'] = network['slaves'][1] - + host_interfaces_values['slave1'] = \ + network['slaves'][0] + host_interfaces_values['slave2'] = \ + network['slaves'][1] + if host_interfaces_values.has_key('is_deployment'): - if host_interfaces_values['is_deployment']=="True" or host_interfaces_values['is_deployment'] == True or host_interfaces_values['is_deployment'] == "true": - host_interfaces_values['is_deployment']=1 + if host_interfaces_values['is_deployment'] == \ + "True" or\ + host_interfaces_values['is_deployment'] == True or \ + host_interfaces_values['is_deployment'] == "true": + host_interfaces_values['is_deployment'] = 1 else: - host_interfaces_values['is_deployment']=0 + host_interfaces_values['is_deployment'] = 0 host_interfaces_values['host_id'] = host_ref.id - host_interface_ref.update(host_interfaces_values) + host_interface_ref.update(host_interfaces_values) _update_values(host_interface_ref, host_interfaces_values) host_interface_ref.save(session=session) - + if values.has_key('cluster'): if network.has_key('assigned_networks'): occupied_network_ips = [] - merged_assigned_networks = merge_networks_for_unifiers(values['cluster'], - network['assigned_networks']) + merged_assigned_networks = \ + merge_networks_for_unifiers( + values['cluster'], + network['assigned_networks']) for networks_plane in merged_assigned_networks: - 
network_plane_names = networks_plane['name'].split(',') + network_plane_names = \ + networks_plane['name'].split(',') network_plane_ip = networks_plane.get('ip') if network_plane_ip: - occupied_network_ips.append(network_plane_ip) - check_ip_exist_and_in_cidr_range(values['cluster'], - network_plane_names[0], - network_plane_ip, - occupied_network_ips, - session) + check_ip_exist_and_in_cidr_range( + values['cluster'], + network_plane_names[0], + network_plane_ip, + occupied_network_ips, + session) + occupied_network_ips.append( + network_plane_ip) else: - network_plane_ip = according_to_cidr_distribution_ip(values['cluster'], network_plane_names[0], - session) + network_plane_ip = \ + according_to_cidr_distribution_ip( + values['cluster'], + network_plane_names[0], + session) if 'MANAGEMENT' in network_plane_names: - change_host_name(values, network_plane_ip) - add_assigned_networks_data(context,network,values['cluster'],host_interface_ref,network_plane_names,network_plane_ip,session) - + change_host_name(values, network_plane_ip, + host_ref) + # management_ip = network_plane_ip + add_assigned_networks_data( + context, network, values['cluster'], + host_interface_ref, network_plane_names, + network_plane_ip, session) + query = session.query(models.Host).filter_by(id=host_ref.id) if values.has_key('cluster'): del values['cluster'] @@ -726,6 +939,7 @@ def _host_update(context, values, host_id): updated = query.update(values, synchronize_session='fetch') return host_get(context, host_ref.id) + def _host_get(context, host_id, session=None, force_show_deleted=False): """Get an host or raise if it does not exist.""" _check_host_id(host_id) @@ -747,37 +961,45 @@ def _host_get(context, host_id, session=None, force_show_deleted=False): return host + def host_get(context, host_id, session=None, force_show_deleted=False): host = _host_get(context, host_id, session=session, - force_show_deleted=force_show_deleted) + force_show_deleted=force_show_deleted) return host - -def get_host_interface(context, host_id, mac=None, session=None, force_show_deleted=False): + + +def get_host_interface(context, host_id, mac=None, session=None, + force_show_deleted=False): session = session or get_session() try: query = session.query(models.HostInterface).filter_by(host_id=host_id) if mac: - query = query.filter_by(mac=mac) + query = query.filter_by(mac=mac) # filter out deleted images if context disallows it if not force_show_deleted: query = query.filter_by(deleted=False) host_interface = query.all() - + for interface in host_interface: assigned_networks_list = [] openvswitch_type = '' - assignnetwork_query = session.query(models.AssignedNetworks).filter_by(interface_id=interface.id).filter_by(deleted=False) + assignnetwork_query = \ + session.query(models.AssignedNetworks).filter_by( + interface_id=interface.id).filter_by(deleted=False) assignnetwork_list = assignnetwork_query.all() for assignnetwork in assignnetwork_list: - query_network = session.query(models.Network).filter_by(id=assignnetwork.network_id).filter_by(deleted=False).first() + query_network = \ + session.query(models.Network).filter_by( + id=assignnetwork.network_id).filter_by( + deleted=False).first() if query_network: - assigned_networks_info = {'name':query_network.name, - 'ip':assignnetwork.ip} + assigned_networks_info = {'name': query_network.name, + 'ip': assignnetwork.ip} assigned_networks_list.append(assigned_networks_info) - if query_network.network_type in ['VXLAN','PRIVATE']: + if query_network.network_type in ['DATAPLANE']: 
openvswitch_type = assignnetwork.vswitch_type interface.assigned_networks = assigned_networks_list interface.vswitch_type = openvswitch_type @@ -789,20 +1011,28 @@ def get_host_interface(context, host_id, mac=None, session=None, force_show_dele return host_interface -def get_host_interface_mac(context, mac, session=None, force_show_deleted=False): + +def get_host_interface_mac(context, mac, session=None, + force_show_deleted=False): session = session or get_session() try: - query = session.query(models.HostInterface).filter_by(mac=mac).filter_by(deleted=False) + query = session.query(models.HostInterface).filter_by( + mac=mac).filter_by(deleted=False) if not force_show_deleted and not context.can_see_deleted: query = query.filter_by(deleted=False) host_interface = query.all() for interface in host_interface: list = [] - assignnetwork_query = session.query(models.AssignedNetworks).filter_by(interface_id=interface.id).filter_by(deleted=False) + assignnetwork_query = \ + session.query(models.AssignedNetworks).filter_by( + interface_id=interface.id).filter_by(deleted=False) assignnetwork_list = assignnetwork_query.all() for assignnetwork in assignnetwork_list: - query_network_name = session.query(models.Network).filter_by(id=assignnetwork.network_id).filter_by(deleted=False).one() + query_network_name = \ + session.query(models.Network).filter_by( + id=assignnetwork.network_id).filter_by( + deleted=False).one() list.append(query_network_name.name) interface.assigned_networks = list @@ -812,23 +1042,31 @@ def get_host_interface_mac(context, mac, session=None, force_show_deleted=False) raise exception.NotFound(msg) return host_interface - -def get_assigned_network(context, interface_id, network_id, session=None, force_show_deleted=False): + + +def get_assigned_network(context, interface_id, network_id, + session=None, force_show_deleted=False): session = session or get_session() try: - query = session.query(models.AssignedNetworks).filter_by(interface_id=interface_id).filter_by(network_id=network_id).filter_by(deleted=False) + query = \ + session.query(models.AssignedNetworks).filter_by( + interface_id=interface_id).filter_by( + network_id=network_id).filter_by(deleted=False) host_assigned_network = query.one() except sa_orm.exc.NoResultFound: - msg = "No assigned_network found with interface %s and network %s" % (interface_id, network_id) + msg = "No assigned_network found with interface %s and \ + network %s" % (interface_id, network_id) LOG.debug(msg) raise exception.NotFound(msg) return host_assigned_network - + + def delete_host_role(context, host_id, session=None): session = session or get_session() try: - query = session.query(models.HostRole).filter_by(host_id=host_id).filter_by(deleted=False) + query = session.query(models.HostRole).filter_by( + host_id=host_id).filter_by(deleted=False) host_roles = query.all() for host_role in host_roles: host_role.delete(session=session) @@ -837,10 +1075,12 @@ def delete_host_role(context, host_id, session=None): LOG.debug(msg) raise exception.NotFound(msg) -def delete_host_cluster(context, host_id , session=None): + +def delete_host_cluster(context, host_id, session=None): session = session or get_session() try: - query = session.query(models.ClusterHost).filter_by(host_id=host_id).filter_by(deleted=False) + query = session.query(models.ClusterHost).filter_by( + host_id=host_id).filter_by(deleted=False) host_clusters = query.all() for host_cluster in host_clusters: host_cluster.delete(session=session) @@ -848,11 +1088,13 @@ def 
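# --- Editor's sketch (not part of the patch above); get_host_interface() above
# decorates every interface with the network planes assigned to it and, for a
# DATAPLANE network, the vswitch type.  The same shaping with the ORM queries
# replaced by in-memory structures (all names are the editor's):
def decorate_interface(interface, assigned, networks_by_id):
    """assigned: [{'network_id': ..., 'ip': ..., 'vswitch_type': ...}, ...]"""
    planes, vswitch_type = [], ''
    for entry in assigned:
        net = networks_by_id.get(entry['network_id'])
        if not net:
            continue
        planes.append({'name': net['name'], 'ip': entry.get('ip')})
        if net['network_type'] == 'DATAPLANE':
            vswitch_type = entry.get('vswitch_type', '')
    interface['assigned_networks'] = planes
    interface['vswitch_type'] = vswitch_type
    return interface
# decorate_interface({'name': 'eth0'},
#                    [{'network_id': 'n1', 'ip': '10.0.0.2', 'vswitch_type': 'dvs'}],
#                    {'n1': {'name': 'physnet1', 'network_type': 'DATAPLANE'}})
# -> assigned_networks=[{'name': 'physnet1', 'ip': '10.0.0.2'}], vswitch_type='dvs'
# --- end of editor's sketch ---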
delete_host_cluster(context, host_id , session=None): msg = "No host found with ID %s" % host_id LOG.debug(msg) raise exception.NotFound(msg) - + + def delete_host_interface(context, host_id, session=None): session = session or get_session() try: - query = session.query(models.HostInterface).filter_by(host_id=host_id).filter_by(deleted=False) + query = session.query(models.HostInterface).filter_by( + host_id=host_id).filter_by(deleted=False) host_interface = query.all() for interface in host_interface: interface.delete(session=session) @@ -861,63 +1103,76 @@ def delete_host_interface(context, host_id, session=None): LOG.debug(msg) raise exception.NotFound(msg) -def _get_assigned_networks_by_network_id(context, network_id, session=None, force_show_deleted=False): + +def _get_assigned_networks_by_network_id(context, network_id, session=None, + force_show_deleted=False): session = session or get_session() try: - query = session.query(models.AssignedNetworks).filter_by(network_id=network_id).filter_by(deleted=False) + query = session.query(models.AssignedNetworks).filter_by( + network_id=network_id).filter_by(deleted=False) assigned_networks = query.all() except sa_orm.exc.NoResultFound: - msg = "No host found with ID %s" % host_id + msg = "No network found with ID %s" % network_id LOG.debug(msg) raise exception.NotFound(msg) return assigned_networks -def get_assigned_networks(context, interface_id, session=None, force_show_deleted=False): + +def get_assigned_networks(context, interface_id, session=None, + force_show_deleted=False): session = session or get_session() try: - query = session.query(models.AssignedNetworks).filter_by(interface_id=interface_id).filter_by(deleted=False) + query = session.query(models.AssignedNetworks).filter_by( + interface_id=interface_id).filter_by(deleted=False) assigned_networks = query.all() except sa_orm.exc.NoResultFound: - msg = "No host found with ID %s" % host_id + msg = "No interface found with ID %s" % interface_id LOG.debug(msg) raise exception.NotFound(msg) return assigned_networks -def delete_assigned_networks(context, interface_id, session=None, force_show_deleted=False): + +def delete_assigned_networks(context, interface_id, session=None, + force_show_deleted=False): session = session or get_session() try: - query = session.query(models.AssignedNetworks).filter_by(interface_id=interface_id).filter_by(deleted=False) + query = session.query(models.AssignedNetworks).filter_by( + interface_id=interface_id).filter_by(deleted=False) assigned_networks = query.all() for assigned_network in assigned_networks: assigned_network.delete(session=session) - + except sa_orm.exc.NoResultFound: - msg = "No host found with ID %s" % host_id + msg = "No interface found with ID %s" % interface_id LOG.debug(msg) raise exception.NotFound(msg) -def get_os_version(context, version_id, session=None, force_show_deleted=False): + +def get_os_version(context, version_id, session=None, + force_show_deleted=False): session = session or get_session() try: - query = session.query(models.version).filter_by(id=version_id) + query = session.query(models.version).filter_by(id=version_id) - #filter out deleted images if context disallows it + # filter out deleted images if context disallows it if not force_show_deleted and not context.can_see_deleted: query = query.filter_by(deleted=False) os_version = query.one() except sa_orm.exc.NoResultFound: - msg = "No host found with ID %s" % host_id + msg = "No version found with ID %s" % version_id LOG.debug(msg) raise exception.NotFound(msg) 
return os_version + def host_add(context, values): """Add an host from the values dictionary.""" return _host_update(context, values, None) + @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) def host_destroy(context, host_id): @@ -925,7 +1180,7 @@ def host_destroy(context, host_id): session = get_session() with session.begin(): host_ref = _host_get(context, host_id, session=session) - host_interfaces=get_host_interface(context, host_id, None, session) + host_interfaces = get_host_interface(context, host_id, None, session) if host_interfaces: for host_interface_info in host_interfaces: delete_assigned_networks(context, host_interface_info.id) @@ -934,6 +1189,7 @@ def host_destroy(context, host_id): return host_ref + def host_update(context, host_id, values): """ Set the given properties on an image and update it. @@ -942,10 +1198,12 @@ def host_update(context, host_id, values): """ return _host_update(context, values, host_id) + def discover_host_add(context, values): """Add an discover host from the values dictionary.""" return _discover_host_update(context, values, None) + def discover_host_update(context, discover_host_id, values): """ Set the given properties on an image and update it. @@ -954,6 +1212,7 @@ def discover_host_update(context, discover_host_id, values): """ return _discover_host_update(context, values, discover_host_id) + @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) @utils.no_4byte_params @@ -963,15 +1222,17 @@ def _discover_host_update(context, values, discover_host_id): :param context: Request context :param values: A dict of attributes to set - :param discover_host_id: If None, create the discover host, otherwise, find and update it + :param discover_host_id: If None, create the discover host, + otherwise, find and update it """ # NOTE(jbresnah) values is altered in this so a copy is needed values = values.copy() session = get_session() with session.begin(): if discover_host_id: - discover_host_ref = _discover_host_get(context, discover_host_id, session=session) - + discover_host_ref = \ + _discover_host_get(context, discover_host_id, session=session) + else: discover_host_ref = models.DiscoverHost() @@ -981,7 +1242,7 @@ def _discover_host_update(context, values, discover_host_id): # NOTE(iccha-sethi): updated_at must be explicitly set in case # only ImageProperty table was modifited values['updated_at'] = timeutils.utcnow() - + if discover_host_id: if values.get('id', None): del values['id'] discover_host_ref.update(values) @@ -999,12 +1260,14 @@ def _discover_host_update(context, values, discover_host_id): except db_exception.DBDuplicateEntry: raise exception.Duplicate("Node ID %s already exists!" 
% values['id']) - + return discover_host_get(context, discover_host_ref.id) - -def _discover_host_get(context, discover_host_id, session=None, force_show_deleted=False): + + +def _discover_host_get(context, discover_host_id, session=None, + force_show_deleted=False): """Get an host or raise if it does not exist.""" - + session = session or get_session() try: query = session.query(models.DiscoverHost).filter_by(id=discover_host_id) @@ -1018,13 +1281,16 @@ def _discover_host_get(context, discover_host_id, session=None, force_show_delet msg = "No host found with ID %s" % discover_host_id LOG.debug(msg) raise exception.NotFound(msg) - - -def discover_host_get(context, discover_host_id, session=None, force_show_deleted=False): - discover_host = _discover_host_get(context, discover_host_id, session=session, - force_show_deleted=force_show_deleted) + + +def discover_host_get(context, discover_host_id, session=None, + force_show_deleted=False): + discover_host = _discover_host_get(context, discover_host_id, + session=session, + force_show_deleted=force_show_deleted) return discover_host - + + @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) def discover_host_destroy(context, host_id): @@ -1034,9 +1300,10 @@ def discover_host_destroy(context, host_id): host_ref = _discover_host_get(context, host_id, session=session) host_ref.delete(session=session) return host_ref - + + def discover_host_get_all(context, filters=None, marker=None, limit=None, - sort_key=None, sort_dir=None): + sort_key=None, sort_dir=None): sort_key = ['created_at'] if not sort_key else sort_key @@ -1054,9 +1321,10 @@ def discover_host_get_all(context, filters=None, marker=None, limit=None, False) marker_discover_host = None if marker is not None: - marker_discover_host = _discover_host_get(context, - marker, - force_show_deleted=showing_deleted) + marker_discover_host =\ + _discover_host_get(context, + marker, + force_show_deleted=showing_deleted) for key in ['created_at', 'id']: if key not in sort_key: @@ -1064,27 +1332,41 @@ def discover_host_get_all(context, filters=None, marker=None, limit=None, sort_dir.append(default_sort_dir) session = get_session() - - query = session.query(models.DiscoverHost).filter_by(deleted=showing_deleted) + discover_hosts = [] + if 'cluster_id' in filters: + cluster_id = filters.pop('cluster_id') + sql = "select discover_hosts.* from discover_hosts where \ + discover_hosts.deleted=0 and discover_hosts.cluster_id ='" + \ + cluster_id + "'" + query = session.execute(sql).fetchall() + for host in query: + host_dict = dict(host.items()) + discover_hosts.append(host_dict) + return discover_hosts + + query = session.query(models.DiscoverHost).filter_by( + deleted=showing_deleted) query = _paginate_query(query, models.DiscoverHost, limit, sort_key, marker=marker_discover_host, sort_dir=None, sort_dirs=sort_dir) - discover_hosts = [] for discover_host in query.all(): discover_host = discover_host.to_dict() discover_hosts.append(discover_host) return discover_hosts - -def get_discover_host_detail(context, discover_host_id, session=None, force_show_deleted=False): + + +def get_discover_host_detail(context, discover_host_id, session=None, + force_show_deleted=False): ''' ''' session = session or get_session() try: - query = session.query(models.DiscoverHost).filter_by(id=discover_host_id, deleted=False) + query = session.query(models.DiscoverHost).filter_by( + id=discover_host_id, deleted=False) # filter out deleted images if context disallows it if not 
force_show_deleted and not context.can_see_deleted: query = query.filter_by(deleted=False) @@ -1096,6 +1378,7 @@ def get_discover_host_detail(context, discover_host_id, session=None, force_show return discover_host + def _check_cluster_id(cluster_id): """ check if the given project id is valid before executing operations. For @@ -1108,11 +1391,13 @@ def _check_cluster_id(cluster_id): if (cluster_id and len(cluster_id) > models.Cluster.id.property.columns[0].type.length): raise exception.NotFound() - + + def delete_cluster_host(context, cluster_id, session=None): session = session or get_session() try: - query = session.query(models.ClusterHost).filter_by(cluster_id=cluster_id).filter_by(deleted=False) + query = session.query(models.ClusterHost).filter_by( + cluster_id=cluster_id).filter_by(deleted=False) cluster_host = query.all() for host in cluster_host: host.delete(session=session) @@ -1120,7 +1405,8 @@ def delete_cluster_host(context, cluster_id, session=None): msg = "No host found with ID %s" % cluster_id LOG.debug(msg) raise exception.NotFound(msg) - + + @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) @utils.no_4byte_params @@ -1130,7 +1416,8 @@ def _cluster_update(context, values, cluster_id): :param context: Request context :param values: A dict of attributes to set - :param cluster_id: If None, create the project, otherwise, find and update it + :param cluster_id: If None, create the project, + otherwise, find and update it """ # NOTE(jbresnah) values is altered in this so a copy is needed @@ -1140,37 +1427,43 @@ def _cluster_update(context, values, cluster_id): values = values.copy() hosts_values = dict() interfaces_values = dict() - + session = get_session() with session.begin(): if cluster_id: project_ref = _cluster_get(context, cluster_id, session=session) else: project_ref = models.Cluster() - host_ref = models.Host() + # host_ref = models.Host() # parse the range params if values.has_key('networking_parameters'): network_params = eval(values['networking_parameters']) if network_params: - if network_params.has_key('gre_id_range') and len(network_params['gre_id_range'])>1: + if network_params.has_key('gre_id_range') and \ + len(network_params['gre_id_range']) > 1: values['gre_id_start'] = network_params['gre_id_range'][0] values['gre_id_end'] = network_params['gre_id_range'][1] - if network_params.has_key('vlan_range') and len(network_params['vlan_range'])>1: + if network_params.has_key('vlan_range') and \ + len(network_params['vlan_range']) > 1: values['vlan_start'] = network_params['vlan_range'][0] values['vlan_end'] = network_params['vlan_range'][1] - if network_params.has_key('vni_range') and len(network_params['vni_range'])>1: + if network_params.has_key('vni_range') and \ + len(network_params['vni_range']) > 1: values['vni_start'] = network_params['vni_range'][0] values['vni_end'] = network_params['vni_range'][1] - values['net_l23_provider'] = network_params.get('net_l23_provider', None) + values['net_l23_provider'] = \ + network_params.get('net_l23_provider', None) values['base_mac'] = network_params.get('base_mac', None) - values['segmentation_type'] = network_params.get('segmentation_type', None) + values['segmentation_type'] = \ + network_params.get('segmentation_type', None) values['public_vip'] = network_params.get('public_vip', None) # save host info if values.has_key('nodes'): for host_id in eval(values['nodes']): - host = host_get(context, host_id, session=None, force_show_deleted=False) + host = host_get(context, 
host_id, session=None, + force_show_deleted=False) host.status = "in-cluster" host.save(session=session) @@ -1183,11 +1476,12 @@ def _cluster_update(context, values, cluster_id): if cluster_id: - for cluster in session.query(models.Cluster).filter_by(deleted=False).all(): + for cluster in session.query(models.Cluster).filter_by( + deleted=False).all(): if cluster['id'] == cluster_id: continue - if cluster['name'] == values['name']: - msg = "cluster name is repeated!" + if cluster['name'] == values.get('name', None): + msg = "cluster name is repeated!" LOG.debug(msg) raise exception.Invalid(msg) if values.has_key('nodes'): @@ -1198,64 +1492,70 @@ def _cluster_update(context, values, cluster_id): hosts_values['host_id'] = host_id _update_values(cluster_host_ref, hosts_values) cluster_host_ref.save(session=session) - + if values.has_key('networks'): - for interface_id in eval(values['networks']): - query = session.query(models.Network).filter_by(id=interface_id) + for interface_id in eval(values['networks']): + query = \ + session.query(models.Network).filter_by( + id=interface_id) interfaces_values['cluster_id'] = project_ref.id interfaces_values['updated_at'] = timeutils.utcnow() - updated = query.update(interfaces_values, synchronize_session='fetch') + updated = query.update(interfaces_values, + synchronize_session='fetch') - # update --------------------------------------------------------------------- + # update ---------------------------------------------------------- # deal with logic_network if values.has_key('logic_networks'): - query = session.query(models.Cluster).filter_by(id = cluster_id) + query = session.query(models.Cluster).filter_by(id=cluster_id) if not query: - raise exception.NotFound("Cluster not found,id=%s" % cluster_id) - # insert data to logic_network tables + raise exception.NotFound( + "Cluster not found,id=%s" % cluster_id) + # insert data to logic_network tables logic_networks = eval(values['logic_networks']) if logic_networks: _cluster_add_logic_network( - logic_networks = logic_networks, - cluster_id = project_ref.id, - session = session, - status = "update") - #---start--delete all logic_networks if values['logic_networks'] == []--- + logic_networks=logic_networks, + cluster_id=project_ref.id, + session=session, + status="update") + # ---start--delete all logic_networks + # if values['logic_networks'] == []--- else: logic_networks_query = session.query(models.LogicNetwork).\ - filter_by(cluster_id = cluster_id, deleted = 0) - if logic_networks_query: + filter_by(cluster_id=cluster_id, deleted=0) + if logic_networks_query: logic_networks_query.update( - {"deleted" : True, "deleted_at" : timeutils.utcnow()} + {"deleted": True, "deleted_at": timeutils.utcnow()} ) - #------------------------end---------------------------------------------- + # ------------------------end---------------------------------------------- # deal routers if values.has_key('routers'): routers = eval(values['routers']) if routers: _cluster_add_routers( - routers = routers, - cluster_id = project_ref.id, - session = session, - status = "update" + routers=routers, + cluster_id=project_ref.id, + session=session, + status="update" ) - #----delete all routers if values['routers'] == []--- + # ----delete all routers if values['routers'] == []--- else: router_ref = \ session.query(models.Router).filter_by( - cluster_id = cluster_id, deleted=False) + cluster_id=cluster_id, deleted=False) if router_ref: router_ref.update( - {"deleted" : True, "deleted_at" : timeutils.utcnow()} + {"deleted": True, 
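# --- Editor's sketch (not part of the patch above); _cluster_update() flattens
# the nested networking_parameters dict into the scalar columns of the clusters
# table (the real code also copies net_l23_provider, base_mac, segmentation_type
# and public_vip).  A minimal standalone version of the range mapping:
def flatten_network_params(params):
    """params example: {'vlan_range': [2, 4094], 'gre_id_range': [1, 1000]}"""
    flat = {}
    for key, prefix in (('gre_id_range', 'gre_id'),
                        ('vlan_range', 'vlan'),
                        ('vni_range', 'vni')):
        value = params.get(key) or []
        if len(value) > 1:
            flat['%s_start' % prefix] = value[0]   # e.g. vlan_start
            flat['%s_end' % prefix] = value[1]     # e.g. vlan_end
    return flat
# flatten_network_params({'vlan_range': [2, 4094]})
# -> {'vlan_start': 2, 'vlan_end': 4094}
# --- end of editor's sketch ---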
"deleted_at": timeutils.utcnow()} ) - #------------------------end-------------------------------- - # update --------------------------------------------------------------------- + # ------------------------end-------------------------------- + # update ---------------------------------------------------------- query = session.query(models.Cluster).filter_by(id=cluster_id) - # Validate fields for projects table. This is similar to what is done + # Validate fields for projects table. + # This is similar to what is done # for the query result update except that we need to do it prior # in this case. # TODO(dosaboy): replace this with a dict comprehension once py26 @@ -1267,22 +1567,20 @@ def _cluster_update(context, values, cluster_id): updated = query.update(values, synchronize_session='fetch') if not updated: - msg = (_('cannot transition from %(current)s to ' - '%(next)s in update (wanted ' - 'from_state=%(from)s)') % - {'current': current, 'next': new_status, - 'from': from_state}) + msg = (_('update cluster_id %(cluster_id)s failed') % + {'cluster_id': cluster_id}) raise exception.Conflict(msg) project_ref = _cluster_get(context, cluster_id, session=session) else: - for cluster in session.query(models.Cluster).filter_by(deleted=False).all(): + for cluster in session.query(models.Cluster).filter_by( + deleted=False).all(): if cluster['name'] == values['name']: - msg = "cluster name is repeated!" + msg = "cluster name is repeated!" LOG.debug(msg) raise exception.Forbidden(msg) - project_ref.update(values) - _update_values(project_ref, values) + project_ref.update(values) + _update_values(project_ref, values) try: project_ref.save(session=session) except db_exception.DBDuplicateEntry: @@ -1292,18 +1590,22 @@ def _cluster_update(context, values, cluster_id): for host_id in eval(values['nodes']): cluster_host_ref = models.ClusterHost() hosts_values['cluster_id'] = project_ref.id - hosts_values['host_id'] = host_id - _update_values(cluster_host_ref, hosts_values) + hosts_values['host_id'] = host_id + _update_values(cluster_host_ref, hosts_values) cluster_host_ref.save(session=session) - + if values.has_key('networks'): - for interface_id in eval(values['networks']): - query = session.query(models.Network).filter_by(id=interface_id) + for interface_id in eval(values['networks']): + query = session.query(models.Network).filter_by( + id=interface_id) interfaces_values['cluster_id'] = project_ref.id interfaces_values['updated_at'] = timeutils.utcnow() - updated = query.update(interfaces_values, synchronize_session='fetch') + updated = query.update(interfaces_values, + synchronize_session='fetch') - network_query = session.query(models.Network).filter_by(type="template").filter_by(deleted=False).all() + network_query = \ + session.query(models.Network).filter_by( + type="template").filter_by(deleted=False).all() for sub_network_query in network_query: network_ref = models.Network() network_ref.cluster_id = project_ref.id @@ -1315,31 +1617,34 @@ def _cluster_update(context, values, cluster_id): network_ref.capability = sub_network_query.capability network_ref.save(session=session) - # add --------------------------------------------------------------------- + # add ------------------------------------------------------------- # deal logic_network infos if values.has_key('logic_networks'): # insert data to logic_network tables logic_networks = eval(values['logic_networks']) if logic_networks: _cluster_add_logic_network( - logic_networks = logic_networks, - cluster_id = project_ref.id, - session = 
session, - status = "add") + logic_networks=logic_networks, + cluster_id=project_ref.id, + session=session, + status="add") # deal routers if values.has_key('routers'): routers = eval(values['routers']) if routers: _cluster_add_routers( - routers = routers, - cluster_id = project_ref.id, - session = session, - status = "add" + routers=routers, + cluster_id=project_ref.id, + session=session, + status="add" ) - # add --------------------------------------------------------------------- + # add ------------------------------------------------------------ - role_query = session.query(models.Role).filter_by(type="template",cluster_id=None).filter_by(deleted=False).all() + role_query = \ + session.query(models.Role).filter_by( + type="template", cluster_id=None).filter_by( + deleted=False).all() for sub_role_query in role_query: role_ref = models.Role() role_ref.cluster_id = project_ref.id @@ -1348,37 +1653,45 @@ def _cluster_update(context, values, cluster_id): role_ref.status = sub_role_query.status role_ref.type = "default" role_ref.deployment_backend = sub_role_query.deployment_backend + role_ref.role_type = sub_role_query.role_type configset_ref = models.ConfigSet() configset_ref.name = project_ref.name + role_ref.name configset_ref.description = project_ref.name + role_ref.name configset_ref.save(session=session) role_ref.config_set_id = configset_ref.id role_ref.save(session=session) - service_role_query = session.query(models.ServiceRole).filter_by(role_id=sub_role_query.id).filter_by(deleted=False).all() + service_role_query = \ + session.query(models.ServiceRole).filter_by( + role_id=sub_role_query.id).filter_by( + deleted=False).all() for sub_service_role_query in service_role_query: service_role_ref = models.ServiceRole() service_role_ref.role_id = role_ref.id - service_role_ref.service_id = sub_service_role_query.service_id + service_role_ref.service_id = \ + sub_service_role_query.service_id service_role_ref.save(session=session) - + return _cluster_get(context, project_ref.id) + def _cluster_add_routers(**params): session = params['session'] or get_session() if 0 == cmp(params['status'], "update"): router_ref = \ - session.query(models.Router).filter_by(cluster_id = params['cluster_id']) + session.query(models.Router).filter_by( + cluster_id=params['cluster_id']) if router_ref.all(): router_ref.update( - {"deleted" : True, "deleted_at" : timeutils.utcnow()} + {"deleted": True, "deleted_at": timeutils.utcnow()} ) - + logic_networks_query_all = [] - logic_networks_query = session.query(models.LogicNetwork).\ - filter_by(cluster_id = params['cluster_id'], deleted = 0) + logic_networks_query = \ + session.query(models.LogicNetwork).\ + filter_by(cluster_id=params['cluster_id'], deleted=0) if logic_networks_query: logic_networks_query_all = logic_networks_query.all() - + for router in params['routers']: # inser data to router tables router_values = {} @@ -1389,34 +1702,39 @@ def _cluster_add_routers(**params): external_name = router.get('external_logic_network', None) if external_name: logic_network_query = \ - session.query(models.LogicNetwork).filter_by(name = external_name).filter_by(deleted=False).first() + session.query(models.LogicNetwork).filter_by( + name=external_name).filter_by(deleted=False).first() if logic_network_query: router_values['external_logic_network'] = external_name _update_values(router_ref, router_values) - router_ref.save(session) # submit logic_network info to affair + router_ref.save(session) + # submit logic_network info to affair for internal_subnet_name 
in router.get('subnets', None): for logic_netwrok in logic_networks_query_all: subnet_query = \ - session.query(models.Subnet).filter_by(name = internal_subnet_name, - deleted=False, - logic_network_id = logic_netwrok.id) + session.query(models.Subnet).filter_by( + name=internal_subnet_name, + deleted=False, + logic_network_id=logic_netwrok.id) if subnet_query.first(): subnet_query.update( - {"router_id" : router_ref.id, "updated_at" : timeutils.utcnow()} + {"router_id": router_ref.id, + "updated_at": timeutils.utcnow()} ) + def _cluster_add_logic_network(**params): session = params['session']or get_session() logic_networks_query_all = [] if "update" == params['status']: logic_networks_query = session.query(models.LogicNetwork).\ - filter_by(cluster_id = params['cluster_id'], deleted = 0) - if logic_networks_query: + filter_by(cluster_id=params['cluster_id'], deleted=0) + if logic_networks_query: logic_networks_query_all = logic_networks_query.all() logic_networks_query.update( - {"deleted" : True, "deleted_at" : timeutils.utcnow()} + {"deleted": True, "deleted_at": timeutils.utcnow()} ) for logic_network in params['logic_networks']: @@ -1424,42 +1742,49 @@ def _cluster_add_logic_network(**params): logic_network_values = {} logic_network_values['name'] = logic_network.get('name', None) logic_network_values['type'] = logic_network.get('type', None) - logic_network_values['segmentation_type'] = logic_network.get('segmentation_type', None) - logic_network_values['segmentation_id'] = logic_network.get('segmentation_id', None) + logic_network_values['segmentation_type'] = \ + logic_network.get('segmentation_type', None) + logic_network_values['segmentation_id'] = \ + logic_network.get('segmentation_id', None) logic_network_values['shared'] = logic_network.get('shared', None) - if logic_network.get('physnet_name', None): + if logic_network.get('physnet_name', None): query_list = session.query(models.Network).\ - filter_by(cluster_id = params['cluster_id']).filter_by(deleted=False).all() + filter_by(cluster_id=params['cluster_id']).filter_by( + deleted=False).all() if (query_list and [valid_physnet - for valid_physnet in query_list - if logic_network['physnet_name'] == valid_physnet.name]) or \ + for valid_physnet in query_list + if logic_network['physnet_name'] == + valid_physnet.name]) or \ logic_network.get('segmentation_type', None) == "flat": - logic_network_values['physnet_name'] = logic_network['physnet_name'] + logic_network_values['physnet_name'] = \ + logic_network['physnet_name'] logic_network_values['cluster_id'] = params['cluster_id'] logic_network_ref = models.LogicNetwork() _update_values(logic_network_ref, logic_network_values) - logic_network_ref.save(session) # submit logic_network info to affair + logic_network_ref.save(session) + # submit logic_network info to affair - if logic_network.get('subnets', None) : + if logic_network.get('subnets', None): _cluster_add_subnet( - subnets = logic_network['subnets'] , - logic_networks_query_all = logic_networks_query_all, - logic_network_id = logic_network_ref.id, - session = session, - status = params['status']) + subnets=logic_network['subnets'], + logic_networks_query_all=logic_networks_query_all, + logic_network_id=logic_network_ref.id, + session=session, + status=params['status']) + def _cluster_add_subnet(**params): session = params['session'] or get_session() subnets_query_all = [] - if "update" == params['status']: + if "update" == params['status']: for logic_network_query in params['logic_networks_query_all']: subnet_query = 
session.query(models.Subnet).\ - filter_by(logic_network_id = logic_network_query.id, deleted = 0) + filter_by(logic_network_id=logic_network_query.id, deleted=0) if subnet_query: subnets_query_all += subnet_query.all() subnet_query.update({ - "deleted" : True, "deleted_at" : timeutils.utcnow()} + "deleted": True, "deleted_at": timeutils.utcnow()} ) for subnet in params['subnets']: @@ -1475,19 +1800,20 @@ def _cluster_add_subnet(**params): if subnet.get('floating_ranges', None): _cluster_add_floating_range( - values = subnet['floating_ranges'], - subnets_query_all = subnets_query_all, - subnet_id = subnet_ref.id, - session = session, - status = params['status']) + values=subnet['floating_ranges'], + subnets_query_all=subnets_query_all, + subnet_id=subnet_ref.id, + session=session, + status=params['status']) if subnet.get('dns_nameservers', None): _cluster_add_dns_nameservers( - values = subnet['dns_nameservers'], - subnets_query_all = subnets_query_all, - subnet_id = subnet_ref.id, - session = session, - status = params['status']) + values=subnet['dns_nameservers'], + subnets_query_all=subnets_query_all, + subnet_id=subnet_ref.id, + session=session, + status=params['status']) + def _cluster_add_floating_range(**params): session = params['session'] or get_session() @@ -1495,21 +1821,22 @@ def _cluster_add_floating_range(**params): if params['status'] == "update": for subnet_query in params['subnets_query_all']: query = session.query(models.FloatIpRange).\ - filter_by(subnet_id = subnet_query.id).filter_by(deleted=False) + filter_by(subnet_id=subnet_query.id).filter_by(deleted=False) if query.first() is not None: floating_range_values['updated_at'] = timeutils.utcnow() query.delete(synchronize_session='fetch') - + if params['values']: for floating_range in params['values']: float_ip_range_ref = models.FloatIpRange() - if len(floating_range) >1: + if len(floating_range) > 1: floating_range_values['start'] = floating_range[0] floating_range_values['end'] = floating_range[1] floating_range_values['subnet_id'] = params['subnet_id'] - float_ip_range_ref.update(floating_range_values) + float_ip_range_ref.update(floating_range_values) _update_values(float_ip_range_ref, floating_range_values) - float_ip_range_ref.save(session = session) + float_ip_range_ref.save(session=session) + def _cluster_add_dns_nameservers(**params): session = params['session'] or get_session() @@ -1521,30 +1848,35 @@ def _cluster_add_dns_nameservers(**params): if query.first() is not None: dns_nameservers_values['updated_at'] = timeutils.utcnow() query.delete(synchronize_session='fetch') - + if params['values']: for dns_nameservers in params['values']: dns_Nameservers_ref = models.DnsNameservers() dns_nameservers_values['dns'] = dns_nameservers dns_nameservers_values['subnet_id'] = params['subnet_id'] - session.query(models.DnsNameservers).filter_by(subnet_id = params['subnet_id']).filter_by(deleted=False) + session.query(models.DnsNameservers).filter_by( + subnet_id=params['subnet_id']).filter_by(deleted=False) dns_Nameservers_ref.update(dns_nameservers_values) _update_values(dns_Nameservers_ref, dns_nameservers_values) - dns_Nameservers_ref.save(session=session) - + dns_Nameservers_ref.save(session=session) + + def _check_component_id(component_id): """ check if the given component id is valid before executing operations. For now, we only check its length. 
The original purpose of this method is - wrapping the different behaviors between MySql and DB2 when the component id + wrapping the different behaviors between + MySql and DB2 when the component id length is longer than the defined length in database model. :param image_id: The id of the component we want to check :return: Raise NoFound exception if given component id is invalid """ if (component_id and - len(component_id) > models.Component.id.property.columns[0].type.length): + len(component_id) > + models.Component.id.property.columns[0].type.length): raise exception.NotFound() + @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) @utils.no_4byte_params @@ -1554,19 +1886,21 @@ def _component_update(context, values, component_id): :param context: Request context :param values: A dict of attributes to set - :param component_id: If None, create the component, otherwise, find and update it + :param component_id: If None, create the component, + otherwise, find and update it """ # NOTE(jbresnah) values is altered in this so a copy is needed values = values.copy() - + session = get_session() with session.begin(): if component_id: - component_ref = _component_get(context, component_id, session=session) + component_ref = _component_get(context, component_id, + session=session) else: component_ref = models.Component() - #if host_ref.id is None: + # if host_ref.id is None: # host_ref.id = str(uuid.uuid4()) if component_id: # Don't drop created_at if we're passing it in... @@ -1576,9 +1910,11 @@ def _component_update(context, values, component_id): values['updated_at'] = timeutils.utcnow() if component_id: - query = session.query(models.Component).filter_by(id=component_id).filter_by(deleted=False) + query = session.query(models.Component).filter_by( + id=component_id).filter_by(deleted=False) - # Validate fields for components table. This is similar to what is done + # Validate fields for components table. + # This is similar to what is done # for the query result update except that we need to do it prior # in this case. # TODO(dosaboy): replace this with a dict comprehension once py26 @@ -1590,28 +1926,26 @@ def _component_update(context, values, component_id): updated = query.update(values, synchronize_session='fetch') if not updated: - msg = (_('cannot transition from %(current)s to ' - '%(next)s in update (wanted ' - 'from_state=%(from)s)') % - {'current': current, 'next': new_status, - 'from': from_state}) + msg = (_('update component_id %(component_id)s failed') % + {'component_id': component_id}) raise exception.Conflict(msg) - component_ref = _component_get(context, component_id, session=session) + component_ref = _component_get(context, component_id, + session=session) else: - #print "1 host_ref.id:%s" % host_ref.id - #print host_ref.created_at - #print values - #values["id"] = host_ref.id + # print "1 host_ref.id:%s" % host_ref.id + # print host_ref.created_at + # print values + # values["id"] = host_ref.id component_ref.update(values) # Validate the attributes before we go any further. From my # investigation, the @validates decorator does not validate # on new records, only on existing records, which is, well, # idiotic. 
_update_values(component_ref, values) - #print "2 host_ref.id:%s" % host_ref.id - #print host_ref.created_at - #print values + # print "2 host_ref.id:%s" % host_ref.id + # print host_ref.created_at + # print values try: component_ref.save(session=session) except db_exception.DBDuplicateEntry: @@ -1620,6 +1954,7 @@ def _component_update(context, values, component_id): return component_get(context, component_ref.id) + def component_update(context, component_id, values): """ Set the given properties on an image and update it. @@ -1627,14 +1962,17 @@ def component_update(context, component_id, values): :raises NotFound if component does not exist. """ return _component_update(context, values, component_id) - + + def _cluster_get(context, cluster_id, session=None, force_show_deleted=False): """Get an project or raise if it does not exist.""" _check_cluster_id(cluster_id) session = session or get_session() try: - query = session.query(models.Cluster).filter_by(id=cluster_id).filter_by(deleted=False) + query = session.query(models.Cluster).filter_by( + id=cluster_id).filter_by( + deleted=False) # filter out deleted images if context disallows it if not force_show_deleted: query = query.filter_by(deleted=False) @@ -1648,11 +1986,14 @@ def _cluster_get(context, cluster_id, session=None, force_show_deleted=False): return project -def get_logic_network(context, cluster_id, session=None, force_show_deleted=False): + +def get_logic_network(context, cluster_id, session=None, + force_show_deleted=False): """Get an logic network or raise if it does not exist.""" session = session or get_session() try: - query = session.query(models.LogicNetwork).filter_by(cluster_id=cluster_id).filter_by(deleted=False) + query = session.query(models.LogicNetwork).filter_by( + cluster_id=cluster_id).filter_by(deleted=False) # filter out deleted images if context disallows it if not force_show_deleted and not context.can_see_deleted: query = query.filter_by(deleted=False) @@ -1664,65 +2005,81 @@ def get_logic_network(context, cluster_id, session=None, force_show_deleted=Fals LOG.debug(msg) raise exception.NotFound(msg) for logic_network in list(logic_networks): - #subnet_list = [] - subnet = _get_subnet(context, logic_network['id'], None,session) - #subnet_list.append(subnet) + # subnet_list = [] + subnet = _get_subnet(context, logic_network['id'], None, session) + # subnet_list.append(subnet) logic_network['subnets'] = subnet return logic_networks -def _get_subnet(context, logic_network_id=None, router_id=None, session=None, force_show_deleted=False): + +def _get_subnet(context, logic_network_id=None, router_id=None, + session=None, force_show_deleted=False): """Get an subnet or raise if it does not exist.""" session = session or get_session() try: if logic_network_id and router_id is None: - query = session.query(models.Subnet).filter_by(logic_network_id=logic_network_id).filter_by(deleted=False) + query = \ + session.query(models.Subnet).filter_by( + logic_network_id=logic_network_id).filter_by( + deleted=False) elif logic_network_id is None and router_id is not None: query = session.query(models.Subnet).filter_by(router_id=router_id) query = query.filter_by(deleted=False) return query.all() else: query = session.query(models.Subnet) - + if not force_show_deleted: query = query.filter_by(deleted=False) subnets = query.all() except sa_orm.exc.NoResultFound: - msg = "No Float Ip Range found with cluster ID %s" % cluster_id + msg = ("No Float Ip Range found with " + "logic_network_id %s and router_id %s" + % 
(logic_network_id, router_id)) LOG.debug(msg) raise exception.NotFound(msg) - ip_into_int = lambda ip: reduce(lambda x, y: (x<<8)+y, map(int, ip.split('.'))) - int_to_ip = lambda x: '.'.join([str(x/(256**i)%256) for i in range(3,-1,-1)]) + ip_into_int = lambda ip: reduce(lambda x, y: (x << 8) + y, map(int, ip.split('.'))) + int_to_ip = lambda x: '.'.join([str(x / (256 **i) % 256) for i in range(3, -1, -1)]) for subnet in subnets: dns_nameservers = _dns_nameservers_get(context, subnet['id'], session) subnet['dns_nameservers'] = \ - [dns_server['dns'] for dns_server in dns_nameservers] if dns_nameservers else [] + [dns_server['dns'] for dns_server in + dns_nameservers] if dns_nameservers else [] subnet['dns_nameservers'].sort() float_ip_range = _float_ip_range_get(context, subnet['id'], session) if float_ip_range and len(float_ip_range) > 1: - int_ip_range = [[ip_into_int(float_ip[0]), ip_into_int(float_ip[1])] for float_ip in float_ip_range] - int_ip_range = sorted(int_ip_range, key=lambda x : x[0]) - float_ip_range = [[int_to_ip(int_ip[0]), int_to_ip(int_ip[1])] for int_ip in int_ip_range] + int_ip_range = \ + [[ip_into_int(float_ip[0]), ip_into_int(float_ip[1])] for + float_ip in float_ip_range] + int_ip_range = sorted(int_ip_range, key=lambda x: x[0]) + float_ip_range = \ + [[int_to_ip(int_ip[0]), int_to_ip(int_ip[1])] for + int_ip in int_ip_range] subnet['floating_ranges'] = float_ip_range if float_ip_range else [] return subnets -def _float_ip_range_get(context, subnet_id, session=None, force_show_deleted=False): + +def _float_ip_range_get(context, subnet_id, session=None, + force_show_deleted=False): """Get an project or raise if it does not exist.""" session = session or get_session() try: - query = session.query(models.FloatIpRange).filter_by(subnet_id=subnet_id).filter_by(deleted=False) + query = \ + session.query(models.FloatIpRange).filter_by( + subnet_id=subnet_id).filter_by(deleted=False) # filter out deleted images if context disallows it if not force_show_deleted and not context.can_see_deleted: query = query.filter_by(deleted=False) floatIpRange = query.all() - + float_ip_ranges_list = [] for float_ip_range in list(floatIpRange): float_ip_range_list = [] @@ -1737,12 +2094,15 @@ def _float_ip_range_get(context, subnet_id, session=None, force_show_deleted=Fal return float_ip_ranges_list -def _dns_nameservers_get(context, subnet_id, session=None, force_show_deleted=False): + +def _dns_nameservers_get(context, subnet_id, session=None, + force_show_deleted=False): """Get an nameservers or raise if it does not exist.""" session = session or get_session() try: - query = session.query(models.DnsNameservers).filter_by(subnet_id=subnet_id).filter_by(deleted=False) + query = session.query(models.DnsNameservers).filter_by( + subnet_id=subnet_id).filter_by(deleted=False) # filter out deleted images if context disallows it if not force_show_deleted and not context.can_see_deleted: query = query.filter_by(deleted=False) @@ -1756,11 +2116,14 @@ def _dns_nameservers_get(context, subnet_id, session=None, force_show_deleted=Fa return dns_nameservers -def router_get(context, cluster_id, session=None, force_show_deleted=False): + +def router_get(context, cluster_id, session=None, + force_show_deleted=False): """Get an routers or raise if it does not exist.""" session = session or get_session() try: - query = session.query(models.Router).filter_by(cluster_id=cluster_id).filter_by(deleted=False) + query = session.query(models.Router).filter_by( + cluster_id=cluster_id).filter_by(deleted=False) if 
not force_show_deleted: query = query.filter_by(deleted=False) @@ -1769,10 +2132,10 @@ def router_get(context, cluster_id, session=None, force_show_deleted=False): for router in routers: subnets = [] router_id = router['id'] - subnets = _get_subnet(context, None,router_id, session) + subnets = _get_subnet(context, None, router_id, session) router['subnets'] = [subnet.name for subnet in subnets] routers_list.append(router) - + except sa_orm.exc.NoResultFound: msg = "No routers found with cluster ID %s" % cluster_id LOG.debug(msg) @@ -1780,15 +2143,19 @@ def router_get(context, cluster_id, session=None, force_show_deleted=False): return routers_list -def cluster_get(context, cluster_id, session=None, force_show_deleted=False): + +def cluster_get(context, cluster_id, session=None, + force_show_deleted=False): Cluster = _cluster_get(context, cluster_id, session=session, - force_show_deleted=force_show_deleted) + force_show_deleted=force_show_deleted) return Cluster + def cluster_add(context, values): """Add an cluster from the values dictionary.""" return _cluster_update(context, values, None) - + + def cluster_update(context, cluster_id, values): """ Set the given properties on an cluster and update it. @@ -1797,11 +2164,14 @@ def cluster_update(context, cluster_id, values): """ return _cluster_update(context, values, cluster_id) -def get_cluster_host(context, cluster_id, session=None, force_show_deleted=False): + +def get_cluster_host(context, cluster_id, session=None, + force_show_deleted=False): _check_cluster_id(cluster_id) session = session or get_session() try: - query = session.query(models.ClusterHost).filter_by(cluster_id = cluster_id, deleted=False) + query = session.query(models.ClusterHost).filter_by( + cluster_id=cluster_id, deleted=False) # filter out deleted images if context disallows it if not force_show_deleted and not context.can_see_deleted: query = query.filter_by(deleted=False) @@ -1815,15 +2185,17 @@ def get_cluster_host(context, cluster_id, session=None, force_show_deleted=False raise exception.NotFound(msg) return cluster_hosts_id - -def _component_get(context, component_id, session=None, force_show_deleted=False): + +def _component_get(context, component_id, session=None, + force_show_deleted=False): """Get an component or raise if it does not exist.""" _check_component_id(component_id) session = session or get_session() try: - query = session.query(models.Component).filter_by(id=component_id).filter_by(deleted=False) + query = session.query(models.Component).filter_by( + id=component_id).filter_by(deleted=False) # filter out deleted images if context disallows it if not force_show_deleted and not context.can_see_deleted: query = query.filter_by(deleted=False) @@ -1837,38 +2209,46 @@ def _component_get(context, component_id, session=None, force_show_deleted=False return component -def component_get(context, component_id, session=None, force_show_deleted=False): + +def component_get(context, component_id, session=None, + force_show_deleted=False): component = _component_get(context, component_id, session=session, - force_show_deleted=force_show_deleted) + force_show_deleted=force_show_deleted) return component + def component_add(context, values): """Add an component from the values dictionary.""" return _component_update(context, values, None) - + + def _component_services_get(context, component_id, session=None): """Get an service or raise if it does not exist.""" _check_component_id(component_id) session = session or get_session() try: - query = 
session.query(models.Service).filter_by(component_id=component_id).filter_by(deleted=False) + query = session.query(models.Service).filter_by( + component_id=component_id).filter_by(deleted=False) services = query.all() except sa_orm.exc.NoResultFound: - msg = "No service found with ID %s" % service_id + msg = "No component found with ID %s" % component_id LOG.debug(msg) raise exception.NotFound(msg) return services + def _services_used_in_cluster(context, services_id, session=None): session = session or get_session() services_used = set() for service_id in services_id: _check_service_id(service_id) try: - query = session.query(models.ServiceRole).filter_by(service_id=service_id).filter_by(deleted=False) + query = session.query(models.ServiceRole).filter_by( + service_id=service_id).filter_by(deleted=False) service_roles = query.all() for service_role in service_roles: - role_ref = _role_get(context, service_role.role_id, session=session) + role_ref = _role_get(context, service_role.role_id, + session=session) if role_ref.type != 'template': services_used.add(service_id) except sa_orm.exc.NoResultFound: @@ -1876,7 +2256,8 @@ def _services_used_in_cluster(context, services_id, session=None): LOG.debug(msg) raise exception.NotFound(msg) return services_used - + + @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) def component_destroy(context, component_id): @@ -1888,7 +2269,8 @@ def component_destroy(context, component_id): services_used = _services_used_in_cluster(context, services_id, session) if services_used: - msg = "Services '%s' of component '%s' is using in cluster" % (','.join(services_used),component_id) + msg = "Services '%s' of component '%s' is using in cluster" % ( + ','.join(services_used), component_id) raise exception.DeleteConstrainted(msg) for service_id in services_id: @@ -1900,6 +2282,7 @@ def component_destroy(context, component_id): return component_ref + def _check_service_id(service_id): """ check if the given service id is valid before executing operations. For @@ -1913,6 +2296,7 @@ def _check_service_id(service_id): len(service_id) > models.Service.id.property.columns[0].type.length): raise exception.NotFound() + @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) @utils.no_4byte_params @@ -1922,19 +2306,20 @@ def _service_update(context, values, service_id): :param context: Request context :param values: A dict of attributes to set - :param service_id: If None, create the service, otherwise, find and update it + :param service_id: If None, create the service, + otherwise, find and update it """ # NOTE(jbresnah) values is altered in this so a copy is needed values = values.copy() - + session = get_session() with session.begin(): if service_id: service_ref = _service_get(context, service_id, session=session) else: service_ref = models.Service() - #if host_ref.id is None: + # if host_ref.id is None: # host_ref.id = str(uuid.uuid4()) if service_id: # Don't drop created_at if we're passing it in... @@ -1944,9 +2329,11 @@ def _service_update(context, values, service_id): values['updated_at'] = timeutils.utcnow() if service_id: - query = session.query(models.Service).filter_by(id=service_id).filter_by(deleted=False) + query = session.query(models.Service).filter_by( + id=service_id).filter_by(deleted=False) - # Validate fields for services table. This is similar to what is done + # Validate fields for services table. 
+ # This is similar to what is done # for the query result update except that we need to do it prior # in this case. # TODO(dosaboy): replace this with a dict comprehension once py26 @@ -1958,28 +2345,25 @@ def _service_update(context, values, service_id): updated = query.update(values, synchronize_session='fetch') if not updated: - msg = (_('cannot transition from %(current)s to ' - '%(next)s in update (wanted ' - 'from_state=%(from)s)') % - {'current': current, 'next': new_status, - 'from': from_state}) + msg = (_('update service_id %(service_id)s failed') % + {'service_id': service_id}) raise exception.Conflict(msg) service_ref = _service_get(context, service_id, session=session) else: - #print "1 host_ref.id:%s" % host_ref.id - #print host_ref.created_at - #print values - #values["id"] = host_ref.id + # print "1 host_ref.id:%s" % host_ref.id + # print host_ref.created_at + # print values + # values["id"] = host_ref.id service_ref.update(values) # Validate the attributes before we go any further. From my # investigation, the @validates decorator does not validate # on new records, only on existing records, which is, well, # idiotic. _update_values(service_ref, values) - #print "2 host_ref.id:%s" % host_ref.id - #print host_ref.created_at - #print values + # print "2 host_ref.id:%s" % host_ref.id + # print host_ref.created_at + # print values try: service_ref.save(session=session) except db_exception.DBDuplicateEntry: @@ -1988,6 +2372,7 @@ def _service_update(context, values, service_id): return service_get(context, service_ref.id) + def service_update(context, service_id, values): """ Set the given properties on an image and update it. @@ -1996,6 +2381,7 @@ def service_update(context, service_id, values): """ return _service_update(context, values, service_id) + def _service_get(context, service_id, session=None, force_show_deleted=False): """Get an service or raise if it does not exist.""" _check_service_id(service_id) @@ -2016,20 +2402,24 @@ def _service_get(context, service_id, session=None, force_show_deleted=False): return service + def service_get(context, service_id, session=None, force_show_deleted=False): service = _service_get(context, service_id, session=session, - force_show_deleted=force_show_deleted) + force_show_deleted=force_show_deleted) return service + def service_add(context, values): """Add an service from the values dictionary.""" return _service_update(context, values, None) + def _delete_service_role(context, service_id, session=None): _check_service_id(service_id) session = session or get_session() try: - query = session.query(models.ServiceRole).filter_by(service_id=service_id).filter_by(deleted=False) + query = session.query(models.ServiceRole).filter_by( + service_id=service_id).filter_by(deleted=False) service_roles = query.all() for service_role in service_roles: service_role.delete(session=session) @@ -2038,6 +2428,7 @@ def _delete_service_role(context, service_id, session=None): LOG.debug(msg) raise exception.NotFound(msg) + def _service_destroy(context, service_id): """Destroy the service or raise if it does not exist.""" session = get_session() @@ -2046,7 +2437,8 @@ def _service_destroy(context, service_id): service_ref = _service_get(context, service_id, session=session) service_ref.delete(session=session) return service_ref - + + @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) def service_destroy(context, service_id): @@ -2060,8 +2452,9 @@ def service_destroy(context, service_id): return 
_service_destroy(context, service_id) + def service_get_all(context, filters=None, marker=None, limit=None, - sort_key=None, sort_dir=None): + sort_key=None, sort_dir=None): """ Get all hosts that match zero or more filters. @@ -2089,9 +2482,10 @@ def service_get_all(context, filters=None, marker=None, limit=None, False) marker_service = None if marker is not None: - marker_service = _service_get(context, - marker, - force_show_deleted=showing_deleted) + marker_service =\ + _service_get(context, + marker, + force_show_deleted=showing_deleted) for key in ['created_at', 'id']: if key not in sort_key: @@ -2099,7 +2493,7 @@ def service_get_all(context, filters=None, marker=None, limit=None, sort_dir.append(default_sort_dir) session = get_session() - + query = session.query(models.Service).filter_by(deleted=showing_deleted) query = _paginate_query(query, models.Service, limit, @@ -2114,29 +2508,38 @@ def service_get_all(context, filters=None, marker=None, limit=None, services.append(service_dict) return services - + def _role_host_member_get(context, session, member_id=None, host_id=None): - """Fetch an HostRole entity by id.""" + """ + Fetch an HostRole entity by id. + """ query = session.query(models.HostRole) - + if host_id is not None and member_id is not None: - query = query.filter(models.HostRole.role_id == member_id).filter(models.HostRole.host_id == host_id).filter(models.HostRole.deleted == 0) + query = query.filter(models.HostRole.role_id == member_id).filter( + models.HostRole.host_id == host_id).filter( + models.HostRole.deleted == 0) elif member_id is not None and host_id is None: - query = query.filter(models.HostRole.role_id == member_id).filter(models.HostRole.deleted == 0) + query = query.filter(models.HostRole.role_id == member_id).filter( + models.HostRole.deleted == 0) elif host_id is not None and member_id is None: - query = query.filter(models.HostRole.host_id == host_id).filter(models.HostRole.deleted == 0) + query = query.filter(models.HostRole.host_id == host_id).filter( + models.HostRole.deleted == 0) return query.all() - + + def role_host_member_get(context, member_id=None, host_id=None): session = get_session() nodes_ref = _role_host_member_get(context, session, member_id, host_id) return nodes_ref + def _set_host_status(context, host_id, status): session = get_session() - host_ref = _host_get(context, host_id, session=session) + host_ref = _host_get(context, host_id, session=session) host_ref.status = status - host_ref.save(session=session) + host_ref.save(session=session) + def role_host_member_delete(context, member_id=None, host_id=None): """Delete an HostRole object.""" @@ -2151,22 +2554,27 @@ def role_host_member_delete(context, member_id=None, host_id=None): if not nodes_ref: _set_host_status(context, host_id, "in-cluster") + def _role_service_member_get(context, session, member_id): """Fetch an ServiceRole entity by id.""" query = session.query(models.ServiceRole) - query = query.filter(models.ServiceRole.role_id == member_id).filter(models.ServiceRole.deleted == 0) - + query = query.filter(models.ServiceRole.role_id == member_id).filter( + models.ServiceRole.deleted == 0) + return query.all() + def role_service_member_delete(context, member_id): """Delete an ServiceRole object.""" session = get_session() services_ref = _role_service_member_get(context, session, member_id) for service_ref in services_ref: - if service_ref.role_id==member_id: + if service_ref.role_id == member_id: service_ref.delete(session=session) - + + def _check_role_id(role_id): + """ 
check if the given role id is valid before executing operations. For now, we only check its length. The original purpose of this method is @@ -2179,6 +2587,7 @@ def _check_role_id(role_id): len(role_id) > models.Role.id.property.columns[0].type.length): raise exception.NotFound() + @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) @utils.no_4byte_params @@ -2193,8 +2602,8 @@ def _role_update(context, values, role_id): # NOTE(jbresnah) values is altered in this so a copy is needed values = values.copy() - - hosts_values = dict() + + hosts_values = dict() services_values = dict() host_cluster_values = dict() session = get_session() @@ -2204,7 +2613,7 @@ def _role_update(context, values, role_id): else: role_ref = models.Role() - #if host_ref.id is None: + # if host_ref.id is None: # host_ref.id = str(uuid.uuid4()) if role_id: # Don't drop created_at if we're passing it in... @@ -2214,13 +2623,15 @@ def _role_update(context, values, role_id): if nodes_ref: for host_id in orig_hosts: _set_host_status(context, host_id, "with-role") - host_flag=0 + host_flag = 0 for node_ref in nodes_ref: - if node_ref.host_id==host_id: - host_flag=1 + if node_ref.host_id == host_id: + host_flag = 1 break - if host_flag==0: # host without this role, add role to this host, should add host to cluster at the same time + if host_flag == 0: + # host without this role, add role to this host, + # should add host to cluster at the same time role_host_ref = models.HostRole() hosts_values['role_id'] = role_id hosts_values['host_id'] = host_id @@ -2230,21 +2641,30 @@ def _role_update(context, values, role_id): if values.has_key('cluster_id'): cluster_id = values['cluster_id'] else: - role_def_tmp = session.query(models.Role).filter_by(id=role_id, deleted=False).one() + role_def_tmp = \ + session.query(models.Role).filter_by( + id=role_id, deleted=False).one() if role_def_tmp: cluster_id = role_def_tmp.cluster_id if cluster_id: - cluster_hosts_id = get_cluster_host(context, cluster_id, session=None, force_show_deleted=False) - #check this host existed in the cluster or not + cluster_hosts_id = \ + get_cluster_host(context, cluster_id, + session=None, + force_show_deleted=False) + # check this host existed in the cluster or not if host_id not in cluster_hosts_id: cluster_host_ref = models.ClusterHost() - host_cluster_values['updated_at'] = timeutils.utcnow() + host_cluster_values['updated_at'] = \ + timeutils.utcnow() host_cluster_values['host_id'] = host_id - host_cluster_values['cluster_id'] = cluster_id - cluster_host_ref.update(host_cluster_values) - _update_values(cluster_host_ref, host_cluster_values) + host_cluster_values['cluster_id'] = \ + cluster_id + cluster_host_ref.update( + host_cluster_values) + _update_values( + cluster_host_ref, host_cluster_values) cluster_host_ref.save(session=session) - else: #new host + else: # new host for host_id in orig_hosts: _set_host_status(context, host_id, "with-role") role_host_ref = models.HostRole() @@ -2256,40 +2676,48 @@ def _role_update(context, values, role_id): if values.has_key('cluster_id'): cluster_id = values['cluster_id'] else: - role_def_tmp = session.query(models.Role).filter_by(id=role_id, deleted=False).one() + role_def_tmp = \ + session.query(models.Role).filter_by( + id=role_id, deleted=False).one() if role_def_tmp: cluster_id = role_def_tmp.cluster_id if cluster_id: - cluster_hosts_id = get_cluster_host(context, cluster_id, session=None, force_show_deleted=False) + cluster_hosts_id = \ + get_cluster_host(context, 
cluster_id, + session=None, + force_show_deleted=False) if host_id not in cluster_hosts_id: cluster_host_ref = models.ClusterHost() - host_cluster_values['updated_at'] = timeutils.utcnow() + host_cluster_values['updated_at'] =\ + timeutils.utcnow() host_cluster_values['host_id'] = host_id host_cluster_values['cluster_id'] = cluster_id - cluster_host_ref.update(host_cluster_values) - _update_values(cluster_host_ref, host_cluster_values) + cluster_host_ref.update(host_cluster_values) + _update_values(cluster_host_ref, + host_cluster_values) cluster_host_ref.save(session=session) if values.has_key('services'): orig_services = list(eval(values['services'])) - services_ref = _role_service_member_get(context, session, role_id) + services_ref = \ + _role_service_member_get(context, session, role_id) if services_ref: for service_id in orig_services: - service_flag=0 + service_flag = 0 for service_ref in services_ref: - if service_ref.service_id==service_id: - service_flag=1 + if service_ref.service_id == service_id: + service_flag = 1 break - if service_flag==0: + if service_flag == 0: role_service_ref = models.ServiceRole() services_values['role_id'] = role_id services_values['service_id'] = service_id _update_values(role_service_ref, services_values) role_service_ref.save(session=session) else: - for service_id in orig_services: + for service_id in orig_services: role_service_ref = models.ServiceRole() services_values['role_id'] = role_id services_values['service_id'] = service_id @@ -2302,7 +2730,9 @@ def _role_update(context, values, role_id): values['updated_at'] = timeutils.utcnow() if role_id: - query = session.query(models.Role).filter_by(id=role_id).filter_by(deleted=False) + query = \ + session.query(models.Role).filter_by( + id=role_id).filter_by(deleted=False) # Validate fields for roles table. This is similar to what is done # for the query result update except that we need to do it prior @@ -2316,28 +2746,25 @@ def _role_update(context, values, role_id): updated = query.update(values, synchronize_session='fetch') if not updated: - msg = (_('cannot transition from %(current)s to ' - '%(next)s in update (wanted ' - 'from_state=%(from)s)') % - {'current': current, 'next': new_status, - 'from': from_state}) + msg = (_('update role_id %(role_id)s failed') % + {'role_id': role_id}) raise exception.Conflict(msg) role_ref = _role_get(context, role_id, session=session) else: - #print "1 host_ref.id:%s" % host_ref.id - #print host_ref.created_at - #print values - #values["id"] = host_ref.id + # print "1 host_ref.id:%s" % host_ref.id + # print host_ref.created_at + # print values + # values["id"] = host_ref.id role_ref.update(values) # Validate the attributes before we go any further. From my # investigation, the @validates decorator does not validate # on new records, only on existing records, which is, well, # idiotic. 
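# Copy the remaining attributes onto the newly created Role before its first save.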
_update_values(role_ref, values) - #print "2 host_ref.id:%s" % host_ref.id - #print host_ref.created_at - #print values + # print "2 host_ref.id:%s" % host_ref.id + # print host_ref.created_at + # print values try: role_ref.save(session=session) except db_exception.DBDuplicateEntry: @@ -2349,23 +2776,29 @@ def _role_update(context, values, role_id): cluster_id = None if values.has_key('cluster_id') and values['cluster_id']: cluster_id = values['cluster_id'] - - for host_id in orig_hosts: + + for host_id in orig_hosts: _set_host_status(context, host_id, "with-role") role_host_ref = models.HostRole() hosts_values['role_id'] = role_ref.id - hosts_values['host_id'] = host_id - _update_values(role_host_ref, hosts_values) + hosts_values['host_id'] = host_id + _update_values(role_host_ref, hosts_values) role_host_ref.save(session=session) - - cluster_hosts_id = get_cluster_host(context, cluster_id, session=None, force_show_deleted=False) - if host_id not in cluster_hosts_id: # add new record in cluster_host + + cluster_hosts_id = \ + get_cluster_host(context, cluster_id, + session=None, + force_show_deleted=False) + if host_id not in cluster_hosts_id: + # add new record in cluster_host cluster_host_ref = models.ClusterHost() - host_cluster_values['updated_at'] = timeutils.utcnow() + host_cluster_values['updated_at'] = \ + timeutils.utcnow() host_cluster_values['host_id'] = host_id host_cluster_values['cluster_id'] = cluster_id - cluster_host_ref.update(host_cluster_values) - _update_values(cluster_host_ref, host_cluster_values) + cluster_host_ref.update(host_cluster_values) + _update_values(cluster_host_ref, + host_cluster_values) cluster_host_ref.save(session=session) if values.has_key('services'): @@ -2373,13 +2806,13 @@ def _role_update(context, values, role_id): for service_id in orig_services: role_service_ref = models.ServiceRole() services_values['role_id'] = role_ref.id - services_values['service_id'] = service_id - _update_values(role_service_ref, services_values) + services_values['service_id'] = service_id + _update_values(role_service_ref, services_values) role_service_ref.save(session=session) - return role_get(context, role_ref.id) + def role_update(context, role_id, values): """ Set the given properties on an image and update it. 
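The `_role_update` hunk above follows the same create-or-update shape as `_component_update` and `_service_update` earlier in this patch: when an id is supplied, the row is updated through `query.update(..., synchronize_session='fetch')` and a zero row count raises `Conflict`; without an id, a fresh ORM object is populated and saved. The change itself only swaps the copied-in "cannot transition from %(current)s ..." message, which referenced names that never exist in these functions, for a plain "update <id> failed" message. A minimal, self-contained sketch of that shared pattern follows; `FakeQuery`, `Conflict`, `save_new`, and `update_or_create` are illustrative stand-ins, not names from the patched module.

```python
# Minimal sketch (not part of the patch) of the create-or-update pattern
# shared by _component_update, _service_update, _role_update, etc.
# FakeQuery, Conflict, save_new and update_or_create are hypothetical stand-ins.

class Conflict(Exception):
    """Raised when the optimistic UPDATE matches zero rows."""


class FakeQuery(object):
    """Stands in for a filtered SQLAlchemy Query over a single row."""

    def __init__(self, row):
        self._row = row  # a dict, or None if the filter matched nothing

    def update(self, values, synchronize_session='fetch'):
        if self._row is None:
            return 0          # no rows matched; the caller raises Conflict
        self._row.update(values)
        return 1              # number of rows affected


def update_or_create(query, values, obj_id, save_new):
    """Update the existing row when obj_id is given, otherwise create one."""
    if obj_id:
        updated = query.update(values, synchronize_session='fetch')
        if not updated:
            # The patch replaces the misleading "cannot transition ..." text
            # with a short, accurate message like this one.
            raise Conflict('update %s failed' % obj_id)
        return values
    return save_new(values)


if __name__ == '__main__':
    row = {'id': 'role-1', 'name': 'old-name'}
    update_or_create(FakeQuery(row), {'name': 'new-name'}, 'role-1',
                     save_new=lambda values: values)
    assert row['name'] == 'new-name'
```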
@@ -2388,13 +2821,16 @@ def role_update(context, role_id, values): """ return _role_update(context, values, role_id) + def _role_get(context, role_id, session=None, force_show_deleted=False): """Get an role or raise if it does not exist.""" _check_role_id(role_id) session = session or get_session() try: - query = session.query(models.Role).filter_by(id=role_id).filter_by(deleted=False) + query = \ + session.query(models.Role).filter_by( + id=role_id).filter_by(deleted=False) # filter out deleted images if context disallows it if not force_show_deleted and not context.can_see_deleted: query = query.filter_by(deleted=False) @@ -2408,14 +2844,18 @@ def _role_get(context, role_id, session=None, force_show_deleted=False): return role + def role_get(context, role_id, session=None, force_show_deleted=False): role = _role_get(context, role_id, session=session, - force_show_deleted=force_show_deleted) + force_show_deleted=force_show_deleted) return role + def role_add(context, values): """Add an role from the values dictionary.""" return _role_update(context, values, None) + + @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) def role_destroy(context, role_id): @@ -2427,13 +2867,13 @@ def role_destroy(context, role_id): role_ref = _role_get(context, role_id, session=session) role_ref.delete(session=session) - role_host_member_delete(context,role_id) - role_service_member_delete(context,role_id) + role_host_member_delete(context, role_id) + role_service_member_delete(context, role_id) return role_ref def role_get_all(context, filters=None, marker=None, limit=None, - sort_key=None, sort_dir=None): + sort_key=None, sort_dir=None): """ Get all hosts that match zero or more filters. @@ -2574,13 +3014,10 @@ def role_host_update(context, role_host_id, values): query = session.query(models.HostRole).filter_by(id=role_host_id) updated = query.update(values, synchronize_session='fetch') if not updated: - msg = (_('cannot transition from %(current)s to ' - '%(next)s in update (wanted ' - 'from_state=%(from)s)') % - {'current': current, 'next': new_status, - 'from': from_state}) + msg = (_('update role_host_id %(role_host_id)s failed') % + {'role_host_id': role_host_id}) raise exception.Conflict(msg) - return + return def _check_role_host_id(role_host_id): """ @@ -3007,9 +3444,9 @@ def image_get_all(context, filters=None, marker=None, limit=None, visibility) if visibility is not None: - if visibility == 'public': + if visibility == 'publicAPI': query = query.filter(models.Image.is_public == True) - elif visibility == 'private': + elif visibility == 'dataplane': query = query.filter(models.Image.is_public == False) if prop_cond: @@ -4534,11 +4971,8 @@ def _config_file_update(context, values, config_file_id): updated = query.update(values, synchronize_session='fetch') if not updated: - msg = (_('cannot transition from %(current)s to ' - '%(next)s in update (wanted ' - 'from_state=%(from)s)') % - {'current': current, 'next': new_status, - 'from': from_state}) + msg = (_('update config_file_id %(config_file_id)s failed') % + {'config_file_id': config_file_id}) raise exception.Conflict(msg) config_file_ref = _config_file_get(context, config_file_id, session=session) @@ -4682,7 +5116,7 @@ def _check_config_set_id(config_set_id): @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) @utils.no_4byte_params -def _config_set_update(context, values, config_set_id): +def _config_set_update(context, values, config_set_id=None): """ Used internally 
by config_set_add and config_set_update @@ -4723,11 +5157,8 @@ def _config_set_update(context, values, config_set_id): updated = query.update(values, synchronize_session='fetch') if not updated: - msg = (_('cannot transition from %(current)s to ' - '%(next)s in update (wanted ' - 'from_state=%(from)s)') % - {'current': current, 'next': new_status, - 'from': from_state}) + msg = (_('update config_file_id %(config_set_id)s failed') % + {'config_set_id': config_set_id}) raise exception.Conflict(msg) config_set_ref = _config_set_get(context, config_set_id, session=session) @@ -4759,7 +5190,7 @@ def _config_set_get(context, config_set_id, session=None, force_show_deleted=Fal except sa_orm.exc.NoResultFound: msg = "No config_set found with ID %s" % config_set_id - LOG.debug(msg) + LOG.info(msg) raise exception.NotFound(msg) return config_set @@ -4771,7 +5202,7 @@ def config_set_get(context, config_set_id, session=None, force_show_deleted=Fals def config_set_add(context, values): """Add an config_set from the values dictionary.""" - return _config_set_update(context, values, None) + return _config_set_update(context, values) def _config_item_set_get_by_config_id(context, config_id, session=None, force_show_deleted=False): """Get an config_set or raise if it does not exist.""" @@ -4821,14 +5252,38 @@ def config_set_destroy(context, config_set_id): """Destroy the config_set or raise if it does not exist.""" session = get_session() with session.begin(): - config_set_ref = _config_set_get(context, config_set_id, session=session) + config_set_ref = _config_set_get(context, + config_set_id, + session=session) + query_role = session.query(models.Host).filter_by(\ + config_set_id=config_set_id).filter_by(deleted=False) + if query_role.all(): + msg = "config_set %s is being used by other host"\ + % config_set_id + raise exception.Forbidden(msg) + query_role = session.query(models.Role).filter_by(\ + config_set_id=config_set_id).filter_by(deleted=False) + if query_role.all(): + msg = "config_set %s is being used by other role"\ + % config_set_id + raise exception.Forbidden(msg) config_set_ref.delete(session=session) - config_item_refs = _config_item_get_by_config_set_id(context, config_set_id, session=session) - + config_item_refs =\ + _config_item_get_by_config_set_id(context, + config_set_id, + session=session) for config_item_ref in config_item_refs: + config_id = config_item_ref.config_id config_item_ref.delete(session=session) + if not _config_item_set_get_by_config_id(context, + config_id, + session=session): + config_ref = _config_get(context, + config_id, + session=session) + config_ref.delete(session=session) return config_set_ref @@ -4967,11 +5422,8 @@ def _config_update(context, values, config_id): query.update(config_item_values, synchronize_session='fetch') if not updated: - msg = (_('cannot transition from %(current)s to ' - '%(next)s in update (wanted ' - 'from_state=%(from)s)') % - {'current': current, 'next': new_status, - 'from': from_state}) + msg = (_('update config_id %(config_id)s failed') % + {'config_id': config_id}) raise exception.Conflict(msg) config_ref = _config_get(context, config_id, session=session) @@ -5097,7 +5549,7 @@ def config_destroy(context, config_id): session = get_session() with session.begin(): config_ref = _config_get(context, config_id, session=session) - config_file_id=config_ref.config_file_id + # config_file_id=config_ref.config_file_id config_item_refs = _config_item_get_by_config_id(config_id, session=session) for config_item_ref in config_item_refs: 
config_item_ref.delete(session=session) @@ -5212,10 +5664,12 @@ def delete_network_ip_range(context, network_id): def get_network_ip_range(context, network_id): session = get_session() with session.begin(): - querry= session.query(models.IpRange).filter_by(network_id=network_id).filter_by(deleted=0) - ip_ranges=querry.all() - - return ip_ranges + sql_ip_ranges="select ip_ranges.start,end from ip_ranges where ip_ranges." \ + "network_id='"+network_id+"' and ip_ranges.deleted=0 " \ + "order by ip_ranges.start" + ip_ranges = session.execute(sql_ip_ranges).fetchall() + ip_ranges_sorted = sorted(ip_ranges, cmp=compare_same_cidr_ip) + return ip_ranges_sorted def network_get_all(context, cluster_id=None, filters=None, marker=None, limit=None, sort_key=None, sort_dir=None): """ @@ -5263,6 +5717,10 @@ def network_get_all(context, cluster_id=None, filters=None, marker=None, limit=N query = session.query(models.Network).\ filter_by(cluster_id=cluster_id).\ filter_by(deleted=showing_deleted) + elif filters.get('type'): + query = session.query(models.Network).\ + filter_by(type=filters['type']).\ + filter_by(deleted=showing_deleted) else: query = session.query(models.Network). \ filter_by(deleted=showing_deleted) @@ -5275,8 +5733,6 @@ def network_get_all(context, cluster_id=None, filters=None, marker=None, limit=N query = query.all() networks = [] for network in query: - if network.type == 'template': - continue ip_range_list=[] ip_ranges=get_network_ip_range(context, network['id']) if ip_ranges: @@ -5328,6 +5784,37 @@ def update_phyname_of_network(context, network_phyname_set): {'updated_at' : timeutils.utcnow(), 'physnet_name' :"physnet_"+v[1]} ) + +def check_assigned_ip_in_ip_range(assigned_ip_list, ip_range_list): + if not ip_range_list: + return + assigned_ips = copy.deepcopy(assigned_ip_list) + ip_ranges = copy.deepcopy(ip_range_list) + ip_list = [ip for ip in assigned_ips if ip] + for ip in ip_list: + flag = False + for ip_range in ip_ranges: + if is_in_ip_range(ip, ip_range): + flag = True + break + if not flag: + msg = "ip assigned by this ip range is being used by " \ + "networkplane.Delete the network on host interfaces " \ + "before changing ip range." 
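# Reject the new ip_ranges when an already-assigned IP falls outside all of them.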
+ LOG.error(msg) + raise exception.Forbidden(msg) + + +def _get_role_float_ip(session, cluster_id): + roles = session.query(models.Role).filter_by(cluster_id=cluster_id).\ + filter_by(deleted=False) + float_ip_lists = [[role.vip, role.db_vip, role.glance_vip] + for role in roles if role.name == 'CONTROLLER_HA' or + role.name == 'CONTROLLER_LB'] + return [ip for float_ip_list in float_ip_lists + for ip in float_ip_list if ip] + + @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) @utils.no_4byte_params @@ -5343,8 +5830,8 @@ def _network_update(context, values, network_id): # NOTE(jbresnah) values is altered in this so a copy is needed values = values.copy() ip_ranges_values = dict() - session = get_session() + role_vip_list = [] with session.begin(): if network_id: network_ref = _network_get(context, network_id, session=session) @@ -5360,7 +5847,12 @@ def _network_update(context, values, network_id): if network_id: query = session.query(models.Network).filter_by(id=network_id).filter_by(deleted=False) - + sql_ip="select assigned_networks.ip from assigned_networks where assigned_networks.deleted=0 and assigned_networks.network_id='"+network_id+"' order by assigned_networks.ip" + query_ip_list = session.execute(sql_ip).fetchall() + network_ip_list = [tmp_ip.values().pop() for tmp_ip in query_ip_list] + if values.get('name') == 'MANAGEMENT'and query.one().cluster_id: + role_vip_list = _get_role_float_ip(session, query.one().cluster_id) + assign_ip_list = list(set(network_ip_list + role_vip_list)) # Validate fields for projects table. This is similar to what is done # for the query result update except that we need to do it prior # in this case. @@ -5372,20 +5864,18 @@ def _network_update(context, values, network_id): network_tmp=query_cidr.values() network_cidr=network_tmp.pop() if network_cidr and network_cidr != values['cidr']: - if values['cidr'] != network_cidr: - #sql_ip="select host_interfaces.ip from host_interfaces, assigned_networks where host_interfaces.deleted=0 and host_interfaces.id=assigned_networks.interface_id and assigned_networks.deleted=0 and assigned_networks.network_id='"+network_id+"'" - sql_ip="select assigned_networks.ip from assigned_networks where assigned_networks.deleted=0 and assigned_networks.network_id='"+network_id+"' order by assigned_networks.ip" - query_ip_list = session.execute(sql_ip).fetchall() - for tmp_ip in query_ip_list: - ip_pop=tmp_ip.values().pop() - if ip_pop: - if is_in_cidr_range(ip_pop, network_cidr): - msg = "Error:Distribution ip by CIDR is being used, and the CIDR is not allowed to change." 
- LOG.error(msg) - raise exception.Forbidden(msg) - + #sql_ip="select host_interfaces.ip from host_interfaces, assigned_networks where host_interfaces.deleted=0 and host_interfaces.id=assigned_networks.interface_id and assigned_networks.deleted=0 and assigned_networks.network_id='"+network_id+"'" + for tmp_ip in assign_ip_list: + if tmp_ip and not is_in_cidr_range(tmp_ip, values['cidr']): + msg = "ip %s being used is not in range of new cidr" \ + "%s" % (tmp_ip, values['cidr']) + LOG.error(msg) + raise exception.Forbidden(msg) + network_ref = _network_get(context, network_id, session=session) if values.has_key("ip_ranges"): + check_assigned_ip_in_ip_range(network_ip_list, + eval(values['ip_ranges'])) delete_network_ip_range(context, network_id) for ip_range in list(eval(values['ip_ranges'])): ip_range_ref = models.IpRange() @@ -5401,11 +5891,8 @@ def _network_update(context, values, network_id): updated = query.update(values, synchronize_session='fetch') if not updated: - msg = (_('cannot transition from %(current)s to ' - '%(next)s in update (wanted ' - 'from_state=%(from)s)') % - {'current': current, 'next': new_status, - 'from': from_state}) + msg = (_('update network_id %(network_id)s failed') % + {'network_id': network_id}) raise exception.Conflict(msg) else: @@ -5428,7 +5915,7 @@ def _network_update(context, values, network_id): ip_range_ref.save(session=session) except db_exception.DBDuplicateEntry: raise exception.Duplicate("ip rangge %s already exists!" - % value['ip_ranges']) + % values['ip_ranges']) return _network_get(context, network_ref.id) @@ -5473,24 +5960,26 @@ def update_config(session,config_flag,config_set_id,query_set_item_list,config_i query_config_file_info= session.query(models.ConfigFile).filter_by(id=query_config_info.one().config_file_id).filter_by(deleted=False) if query_config_file_info.one().name == config_interface_info['file-name']\ and config_interface_info['section'] == query_config_info.one().section and config_interface_info['key'] == query_config_info.one().key: - del config_interface_info['file-name'] - config_interface_info['config_version']=query_config_info.one().config_version+1 - config_updated = query_config_info.one().update(config_interface_info) + config_info = copy.deepcopy(config_interface_info) + del config_info['file-name'] + config_info['config_version']=query_config_info.one().config_version+1 + query_config_info.one().update(config_info) config_flag=1 return config_flag else: continue return config_flag - + def add_config(session,config_interface_info,config_set_id,config_file_id): config_set_value=dict() add_config = models.Config() - del config_interface_info['file-name'] - config_interface_info['config_file_id']=config_file_id - config_interface_info['config_version']=1 - config_interface_info['running_version']=0 - add_config.update(config_interface_info) - _update_values(add_config,config_interface_info) + config_info = copy.deepcopy(config_interface_info) + del config_info['file-name'] + config_info['config_file_id']=config_file_id + config_info['config_version']=1 + config_info['running_version']=0 + add_config.update(config_info) + _update_values(add_config,config_info) add_config.save(session=session) add_config_setitem=models.ConfigSetItem() @@ -5519,26 +6008,52 @@ def config_interface(context, config_interface): config_flag=0 config_info_list=[] config_interface = config_interface.copy() + + if isinstance(config_interface['config'], list): + config_items = config_interface['config'] + else: + config_items = 
eval(config_interface['config']) + session = get_session() with session.begin(): if config_interface.get('role',None) and config_interface.get('cluster',None): - query_role_info=session.query(models.Role).filter_by(name=config_interface['role']).filter_by(cluster_id=config_interface['cluster']).filter_by(deleted=False) + query_role_info=session.query(models.Role).filter_by(\ + name=config_interface['role']).filter_by(\ + cluster_id=config_interface['cluster']).filter_by(\ + deleted=False) if query_role_info.one().config_set_id: config_set_id=query_role_info.one().config_set_id else: - msg = "No config_set_id found with Role Name %s" % config_interface.role-name + msg = "No config_set_id found with Role Name %s" % config_interface.role_name LOG.error(msg) raise exception.NotFound(msg) - else: - if config_interface.get('config_set',None): - config_set_id=config_interface.get('config_set',None) + elif config_interface.get('host_id',None): + query_host_ref = _host_get(context, + config_interface['host_id'], + session=session) + if query_host_ref.config_set_id: + config_set_id=query_host_ref.config_set_id else: - msg = "no role name and cluster id or config_set_id" - LOG.error(msg) - raise exception.NotFound(msg) - + #create config_set and get id + config_set_value = {'name':config_interface['host_id'], + 'description':'config set for host %s'\ + %config_interface['host_id']} + config_set = _config_set_update(context, config_set_value) + config_set_id = config_set['id'] + #add config_set_id to host + host_meta = {'config_set_id':config_set_id} + _host_update(context, + host_meta, + config_interface['host_id']) + elif config_interface.get('config_set',None): + config_set_id=config_interface.get('config_set',None) + else: + msg = "no way to add config" + LOG.error(msg) + raise exception.NotFound(msg) + try: - for config_interface_info in eval(config_interface['config']): + for config_interface_info in config_items: query_set_item_list=session.query(models.ConfigSetItem).filter_by(config_set_id=config_set_id).filter_by(deleted=False) if query_set_item_list.all(): config_exist=update_config(session,config_flag,config_set_id,query_set_item_list,config_interface_info) @@ -5550,13 +6065,13 @@ def config_interface(context, config_interface): add_config_and_file(session,config_interface_info,config_set_id) else: add_config_and_file(session,config_interface_info,config_set_id) - + except sa_orm.exc.NoResultFound: msg = "No config_set found with ID %s" % config_set_id LOG.error(msg) raise exception.NotFound(msg) - - for config_interface_info in eval(config_interface['config']): + + for config_interface_info in config_items: query_config_set_item_list=session.query(models.ConfigSetItem).filter_by(config_set_id=config_set_id).filter_by(deleted=False) if query_config_set_item_list.all(): for config_set_item in query_config_set_item_list.all(): @@ -5652,11 +6167,8 @@ def _service_disk_update(context, values, service_disk_id): updated = query.update(values, synchronize_session='fetch') if not updated: - msg = (_('cannot transition from %(current)s to ' - '%(next)s in update (wanted ' - 'from_state=%(from)s)') % - {'current': current, 'next': new_status, - 'from': from_state}) + msg = (_('update service_disk_id %(service_disk_id)s failed') % + {'service_disk_id': service_disk_id}) raise exception.Conflict(msg) else: service_disk_ref = models.ServiceDisk() @@ -5808,11 +6320,8 @@ def _cinder_volume_update(context, values, cinder_volume_id): updated = query.update(values, synchronize_session='fetch') if not 
updated: - msg = (_('cannot transition from %(current)s to ' - '%(next)s in update (wanted ' - 'from_state=%(from)s)') % - {'current': current, 'next': new_status, - 'from': from_state}) + msg = (_('update cinder_volume_id %(cinder_volume_id)s failed') % + {'cinder_volume_id': cinder_volume_id}) raise exception.Conflict(msg) else: cinder_volume_ref = models.CinderVolume() @@ -5883,6 +6392,127 @@ def cinder_volume_list(context, filters=None, **param): cinder_volume_ref = _cinder_volume_get(context, role_id=role_id) return cinder_volume_ref +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +def hwm_add(context, values): + """add hwm to daisy.""" + return _hwm_update(context, values, None) + + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +def hwm_update(context, hwm_id, values): + """update cluster template to daisy.""" + return _hwm_update(context, values, hwm_id) + + +def _hwm_update(context, values, hwm_id): + """update or add hwm to daisy.""" + values = values.copy() + session = get_session() + with session.begin(): + if hwm_id: + hwm_ref = _hwm_get(context, hwm_id, session=session) + else: + hwm_ref = models.Hwm() + + if hwm_id: + # Don't drop created_at if we're passing it in... + _drop_protected_attrs(models.Hwm, values) + # NOTE(iccha-sethi): updated_at must be explicitly set in case + # only ImageProperty table was modifited + values['updated_at'] = timeutils.utcnow() + + if hwm_id: + if values.get('id', None): del values['id'] + hwm_ref.update(values) + _update_values(hwm_ref, values) + try: + hwm_ref.save(session=session) + except db_exception.DBDuplicateEntry: + raise exception.Duplicate("Node ID %s already exists!" + % values['id']) + else: + hwm_ref.update(values) + _update_values(hwm_ref, values) + try: + hwm_ref.save(session=session) + except db_exception.DBDuplicateEntry: + raise exception.Duplicate("Node ID %s already exists!" 
+ % values['id']) + + return hwm_get(context, hwm_ref.id) + + +def hwm_destroy(context, hwm_id, session=None, force_show_deleted=False): + session = session or get_session() + with session.begin(): + hwm_ref = _hwm_get(context, hwm_id, session=session) + hwm_ref.delete(session=session) + return hwm_ref + + +def _hwm_get(context, hwm_id, session=None, force_show_deleted=False): + """Get an hwm or raise if it does not exist.""" + session = session or get_session() + try: + query = session.query(models.Hwm).filter_by(id=hwm_id) + + # filter out deleted images if context disallows it + if not force_show_deleted and not context.can_see_deleted: + query = query.filter_by(deleted=False) + hwm = query.one() + return hwm + except sa_orm.exc.NoResultFound: + msg = "No hwm found with ID %s" % hwm_id + LOG.debug(msg) + raise exception.NotFound(msg) + + +def hwm_get(context, hwm_id, session=None, force_show_deleted=False): + hwm = _hwm_get(context, hwm_id, session=session, + force_show_deleted=force_show_deleted) + return hwm + + +def hwm_get_all(context, filters=None, marker=None, limit=None, sort_key=None, + sort_dir=None): + sort_key = ['created_at'] if not sort_key else sort_key + + default_sort_dir = 'desc' + + if not sort_dir: + sort_dir = [default_sort_dir] * len(sort_key) + elif len(sort_dir) == 1: + default_sort_dir = sort_dir[0] + sort_dir *= len(sort_key) + + filters = filters or {} + showing_deleted = 'changes-since' in filters or filters.get('deleted', + False) + marker_hwm = None + if marker is not None: + marker_hwm = _hwm_get(context, marker, + force_show_deleted=showing_deleted) + + for key in ['created_at', 'id']: + if key not in sort_key: + sort_key.append(key) + sort_dir.append(default_sort_dir) + + session = get_session() + query = session.query(models.Hwm).filter_by(deleted=showing_deleted) + + query = _paginate_query(query, models.Hwm, limit, sort_key, + marker=marker_hwm, + sort_dir=None, + sort_dirs=sort_dir) + hwms = [] + for hwm in query.all(): + hwm = hwm.to_dict() + hwms.append(hwm) + return hwms + @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) def template_add(context, values): diff --git a/code/daisy/daisy/db/sqlalchemy/metadata.py b/code/daisy/daisy/db/sqlalchemy/metadata.py index 86a59285..96551f09 100755 --- a/code/daisy/daisy/db/sqlalchemy/metadata.py +++ b/code/daisy/daisy/db/sqlalchemy/metadata.py @@ -184,8 +184,8 @@ def _populate_metadata(meta, metadata_path=None, merge=False, json_schema_files = [metadata_path] else: json_schema_files = [f for f in os.listdir(metadata_path) - if isfile(join(metadata_path, f)) - and f.endswith('.json')] + if isfile(join(metadata_path, f)) and + f.endswith('.json')] except OSError as e: LOG.error(utils.exception_to_str(e)) return diff --git a/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/001_add_daisy_tables.py b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/001_add_daisy_tables.py index 2d92e19d..16f24924 100755 --- a/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/001_add_daisy_tables.py +++ b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/001_add_daisy_tables.py @@ -12,11 +12,11 @@ # License for the specific language governing permissions and limitations # under the License. 
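The hwm_* helpers added above follow the same add/update/get/list/destroy shape as the other Daisy DB resources. A minimal usage sketch under assumptions (the ctx request context and all field values below are invented for illustration, not taken from the patch):

    # Illustrative driver for the new hwm DB API in daisy.db.sqlalchemy.api;
    # ctx is assumed to be an admin request context, values are placeholders.
    from daisy.db.sqlalchemy import api as db_api

    hwm = db_api.hwm_add(ctx, {'hwm_ip': '192.168.1.50',
                               'description': 'hardware manager, rack 1'})
    db_api.hwm_update(ctx, hwm.id, {'description': 'rack 1 (renamed)'})
    hwms = db_api.hwm_get_all(ctx, filters={},
                              sort_key=['created_at'], sort_dir=['desc'])
    db_api.hwm_destroy(ctx, hwm.id)   # delete the hwm record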
-from sqlalchemy.schema import (Column, ForeignKey, Index, MetaData, Table) +from sqlalchemy.schema import (Column, ForeignKey, MetaData, Table) from daisy.db.sqlalchemy.migrate_repo.schema import ( - BigInteger, Boolean, DateTime, Integer, Numeric, String, Text, + BigInteger, Boolean, DateTime, Integer, String, Text, create_tables) # noqa @@ -49,43 +49,48 @@ def define_hosts_table(meta): Column('updated_at', DateTime(), nullable=False), Column('deleted_at', DateTime()), Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), + Boolean(), + nullable=False, + default=False, + index=True), mysql_engine='InnoDB', extend_existing=True) return hosts - + + def define_discover_hosts_table(meta): discover_hosts = Table('discover_hosts', - meta, - Column('id', String(36), primary_key=True, - nullable=False), - Column('ip', String(255), nullable=False), - Column('user', String(36)), - Column('passwd', String(36), nullable=False), - Column('status', String(255), default='init', nullable=True), - Column('created_at', DateTime(), nullable=True), - Column('updated_at', DateTime(), nullable=True), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - mysql_engine='InnoDB', - extend_existing=True) + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('ip', String(255), nullable=True), + Column('user', String(36)), + Column('passwd', String(36), nullable=True), + Column( + 'status', String(255), default='init', + nullable=True), + Column('created_at', DateTime(), nullable=True), + Column('updated_at', DateTime(), nullable=True), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) return discover_hosts + def define_clusters_table(meta): clusters = Table('clusters', meta, Column('id', String(36), primary_key=True, nullable=False), - Column('name', String(255), default='TECS', nullable=False), + Column( + 'name', String(255), default='TECS', nullable=False), Column('owner', String(255)), Column('description', Text()), Column('net_l23_provider', String(64)), @@ -98,43 +103,46 @@ def define_clusters_table(meta): Column('vni_end', BigInteger()), Column('public_vip', String(128)), Column('segmentation_type', String(64)), - Column('auto_scale', Integer(), nullable=False, default=0), + Column( + 'auto_scale', Integer(), nullable=False, default=0), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=False), Column('deleted_at', DateTime()), Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), + Boolean(), + nullable=False, + default=False, + index=True), mysql_engine='InnoDB', extend_existing=True) return clusters + def define_cluster_hosts_table(meta): cluster_hosts = Table('cluster_hosts', - meta, - Column('id', String(36), primary_key=True, - nullable=False), - Column('cluster_id', String(36), - ForeignKey('clusters.id'), - nullable=False), - Column('host_id', String(36), - nullable=False), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=False), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - mysql_engine='InnoDB', - extend_existing=True) - + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('cluster_id', String(36), + ForeignKey('clusters.id'), + nullable=False), + 
Column('host_id', String(36), + nullable=False), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + return cluster_hosts + def define_networks_table(meta): networks = Table('networks', meta, @@ -145,382 +153,416 @@ def define_networks_table(meta): Column('cluster_id', String(36)), Column('cidr', String(255)), Column('vlan_id', String(36)), - Column('vlan_start', Integer(),nullable=False, default=1), - Column('vlan_end', Integer(),nullable=False, default=4094), + Column( + 'vlan_start', Integer(), nullable=False, default=1), + Column( + 'vlan_end', Integer(), nullable=False, default=4094), Column('ip', String(256)), Column('gateway', String(128)), - Column('type', String(36), nullable=False, default='default'), + Column( + 'type', String(36), nullable=False, + default='default'), Column('ml2_type', String(36)), Column('network_type', String(36), nullable=False), Column('physnet_name', String(108)), - Column('capability', String(36)), + Column('capability', String(36), default='high'), Column('mtu', Integer(), nullable=False, default=1500), Column('alias', String(255)), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=False), Column('deleted_at', DateTime()), Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), + Boolean(), + nullable=False, + default=False, + index=True), mysql_engine='InnoDB', extend_existing=True) return networks - + + def define_ip_ranges_table(meta): ip_ranges = Table('ip_ranges', - meta, - Column('id', String(36), primary_key=True, - nullable=False), - Column('start', String(128)), - Column('end', String(128)), - Column('network_id', String(36)), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=False), - Column('deleted_at', DateTime()), - Column('deleted', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('start', String(128)), + Column('end', String(128)), + Column('network_id', String(36)), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', Boolean(), nullable=False, default=False, index=True), - mysql_engine='InnoDB', - extend_existing=True) + mysql_engine='InnoDB', + extend_existing=True) + + return ip_ranges + - return ip_ranges - def define_host_interfaces_table(meta): host_interfaces = Table('host_interfaces', - meta, - Column('id', String(36), primary_key=True, - nullable=False), - Column('host_id', String(36), - ForeignKey('hosts.id'), - nullable=False), - Column('name', String(64)), - Column('ip', String(256)), - Column('netmask', String(256)), - Column('gateway', String(256)), - Column('mac', String(256)), - Column('pci', String(32)), - Column('type', String(32),nullable=False, default='ether'), - Column('slave1', String(32)), - Column('slave2', String(32)), - Column('mode', String(36)), - Column('is_deployment', Boolean(),default=False), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=False), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - mysql_engine='InnoDB', - extend_existing=True) - - return host_interfaces - + meta, + Column('id', String(36), primary_key=True, 
+ nullable=False), + Column('host_id', String(36), + ForeignKey('hosts.id'), + nullable=False), + Column('name', String(64)), + Column('ip', String(256)), + Column('netmask', String(256)), + Column('gateway', String(256)), + Column('mac', String(256)), + Column('pci', String(32)), + Column( + 'type', String(32), nullable=False, + default='ether'), + Column('slave1', String(32)), + Column('slave2', String(32)), + Column('mode', String(36)), + Column('is_deployment', Boolean(), default=False), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return host_interfaces + + def define_host_roles_table(meta): host_roles = Table('host_roles', - meta, - Column('id', String(36), primary_key=True, - nullable=False), - Column('host_id', - String(36), - ForeignKey('hosts.id'), - nullable=False), - Column('role_id', - String(36), - ForeignKey('roles.id'), - nullable=False), - Column('status', String(32), nullable=False, default='init'), - Column('progress', Integer(), default=0), - Column('messages', Text()), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=False), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - mysql_engine='InnoDB', - extend_existing=True) + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('host_id', + String(36), + ForeignKey('hosts.id'), + nullable=False), + Column('role_id', + String(36), + ForeignKey('roles.id'), + nullable=False), + Column( + 'status', String(32), nullable=False, + default='init'), + Column('progress', Integer(), default=0), + Column('messages', Text()), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return host_roles + - return host_roles - - def define_roles_table(meta): roles = Table('roles', - meta, - Column('id', - String(36), primary_key=True, - nullable=False, index=True), - Column('name', - String(255), - nullable=False), - Column('status', String(32), nullable=False, default='init'), - Column('progress', Integer(), default=0), - Column('config_set_id', - String(36), - ForeignKey('config_sets.id')), - Column('description', Text()), - Column('cluster_id', String(36)), - Column('type', String(36), nullable=False, default='custom'), - Column('vip', String(256)), - Column('messages', Text()), - Column('db_lv_size', Integer()), - Column('glance_lv_size', Integer()), - Column('nova_lv_size', Integer(), default=0), - Column('disk_location', String(255), nullable=False, default='local'), - Column('deployment_backend', String(36)), - Column('config_set_update_progress', Integer(), default=0), - Column('ntp_server', String(255)), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=False), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - mysql_engine='InnoDB', - extend_existing=True) - + meta, + Column('id', + String(36), primary_key=True, + nullable=False, index=True), + Column('name', + String(255), + nullable=False), + Column('status', String(32), 
nullable=False, default='init'), + Column('progress', Integer(), default=0), + Column('config_set_id', + String(36), + ForeignKey('config_sets.id')), + Column('description', Text()), + Column('cluster_id', String(36)), + Column('type', String(36), nullable=False, default='custom'), + Column('vip', String(256)), + Column('messages', Text()), + Column('db_lv_size', Integer()), + Column('glance_lv_size', Integer()), + Column('nova_lv_size', Integer(), default=0), + Column( + 'disk_location', String(255), nullable=False, + default='local'), + Column('deployment_backend', String(36)), + Column('config_set_update_progress', Integer(), default=0), + Column('ntp_server', String(255)), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + return roles - + + def define_service_roles_table(meta): service_roles = Table('service_roles', - meta, - Column('id', String(36), primary_key=True, - nullable=False), - Column('role_id', String(36), ForeignKey('roles.id'), - nullable=False), - Column('service_id', String(36), ForeignKey('services.id'), nullable=False), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=False), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - mysql_engine='InnoDB', - extend_existing=True) + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('role_id', String(36), ForeignKey('roles.id'), + nullable=False), + Column( + 'service_id', String(36), ForeignKey( + 'services.id'), nullable=False), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) return service_roles - + + def define_services_table(meta): services = Table('services', meta, Column('id', String(36), primary_key=True, nullable=False), Column('name', String(255), nullable=False), - Column('component_id', String(36), ForeignKey('components.id'), nullable=True), + Column('component_id', String(36), ForeignKey( + 'components.id'), nullable=True), Column('description', Text()), - Column('backup_type', String(32), nullable=False, default='none'), + Column( + 'backup_type', String(32), nullable=False, + default='none'), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=False), Column('deleted_at', DateTime()), Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), + Boolean(), + nullable=False, + default=False, + index=True), mysql_engine='InnoDB', extend_existing=True) return services + def define_components_table(meta): components = Table('components', - meta, - Column('id', String(36), primary_key=True, - nullable=False), - Column('name', String(255), nullable=False), - Column('description', Text()), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=False), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - mysql_engine='InnoDB', - extend_existing=True) + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('name', String(255), nullable=False), + 
Column('description', Text()), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return components - return components def define_config_sets_table(meta): config_sets = Table('config_sets', - meta, - Column('id', String(36), primary_key=True, - nullable=False), - Column('name', String(255), nullable=False), - Column('description', Text()), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=False), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - mysql_engine='InnoDB', - extend_existing=True) + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('name', String(255), nullable=False), + Column('description', Text()), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return config_sets + - return config_sets - def define_configs_table(meta): configs = Table('configs', - meta, - Column('id', String(36), primary_key=True, - nullable=False), - Column('section', String(255)), - Column('key', String(255), nullable=False), - Column('value', String(255)), - Column('config_file_id', String(36), ForeignKey('config_files.id'), nullable=False), - Column('config_version', Integer(),default=0), - Column('running_version', Integer(),default=0), - Column('description', Text()), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=False), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - mysql_engine='InnoDB', - extend_existing=True) + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('section', String(255)), + Column('key', String(255), nullable=False), + Column('value', String(255)), + Column('config_file_id', String(36), ForeignKey( + 'config_files.id'), nullable=False), + Column('config_version', Integer(), default=0), + Column('running_version', Integer(), default=0), + Column('description', Text()), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return configs - return configs def define_config_files_table(meta): config_files = Table('config_files', - meta, - Column('id', String(36), primary_key=True, - nullable=False), - Column('name', String(255), nullable=False), - Column('description', Text()), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=False), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - mysql_engine='InnoDB', - extend_existing=True) - + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('name', String(255), nullable=False), + Column('description', Text()), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), 
+ Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + return config_files - + + def define_config_set_items_table(meta): config_set_items = Table('config_set_items', - meta, - Column('id', String(36), primary_key=True, - nullable=False), - Column('config_set_id', String(36), ForeignKey('config_sets.id'), - nullable=False), - Column('config_id', String(36), ForeignKey('configs.id'), nullable=False), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=False), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - mysql_engine='InnoDB', - extend_existing=True) - - return config_set_items - + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('config_set_id', String(36), + ForeignKey('config_sets.id'), + nullable=False), + Column('config_id', String(36), ForeignKey( + 'configs.id'), nullable=False), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return config_set_items + + def define_config_historys_table(meta): config_historys = Table('config_historys', - meta, - Column('id', String(36), primary_key=True, - nullable=False), - Column('config_id', String(36)), - Column('value', String(255)), - Column('version', Integer()), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=False), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - mysql_engine='InnoDB', - extend_existing=True) - + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('config_id', String(36)), + Column('value', String(255)), + Column('version', Integer()), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + return config_historys + def define_tasks_table(meta): tasks = Table('tasks', - meta, - Column('id', String(36), primary_key=True, nullable=False), - Column('type', String(30), nullable=False), - Column('status', String(30), nullable=False), - Column('owner', String(255), nullable=False), - Column('expires_at', DateTime()), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=False), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - mysql_engine='InnoDB', - extend_existing=True) - + meta, + Column('id', String(36), primary_key=True, nullable=False), + Column('type', String(30), nullable=False), + Column('status', String(30), nullable=False), + Column('owner', String(255), nullable=False), + Column('expires_at', DateTime()), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + return tasks + def define_task_infos_table(meta): task_infos = Table('task_infos', - meta, - Column('task_id', String(36)), - 
Column('input', Text()), - Column('result', Text()), - Column('message', Text()), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=False), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - mysql_engine='InnoDB', - extend_existing=True) - + meta, + Column('task_id', String(36)), + Column('input', Text()), + Column('result', Text()), + Column('message', Text()), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + return task_infos + def define_repositorys_table(meta): repositorys = Table('repositorys', - meta, - Column('id', String(36), primary_key=True, nullable=False), - Column('url', String(255)), - Column('description', Text()), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=False), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - mysql_engine='InnoDB', - extend_existing=True) - - return repositorys - + meta, + Column( + 'id', String(36), primary_key=True, + nullable=False), + Column('url', String(255)), + Column('description', Text()), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return repositorys + def define_users_table(meta): users = Table('users', @@ -536,184 +578,202 @@ def define_users_table(meta): Column('updated_at', DateTime(), nullable=False), Column('deleted_at', DateTime()), Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), + Boolean(), + nullable=False, + default=False, + index=True), mysql_engine='InnoDB', extend_existing=True) return users - + + def define_versions_table(meta): versions = Table('versions', - meta, - Column('id', String(36), primary_key=True, - nullable=False), - Column('name', String(256), nullable=False), - Column('size', BigInteger()), - Column('status', String(30)), - Column('checksum', String(128)), - Column('owner', String(256)), - Column('version', String(32)), - Column('type', String(30), default='0'), - Column('description', Text()), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=False), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - mysql_engine='InnoDB', - extend_existing=True) + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('name', String(256), nullable=False), + Column('size', BigInteger()), + Column('status', String(30)), + Column('checksum', String(128)), + Column('owner', String(256)), + Column('version', String(32)), + Column('type', String(30), default='0'), + Column('description', Text()), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return versions - return versions def define_assigned_networks_table(meta): assigned_networks = 
Table('assigned_networks', - meta, - Column('id', String(36), primary_key=True, - nullable=False), - Column('mac', String(128)), - Column('network_id', String(36)), - Column('interface_id', String(36)), - Column('ip', String(256)), - Column('vswitch_type', String(256)), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=False), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - mysql_engine='InnoDB', - extend_existing=True) + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('mac', String(128)), + Column('network_id', String(36)), + Column('interface_id', String(36)), + Column('ip', String(256)), + Column('vswitch_type', String(256)), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) return assigned_networks + def define_logic_networks_table(meta): logic_networks = Table('logic_networks', - meta, - Column('id', String(36), primary_key=True, - nullable=False), - Column('name', String(255), nullable=False), - Column('type', String(36)), - Column('physnet_name', String(255)), - Column('cluster_id', String(36), ForeignKey('clusters.id'), nullable=False), - Column('segmentation_id', BigInteger()), - Column('segmentation_type', String(64), nullable=False), - Column('shared', Boolean(), default=False), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=False), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - mysql_engine='InnoDB', - extend_existing=True) + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('name', String(255), nullable=False), + Column('type', String(36)), + Column('physnet_name', String(255)), + Column('cluster_id', String(36), ForeignKey( + 'clusters.id'), nullable=False), + Column('segmentation_id', BigInteger()), + Column( + 'segmentation_type', String(64), + nullable=False), + Column('shared', Boolean(), default=False), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) return logic_networks - + + def define_subnets_table(meta): subnets = Table('subnets', - meta, - Column('id', String(36), primary_key=True, - nullable=False), - Column('cidr', String(128)), - Column('gateway', String(128)), - Column('logic_network_id', String(36), ForeignKey('logic_networks.id'), nullable=False), - Column('name',String(255), nullable=False), - Column('router_id', String(36), ForeignKey('routers.id')), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=False), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - mysql_engine='InnoDB', - extend_existing=True) + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('cidr', String(128)), + Column('gateway', String(128)), + Column('logic_network_id', String(36), ForeignKey( + 'logic_networks.id'), nullable=False), + Column('name', String(255), nullable=False), + Column('router_id', String(36), 
ForeignKey('routers.id')), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) return subnets - + + def define_float_ip_ranges_table(meta): float_ip_ranges = Table('float_ip_ranges', - meta, - Column('id', String(36), primary_key=True, - nullable=False), - Column('start', String(128)), - Column('end', String(128)), - Column('subnet_id', String(36), ForeignKey('subnets.id'), nullable=False), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=False), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - mysql_engine='InnoDB', - extend_existing=True) + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('start', String(128)), + Column('end', String(128)), + Column('subnet_id', String(36), ForeignKey( + 'subnets.id'), nullable=False), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) return float_ip_ranges + def define_dns_nameservers_table(meta): dns_nameservers = Table('dns_nameservers', - meta, - Column('id', String(36), primary_key=True, - nullable=False), - Column('dns', String(128)), - Column('subnet_id', String(36), ForeignKey('subnets.id'), nullable=False), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=False), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - mysql_engine='InnoDB', - extend_existing=True) + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('dns', String(128)), + Column( + 'subnet_id', String(36), + ForeignKey('subnets.id'), nullable=False), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) return dns_nameservers + def define_routers_table(meta): routers = Table('routers', - meta, - Column('id', String(36), primary_key=True, - nullable=False), - Column('name', String(255)), - Column('description', Text()), - Column('cluster_id', String(36), ForeignKey('clusters.id'), nullable=False), - Column('external_logic_network', String(255)), - Column('created_at', DateTime(), nullable=False), - Column('updated_at', DateTime(), nullable=False), - Column('deleted_at', DateTime()), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - mysql_engine='InnoDB', - extend_existing=True) + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('name', String(255)), + Column('description', Text()), + Column('cluster_id', String(36), ForeignKey( + 'clusters.id'), nullable=False), + Column('external_logic_network', String(255)), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + 
return routers + - return routers - def define_service_disks_table(meta): disks = Table('service_disks', meta, Column('id', String(36), primary_key=True, nullable=False), Column('service', String(255)), - Column('role_id', - String(36), - ForeignKey('roles.id'), - nullable=False), - Column('disk_location', String(255), nullable=False, default='local'), + Column('role_id', + String(36), + ForeignKey('roles.id'), + nullable=False), + Column( + 'disk_location', String(255), + nullable=False, default='local'), Column('lun', Integer()), Column('data_ips', String(255)), Column('size', Integer()), @@ -721,15 +781,16 @@ def define_service_disks_table(meta): Column('updated_at', DateTime(), nullable=False), Column('deleted_at', DateTime()), Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), + Boolean(), + nullable=False, + default=False, + index=True), mysql_engine='InnoDB', extend_existing=True) - return disks - + return disks + + def define_cinder_volumes_table(meta): disks = Table('cinder_volumes', meta, @@ -739,27 +800,27 @@ def define_cinder_volumes_table(meta): Column('user_pwd', String(255)), Column('management_ips', String(255)), Column('pools', String(255)), - Column('volume_driver', String(255)), + Column('volume_driver', String(255)), Column('volume_type', String(255)), Column('backend_index', String(255)), - Column('role_id', - String(36), - ForeignKey('roles.id'), - nullable=False), + Column('role_id', + String(36), + ForeignKey('roles.id'), + nullable=False), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=False), Column('deleted_at', DateTime()), Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), + Boolean(), + nullable=False, + default=False, + index=True), mysql_engine='InnoDB', extend_existing=True) - return disks + return disks + - def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine @@ -774,7 +835,7 @@ def upgrade(migrate_engine): define_components_table(meta), define_services_table(meta), define_roles_table(meta), - define_host_roles_table(meta), + define_host_roles_table(meta), define_service_roles_table(meta), define_config_files_table(meta), define_configs_table(meta), diff --git a/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/002_add_role_type_roles.py b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/002_add_role_type_roles.py index 0004b0ae..7bd69b41 100755 --- a/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/002_add_role_type_roles.py +++ b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/002_add_role_type_roles.py @@ -19,9 +19,9 @@ from sqlalchemy import MetaData, Table, Column, String meta = MetaData() role_type = Column('role_type', String(255)) + def upgrade(migrate_engine): meta.bind = migrate_engine roles = Table('roles', meta, autoload=True) roles.create_column(role_type) - diff --git a/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/003_add_template_tables.py b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/003_add_template_tables.py index 8c25d575..c42e80da 100755 --- a/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/003_add_template_tables.py +++ b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/003_add_template_tables.py @@ -12,7 +12,7 @@ # License for the specific language governing permissions and limitations # under the License. 
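All of the define_*_table helpers reformatted above are consumed the same way: the 001 migration's upgrade() builds one shared MetaData, collects the Table objects the helpers return, and hands the list to create_tables(). A condensed sketch of that consumer, listing only two of the tables as an excerpt (the full list appears further down in this hunk):

    # Condensed consumer of the define_*_table helpers from this migration.
    from sqlalchemy.schema import MetaData
    from daisy.db.sqlalchemy.migrate_repo.schema import create_tables

    def upgrade(migrate_engine):
        meta = MetaData()
        meta.bind = migrate_engine
        tables = [define_hosts_table(meta),     # excerpt only: the real list
                  define_clusters_table(meta)]  # covers every table in this file
        create_tables(tables)                   # issues the CREATE TABLE statements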
-from sqlalchemy.schema import (Column, Index, MetaData, Table) +from sqlalchemy.schema import (Column, MetaData, Table) from daisy.db.sqlalchemy.migrate_repo.schema import ( Boolean, DateTime, String, Text, create_tables) @@ -20,47 +20,49 @@ from daisy.db.sqlalchemy.migrate_repo.schema import ( def define_template_table(meta): templates = Table('template', - meta, - Column('id', String(36), primary_key=True, - nullable=False), - Column('name', String(36), nullable=False), - Column('description', Text()), - Column('type', String(36), nullable=True), - Column('hosts', Text(), nullable=True), - Column('content', Text(), nullable=True), - Column('updated_at', DateTime(), nullable=False), - Column('deleted_at', DateTime()), - Column('created_at', DateTime(), nullable=False), - Column('deleted', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('name', String(36), nullable=False), + Column('description', Text()), + Column('type', String(36), nullable=True), + Column('hosts', Text(), nullable=True), + Column('content', Text(), nullable=True), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('created_at', DateTime(), nullable=False), + Column('deleted', Boolean(), nullable=False, default=False, index=True), - mysql_engine='InnoDB', - extend_existing=True) + mysql_engine='InnoDB', + extend_existing=True) return templates + def define_host_template_table(meta): host_templates = Table('host_templates', - meta, - Column('id', String(36), primary_key=True, - nullable=False), - Column('cluster_name', String(36), nullable=False), - Column('hosts', Text(), nullable=True), - Column('updated_at', DateTime(), nullable=False), - Column('deleted_at', DateTime()), - Column('created_at', DateTime(), nullable=False), - Column('deleted', - Boolean(), - nullable=False, - default=False, - index=True), - mysql_engine='InnoDB', - extend_existing=True) + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('cluster_name', String(36), nullable=False), + Column('hosts', Text(), nullable=True), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('created_at', DateTime(), nullable=False), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) return host_templates + def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine diff --git a/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/004_add_message_host_id_discover_hosts.py b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/004_add_message_host_id_discover_hosts.py index 469e1ee2..2cbbfa77 100755 --- a/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/004_add_message_host_id_discover_hosts.py +++ b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/004_add_message_host_id_discover_hosts.py @@ -19,6 +19,7 @@ meta = MetaData() message = Column('message', Text(), nullable=True) host_id = Column('host_id', String(36), nullable=True) + def upgrade(migrate_engine): meta.bind = migrate_engine diff --git a/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/007_add_role_db_glance_mogondb_vip.py b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/007_add_role_db_glance_mogondb_vip.py index 56ca2493..3dc7f76e 100755 --- a/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/007_add_role_db_glance_mogondb_vip.py +++ b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/007_add_role_db_glance_mogondb_vip.py @@ -24,14 +24,7 @@ 
mongodb_vip = Column('mongodb_vip', String(255)) def upgrade(migrate_engine): meta.bind = migrate_engine - - roles = Table('roles', meta, autoload=True) roles.create_column(db_vip) roles.create_column(glance_vip) roles.create_column(mongodb_vip) - - - - - diff --git a/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/008_add_cluster_use_dns.py b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/008_add_cluster_use_dns.py index aacfc9ec..061c25a4 100755 --- a/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/008_add_cluster_use_dns.py +++ b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/008_add_cluster_use_dns.py @@ -13,7 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. -from sqlalchemy import MetaData, Table, Column, String, Integer +from sqlalchemy import MetaData, Table, Column, Integer meta = MetaData() diff --git a/code/horizon/openstack_dashboard/dashboards/environment/network/subnets/urls.py b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/009_add_cluster_id_in_discover_hosts.py old mode 100644 new mode 100755 similarity index 59% rename from code/horizon/openstack_dashboard/dashboards/environment/network/subnets/urls.py rename to code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/009_add_cluster_id_in_discover_hosts.py index 7a4332ca..a3dc0115 --- a/code/horizon/openstack_dashboard/dashboards/environment/network/subnets/urls.py +++ b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/009_add_cluster_id_in_discover_hosts.py @@ -1,4 +1,5 @@ -# Copyright 2012 NEC Corporation +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -12,17 +13,15 @@ # License for the specific language governing permissions and limitations # under the License. -from django.conf.urls import patterns -from django.conf.urls import url - -from openstack_dashboard.dashboards.project.networks.subnets import views +from sqlalchemy import MetaData, Table, Column, String -SUBNETS = r'^(?P[^/]+)/%s$' -VIEW_MOD = 'openstack_dashboard.dashboards.project.networks.subnets.views' +meta = MetaData() +cluster_id = Column('cluster_id', String(36)) -urlpatterns = patterns( - VIEW_MOD, - url(SUBNETS % 'detail', views.DetailView.as_view(), name='detail') -) +def upgrade(migrate_engine): + meta.bind = migrate_engine + + discover_hosts = Table('discover_hosts', meta, autoload=True) + discover_hosts.create_column(cluster_id) diff --git a/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/010_add_share_disk_type.py b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/010_add_share_disk_type.py new file mode 100755 index 00000000..e34ba4e8 --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/010_add_share_disk_type.py @@ -0,0 +1,27 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# s +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
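Most of the remaining migration scripts in this series add columns to existing tables rather than creating new ones. After running one of them, the presence of the new column can be confirmed with plain SQLAlchemy reflection; a hedged check for the cluster_id column added by 009 (the engine URL is an assumption, not part of the patch):

    # Assumed verification snippet; adjust the DSN to the real daisy database.
    from sqlalchemy import MetaData, Table, create_engine

    engine = create_engine('mysql://daisy:daisy@127.0.0.1/daisy')
    meta = MetaData(bind=engine)
    discover_hosts = Table('discover_hosts', meta, autoload=True)
    assert 'cluster_id' in discover_hosts.c   # column created by migration 009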
+ +from sqlalchemy import MetaData, Table, Column, String + + +meta = MetaData() +protocol_type = Column('protocol_type', String(36)) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + service_disks = Table('service_disks', meta, autoload=True) + service_disks.create_column(protocol_type) diff --git a/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/011_add_hwm_table.py b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/011_add_hwm_table.py new file mode 100755 index 00000000..f7e3c753 --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/011_add_hwm_table.py @@ -0,0 +1,54 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, Table, Column, String +from daisy.db.sqlalchemy.migrate_repo.schema import (Boolean, DateTime, Text, + create_tables) + +hwm_id = Column('hwm_id', String(36)) +host_hwm_ip = Column('hwm_ip', String(256)) +cluster_hwm_ip = Column('hwm_ip', String(256)) + + +def define_hwm_table(meta): + hwm = Table('hwm', + meta, + Column('id', String(36), primary_key=True, nullable=False), + Column('hwm_ip', String(36), nullable=False), + Column('description', Text()), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('created_at', DateTime(), nullable=False), + Column('deleted', Boolean(), nullable=False, default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return hwm + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + tables = [define_hwm_table(meta), ] + create_tables(tables) + + hosts = Table('hosts', meta, autoload=True) + hosts.create_column(hwm_id) + hosts.create_column(host_hwm_ip) + + clusters = Table('clusters', meta, autoload=True) + clusters.create_column(cluster_hwm_ip) diff --git a/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/012_add_host_numa_set.py b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/012_add_host_numa_set.py new file mode 100755 index 00000000..7278e9a5 --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/012_add_host_numa_set.py @@ -0,0 +1,34 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
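A note on the 011 migration above: it declares two module-level Column objects, host_hwm_ip and cluster_hwm_ip, even though both describe the same 'hwm_ip' column name. That is presumably deliberate, since a SQLAlchemy Column instance can only be attached to one table, so reusing a single object for both hosts and clusters would fail. A self-contained illustration of the same idiom under assumptions (sqlite in-memory engine and toy tables, not the real daisy schema):

    # migrate.changeset provides the create_column() method the daisy
    # migrations rely on when run under sqlalchemy-migrate.
    import migrate.changeset  # noqa: adds create_column() to Table
    from sqlalchemy import MetaData, Table, Column, String, Integer, create_engine

    engine = create_engine('sqlite://')
    meta = MetaData(bind=engine)
    hosts = Table('hosts', meta, Column('id', Integer, primary_key=True))
    clusters = Table('clusters', meta, Column('id', Integer, primary_key=True))
    meta.create_all()

    host_hwm_ip = Column('hwm_ip', String(256))      # one Column object ...
    cluster_hwm_ip = Column('hwm_ip', String(256))   # ... per target table
    hosts.create_column(host_hwm_ip)
    clusters.create_column(cluster_hwm_ip)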
+ +from sqlalchemy import MetaData, Table, Column, String + +meta = MetaData() +vcpu_pin_set = Column('vcpu_pin_set', String(255)) +dvs_high_cpuset = Column('dvs_high_cpuset', String(255)) +pci_high_cpuset = Column('pci_high_cpuset', String(255)) +os_cpus = Column('os_cpus', String(255)) +dvs_cpus = Column('dvs_cpus', String(255)) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + hosts = Table('hosts', meta, autoload=True) + hosts.create_column(vcpu_pin_set) + hosts.create_column(dvs_high_cpuset) + hosts.create_column(pci_high_cpuset) + hosts.create_column(os_cpus) + hosts.create_column(dvs_cpus) diff --git a/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/013_add_mac_in_discover_hosts.py b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/013_add_mac_in_discover_hosts.py new file mode 100755 index 00000000..3cf0792f --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/013_add_mac_in_discover_hosts.py @@ -0,0 +1,27 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, Table, Column, String + + +meta = MetaData() +mac = Column('mac', String(36)) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + discover_hosts = Table('discover_hosts', meta, autoload=True) + discover_hosts.create_column(mac) diff --git a/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/014_add_host_config_set_id.py b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/014_add_host_config_set_id.py new file mode 100755 index 00000000..d2963db8 --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/014_add_host_config_set_id.py @@ -0,0 +1,26 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
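[editor's note, not part of the patch] Migrations 009, 010, 012, 013 and 015 all follow the same pattern: autoload an existing table and call sqlalchemy-migrate's create_column() on it. A rough standalone equivalent of the net effect at the SQL level, shown with plain SQLAlchemy and an in-memory SQLite database (the table stub, engine URL and explicit ALTER statement are illustrative assumptions; the real scripts never issue raw DDL themselves):

# Sketch only: what a column-add migration such as
# 013_add_mac_in_discover_hosts.py amounts to, expressed with plain SQLAlchemy.
from sqlalchemy import (create_engine, inspect, MetaData, Table,
                        Column, String, text)

engine = create_engine('sqlite:///:memory:')
meta = MetaData()

# Stand-in for the table the migration autoloads from the live schema.
Table('discover_hosts', meta, Column('id', String(36), primary_key=True))
meta.create_all(engine)

# The upgrade step adds a new nullable column to the existing table.
with engine.begin() as conn:
    conn.execute(text('ALTER TABLE discover_hosts ADD COLUMN mac VARCHAR(36)'))

# Inspect the live schema to confirm the column is present.
print([c['name'] for c in inspect(engine).get_columns('discover_hosts')])
# -> ['id', 'mac']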
+ +from sqlalchemy import MetaData, Table, Column, String + +meta = MetaData() +config_set_id = Column('config_set_id', String(36)) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + hosts = Table('hosts', meta, autoload=True) + hosts.create_column(config_set_id) diff --git a/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/015_add_segmentation_type_in_networks.py b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/015_add_segmentation_type_in_networks.py new file mode 100755 index 00000000..4d62b395 --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/015_add_segmentation_type_in_networks.py @@ -0,0 +1,33 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, Table, Column, String, BigInteger, Integer + +meta = MetaData() +segmentation_type = Column('segmentation_type', String(64)) +vni_start = Column('vni_start', BigInteger()) +vni_end = Column('vni_end', BigInteger()) +gre_id_start = Column('gre_id_start', Integer()) +gre_id_end = Column('gre_id_end', Integer()) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + networks = Table('networks', meta, autoload=True) + networks.create_column(segmentation_type) + networks.create_column(vni_start) + networks.create_column(vni_end) + networks.create_column(gre_id_start) + networks.create_column(gre_id_end) diff --git a/code/daisy/daisy/db/sqlalchemy/models.py b/code/daisy/daisy/db/sqlalchemy/models.py index 8c859fd5..4d8a6caa 100755 --- a/code/daisy/daisy/db/sqlalchemy/models.py +++ b/code/daisy/daisy/db/sqlalchemy/models.py @@ -32,12 +32,9 @@ from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import ForeignKey from sqlalchemy import Index from sqlalchemy import Integer -from sqlalchemy.orm import backref, relationship -from sqlalchemy import sql from sqlalchemy import String from sqlalchemy import Text from sqlalchemy.types import TypeDecorator -from sqlalchemy import UniqueConstraint BASE = declarative_base() @@ -49,6 +46,7 @@ def compile_big_int_sqlite(type_, compiler, **kw): class JSONEncodedDict(TypeDecorator): + """Represents an immutable structure as a json-encoded string""" impl = Text @@ -63,7 +61,9 @@ class JSONEncodedDict(TypeDecorator): value = jsonutils.loads(value) return value + class DaisyBase(models.ModelBase, models.TimestampMixin): + """Base class for Daisy Models.""" __table_args__ = {'mysql_engine': 'InnoDB'} @@ -74,9 +74,10 @@ class DaisyBase(models.ModelBase, models.TimestampMixin): def save(self, session=None): from daisy.db.sqlalchemy import api as db_api super(DaisyBase, self).save(session or db_api.get_session()) - - id = Column(String(36), primary_key=True, default=lambda: str(uuid.uuid4())) - + + id = Column( + String(36), primary_key=True, default=lambda: str(uuid.uuid4())) + created_at = Column(DateTime, default=lambda: timeutils.utcnow(), nullable=False) # TODO(vsergeyev): Column `updated_at` have no default value in @@ -115,7 +116,19 @@ class 
DaisyBase(models.ModelBase, models.TimestampMixin): d.pop("_sa_instance_state") return d + +class Hwm(BASE, DaisyBase): + + """Represents an hwm in the datastore.""" + __tablename__ = 'hwm' + __table_args__ = (Index('ix_hwm_deleted', 'deleted'),) + + description = Column(Text) + hwm_ip = Column(String(36), nullable=True) + + class Host(BASE, DaisyBase): + """Represents an host in the datastore.""" __tablename__ = 'hosts' __table_args__ = (Index('ix_hosts_deleted', 'deleted'),) @@ -124,40 +137,54 @@ class Host(BASE, DaisyBase): dmi_uuid = Column(String(36)) description = Column(Text) resource_type = Column(String(36)) - ipmi_user=Column(String(36)) - ipmi_passwd=Column(String(36)) - ipmi_addr=Column(String(255)) + ipmi_user = Column(String(36)) + ipmi_passwd = Column(String(36)) + ipmi_addr = Column(String(255)) status = Column(String(36), default='init', nullable=False) root_disk = Column(String(36)) - root_lv_size = Column(Integer(),default=51200) - swap_lv_size = Column(Integer(),default=4096) + root_lv_size = Column(Integer(), default=102400) + swap_lv_size = Column(Integer(), default=4096) root_pwd = Column(String(36)) isolcpus = Column(String(255)) os_version_id = Column(String(36)) os_version_file = Column(String(255)) - os_progress = Column(Integer(),default=0) + os_progress = Column(Integer(), default=0) os_status = Column(String(36)) messages = Column(Text) hugepagesize = Column(String(36)) - hugepages = Column(Integer(),default=0) - + hugepages = Column(Integer(), default=0) + hwm_id = Column(String(36)) + hwm_ip = Column(String(256)) + vcpu_pin_set = Column(String(255)) + dvs_high_cpuset = Column(String(255)) + pci_high_cpuset = Column(String(255)) + os_cpus = Column(String(255)) + dvs_cpus = Column(String(255)) + config_set_id = Column(String(36)) + + class DiscoverHost(BASE, DaisyBase): + """Represents an host in the datastore.""" __tablename__ = 'discover_hosts' __table_args__ = (Index('ix_discover_hosts_deleted', 'deleted'),) ip = Column(String(255)) - user=Column(String(36)) - passwd=Column(String(36)) + user = Column(String(36)) + passwd = Column(String(36)) status = Column(String(64), default='init') message = Column(Text) host_id = Column(String(36)) + cluster_id = Column(String(36)) + mac = Column(String(36)) + class Cluster(BASE, DaisyBase): + """Represents an clusters in the datastore.""" __tablename__ = 'clusters' __table_args__ = (Index('ix_clusters_deleted', 'deleted'),) - + name = Column(String(255), nullable=False) owner = Column(String(255)) description = Column(Text) @@ -173,19 +200,24 @@ class Cluster(BASE, DaisyBase): segmentation_type = Column(String(64)) auto_scale = Column(Integer(), nullable=False, default=0) use_dns = Column(Integer(), nullable=False, default=0) + hwm_ip = Column(String(256)) + class ClusterHost(BASE, DaisyBase): + """Represents an cluster host in the datastore.""" __tablename__ = 'cluster_hosts' __table_args__ = (Index('ix_cluster_hosts_deleted', 'deleted'),) cluster_id = Column(String(36), - ForeignKey('clusters.id'), - nullable=False) + ForeignKey('clusters.id'), + nullable=False) host_id = Column(String(36), - nullable=False) - + nullable=False) + + class Template(BASE, DaisyBase): + """Represents an cluster host in the datastore.""" __tablename__ = 'template' __table_args__ = (Index('ix_template_deleted', 'deleted'),) @@ -195,8 +227,10 @@ class Template(BASE, DaisyBase): type = Column(String(36), nullable=True) hosts = Column(Text(), nullable=True) content = Column(Text(), nullable=True) - + + class HostTemplate(BASE, DaisyBase): + 
"""Represents an host template in the datastore.""" __tablename__ = 'host_templates' __table_args__ = (Index('ix_host_template_deleted', 'deleted'),) @@ -206,27 +240,31 @@ class HostTemplate(BASE, DaisyBase): cluster_name = Column(String(36), nullable=True) hosts = Column(Text(), nullable=True) + class HostInterface(BASE, DaisyBase): + """Represents an host_interfaces in the datastore.""" __tablename__ = 'host_interfaces' __table_args__ = (Index('ix_host_interfaces_deleted', 'deleted'),) host_id = Column(String(36), - ForeignKey('hosts.id'), - nullable=False) + ForeignKey('hosts.id'), + nullable=False) name = Column(String(64)) ip = Column(String(256)) netmask = Column(String(256)) gateway = Column(String(256)) mac = Column(String(256)) pci = Column(String(32)) - type = Column(String(32),nullable=False, default='ether') + type = Column(String(32), nullable=False, default='ether') slave1 = Column(String(32)) slave2 = Column(String(32)) mode = Column(String(36)) - is_deployment=Column(Boolean(),default=False) + is_deployment = Column(Boolean(), default=False) + class Network(BASE, DaisyBase): + """Represents an networks in the datastore.""" __tablename__ = 'networks' __table_args__ = (Index('ix_networks_deleted', 'deleted'),) @@ -236,61 +274,74 @@ class Network(BASE, DaisyBase): cluster_id = Column(String(36)) cidr = Column(String(255)) vlan_id = Column(String(36)) + segmentation_type = Column(String(64)) vlan_start = Column(Integer(), nullable=False, default=1) vlan_end = Column(Integer(), nullable=False, default=4094) + gre_id_start = Column(Integer()) + gre_id_end = Column(Integer()) + vni_start = Column(BigInteger()) + vni_end = Column(BigInteger()) gateway = Column(String(128)) ip = Column(String(256)) type = Column(String(36), nullable=False, default='default') ml2_type = Column(String(36)) network_type = Column(String(36), nullable=False) physnet_name = Column(String(108)) - capability = Column(String(36)) + capability = Column(String(36), default='high') mtu = Column(Integer(), nullable=False, default=1500) alias = Column(String(255)) + + class IpRange(BASE, DaisyBase): + """Represents an ip_ranges in the datastore.""" __tablename__ = 'ip_ranges' __table_args__ = (Index('ix_ip_ranges_deleted', 'deleted'),) start = Column(String(128)) end = Column(String(128)) - network_id = Column(String(36)) + network_id = Column(String(36)) + class HostRole(BASE, DaisyBase): + """Represents an host_roles in the datastore.""" __tablename__ = 'host_roles' __table_args__ = (Index('ix_host_roles_deleted', 'deleted'),) - host_id = Column(String(36), - ForeignKey('hosts.id'), - nullable=False) + host_id = Column(String(36), + ForeignKey('hosts.id'), + nullable=False) role_id = Column(String(36), - ForeignKey('roles.id'), - nullable=False) + ForeignKey('roles.id'), + nullable=False) status = Column(String(32), nullable=False, default='init') progress = Column(Integer(), default=0) messages = Column(Text) + class Role(BASE, DaisyBase): + """Represents an roles in the datastore.""" __tablename__ = 'roles' - __table_args__ = (Index('ix_roles_deleted', 'deleted'),Index('ix_roles_id', 'id'),) + __table_args__ = ( + Index('ix_roles_deleted', 'deleted'), Index('ix_roles_id', 'id'),) - name = Column(String(255), - nullable=False) + name = Column(String(255), + nullable=False) description = Column(Text) status = Column(String(32), nullable=False, default='init') progress = Column(Integer(), default=0) - config_set_id = Column(String(36), - ForeignKey('config_sets.id')) + config_set_id = Column(String(36), + 
ForeignKey('config_sets.id')) cluster_id = Column(String(36)) type = Column(String(36), nullable=False, default='custom') vip = Column(String(256)) deployment_backend = Column(String(36)) messages = Column(Text) config_set_update_progress = Column(Integer(), default=0) - db_lv_size = Column(Integer(),default=0) - glance_lv_size = Column(Integer(),default=0) + db_lv_size = Column(Integer(), default=0) + glance_lv_size = Column(Integer(), default=0) nova_lv_size = Column(Integer(), default=0) disk_location = Column(String(255), nullable=False, default='local') ntp_server = Column(String(255)) @@ -300,25 +351,32 @@ class Role(BASE, DaisyBase): public_vip = Column(String(255)) mongodb_vip = Column(String(255)) + class ServiceRole(BASE, DaisyBase): + """Represents an service_roles in the datastore.""" __tablename__ = 'service_roles' __table_args__ = (Index('ix_service_roles_deleted', 'deleted'),) role_id = Column(String(36), ForeignKey('roles.id'), nullable=False) service_id = Column(String(36), ForeignKey('services.id'), nullable=False) - + + class Service(BASE, DaisyBase): + """Represents an services in the datastore.""" __tablename__ = 'services' __table_args__ = (Index('ix_services_deleted', 'deleted'),) name = Column(String(255), nullable=False) description = Column(Text) - component_id = Column(String(36), ForeignKey('components.id'), nullable=True) + component_id = Column( + String(36), ForeignKey('components.id'), nullable=True) backup_type = Column(String(32), nullable=False, default='none') + class Component(BASE, DaisyBase): + """Represents an components in the datastore.""" __tablename__ = 'components' __table_args__ = (Index('ix_components_deleted', 'deleted'),) @@ -326,15 +384,19 @@ class Component(BASE, DaisyBase): name = Column(String(255), nullable=False) description = Column(Text) + class ConfigSet(BASE, DaisyBase): + """Represents an config_sets in the datastore.""" __tablename__ = 'config_sets' __table_args__ = (Index('ix_config_sets_deleted', 'deleted'),) name = Column(String(255), nullable=False) - description = Column(Text) + description = Column(Text) + class Config(BASE, DaisyBase): + """Represents an configs in the datastore.""" __tablename__ = 'configs' __table_args__ = (Index('ix_configs_deleted', 'deleted'),) @@ -342,38 +404,47 @@ class Config(BASE, DaisyBase): section = Column(String(255)) key = Column(String(255), nullable=False) value = Column(String(255)) - config_file_id = Column(String(36), ForeignKey('config_files.id'), nullable=False) - config_version = Column(Integer(),default='0') - running_version = Column(Integer(),default='0') + config_file_id = Column( + String(36), ForeignKey('config_files.id'), nullable=False) + config_version = Column(Integer(), default='0') + running_version = Column(Integer(), default='0') description = Column(Text) + class ConfigFile(BASE, DaisyBase): + """Represents an config_files in the datastore.""" __tablename__ = 'config_files' __table_args__ = (Index('ix_config_files_deleted', 'deleted'),) name = Column(String(255), nullable=False) - description = Column(Text) - + description = Column(Text) + + class ConfigSetItem(BASE, DaisyBase): + """Represents an config_set_items in the datastore.""" __tablename__ = 'config_set_items' __table_args__ = (Index('ix_config_set_items_deleted', 'deleted'),) config_set_id = Column(String(36), ForeignKey('config_sets.id'), - nullable=False) + nullable=False) config_id = Column(String(36), ForeignKey('configs.id'), nullable=False) - + + class ConfigHistory(BASE, DaisyBase): + """Represents an 
config_historys in the datastore.""" __tablename__ = 'config_historys' __table_args__ = (Index('ix_config_historys_deleted', 'deleted'),) config_id = Column(String(36)) value = Column(String(255)) - version = Column(Integer()) - + version = Column(Integer()) + + class Task(BASE, DaisyBase): + """Represents an tasks in the datastore.""" __tablename__ = 'tasks' __table_args__ = (Index('ix_tasks_deleted', 'deleted'),) @@ -382,8 +453,10 @@ class Task(BASE, DaisyBase): status = Column(String(30), nullable=False) owner = Column(String(255), nullable=False) expires_at = Column(DateTime()) - + + class TaskInfo(BASE, DaisyBase): + """Represents an task_infos in the datastore.""" __tablename__ = 'task_infos' __table_args__ = (Index('ix_task_infos_deleted', 'deleted'),) @@ -392,17 +465,20 @@ class TaskInfo(BASE, DaisyBase): input = Column(Text()) result = Column(Text()) message = Column(Text()) - + + class Repository(BASE, DaisyBase): + """Represents an repositorys in the datastore.""" __tablename__ = 'repositorys' __table_args__ = (Index('ix_repositorys_deleted', 'deleted'),) url = Column(String(255)) description = Column(Text()) - + class User(BASE, DaisyBase): + """Represents an users in the datastore.""" __tablename__ = 'users' __table_args__ = (Index('ix_users_deleted', 'deleted'),) @@ -413,7 +489,9 @@ class User(BASE, DaisyBase): phone = Column(String(128)) address = Column(String(256)) + class Version(BASE, DaisyBase): + """Represents an versions in the datastore.""" __tablename__ = 'versions' __table_args__ = (Index('ix_versions_deleted', 'deleted'),) @@ -426,8 +504,10 @@ class Version(BASE, DaisyBase): version = Column(String(32)) type = Column(String(30), default='0') description = Column(Text()) - + + class AssignedNetworks(BASE, DaisyBase): + """Represents an assigned_networks in the datastore.""" __tablename__ = 'assigned_networks' __table_args__ = (Index('ix_assigned_networks_deleted', 'deleted'),) @@ -438,31 +518,38 @@ class AssignedNetworks(BASE, DaisyBase): ip = Column(String(256)) vswitch_type = Column(String(256)) + class LogicNetwork(BASE, DaisyBase): + """Represents an logic_networks in the datastore.""" __tablename__ = 'logic_networks' __table_args__ = (Index('ix_logic_networks_deleted', 'deleted'),) - + name = Column(String(255), nullable=False) type = Column(String(36)) physnet_name = Column(String(255)) - cluster_id= Column(String(36), ForeignKey('clusters.id'), nullable=False) + cluster_id = Column(String(36), ForeignKey('clusters.id'), nullable=False) segmentation_id = Column(BigInteger()) segmentation_type = Column(String(64), nullable=False) shared = Column(Boolean(), default=False) + class Subnet(BASE, DaisyBase): + """Represents an subnets in the datastore.""" __tablename__ = 'subnets' __table_args__ = (Index('ix_subnets_deleted', 'deleted'),) cidr = Column(String(128)) gateway = Column(String(128)) - logic_network_id = Column(String(36), ForeignKey('logic_networks.id'), nullable=False) + logic_network_id = Column( + String(36), ForeignKey('logic_networks.id'), nullable=False) name = Column(String(255), nullable=False) - router_id = Column(String(36), ForeignKey('routers.id')) + router_id = Column(String(36), ForeignKey('routers.id')) + class FloatIpRange(BASE, DaisyBase): + """Represents an float_ip_ranges in the datastore.""" __tablename__ = 'float_ip_ranges' __table_args__ = (Index('ix_float_ip_ranges_deleted', 'deleted'),) @@ -471,15 +558,19 @@ class FloatIpRange(BASE, DaisyBase): end = Column(String(36)) subnet_id = Column(String(36), ForeignKey('subnets.id'), 
nullable=False) + class DnsNameservers(BASE, DaisyBase): + """Represents an dns_nameservers in the datastore.""" __tablename__ = 'dns_nameservers' __table_args__ = (Index('ix_dns_nameservers_deleted', 'deleted'),) dns = Column(String(128)) - subnet_id = Column(String(36), ForeignKey('subnets.id'), nullable=False) + subnet_id = Column(String(36), ForeignKey('subnets.id'), nullable=False) + class Router(BASE, DaisyBase): + """Represents an routers in the datastore.""" __tablename__ = 'routers' __table_args__ = (Index('ix_routers_deleted', 'deleted'),) @@ -489,24 +580,28 @@ class Router(BASE, DaisyBase): cluster_id = Column(String(36), ForeignKey('clusters.id'), nullable=False) external_logic_network = Column(String(255)) + class ServiceDisk(BASE, DaisyBase): + """Represents an service disks in the datastore.""" __tablename__ = 'service_disks' __table_args__ = (Index('ix_service_disks_deleted', 'deleted'),) - + service = Column(String(255)) role_id = Column(String(36), ForeignKey('roles.id'), nullable=False) disk_location = Column(String(255), nullable=False, default='local') lun = Column(Integer()) data_ips = Column(String(255)) size = Column(Integer()) + protocol_type = Column(String(36)) + - class CinderVolume(BASE, DaisyBase): + """Represents an cinder volumes in the datastore.""" __tablename__ = 'cinder_volumes' __table_args__ = (Index('ix_service_disks_deleted', 'deleted'),) - + user_name = Column(String(255)) user_pwd = Column(String(255)) management_ips = Column(String(255)) @@ -517,14 +612,28 @@ class CinderVolume(BASE, DaisyBase): backend_index = Column(String(255)) role_id = Column(String(36), ForeignKey('roles.id'), nullable=False) + def register_models(engine): """Create database tables for all models with the given engine.""" - models = (Host,Project) + models = (Hwm, Host, DiscoverHost, Cluster, ClusterHost, Template, + HostTemplate, HostInterface, Network, IpRange, HostRole, + Role, ServiceRole, Service, Component, ConfigSet, Config, + ConfigFile, ConfigSetItem, ConfigHistory, Task, TaskInfo, + Repository, User, Version, AssignedNetworks, LogicNetwork, + Subnet, FloatIpRange, DnsNameservers, Router, ServiceDisk, + CinderVolume) for model in models: model.metadata.create_all(engine) + def unregister_models(engine): """Drop database tables for all models with the given engine.""" - models = (Host,project) + models = (Hwm, Host, DiscoverHost, Cluster, ClusterHost, Template, + HostTemplate, HostInterface, Network, IpRange, HostRole, + Role, ServiceRole, Service, Component, ConfigSet, Config, + ConfigFile, ConfigSetItem, ConfigHistory, Task, TaskInfo, + Repository, User, Version, AssignedNetworks, LogicNetwork, + Subnet, FloatIpRange, DnsNameservers, Router, ServiceDisk, + CinderVolume) for model in models: model.metadata.drop_all(engine) diff --git a/code/daisy/daisy/gateway.py b/code/daisy/daisy/gateway.py index 49b04601..20e0fb3f 100755 --- a/code/daisy/daisy/gateway.py +++ b/code/daisy/daisy/gateway.py @@ -138,8 +138,8 @@ class Gateway(object): image_repo = self.get_repo(context) image_factory = self.get_image_factory(context) return daisy.domain.TaskExecutorFactory(task_repo, - image_repo, - image_factory) + image_repo, + image_factory) def get_metadef_namespace_factory(self, context): ns_factory = daisy.domain.MetadefNamespaceFactory() diff --git a/code/daisy/daisy/opts.py b/code/daisy/daisy/opts.py index ed53540e..90804fb4 100755 --- a/code/daisy/daisy/opts.py +++ b/code/daisy/daisy/opts.py @@ -12,14 +12,6 @@ # License for the specific language governing permissions and 
limitations # under the License. -__all__ = [ - 'list_api_opts', - 'list_registry_opts', - 'list_scrubber_opts', - 'list_cache_opts', - 'list_manage_opts' -] - import copy import itertools @@ -39,6 +31,14 @@ import daisy.registry.client import daisy.registry.client.v1.api import daisy.scrubber +__all__ = [ + 'list_api_opts', + 'list_registry_opts', + 'list_scrubber_opts', + 'list_cache_opts', + 'list_manage_opts' +] + _api_opts = [ (None, list(itertools.chain( diff --git a/code/daisy/daisy/orchestration/__init__.py b/code/daisy/daisy/orchestration/__init__.py index 8b137891..e69de29b 100755 --- a/code/daisy/daisy/orchestration/__init__.py +++ b/code/daisy/daisy/orchestration/__init__.py @@ -1 +0,0 @@ - diff --git a/code/daisy/daisy/orchestration/manager.py b/code/daisy/daisy/orchestration/manager.py index 0ffa269a..4152c532 100755 --- a/code/daisy/daisy/orchestration/manager.py +++ b/code/daisy/daisy/orchestration/manager.py @@ -16,28 +16,15 @@ """ /orchestration for tecs API """ -import copy -import subprocess -import time -import traceback -import webob.exc from oslo_config import cfg from oslo_log import log as logging -from webob.exc import HTTPBadRequest -from webob.exc import HTTPForbidden -from webob.exc import HTTPServerError from webob.exc import HTTPNotFound -import threading -from threading import Thread - -from daisy import i18n -from daisy import notifier from daisy.common import exception from daisyclient.v1.client import Client -from eventlet import greenthread -import eventlet.timeout +import ConfigParser + LOG = logging.getLogger(__name__) CONF = cfg.CONF @@ -47,116 +34,156 @@ class OrchestrationManager(): def __init__(self, *args, **kwargs): """Load orchestration options and initialization.""" pass - - @staticmethod + + @staticmethod def find_auto_scale_cluster(): try: daisy_version = 1.0 - daisy_endpoint="http://127.0.0.1:19292" - daisy_client = Client(version=daisy_version, endpoint=daisy_endpoint) + config_discoverd = ConfigParser.ConfigParser() + config_discoverd.read("/etc/daisy/daisy-api.conf") + bind_port = config_discoverd.get("DEFAULT", "bind_port") + daisy_endpoint = "http://127.0.0.1:" + bind_port + # daisy_endpoint="http://127.0.0.1:19292" + daisy_client = Client( + version=daisy_version, endpoint=daisy_endpoint) orchestrationManager = OrchestrationManager() - cluster_meta={'auto_scale':'1'} - params = {'filters':cluster_meta} + cluster_meta = {'auto_scale': '1'} + params = {'filters': cluster_meta} clusters_gen = daisy_client.clusters.list(**params) - clusters = [cluster.to_dict() for cluster in clusters_gen if cluster.auto_scale == 1 ] + clusters = [cluster.to_dict() + for cluster in clusters_gen if cluster.auto_scale == 1] if clusters: cluster_id = clusters[0]['id'] - params = {'filters':''} + params = {'filters': ''} hosts_gen = daisy_client.hosts.list(**params) - init_hosts = [host.to_dict() for host in hosts_gen if host.os_status =="init" or host.os_status == "install-failed"] + init_hosts = [host.to_dict( + ) for host in hosts_gen if host.os_status == "init" or + host.os_status == "install-failed"] if not init_hosts: LOG.info("no init or install-failed host") - return {"status":"no init host"} - - params = {'filters':{'cluster_id':cluster_id}} + return {"status": "no init host"} + + params = {'filters': {'cluster_id': cluster_id}} roles_gen = daisy_client.roles.list(**params) roles_in_cluster = [role.to_dict() for role in roles_gen] - roles = [role for role in roles_in_cluster if role['name'] =="CONTROLLER_HA" and role['status'] == "active"] + roles = 
[role for role in roles_in_cluster if role[ + 'name'] == "CONTROLLER_HA" and role['status'] == "active"] if not roles: LOG.info("no active CONTROLLER_HA role") - return {"status":"no active CONTROLLER_HA role"} + return {"status": "no active CONTROLLER_HA role"} for host in init_hosts: if host['status'] == "init": host_info = daisy_client.hosts.get(host['id']) if hasattr(host_info, "interfaces"): - scale_host_info = orchestrationManager.set_scale_host_interface(cluster_id, host_info,daisy_client) - if scale_host_info: - host_meta ={'name':scale_host_info.name,'os_version':scale_host_info.os_version_file, 'root_lv_size':scale_host_info.root_lv_size,'swap_lv_size':scale_host_info.swap_lv_size, 'role':['COMPUTER'], 'cluster':cluster_id, 'interfaces':scale_host_info.interfaces } - daisy_client.hosts.update(host['id'],**host_meta) + scale_host = \ + orchestrationManager.set_scale_host_interface( + cluster_id, host_info, daisy_client) + if scale_host: + host_meta = { + 'hugepagesize': scale_host.hugepagesize, + 'hugepages': scale_host.hugepages, + 'isolcpus': scale_host.isolcpus, + 'name': scale_host.name, + 'os_version': scale_host.os_version_file, + 'root_lv_size': scale_host.root_lv_size, + 'swap_lv_size': scale_host.swap_lv_size, + 'role': ['COMPUTER'], + 'cluster': cluster_id, + 'interfaces': scale_host.interfaces} + daisy_client.hosts.update( + host['id'], **host_meta) else: LOG.error("can not set scale host") - return {"status":"no scale host"} - + return {"status": "no scale host"} + else: LOG.info("not interfaces in host %s" % host['id']) - raise HTTPNotFound("not interfaces in host %s" % host['id']) + raise HTTPNotFound( + "not interfaces in host %s" % host['id']) orchestrationManager._os_tecs_install(cluster_id, daisy_client) except exception.Invalid as e: LOG.exception(e.message) - - + def _os_tecs_install(self, cluster_id, daisy_client): try: - install_meta = {'cluster_id':cluster_id} + install_meta = {'cluster_id': cluster_id} daisy_client.install.install(**install_meta) - LOG.info("install cluster %s" %cluster_id) + LOG.info("install cluster %s" % cluster_id) except exception.Invalid as e: LOG.error("install error:%s" % e.message) def get_active_compute(self, cluster_id, daisy_client): - host_meta={'cluster_id':cluster_id} + host_meta = {'cluster_id': cluster_id} host_meta['filters'] = host_meta host_list_generator = daisy_client.hosts.list(**host_meta) active_compute_list = [] - host_list = [host for host in host_list_generator if hasattr(host,"role_status") and host.role_status == "active"] + host_list = [host for host in host_list_generator if hasattr( + host, "role_status") and host.role_status == "active"] for host in host_list: host_info = daisy_client.hosts.get(host.id) - if hasattr(host_info,"role") and "COMPUTER" in host_info.role and hasattr(host_info,"interfaces"): + if hasattr(host_info, "role") and "COMPUTER" in host_info.role and\ + hasattr(host_info, "interfaces"): active_compute_list.append(host_info) return active_compute_list - + def set_scale_host_interface(self, cluster_id, host_info, daisy_client): compute_list = [] active_compute_host = None compute_list = self.get_active_compute(cluster_id, daisy_client) - if compute_list and hasattr(host_info,"interfaces"): - active_compute_host = self.check_isomorphic_host(compute_list, host_info.interfaces) + if compute_list and hasattr(host_info, "interfaces"): + active_compute_host = self.check_isomorphic_host( + compute_list, host_info.interfaces) if not active_compute_host: LOG.info("%s not isomorphic host" % 
host_info.name) return None host_info.os_version_file = active_compute_host.os_version_file host_info.root_lv_size = active_compute_host.root_lv_size host_info.swap_lv_size = active_compute_host.swap_lv_size - host_info.name="computer-" + host_info.name[-12:] + host_info.name = "computer-" + host_info.name[-12:] + # add for autoscale computer host + host_info.hugepagesize = active_compute_host.hugepagesize + host_info.hugepages = active_compute_host.hugepages + host_info.isolcpus = active_compute_host.isolcpus else: LOG.error("no active compute node in cluster") return None - + if active_compute_host: for interface in host_info.interfaces: for compute_interface in active_compute_host.interfaces: - if interface['pci'] == compute_interface['pci'] and compute_interface.has_key("assigned_networks"): - for assigned_network in compute_interface['assigned_networks']: + if interface['pci'] == compute_interface['pci'] and \ + "assigned_networks" in compute_interface: + for assigned_network in compute_interface[ + 'assigned_networks']: assigned_network['ip'] = '' - interface['assigned_networks'] = compute_interface['assigned_networks'] + interface['assigned_networks'] = compute_interface[ + 'assigned_networks'] interface['name'] = compute_interface['name'] interface['netmask'] = compute_interface['netmask'] interface['gateway'] = compute_interface['gateway'] interface['mode'] = compute_interface['mode'] + interface['vswitch_type'] = \ + compute_interface['vswitch_type'] for compute_interface in active_compute_host.interfaces: for assigned_network in compute_interface['assigned_networks']: assigned_network['ip'] = '' compute_interface['host_id'] = host_info.id if compute_interface['type'] == "bond": - interfaces = [interface for interface in host_info.interfaces if interface['name'] == compute_interface['name']] + interfaces = [interface for interface in + host_info.interfaces if interface[ + 'name'] == compute_interface['name']] if not interfaces: host_info.interfaces.append(compute_interface) return host_info - + def check_isomorphic_host(self, compute_list, new_interfaces): for compute_host in compute_list: - new_interface_count = len([interface for interface in new_interfaces if interface['type'] =="ether"]) - compute_interface_count = len([interface for interface in compute_host.interfaces if interface['type'] =="ether"]) + new_interface_count = len( + [interface for interface in + new_interfaces if interface['type'] == "ether"]) + compute_interface_count = len( + [interface for interface in + compute_host.interfaces if interface['type'] == "ether"]) if new_interface_count != compute_interface_count: continue is_isomorphic = False @@ -164,9 +191,13 @@ class OrchestrationManager(): if interface['type'] != "ether": continue for compute_interface in compute_host.interfaces: - if interface['pci'] == compute_interface['pci'] and interface['max_speed'] == compute_interface['max_speed']: + if interface['pci'] == compute_interface['pci'] and\ + interface['max_speed'] == \ + compute_interface['max_speed']: is_isomorphic = True - elif interface['pci'] == compute_interface['pci'] and interface['max_speed'] != compute_interface['max_speed']: + elif interface['pci'] == compute_interface['pci'] and \ + interface['max_speed'] != \ + compute_interface['max_speed']: is_isomorphic = False break if not is_isomorphic: @@ -174,4 +205,3 @@ class OrchestrationManager(): if is_isomorphic: return compute_host return False - diff --git a/code/daisy/daisy/quota/__init__.py b/code/daisy/daisy/quota/__init__.py index 
6f91d96f..c886f7cf 100755 --- a/code/daisy/daisy/quota/__init__.py +++ b/code/daisy/daisy/quota/__init__.py @@ -250,8 +250,8 @@ class QuotaImageLocationsProxy(object): self.image, locations) daisy.api.common.check_quota(self.context, - required_size, - self.db_api) + required_size, + self.db_api) _enforce_image_location_quota(self.image, locations) def __copy__(self): diff --git a/code/daisy/daisy/registry/api/v1/__init__.py b/code/daisy/daisy/registry/api/v1/__init__.py index 35177c28..f0129faa 100755 --- a/code/daisy/daisy/registry/api/v1/__init__.py +++ b/code/daisy/daisy/registry/api/v1/__init__.py @@ -23,6 +23,8 @@ from daisy.registry.api.v1 import configs from daisy.registry.api.v1 import networks from daisy.registry.api.v1 import disk_array from daisy.registry.api.v1 import template +from daisy.registry.api.v1 import hwms + def init(mapper): @@ -32,7 +34,7 @@ def init(mapper): controller=members_resource, action="add_cluster_host", conditions={'method': ['PUT']}) - + mapper.connect("/clusters/{cluster_id}/nodes/{host_id}", controller=members_resource, action="delete_cluster_host", @@ -50,33 +52,60 @@ def init(mapper): action="get_host_clusters", conditions={'method': ['GET']}) + hwms_resource = hwms.create_resource() + + mapper.connect("/hwm", + controller=hwms_resource, + action="add_hwm", + conditions={'method': ['POST']}) + + mapper.connect("/hwm/{id}", + controller=hwms_resource, + action="delete_hwm", + conditions={'method': ['DELETE']}) + + mapper.connect("/hwm/{id}", + controller=hwms_resource, + action="update_hwm", + conditions={'method': ['PUT']}) + + mapper.connect("/hwm", + controller=hwms_resource, + action="hwm_list", + conditions={'method': ['GET']}) + + mapper.connect("/hwm/{id}", + controller=hwms_resource, + action="detail", + conditions=dict(method=["GET"])) + hosts_resource = hosts.create_resource() mapper.connect("/nodes", controller=hosts_resource, action="add_host", conditions={'method': ['POST']}) - + mapper.connect("/nodes/{id}", controller=hosts_resource, action="delete_host", conditions={'method': ['DELETE']}) - + mapper.connect("/nodes/{id}", controller=hosts_resource, action="update_host", conditions={'method': ['PUT']}) - + mapper.connect("/nodes", controller=hosts_resource, action="detail_host", conditions={'method': ['GET']}) - + mapper.connect("/nodes/{id}", controller=hosts_resource, action="get_host", conditions=dict(method=["GET"])) - + mapper.connect("/discover/nodes", controller=hosts_resource, action="add_discover_host", @@ -89,12 +118,12 @@ def init(mapper): controller=hosts_resource, action="update_discover_host", conditions={'method': ['PUT']}) - + mapper.connect("/discover/nodes/{discover_host_id}", controller=hosts_resource, action="get_discover_host", conditions=dict(method=["GET"])) - + mapper.connect("/discover/nodes/{id}", controller=hosts_resource, action="delete_discover_host", @@ -117,7 +146,7 @@ def init(mapper): controller=hosts_resource, action="add_cluster", conditions={'method': ['POST']}) - + mapper.connect("/clusters/{id}", controller=hosts_resource, action="update_cluster", @@ -132,13 +161,12 @@ def init(mapper): controller=hosts_resource, action='detail_cluster', conditions={'method': ['GET']}) - + mapper.connect("/clusters/{id}", controller=hosts_resource, action="get_cluster", conditions=dict(method=["GET"])) - mapper.connect("/components", controller=hosts_resource, action="add_component", @@ -159,7 +187,7 @@ def init(mapper): controller=hosts_resource, action="update_component", conditions={'method': ['PUT']}) - + 
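[editor's note, not part of the patch] The registry API changes above wire the new hwm controller into the Routes mapper with one connect() call per verb. A tiny sketch of how that URL dispatch behaves, using the routes library directly; the stand-alone Mapper, the bare controller string and the sample matches below are illustrative assumptions, and the method conditions that the real routes carry are omitted here:

# Sketch only: how Routes resolves the /hwm URLs registered above.
# The real mapper also passes conditions={'method': [...]} per route.
from routes import Mapper

mapper = Mapper()
mapper.connect('/hwm', controller='hwms', action='hwm_list')
mapper.connect('/hwm/{id}', controller='hwms', action='detail')

print(mapper.match('/hwm'))
# {'controller': 'hwms', 'action': 'hwm_list'}
print(mapper.match('/hwm/1234'))
# {'controller': 'hwms', 'action': 'detail', 'id': '1234'}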
mapper.connect("/services", controller=hosts_resource, action="add_service", @@ -204,11 +232,11 @@ def init(mapper): mapper.connect("/roles/{id}/services", controller=hosts_resource, action="role_services", - conditions={'method': ['GET']}) + conditions={'method': ['GET']}) mapper.connect("/roles/{id}/hosts", controller=hosts_resource, action="host_roles", - conditions={'method': ['GET']}) + conditions={'method': ['GET']}) mapper.connect("/roles/{id}/hosts", controller=hosts_resource, action="delete_role_hosts", @@ -217,33 +245,33 @@ def init(mapper): controller=hosts_resource, action="update_role_hosts", conditions={'method': ['PUT']}) - + config_files_resource = config_files.create_resource() mapper.connect("/config_files", controller=config_files_resource, action="add_config_file", conditions={'method': ['POST']}) - + mapper.connect("/config_files/{id}", controller=config_files_resource, action="delete_config_file", conditions={'method': ['DELETE']}) - + mapper.connect("/config_files/{id}", controller=config_files_resource, action="update_config_file", conditions={'method': ['PUT']}) - + mapper.connect("/config_files/detail", controller=config_files_resource, action="detail_config_file", conditions={'method': ['GET']}) - + mapper.connect("/config_files/{id}", controller=config_files_resource, action="get_config_file", - conditions=dict(method=["GET"])) + conditions=dict(method=["GET"])) config_sets_resource = config_sets.create_resource() @@ -251,22 +279,22 @@ def init(mapper): controller=config_sets_resource, action="add_config_set", conditions={'method': ['POST']}) - + mapper.connect("/config_sets/{id}", controller=config_sets_resource, action="delete_config_set", conditions={'method': ['DELETE']}) - + mapper.connect("/config_sets/{id}", controller=config_sets_resource, action="update_config_set", conditions={'method': ['PUT']}) - + mapper.connect("/config_sets/detail", controller=config_sets_resource, action="detail_config_set", conditions={'method': ['GET']}) - + mapper.connect("/config_sets/{id}", controller=config_sets_resource, action="get_config_set", @@ -278,33 +306,33 @@ def init(mapper): controller=configs_resource, action="add_config", conditions={'method': ['POST']}) - + mapper.connect("/configs/{id}", controller=configs_resource, action="delete_config", conditions={'method': ['DELETE']}) - + mapper.connect("/configs/{id}", controller=configs_resource, action="update_config", conditions={'method': ['PUT']}) mapper.connect("/configs/update_config_by_role_hosts", - controller=configs_resource, - action="update_config_by_role_hosts", - conditions={'method': ['POST']}) + controller=configs_resource, + action="update_config_by_role_hosts", + conditions={'method': ['POST']}) mapper.connect("/configs/detail", controller=configs_resource, action="detail_config", conditions={'method': ['GET']}) - + mapper.connect("/configs/{id}", controller=configs_resource, action="get_config", - conditions=dict(method=["GET"])) - - networks_resource = networks.create_resource() + conditions=dict(method=["GET"])) + + networks_resource = networks.create_resource() mapper.connect("/clusters/{id}/networks", controller=networks_resource, @@ -317,8 +345,10 @@ def init(mapper): conditions={'method': ['GET']}) # mapper.resource('network', 'networks',controller=networks_resource, - # collection={'update_phyname_of_network':'POST', 'add_network':"POST"}, - # member={'get_network':'GET', 'update_network':'PUT', 'delete_network':'DELETE'}) + # collection={'update_phyname_of_network':'POST', + # 
'add_network':"POST"}, + # member={'get_network':'GET', 'update_network':'PUT', + # 'delete_network':'DELETE'}) mapper.connect("/networks", controller=networks_resource, @@ -341,9 +371,9 @@ def init(mapper): conditions=dict(method=["GET"])) mapper.connect("/networks/update_phyname_of_network", - controller=networks_resource, - action="update_phyname_of_network", - conditions=dict(method=["POST"])) + controller=networks_resource, + action="update_phyname_of_network", + conditions=dict(method=["POST"])) config_interface_resource = hosts.create_resource() @@ -352,7 +382,7 @@ def init(mapper): action="config_interface", conditions={'method': ['POST']}) - array_resource = disk_array.create_resource() + array_resource = disk_array.create_resource() mapper.connect("/service_disk", controller=array_resource, action='service_disk_add', @@ -373,7 +403,7 @@ def init(mapper): controller=array_resource, action='service_disk_detail', conditions={'method': ['GET']}) - + mapper.connect("/cinder_volume", controller=array_resource, action='cinder_volume_add', @@ -394,8 +424,8 @@ def init(mapper): controller=array_resource, action='cinder_volume_detail', conditions={'method': ['GET']}) - - template_resource = template.create_resource() + + template_resource = template.create_resource() mapper.connect("/template", controller=template_resource, action='template_add', @@ -436,8 +466,9 @@ def init(mapper): mapper.connect("/host_template/{template_id}", controller=template_resource, action='host_template_detail', - conditions={'method': ['GET']}) - + conditions={'method': ['GET']}) + + class API(wsgi.Router): """WSGI entry point for all Registry requests.""" diff --git a/code/daisy/daisy/registry/api/v1/config_files.py b/code/daisy/daisy/registry/api/v1/config_files.py index f4899bf7..2d5682cf 100755 --- a/code/daisy/daisy/registry/api/v1/config_files.py +++ b/code/daisy/daisy/registry/api/v1/config_files.py @@ -38,12 +38,12 @@ _LW = i18n._LW CONF = cfg.CONF -DISPLAY_FIELDS_IN_INDEX = ['id', 'name','container_format', +DISPLAY_FIELDS_IN_INDEX = ['id', 'name', 'container_format', 'checksum'] SUPPORTED_FILTERS = ['name', 'container_format'] -SUPPORTED_SORT_KEYS = ('name', 'container_format', +SUPPORTED_SORT_KEYS = ('name', 'container_format', 'id', 'created_at', 'updated_at') SUPPORTED_SORT_DIRS = ('asc', 'desc') @@ -60,7 +60,7 @@ class Controller(object): """Get config_files, wrapping in exception if necessary.""" try: return self.db_api.config_file_get_all(context, filters=filters, - **params) + **params) except exception.NotFound: LOG.warn(_LW("Invalid marker. 
Config_file %(id)s could not be " "found.") % {'id': params.get('marker')}) @@ -109,7 +109,7 @@ class Controller(object): for key, value in params.items(): if value is None: del params[key] - + return params def _get_filters(self, req): @@ -225,21 +225,23 @@ class Controller(object): which will include the newly-created config_file's internal id in the 'id' field """ - + config_file_data = body["config_file"] config_file_id = config_file_data.get('id') - + if config_file_id and not utils.is_uuid_like(config_file_id): - msg = _LI("Rejecting config_file creation request for invalid config_file " + msg = _LI("Rejecting config_file creation request for " + "invalid config_file " "id '%(bad_id)s'") % {'bad_id': config_file_id} LOG.info(msg) msg = _("Invalid config_file id format") return exc.HTTPBadRequest(explanation=msg) try: - config_file_data = self.db_api.config_file_add(req.context, config_file_data) - + config_file_data = self.db_api.config_file_add( + req.context, config_file_data) + msg = (_LI("Successfully created config_file %s") % config_file_data["id"]) LOG.info(msg) @@ -247,7 +249,8 @@ class Controller(object): config_file_data = dict(config_file=config_file_data) return config_file_data except exception.Duplicate: - msg = _("config_file with identifier %s already exists!") % config_file_id + msg = _("config_file with identifier %s already exists!") % \ + config_file_id LOG.warn(msg) return exc.HTTPConflict(msg) except exception.Invalid as e: @@ -256,7 +259,9 @@ class Controller(object): LOG.error(msg) return exc.HTTPBadRequest(msg) except Exception: - LOG.exception(_LE("Unable to create config_file %s"), config_file_id) + LOG.exception( + _LE("Unable to create config_file %s"), + config_file_id) raise @utils.mutating @@ -270,12 +275,14 @@ class Controller(object): success, the body contains the deleted image information as a mapping. """ try: - deleted_config_file = self.db_api.config_file_destroy(req.context, id) + deleted_config_file = self.db_api.config_file_destroy( + req.context, id) msg = _LI("Successfully deleted config_file %(id)s") % {'id': id} LOG.info(msg) return dict(config_file=deleted_config_file) except exception.ForbiddenPublicImage: - msg = _LI("Delete denied for public config_file %(id)s") % {'id': id} + msg = _LI("Delete denied for public config_file %(id)s") % { + 'id': id} LOG.info(msg) raise exc.HTTPForbidden() except exception.Forbidden: @@ -317,7 +324,7 @@ class Controller(object): if 'config_file' not in config_file_data: config_file_data = dict(config_file=config_file_data) return config_file_data - + @utils.mutating def update_config_file(self, req, id, body): """Updates an existing config_file with the registry. 
@@ -330,7 +337,8 @@ class Controller(object): """ config_file_data = body['config_file'] try: - updated_config_file = self.db_api.config_file_update(req.context, id, config_file_data) + updated_config_file = self.db_api.config_file_update( + req.context, id, config_file_data) msg = _LI("Updating metadata for config_file %(id)s") % {'id': id} LOG.info(msg) @@ -370,6 +378,7 @@ class Controller(object): LOG.exception(_LE("Unable to update config_file %s") % id) raise + def create_resource(): """Images resource factory method.""" deserializer = wsgi.JSONRequestDeserializer() diff --git a/code/daisy/daisy/registry/api/v1/config_sets.py b/code/daisy/daisy/registry/api/v1/config_sets.py index 61ec8637..4c93c657 100755 --- a/code/daisy/daisy/registry/api/v1/config_sets.py +++ b/code/daisy/daisy/registry/api/v1/config_sets.py @@ -38,12 +38,12 @@ _LW = i18n._LW CONF = cfg.CONF -DISPLAY_FIELDS_IN_INDEX = ['id', 'name','container_format', +DISPLAY_FIELDS_IN_INDEX = ['id', 'name', 'container_format', 'checksum'] SUPPORTED_FILTERS = ['name', 'container_format'] -SUPPORTED_SORT_KEYS = ('name', 'container_format', +SUPPORTED_SORT_KEYS = ('name', 'container_format', 'id', 'created_at', 'updated_at') SUPPORTED_SORT_DIRS = ('asc', 'desc') @@ -60,7 +60,7 @@ class Controller(object): """Get config_sets, wrapping in exception if necessary.""" try: return self.db_api.config_set_get_all(context, filters=filters, - **params) + **params) except exception.NotFound: LOG.warn(_LW("Invalid marker. Config_set %(id)s could not be " "found.") % {'id': params.get('marker')}) @@ -109,7 +109,7 @@ class Controller(object): for key, value in params.items(): if value is None: del params[key] - + return params def _get_filters(self, req): @@ -225,21 +225,23 @@ class Controller(object): which will include the newly-created config_set's internal id in the 'id' field """ - + config_set_data = body["config_set"] config_set_id = config_set_data.get('id') - + if config_set_id and not utils.is_uuid_like(config_set_id): - msg = _LI("Rejecting config_set creation request for invalid config_set " + msg = _LI("Rejecting config_set creation request for " + "invalid config_set " "id '%(bad_id)s'") % {'bad_id': config_set_id} LOG.info(msg) msg = _("Invalid config_set id format") return exc.HTTPBadRequest(explanation=msg) try: - config_set_data = self.db_api.config_set_add(req.context, config_set_data) - + config_set_data = self.db_api.config_set_add( + req.context, config_set_data) + msg = (_LI("Successfully created config_set %s") % config_set_data["id"]) LOG.info(msg) @@ -247,7 +249,8 @@ class Controller(object): config_set_data = dict(config_set=config_set_data) return config_set_data except exception.Duplicate: - msg = _("config_set with identifier %s already exists!") % config_set_id + msg = _("config_set with identifier %s already exists!") % \ + config_set_id LOG.warn(msg) return exc.HTTPConflict(msg) except exception.Invalid as e: @@ -270,21 +273,21 @@ class Controller(object): success, the body contains the deleted image information as a mapping. 
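[editor's note, not part of the patch] The config_file/config_set controllers in these hunks all follow the same shape: call a db_api method, then translate daisy's domain exceptions into webob HTTP responses. A stripped-down sketch of that pattern, with a fake exception class and db lookup standing in for daisy.common.exception.NotFound and the real db_api (those stand-in names are assumptions for illustration only):

# Sketch only: the "catch domain exception, raise webob error" pattern
# used by the registry controllers above.
from webob import exc


class FakeNotFound(Exception):
    """Stand-in for daisy.common.exception.NotFound."""


def fake_db_get(config_set_id):
    # Pretend the row does not exist.
    raise FakeNotFound()


def get_config_set(config_set_id):
    try:
        return {'config_set': fake_db_get(config_set_id)}
    except FakeNotFound:
        msg = "config_set %s not found" % config_set_id
        # Same shape as the real handlers: hand the message to webob and
        # let the WSGI layer render the 404.
        raise exc.HTTPNotFound(explanation=msg, content_type='text/plain')


try:
    get_config_set('missing-id')
except exc.HTTPNotFound as e:
    print(e.status)   # 404 Not Found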
""" try: - deleted_config_set = self.db_api.config_set_destroy(req.context, id) + deleted_config_set = self.db_api.config_set_destroy( + req.context, id) msg = _LI("Successfully deleted config_set %(id)s") % {'id': id} LOG.info(msg) return dict(config_set=deleted_config_set) except exception.ForbiddenPublicImage: - msg = _LI("Delete denied for public config_set %(id)s") % {'id': id} + msg = _LI("Delete denied for public config_set %(id)s") % { + 'id': id} LOG.info(msg) raise exc.HTTPForbidden() - except exception.Forbidden: + except exception.Forbidden as e: # If it's private and doesn't belong to them, don't let on # that it exists - msg = _LI("Access denied to config_set %(id)s but returning" - " 'not found'") % {'id': id} - LOG.info(msg) - return exc.HTTPNotFound() + LOG.info(e) + return exc.HTTPForbidden(e) except exception.NotFound: msg = _LI("config_set %(id)s not found") % {'id': id} LOG.info(msg) @@ -316,15 +319,17 @@ class Controller(object): raise if 'config_set' not in config_set_data: config_set_data = dict(config_set=config_set_data) - config_items = self.db_api._config_item_get_by_config_set_id(req.context, id) + config_items = self.db_api._config_item_get_by_config_set_id( + req.context, id) config = [] for config_item in config_items: - config_inf = self.db_api.config_get(req.context, config_item['config_id']) + config_inf = self.db_api.config_get( + req.context, config_item['config_id']) config.append(config_inf) if config: config_set_data['config_set']['config'] = config return config_set_data - + @utils.mutating def update_config_set(self, req, id, body): """Updates an existing config_set with the registry. @@ -337,7 +342,8 @@ class Controller(object): """ config_set_data = body['config_set'] try: - updated_config_set = self.db_api.config_set_update(req.context, id, config_set_data) + updated_config_set = self.db_api.config_set_update( + req.context, id, config_set_data) msg = _LI("Updating metadata for config_set %(id)s") % {'id': id} LOG.info(msg) @@ -377,6 +383,7 @@ class Controller(object): LOG.exception(_LE("Unable to update config_set %s") % id) raise + def create_resource(): """Images resource factory method.""" deserializer = wsgi.JSONRequestDeserializer() diff --git a/code/daisy/daisy/registry/api/v1/configs.py b/code/daisy/daisy/registry/api/v1/configs.py index 9172b3b8..50d8e834 100755 --- a/code/daisy/daisy/registry/api/v1/configs.py +++ b/code/daisy/daisy/registry/api/v1/configs.py @@ -38,12 +38,12 @@ _LW = i18n._LW CONF = cfg.CONF -DISPLAY_FIELDS_IN_INDEX = ['id', 'name','container_format', +DISPLAY_FIELDS_IN_INDEX = ['id', 'name', 'container_format', 'checksum'] SUPPORTED_FILTERS = ['name', 'container_format'] -SUPPORTED_SORT_KEYS = ('name', 'container_format', +SUPPORTED_SORT_KEYS = ('name', 'container_format', 'id', 'created_at', 'updated_at') SUPPORTED_SORT_DIRS = ('asc', 'desc') @@ -60,7 +60,7 @@ class Controller(object): """Get configs, wrapping in exception if necessary.""" try: return self.db_api.config_get_all(context, filters=filters, - **params) + **params) except exception.NotFound: LOG.warn(_LW("Invalid marker. 
Config %(id)s could not be " "found.") % {'id': params.get('marker')}) @@ -109,7 +109,7 @@ class Controller(object): for key, value in params.items(): if value is None: del params[key] - + return params def _get_filters(self, req): @@ -225,11 +225,11 @@ class Controller(object): which will include the newly-created config's internal id in the 'id' field """ - + config_data = body["config"] config_id = config_data.get('id') - + if config_id and not utils.is_uuid_like(config_id): msg = _LI("Rejecting config creation request for invalid config " "id '%(bad_id)s'") % {'bad_id': config_id} @@ -239,7 +239,7 @@ class Controller(object): try: config_data = self.db_api.config_add(req.context, config_data) - + msg = (_LI("Successfully created config %s") % config_data["id"]) LOG.info(msg) @@ -317,10 +317,11 @@ class Controller(object): if 'config' not in config_data: config_data = dict(config=config_data) return config_data - + @utils.mutating def update_config_by_role_hosts(self, req, body): - return self.db_api.update_config_by_role_hosts(req.context, body['configs']) + return self.db_api.update_config_by_role_hosts( + req.context, body['configs']) @utils.mutating def update_config(self, req, id, body): @@ -334,7 +335,8 @@ class Controller(object): """ config_data = body['config'] try: - updated_config = self.db_api.config_update(req.context, id, config_data) + updated_config = self.db_api.config_update( + req.context, id, config_data) msg = _LI("Updating metadata for config %(id)s") % {'id': id} LOG.info(msg) @@ -374,6 +376,7 @@ class Controller(object): LOG.exception(_LE("Unable to update config %s") % id) raise + def create_resource(): """Images resource factory method.""" deserializer = wsgi.JSONRequestDeserializer() diff --git a/code/daisy/daisy/registry/api/v1/disk_array.py b/code/daisy/daisy/registry/api/v1/disk_array.py index 23683960..7c5bd7ef 100755 --- a/code/daisy/daisy/registry/api/v1/disk_array.py +++ b/code/daisy/daisy/registry/api/v1/disk_array.py @@ -42,22 +42,25 @@ DISPLAY_FIELDS_IN_INDEX = ['id', 'name', 'size', 'disk_format', 'container_format', 'checksum'] -SUPPORTED_FILTERS = ['name', 'status', 'role_id', 'container_format', 'disk_format', +SUPPORTED_FILTERS = ['name', 'status', 'role_id', 'container_format', + 'disk_format', 'min_ram', 'min_disk', 'size_min', 'size_max', 'changes-since', 'protected'] -SUPPORTED_SORT_KEYS = ('name', 'status', 'cluster_id', 'container_format', 'disk_format', +SUPPORTED_SORT_KEYS = ('name', 'status', 'cluster_id', 'container_format', + 'disk_format', 'size', 'id', 'created_at', 'updated_at') SUPPORTED_SORT_DIRS = ('asc', 'desc') SUPPORTED_PARAMS = ('limit', 'marker', 'sort_key', 'sort_dir', 'cluster_id') -SUPPORTED_SORT_KEYS = ('name','role_id', 'status', 'container_format', 'disk_format', +SUPPORTED_SORT_KEYS = ('name', 'role_id', 'status', 'container_format', + 'disk_format', 'size', 'id', 'created_at', 'updated_at') SUPPORTED_SORT_DIRS = ('asc', 'desc') -SUPPORTED_PARAMS = ('role_id','limit', 'marker', 'sort_key', 'sort_dir') +SUPPORTED_PARAMS = ('role_id', 'limit', 'marker', 'sort_key', 'sort_dir') class Controller(object): @@ -82,7 +85,7 @@ class Controller(object): for key, value in params.items(): if value is None: del params[key] - + return params def _get_filters(self, req): @@ -194,11 +197,12 @@ class Controller(object): :param req: wsgi Request object :param body: Dictionary of information about the service_disk - :retval Returns the newly-created service_disk information as a mapping, + :retval Returns the newly-created service_disk 
+ information as a mapping, which will include the newly-created service_disk's internal id in the 'id' field """ - + service_disk_data = body["service_disk"] id = service_disk_data.get('id') @@ -207,17 +211,20 @@ class Controller(object): # add id and role # if role # self.db_api.get_role(req.context,role) - + if id and not utils.is_uuid_like(id): - msg = _LI("Rejecting service_disk creation request for invalid service_disk " + msg = _LI("Rejecting service_disk creation request for " + "invalid service_disk " "id '%(bad_id)s'") % {'bad_id': id} LOG.info(msg) msg = _("Invalid service_disk id format") return exc.HTTPBadRequest(explanation=msg) try: - service_disk_data = self.db_api.service_disk_add(req.context, service_disk_data) - #service_disk_data = dict(service_disk=make_image_dict(service_disk_data)) + service_disk_data = self.db_api.service_disk_add( + req.context, service_disk_data) + # service_disk_data = dict(service_disk=make_image_dict( + # service_disk_data)) msg = (_LI("Successfully created node %s") % service_disk_data["id"]) LOG.info(msg) @@ -225,7 +232,7 @@ class Controller(object): service_disk_data = dict(service_disk=service_disk_data) return service_disk_data except exception.Duplicate: - msg = _("node with identifier %s already exists!") % image_id + msg = _("node with identifier %s already exists!") % id LOG.warn(msg) return exc.HTTPConflict(msg) except exception.Invalid as e: @@ -248,12 +255,14 @@ class Controller(object): success, the body contains the deleted image information as a mapping. """ try: - deleted_service_disk = self.db_api.service_disk_destroy(req.context, id) + deleted_service_disk = self.db_api.service_disk_destroy( + req.context, id) msg = _LI("Successfully deleted service_disk %(id)s") % {'id': id} LOG.info(msg) return dict(service_disk=deleted_service_disk) except exception.ForbiddenPublicImage: - msg = _LI("Delete denied for public service_disk %(id)s") % {'id': id} + msg = _LI("Delete denied for public service_disk %(id)s") % { + 'id': id} LOG.info(msg) raise exc.HTTPForbidden() except exception.Forbidden: @@ -270,8 +279,7 @@ class Controller(object): except Exception: LOG.exception(_LE("Unable to delete service_disk %s") % id) raise - - + @utils.mutating def service_disk_update(self, req, id, body): """Updates an existing service_disk with the registry. 
@@ -284,7 +292,8 @@ class Controller(object): """ service_disk_data = body['service_disk'] try: - updated_service_disk = self.db_api.service_disk_update(req.context, id, service_disk_data) + updated_service_disk = self.db_api.service_disk_update( + req.context, id, service_disk_data) msg = _LI("Updating metadata for service_disk %(id)s") % {'id': id} LOG.info(msg) @@ -303,7 +312,8 @@ class Controller(object): request=req, content_type='text/plain') except exception.ForbiddenPublicImage: - msg = _LI("Update denied for public service_disk %(id)s") % {'id': id} + msg = _LI("Update denied for public service_disk %(id)s") % { + 'id': id} LOG.info(msg) raise exc.HTTPForbidden() except exception.Forbidden: @@ -316,13 +326,13 @@ class Controller(object): except Exception: LOG.exception(_LE("Unable to update service_disk %s") % id) raise - - + @utils.mutating def service_disk_detail(self, req, id): """Return data about the given service_disk id.""" try: - service_disk_data = self.db_api.service_disk_detail(req.context, id) + service_disk_data = self.db_api.service_disk_detail( + req.context, id) msg = "Successfully retrieved service_disk %(id)s" % {'id': id} LOG.debug(msg) except exception.NotFound: @@ -342,12 +352,12 @@ class Controller(object): if 'service_disk' not in service_disk_data: service_disk_data = dict(service_disk=service_disk_data) return service_disk_data - + def _list_service_disks(self, context, filters, params): """Get service_disks, wrapping in exception if necessary.""" try: return self.db_api.service_disk_list(context, filters=filters, - **params) + **params) except exception.NotFound: LOG.warn(_LW("Invalid marker. service_disk %(id)s could not be " "found.") % {'id': params.get('marker')}) @@ -363,7 +373,8 @@ class Controller(object): raise def service_disk_list(self, req): - """Return a filtered list of public, non-deleted service_disks in detail + """Return a filtered list of public, + non-deleted service_disks in detail :param req: the Request object coming from the wsgi layer :retval a mapping of the following form:: @@ -377,7 +388,7 @@ class Controller(object): filters = params.pop('filters') service_disks = self._list_service_disks(req.context, filters, params) return dict(service_disks=service_disks) - + @utils.mutating def cinder_volume_add(self, req, body): """Registers a new cinder_volume with the registry. 
@@ -385,11 +396,13 @@ class Controller(object): :param req: wsgi Request object :param body: Dictionary of information about the cinder_volume - :retval Returns the newly-created cinder_volume information as a mapping, - which will include the newly-created cinder_volume's internal id + :retval Returns the newly-created cinder_volume + information as a mapping, + which will include the newly-created + cinder_volume's internal id in the 'id' field """ - + cinder_volume_data = body["cinder_volume"] id = cinder_volume_data.get('id') @@ -398,16 +411,18 @@ class Controller(object): # add id and role # if role # self.db_api.get_role(req.context,role) - + if id and not utils.is_uuid_like(id): - msg = _LI("Rejecting cinder_volume creation request for invalid cinder_volume " + msg = _LI("Rejecting cinder_volume creation request for " + "invalid cinder_volume " "id '%(bad_id)s'") % {'bad_id': id} LOG.info(msg) msg = _("Invalid cinder_volume id format") return exc.HTTPBadRequest(explanation=msg) try: - cinder_volume_data = self.db_api.cinder_volume_add(req.context, cinder_volume_data) + cinder_volume_data = self.db_api.cinder_volume_add( + req.context, cinder_volume_data) msg = (_LI("Successfully created cinder_volume %s") % cinder_volume_data["id"]) LOG.info(msg) @@ -426,7 +441,7 @@ class Controller(object): except Exception: LOG.exception(_LE("Unable to create cinder_volume %s"), id) raise - + @utils.mutating def cinder_volume_delete(self, req, id): """Deletes an existing cinder_volume with the registry. @@ -438,12 +453,17 @@ class Controller(object): success, the body contains the deleted image information as a mapping. """ try: - deleted_cinder_volume = self.db_api.cinder_volume_destroy(req.context, id) - msg = _LI("Successfully deleted cinder_volume %(cinder_volume_id)s") % {'cinder_volume_id': id} + deleted_cinder_volume = self.db_api.cinder_volume_destroy( + req.context, id) + msg = _LI("Successfully deleted cinder_volume %(" + "cinder_volume_id)s") % { + 'cinder_volume_id': id} LOG.info(msg) return dict(cinder_volume=deleted_cinder_volume) except exception.ForbiddenPublicImage: - msg = _LI("Delete denied for public cinder_volume %(cinder_volume_id)s") % {'cinder_volume_id': id} + msg = _LI("Delete denied for public cinder_volume %(" + "cinder_volume_id)s") % { + 'cinder_volume_id': id} LOG.info(msg) raise exc.HTTPForbidden() except exception.Forbidden: @@ -454,13 +474,14 @@ class Controller(object): LOG.info(msg) return exc.HTTPNotFound() except exception.NotFound: - msg = _LI("cinder_volume %(cinder_volume_id)s not found") % {'cinder_volume_id': id} + msg = _LI("cinder_volume %(cinder_volume_id)s not found") % { + 'cinder_volume_id': id} LOG.info(msg) return exc.HTTPNotFound() except Exception: LOG.exception(_LE("Unable to delete cinder_volume %s") % id) raise - + @utils.mutating def cinder_volume_update(self, req, id, body): """Updates an existing cinder_volume with the registry. 
@@ -473,9 +494,12 @@ class Controller(object): """ cinder_volume_data = body['cinder_volume'] try: - updated_cinder_volume = self.db_api.cinder_volume_update(req.context, id, cinder_volume_data) + updated_cinder_volume = self.db_api.cinder_volume_update( + req.context, id, cinder_volume_data) - msg = _LI("Updating metadata for cinder_volume %(cinder_volume_id)s") % {'cinder_volume_id': id} + msg = _LI("Updating metadata for cinder_volume %(" + "cinder_volume_id)s") % { + 'cinder_volume_id': id} LOG.info(msg) if 'cinder_volume' not in updated_cinder_volume: cinder_volume_data = dict(cinder_volume=updated_cinder_volume) @@ -486,13 +510,16 @@ class Controller(object): LOG.error(msg) return exc.HTTPBadRequest(msg) except exception.NotFound: - msg = _LI("cinder_volume %(cinder_volume_id)s not found") % {'cinder_volume_id': id} + msg = _LI("cinder_volume %(cinder_volume_id)s not found") % { + 'cinder_volume_id': id} LOG.info(msg) raise exc.HTTPNotFound(body='cinder_volume not found', request=req, content_type='text/plain') except exception.ForbiddenPublicImage: - msg = _LI("Update denied for public cinder_volume %(cinder_volume_id)s") % {'cinder_volume_id': id} + msg = _LI("Update denied for public cinder_volume %(" + "cinder_volume_id)s") % { + 'cinder_volume_id': id} LOG.info(msg) raise exc.HTTPForbidden() except exception.Forbidden: @@ -505,12 +532,13 @@ class Controller(object): except Exception: LOG.exception(_LE("Unable to update cinder_volume %s") % id) raise - + @utils.mutating def cinder_volume_detail(self, req, id): """Return data about the given cinder_volume id.""" try: - cinder_volume_data = self.db_api.cinder_volume_detail(req.context, id) + cinder_volume_data = self.db_api.cinder_volume_detail( + req.context, id) msg = "Successfully retrieved cinder_volume %(id)s" % {'id': id} LOG.debug(msg) except exception.NotFound: @@ -530,12 +558,12 @@ class Controller(object): if 'cinder_volume' not in cinder_volume_data: cinder_volume_data = dict(cinder_volume=cinder_volume_data) return cinder_volume_data - + def _list_cinder_volumes(self, context, filters, params): """Get cinder_volumes, wrapping in exception if necessary.""" try: return self.db_api.cinder_volume_list(context, filters=filters, - **params) + **params) except exception.NotFound: LOG.warn(_LW("Invalid marker. 
cinder_volume %(id)s could not be " "found.") % {'id': params.get('marker')}) @@ -549,9 +577,10 @@ class Controller(object): except Exception: LOG.exception(_LE("Unable to get cinder_volumes")) raise - + def cinder_volume_list(self, req): - """Return a filtered list of public, non-deleted cinder_volumes in detail + """Return a filtered list of public, non-deleted + cinder_volumes in detail :param req: the Request object coming from the wsgi layer :retval a mapping of the following form:: @@ -563,11 +592,12 @@ class Controller(object): """ params = self._get_query_params(req) filters = params.pop('filters') - cinder_volumes = self._list_cinder_volumes(req.context, filters, params) + cinder_volumes = self._list_cinder_volumes( + req.context, filters, params) return dict(cinder_volumes=cinder_volumes) - - + + def create_resource(): """Images resource factory method.""" deserializer = wsgi.JSONRequestDeserializer() diff --git a/code/daisy/daisy/registry/api/v1/hosts.py b/code/daisy/daisy/registry/api/v1/hosts.py index 50eef9c3..9056a569 100755 --- a/code/daisy/daisy/registry/api/v1/hosts.py +++ b/code/daisy/daisy/registry/api/v1/hosts.py @@ -17,6 +17,7 @@ Reference implementation registry server WSGI controller """ +import sys from oslo_config import cfg from oslo_log import log as logging from oslo_utils import strutils @@ -28,7 +29,13 @@ from daisy.common import utils from daisy.common import wsgi import daisy.db from daisy import i18n -from ironicclient import client as ironic_client + +from daisyclient import client as daisy_client +from daisy.registry.api.v1 import hwms as registry_hwm +import ConfigParser + +reload(sys) +sys.setdefaultencoding('utf-8') LOG = logging.getLogger(__name__) _ = i18n._ @@ -42,11 +49,13 @@ DISPLAY_FIELDS_IN_INDEX = ['id', 'name', 'size', 'disk_format', 'container_format', 'checksum'] -SUPPORTED_FILTERS = ['name', 'status','id','cluster_id' , 'auto_scale', 'container_format', 'disk_format', - +SUPPORTED_FILTERS = ['name', 'status', 'id', 'cluster_id', + 'auto_scale', 'container_format', 'disk_format', + 'changes-since', 'protected'] -SUPPORTED_SORT_KEYS = ('name', 'status', 'cluster_id', 'container_format', 'disk_format', +SUPPORTED_SORT_KEYS = ('name', 'status', 'cluster_id', + 'container_format', 'disk_format', 'size', 'id', 'created_at', 'updated_at') SUPPORTED_SORT_DIRS = ('asc', 'desc') @@ -58,20 +67,26 @@ class Controller(object): def __init__(self): self.db_api = daisy.db.get_api() - self.ironicclient = self.get_ironicclient() - + self.ironicclient = utils.get_ironicclient() + self.daisyclient = self.get_daisyclient() + @staticmethod - def get_ironicclient(): # pragma: no cover - """Get Ironic client instance.""" - args = {'os_auth_token': 'fake', - 'ironic_url':'http://127.0.0.1:6385/v1'} - return ironic_client.get_client(1, **args) + def get_daisyclient(): + """Get Daisy client instance.""" + config_daisy = ConfigParser.ConfigParser() + config_daisy.read("/etc/daisy/daisy-api.conf") + daisy_port = config_daisy.get("DEFAULT", "bind_port") + args = { + 'version': 1.0, + 'endpoint': 'http://127.0.0.1:' + daisy_port + } + return daisy_client.Client(**args) def _get_hosts(self, context, filters, **params): """Get hosts, wrapping in exception if necessary.""" try: return self.db_api.host_get_all(context, filters=filters, - **params) + **params) except exception.NotFound: LOG.warn(_LW("Invalid marker. 
Host %(id)s could not be " "found.") % {'id': params.get('marker')}) @@ -90,7 +105,7 @@ class Controller(object): """Get clusters, wrapping in exception if necessary.""" try: return self.db_api.cluster_get_all(context, filters=filters, - **params) + **params) except exception.NotFound: LOG.warn(_LW("Invalid marker. Cluster %(id)s could not be " "found.") % {'id': params.get('marker')}) @@ -139,7 +154,7 @@ class Controller(object): for key, value in params.items(): if value is None: del params[key] - + return params def _get_filters(self, req): @@ -255,11 +270,11 @@ class Controller(object): which will include the newly-created host's internal id in the 'id' field """ - + host_data = body["host"] host_id = host_data.get('id') - + if host_id and not utils.is_uuid_like(host_id): msg = _LI("Rejecting host creation request for invalid host " "id '%(bad_id)s'") % {'bad_id': host_id} @@ -271,8 +286,21 @@ class Controller(object): if host_id is None: host_data = self.db_api.host_add(req.context, host_data) else: - host_data = self.db_api.host_update(req.context, host_id, host_data) - #host_data = dict(host=make_image_dict(host_data)) + orig_config_set_id = None + if 'config_set_id' in host_data: + orig_host_data = self.db_api.host_get(req.context, host_id) + orig_config_set_id = orig_host_data.get('config_set_id') + + host_data = self.db_api.host_update( + req.context, host_id, host_data) + + if orig_config_set_id: + try: + self.db_api.config_set_destroy(req.context, + orig_config_set_id) + except exception.Forbidden as e: + LOG.info(e) + # host_data = dict(host=make_image_dict(host_data)) msg = (_LI("Successfully created node %s") % host_data["id"]) LOG.info(msg) @@ -280,7 +308,7 @@ class Controller(object): host_data = dict(host=host_data) return host_data except exception.Duplicate: - msg = _("node with identifier %s already exists!") % image_id + msg = _("node with identifier %s already exists!") % host_id LOG.warn(msg) return exc.HTTPConflict(msg) except exception.Invalid as e: @@ -288,6 +316,10 @@ class Controller(object): "Got error: %s") % utils.exception_to_str(e)) LOG.error(msg) return exc.HTTPBadRequest(msg) + except exception.Forbidden as e: + msg = (_("%s") % utils.exception_to_str(e)) + LOG.error(msg) + raise exc.HTTPForbidden(msg) except Exception: LOG.exception(_LE("Unable to create node %s"), host_id) raise @@ -303,16 +335,31 @@ class Controller(object): success, the body contains the deleted image information as a mapping. 
""" try: + host_interface = self.db_api.get_host_interface(req.context, id) deleted_host = self.db_api.host_destroy(req.context, id) msg = _LI("Successfully deleted host %(id)s") % {'id': id} LOG.info(msg) members = self.db_api.cluster_host_member_find(req.context, - host_id=id) + host_id=id) if members: for member in members: - self.db_api.cluster_host_member_delete(req.context, member['id']) - + self.db_api.cluster_host_member_delete( + req.context, member['id']) + self.db_api.role_host_member_delete(req.context, host_id=id) + orig_config_set_id = deleted_host.config_set_id + if orig_config_set_id: + try: + self.db_api.config_set_destroy(req.context, + orig_config_set_id) + except exception.Forbidden as e: + LOG.info(e) + + # delete ironic host by mac + if host_interface: + min_mac = utils.get_host_min_mac(host_interface) + self.ironicclient.physical_node.get(min_mac) + self.ironicclient.physical_node.delete(min_mac) return dict(host=deleted_host) except exception.ForbiddenPublicImage: msg = _LI("Delete denied for public host %(id)s") % {'id': id} @@ -340,11 +387,12 @@ class Controller(object): try: host_data = self.db_api.host_get(req.context, id) if utils.is_uuid_like(host_data.os_version_id): - version = self.db_api.get_os_version(req.context, host_data.os_version_id) - if version: - os_version_dict['name'] = version.name - os_version_dict['id'] = version.id - os_version_dict['desc'] = version.description + version = self.db_api.get_os_version( + req.context, host_data.os_version_id) + if version: + os_version_dict['name'] = version.name + os_version_dict['id'] = version.id + os_version_dict['desc'] = version.description msg = "Successfully retrieved host %(id)s" % {'id': id} LOG.debug(msg) except exception.NotFound: @@ -361,20 +409,36 @@ class Controller(object): except Exception: LOG.exception(_LE("Unable to show host %s") % id) raise + param = dict() + param['hwm_ip'] = host_data.hwm_ip + param['hwm_id'] = host_data.hwm_id + controller = registry_hwm.Controller() + hwms = controller.hwm_list(req) + hwms_ip = [hwm['hwm_ip'] for hwm in hwms] + if param['hwm_ip'] in hwms_ip: + result = self.daisyclient.node.location(**param) + location = str(result.rack) + '/' + str(result.position) + else: + location = "" host_interface = self.db_api.get_host_interface(req.context, id) - - role_name=[] + + role_name = [] if host_data.status == "with-role": - host_roles=self.db_api.role_host_member_get(req.context,None,id) + host_roles = self.db_api.role_host_member_get( + req.context, None, id) for host_role in host_roles: - role_info=self.db_api.role_get(req.context, host_role.role_id) + role_info = self.db_api.role_get( + req.context, host_role.role_id) role_name.append(role_info['name']) - host_cluster=self.db_api.cluster_host_member_find(req.context, None,id) + host_cluster = self.db_api.cluster_host_member_find( + req.context, None, id) if host_cluster: - cluster_info = self.db_api.cluster_get(req.context, host_cluster[0]['cluster_id']) + cluster_info = self.db_api.cluster_get( + req.context, host_cluster[0]['cluster_id']) cluster_name = cluster_info['name'] else: cluster_name = None + if 'host' not in host_data: host_data = dict(host=host_data) if host_interface: @@ -382,48 +446,48 @@ class Controller(object): if os_version_dict: host_data['host']['os_version'] = os_version_dict if role_name: - host_data['host']['role']=role_name + host_data['host']['role'] = role_name if cluster_name: - host_data['host']['cluster']=cluster_name + host_data['host']['cluster'] = cluster_name + + 
host_hardware_config = utils.get_host_hw_info(host_interface) + if host_hardware_config: + host_data['host']['system'] = host_hardware_config['system'] + host_data['host']['memory'] = host_hardware_config['memory'] + host_data['host']['cpu'] = host_hardware_config['cpu'] + host_data['host']['disks'] = host_hardware_config['disks'] + + for interface in host_interface: + for ironic_interface in host_hardware_config[ + 'interfaces'].values(): + if interface['mac'] == ironic_interface['mac'] and \ + interface['pci'] == ironic_interface['pci']: + interface['state'] = ironic_interface['state'] + interface['max_speed'] = ironic_interface['max_speed'] + interface['current_speed'] = ironic_interface[ + 'current_speed'] + # interface['pci'] = ironic_interface['pci'] + host_data['host']['interfaces'] = host_interface + + host_data['host']['position'] = location - host_deploy_network = [hi for hi in host_interface if hi['is_deployment']] - if host_deploy_network: - try: - host_obj = self.ironicclient.physical_node.get(host_deploy_network[0]['mac']) - host_hardware_config = dict([(f, getattr(host_obj, f, '')) for f in ['system', 'memory', 'cpu', 'disks', 'interfaces']]) - host_data['host']['system'] = host_hardware_config['system'] - host_data['host']['memory'] = host_hardware_config['memory'] - host_data['host']['cpu'] = host_hardware_config['cpu'] - host_data['host']['disks'] = host_hardware_config['disks'] - if host_interface: - for interface in host_interface: - for ironic_interface in host_hardware_config['interfaces'].values(): - if interface['mac'] == ironic_interface['mac'] and \ - interface['pci'] == ironic_interface['pci']: - interface['state'] = ironic_interface['state'] - interface['max_speed'] = ironic_interface['max_speed'] - interface['current_speed'] = ironic_interface['current_speed'] - # interface['pci'] = ironic_interface['pci'] - host_data['host']['interfaces'] = host_interface - except Exception: - LOG.exception(_LE("Unable to find ironic data %s") % Exception) - return host_data - + @utils.mutating def get_host_interface(self, req, body): orig_interfaces = list(eval(body['interfaces'])) for orig_interface in orig_interfaces: - host_interface = self.db_api.get_host_interface_mac(req.context,orig_interface['mac']) + host_interface = self.db_api.get_host_interface_mac( + req.context, orig_interface['mac']) return host_interface @utils.mutating - def get_all_host_interfaces(self, req, body): + def get_all_host_interfaces(self, req, body, **params): """Return all_host_interfaces about the given filter.""" filters = body['filters'] - try: - host_interfaces = self.db_api.host_interfaces_get_all(req.context, filters) + host_interfaces = self.db_api.host_interfaces_get_all( + req.context, filters) return host_interfaces except exception.NotFound: LOG.warn(_LW("Invalid marker. template %(id)s could not be " @@ -443,23 +507,28 @@ class Controller(object): @utils.mutating def get_assigned_network(self, req, interface_id, network_id): try: - host_assigned_network = self.db_api.get_assigned_network(req.context, - interface_id, network_id) + host_assigned_network = self.db_api.get_assigned_network( + req.context, + interface_id, network_id) except exception.NotFound: - LOG.warn(_LW("Invalid marker. Assigned_network with interface %(interface_id)s and network %(network_id)s" - " could not be found.") % {'interface_id': interface_id,'network_id': network_id}) + LOG.warn(_LW("Invalid marker. 
Assigned_network with interface %(" + "interface_id)s and network %(network_id)s" + " could not be found.") % { + 'interface_id': interface_id, 'network_id': network_id}) msg = _("Invalid marker. Assigned_network could not be found.") raise exc.HTTPBadRequest(explanation=msg) except exception.Forbidden: - LOG.warn(_LW("Access denied for assigned_network with interface %(interface_id)s " - "and network %(network_id)s") % {'interface_id': interface_id,'network_id': network_id}) + LOG.warn(_LW("Access denied for assigned_network with interface %(" + "interface_id)s " + "and network %(network_id)s") % { + 'interface_id': interface_id, 'network_id': network_id}) msg = _("Invalid marker. Assigned_network denied to get.") raise exc.HTTPBadRequest(explanation=msg) except Exception: LOG.exception(_LE("Unable to get assigned_network")) raise return host_assigned_network - + @utils.mutating def add_discover_host(self, req, body): """Registers a new host with the registry. @@ -474,7 +543,7 @@ class Controller(object): discover_host_data = body["discover_host"] discover_host_id = discover_host_data.get('id') - + if discover_host_id and not utils.is_uuid_like(discover_host_id): msg = _LI("Rejecting host creation request for invalid host " "id '%(bad_id)s'") % {'bad_id': discover_host_id} @@ -484,18 +553,21 @@ class Controller(object): try: if discover_host_id is None: - discover_host_data = self.db_api.discover_host_add(req.context, discover_host_data) + discover_host_data = self.db_api.discover_host_add( + req.context, discover_host_data) else: - discover_host_data = self.db_api.discover_host_update(req.context, discover_host_id, discover_host_data) - #host_data = dict(host=make_image_dict(host_data)) + discover_host_data = self.db_api.discover_host_update( + req.context, discover_host_id, discover_host_data) + # host_data = dict(host=make_image_dict(host_data)) msg = (_LI("Successfully created node %s") % discover_host_data["id"]) LOG.info(msg) if 'discover_host' not in discover_host_data: - discover_host_data = dict(discover_host = discover_host_data) + discover_host_data = dict(discover_host=discover_host_data) return discover_host_data except exception.Duplicate: - msg = _("node with identifier %s already exists!") % discover_host_id + msg = _("node with identifier %s already exists!") % \ + discover_host_id LOG.warn(msg) return exc.HTTPConflict(msg) except exception.Invalid as e: @@ -506,7 +578,7 @@ class Controller(object): except Exception: LOG.exception(_LE("Unable to create node %s"), discover_host_id) raise - + @utils.mutating def delete_discover_host(self, req, id): """Deletes an existing discover host with the registry. @@ -533,7 +605,7 @@ class Controller(object): except Exception: LOG.exception(_LE("Unable to delete host %s") % id) raise - + def detail_discover_host(self, req): """Return a filtered list of public, non-deleted hosts in detail @@ -548,7 +620,7 @@ class Controller(object): params = self._get_query_params(req) try: nodes = self.db_api.discover_host_get_all(req.context, - **params) + **params) except exception.NotFound: LOG.warn(_LW("Invalid marker. 
Host %(id)s could not be " "found.") % {'id': params.get('marker')}) @@ -563,8 +635,8 @@ class Controller(object): LOG.exception(_LE("Unable to get hosts")) raise - return dict(nodes=nodes) - + return dict(nodes=nodes) + @utils.mutating def update_discover_host(self, req, id, body): ''' @@ -576,9 +648,10 @@ class Controller(object): LOG.info(msg) msg = _("Invalid host id format") return exc.HTTPBadRequest(explanation=msg) - + try: - updated_host = self.db_api.discover_host_update(req.context, id, discover_host_data) + updated_host = self.db_api.discover_host_update( + req.context, id, discover_host_data) msg = _LI("Updating metadata for host %(id)s") % {'id': id} LOG.info(msg) if 'discover_host' not in updated_host: @@ -609,7 +682,7 @@ class Controller(object): except Exception: LOG.exception(_LE("Unable to update host %s") % id) raise - + def get_discover_host(self, req, discover_host_id): ''' ''' @@ -619,10 +692,12 @@ class Controller(object): LOG.info(msg) msg = _("Invalid host id format") return exc.HTTPBadRequest(explanation=msg) - + try: - host_detail_info = self.db_api.get_discover_host_detail(req.context, discover_host_id) - msg = _LI("Updating metadata for host %(id)s") % {'id': discover_host_id} + host_detail_info = self.db_api.get_discover_host_detail( + req.context, discover_host_id) + msg = _LI("Updating metadata for host %(id)s") % { + 'id': discover_host_id} LOG.info(msg) if 'discover_host' not in host_detail_info: host_data = dict(discover_host=host_detail_info) @@ -640,7 +715,8 @@ class Controller(object): request=req, content_type='text/plain') except exception.ForbiddenPublicImage: - msg = _LI("Update denied for public host %(id)s") % {'id': discover_host_id} + msg = _LI("Update denied for public host %(id)s") % { + 'id': discover_host_id} LOG.info(msg) raise exc.HTTPForbidden() except exception.Forbidden: @@ -665,11 +741,11 @@ class Controller(object): which will include the newly-created host's internal id in the 'id' field """ - + cluster_data = body["cluster"] cluster_id = cluster_data.get('id') - + if cluster_id and not utils.is_uuid_like(cluster_id): msg = _LI("Rejecting host creation request for invalid cluster " "id '%(bad_id)s'") % {'bad_id': cluster_id} @@ -686,7 +762,7 @@ class Controller(object): cluster_data = dict(cluster=cluster_data) return cluster_data except exception.Duplicate: - msg = _("cluster with identifier %s already exists!") % image_id + msg = _("cluster with identifier %s already exists!") % cluster_id LOG.warn(msg) return exc.HTTPConflict(msg) except exception.Invalid as e: @@ -712,13 +788,14 @@ class Controller(object): deleted_cluster = self.db_api.cluster_destroy(req.context, id) msg = _LI("Successfully deleted cluster %(id)s") % {'id': id} LOG.info(msg) - # Look up an existing membership + # Look up an existing membership members = self.db_api.cluster_host_member_find(req.context, - cluster_id=id) + cluster_id=id) if members: for member in members: - self.db_api.cluster_host_member_delete(req.context, member['id']) - + self.db_api.cluster_host_member_delete( + req.context, member['id']) + return dict(cluster=deleted_cluster) except exception.ForbiddenPublicImage: msg = _LI("Delete denied for public cluster %(id)s") % {'id': id} @@ -746,13 +823,18 @@ class Controller(object): cluster_data = self.db_api.cluster_get(req.context, id) msg = "Successfully retrieved cluster %(id)s" % {'id': id} LOG.debug(msg) - networking_parameters = {} - networking_parameters['gre_id_range'] = [cluster_data['gre_id_start'],cluster_data['gre_id_end']] - 
networking_parameters['vlan_range'] = [cluster_data['vlan_start'],cluster_data['vlan_end']] - networking_parameters['vni_range'] = [cluster_data['vni_start'],cluster_data['vni_end']] - networking_parameters['net_l23_provider'] = cluster_data['net_l23_provider'] + networking_parameters = {} + networking_parameters['gre_id_range'] = [ + cluster_data['gre_id_start'], cluster_data['gre_id_end']] + networking_parameters['vlan_range'] = [ + cluster_data['vlan_start'], cluster_data['vlan_end']] + networking_parameters['vni_range'] = [ + cluster_data['vni_start'], cluster_data['vni_end']] + networking_parameters['net_l23_provider'] = cluster_data[ + 'net_l23_provider'] networking_parameters['base_mac'] = cluster_data['base_mac'] - networking_parameters['segmentation_type'] = cluster_data['segmentation_type'] + networking_parameters['segmentation_type'] = cluster_data[ + 'segmentation_type'] networking_parameters['public_vip'] = cluster_data['public_vip'] cluster_data['networking_parameters'] = networking_parameters except exception.NotFound: @@ -772,22 +854,24 @@ class Controller(object): cluster_host_member_list = [] cluster_network_member_list = [] cluster_id = id - cluster_host_member = self.db_api.cluster_host_member_find(req.context,cluster_id) + cluster_host_member = self.db_api.cluster_host_member_find( + req.context, cluster_id) if len(cluster_host_member) > 0: for cluster_host in list(cluster_host_member): cluster_host_member_list.append(cluster_host['host_id']) cluster_data['nodes'] = cluster_host_member_list - - cluster_network_member = self.db_api.network_get_all(req.context,cluster_id) + + cluster_network_member = self.db_api.network_get_all( + req.context, cluster_id) if len(cluster_network_member) > 0: for cluster_network in list(cluster_network_member): cluster_network_member_list.append(cluster_network['id']) cluster_data['networks'] = cluster_network_member_list - - logic_networks = self.db_api.get_logic_network(req.context,id) + + logic_networks = self.db_api.get_logic_network(req.context, id) cluster_data['logic_networks'] = logic_networks - routers = self.db_api.router_get(req.context,cluster_id) + routers = self.db_api.router_get(req.context, cluster_id) cluster_data['routers'] = routers return cluster_data @@ -809,13 +893,22 @@ class Controller(object): clusters = self._get_clusters(req.context, **params) for cluster in clusters: cluster_id = cluster['id'] - cluster_host_member = self.db_api.cluster_host_member_find(req.context,cluster_id) + filters = {'deleted': False, 'cluster_id': cluster_id} + roles = self._get_roles(req.context, filters) + roles_status = [role['status'] for role in roles] + if len(set(roles_status)) == 1: + cluster['status'] = roles_status[0] + else: + cluster['status'] = "init" + cluster_host_member = self.db_api.cluster_host_member_find( + req.context, cluster_id) if len(cluster_host_member) > 0: for cluster_host in list(cluster_host_member): cluster_host_member_list.append(cluster_host['host_id']) cluster['nodes'] = cluster_host_member_list - cluster_network_member = self.db_api.network_get_all(req.context,cluster_id) + cluster_network_member = self.db_api.network_get_all( + req.context, cluster_id) if len(cluster_network_member) > 0: for cluster_network in list(cluster_network_member): cluster_network_member_list.append(cluster_network['id']) @@ -834,11 +927,11 @@ class Controller(object): which will include the newly-created host's internal id in the 'id' field """ - + component_data = body["component"] component_id = component_data.get('id') - + 
if component_id and not utils.is_uuid_like(component_id): msg = _LI("Rejecting host creation request for invalid component " "id '%(bad_id)s'") % {'bad_id': component_id} @@ -847,8 +940,9 @@ class Controller(object): return exc.HTTPBadRequest(explanation=msg) try: - component_data = self.db_api.component_add(req.context, component_data) - #host_data = dict(host=make_image_dict(host_data)) + component_data = self.db_api.component_add( + req.context, component_data) + # host_data = dict(host=make_image_dict(host_data)) msg = (_LI("Successfully created component %s") % component_data["id"]) LOG.info(msg) @@ -856,7 +950,8 @@ class Controller(object): component_data = dict(component=component_data) return component_data except exception.Duplicate: - msg = _("component with identifier %s already exists!") % image_id + msg = (_("component with identifier %s already exists!") + % component_id) LOG.warn(msg) return exc.HTTPConflict(msg) except exception.Invalid as e: @@ -906,7 +1001,7 @@ class Controller(object): """Get components, wrapping in exception if necessary.""" try: return self.db_api.component_get_all(context, filters=filters, - **params) + **params) except exception.NotFound: LOG.warn(_LW("Invalid marker. Project %(id)s could not be " "found.") % {'id': params.get('marker')}) @@ -975,7 +1070,8 @@ class Controller(object): """ component_data = body['component'] try: - updated_component = self.db_api.component_update(req.context, id, component_data) + updated_component = self.db_api.component_update( + req.context, id, component_data) msg = _LI("Updating metadata for component %(id)s") % {'id': id} LOG.info(msg) @@ -1014,7 +1110,6 @@ class Controller(object): except Exception: LOG.exception(_LE("Unable to update component %s") % id) raise - @utils.mutating def add_service(self, req, body): @@ -1027,11 +1122,11 @@ class Controller(object): which will include the newly-created host's internal id in the 'id' field """ - + service_data = body["service"] service_id = service_data.get('id') - + if service_id and not utils.is_uuid_like(service_id): msg = _LI("Rejecting host creation request for invalid service " "id '%(bad_id)s'") % {'bad_id': service_id} @@ -1042,7 +1137,7 @@ class Controller(object): try: print service_data service_data = self.db_api.service_add(req.context, service_data) - #host_data = dict(host=make_image_dict(host_data)) + # host_data = dict(host=make_image_dict(host_data)) msg = (_LI("Successfully created service %s") % service_data["id"]) LOG.info(msg) @@ -1050,7 +1145,7 @@ class Controller(object): service_data = dict(service=service_data) return service_data except exception.Duplicate: - msg = _("service with identifier %s already exists!") % image_id + msg = _("service with identifier %s already exists!") % service_id LOG.warn(msg) return exc.HTTPConflict(msg) except exception.Invalid as e: @@ -1100,7 +1195,7 @@ class Controller(object): """Get services, wrapping in exception if necessary.""" try: return self.db_api.service_get_all(context, filters=filters, - **params) + **params) except exception.NotFound: LOG.warn(_LW("Invalid marker. 
Project %(id)s could not be " "found.") % {'id': params.get('marker')}) @@ -1169,7 +1264,8 @@ class Controller(object): """ service_data = body['service'] try: - updated_service = self.db_api.service_update(req.context, id, service_data) + updated_service = self.db_api.service_update( + req.context, id, service_data) msg = _LI("Updating metadata for service %(id)s") % {'id': id} LOG.info(msg) @@ -1209,7 +1305,6 @@ class Controller(object): LOG.exception(_LE("Unable to update service %s") % id) raise - @utils.mutating def add_role(self, req, body): """Registers a new host with the registry. @@ -1221,11 +1316,11 @@ class Controller(object): which will include the newly-created host's internal id in the 'id' field """ - + role_data = body["role"] role_id = role_data.get('id') - + if role_id and not utils.is_uuid_like(role_id): msg = _LI("Rejecting host creation request for invalid role " "id '%(bad_id)s'") % {'bad_id': role_id} @@ -1236,7 +1331,7 @@ class Controller(object): try: print role_data role_data = self.db_api.role_add(req.context, role_data) - #host_data = dict(host=make_image_dict(host_data)) + # host_data = dict(host=make_image_dict(host_data)) msg = (_LI("Successfully created role %s") % role_data["id"]) LOG.info(msg) @@ -1244,7 +1339,7 @@ class Controller(object): role_data = dict(role=role_data) return role_data except exception.Duplicate: - msg = _("role with identifier %s already exists!") % image_id + msg = _("role with identifier %s already exists!") % role_id LOG.warn(msg) return exc.HTTPConflict(msg) except exception.Invalid as e: @@ -1294,7 +1389,7 @@ class Controller(object): """Get roles, wrapping in exception if necessary.""" try: return self.db_api.role_get_all(context, filters=filters, - **params) + **params) except exception.NotFound: LOG.warn(_LW("Invalid marker. 
Project %(id)s could not be " "found.") % {'id': params.get('marker')}) @@ -1330,10 +1425,11 @@ class Controller(object): except Exception: LOG.exception(_LE("Unable to show role %s") % id) raise - role_services = self.db_api.role_services_get(req.context,id) + role_services = self.db_api.role_services_get(req.context, id) service_name = [] for role_service in role_services: - service_info = self.db_api.service_get(req.context, role_service['service_id']) + service_info = self.db_api.service_get( + req.context, role_service['service_id']) service_name.append(service_info['name']) if 'role' not in role_data: role_data = dict(role=role_data) @@ -1447,8 +1543,20 @@ class Controller(object): """ host_data = body['host'] try: + orig_config_set_id = None + if 'config_set_id' in host_data: + orig_host_data = self.db_api.host_get(req.context, id) + orig_config_set_id = orig_host_data.get('config_set_id', None) + updated_host = self.db_api.host_update(req.context, id, host_data) + if orig_config_set_id: + try: + self.db_api.config_set_destroy(req.context, + orig_config_set_id) + except exception.Forbidden as e: + LOG.info(e) + msg = _LI("Updating metadata for host %(id)s") % {'id': id} LOG.info(msg) if 'host' not in updated_host: @@ -1469,8 +1577,10 @@ class Controller(object): msg = _LI("Update denied for public host %(id)s") % {'id': id} LOG.info(msg) raise exc.HTTPForbidden() - except exception.Forbidden: - raise + except exception.Forbidden as e: + msg = (_("%s") % utils.exception_to_str(e)) + LOG.error(msg) + raise exc.HTTPForbidden(msg) except exception.Conflict as e: LOG.info(utils.exception_to_str(e)) raise exc.HTTPConflict(body='Host operation conflicts', @@ -1479,8 +1589,7 @@ class Controller(object): except Exception: LOG.exception(_LE("Unable to update host %s") % id) raise - - + @utils.mutating def update_cluster(self, req, id, body): """Updates an existing cluster with the registry. @@ -1493,7 +1602,8 @@ class Controller(object): """ cluster_data = body['cluster'] try: - updated_cluster = self.db_api.cluster_update(req.context, id, cluster_data) + updated_cluster = self.db_api.cluster_update( + req.context, id, cluster_data) msg = _LI("Updating metadata for cluster %(id)s") % {'id': id} LOG.info(msg) @@ -1531,7 +1641,7 @@ class Controller(object): content_type='text/plain') except Exception: LOG.exception(_LE("Unable to update cluster %s") % id) - raise + raise @utils.mutating def host_roles(self, req, id): @@ -1582,15 +1692,16 @@ class Controller(object): if 'role' not in role_data: role_data = dict(role=role_data) return role_data - + @utils.mutating def update_role_hosts(self, req, id, body): """Return role hosts list in the host_roles.""" role_data = body['role'] try: - updated_role = self.db_api.role_host_update(req.context, id, role_data) + updated_role = self.db_api.role_host_update( + req.context, id, role_data) - msg = _LI("Updating metadata for role_host id %(id)s") % {'id': id} + msg = _LI("Updating metadata for role_host id %(id)s") % {'id': id} return updated_role except exception.Invalid as e: msg = (_("Failed to update role host metadata. " @@ -1624,7 +1735,7 @@ class Controller(object): except Exception: LOG.exception(_LE("Unable to update host_role %s") % id) raise - + @utils.mutating def config_interface(self, req, body): """Registers a new config_interface with the registry. 
@@ -1636,20 +1747,21 @@ class Controller(object): which will include the newly-created host's internal id in the 'id' field """ - config_interface_meta=body - cluster_id = config_interface_meta.get('cluster-id') - role_name=config_interface_meta.get('role-name') + config_interface_meta = body try: - config_interface_meta = self.db_api.config_interface(req.context, config_interface_meta) + config_interface_meta = self.db_api.config_interface( + req.context, config_interface_meta) except exception.Invalid as e: msg = (_("Failed to add role metadata. " "Got error: %s") % utils.exception_to_str(e)) LOG.error(msg) return exc.HTTPBadRequest(msg) if 'config_interface_meta' not in config_interface_meta: - config_interface_meta = dict(config_interface_meta=config_interface_meta) + config_interface_meta = dict( + config_interface_meta=config_interface_meta) return config_interface_meta + def _limit_locations(image): locations = image.pop('locations', []) image['location_data'] = locations @@ -1658,7 +1770,8 @@ def _limit_locations(image): if loc['status'] == 'active': image['location'] = loc['url'] break - + + def make_image_dict(image): """Create a dict representation of an image which we can use to serialize the image. @@ -1666,7 +1779,7 @@ def make_image_dict(image): def _fetch_attrs(d, attrs): return dict([(a, d[a]) for a in attrs - if a in d.keys()]) + if a in d.keys()]) # TODO(sirp): should this be a dict, or a list of dicts? # A plain dict is more convenient, but list of dicts would provide @@ -1680,6 +1793,7 @@ def make_image_dict(image): return image_dict + def create_resource(): """Images resource factory method.""" deserializer = wsgi.JSONRequestDeserializer() diff --git a/code/daisy/daisy/registry/api/v1/hwms.py b/code/daisy/daisy/registry/api/v1/hwms.py new file mode 100755 index 00000000..d39b04a7 --- /dev/null +++ b/code/daisy/daisy/registry/api/v1/hwms.py @@ -0,0 +1,355 @@ +# Copyright 2010-2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Reference implementation registry server WSGI controller +""" + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import strutils +from oslo_utils import timeutils +from webob import exc + +from daisy.common import exception +from daisy.common import utils +from daisy.common import wsgi +import daisy.db +from daisy import i18n + + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW +CONF = cfg.CONF +DISPLAY_FIELDS_IN_INDEX = ['id', 'description'] +SUPPORTED_FILTERS = ['name', 'description'] +SUPPORTED_SORT_KEYS = ('name', 'description', 'id', 'created_at', 'updated_at') +SUPPORTED_SORT_DIRS = ('asc', 'desc') +SUPPORTED_PARAMS = ('limit', 'marker', 'sort_key', 'sort_dir', 'name', + 'description') + + +class Controller(object): + + def __init__(self): + self.db_api = daisy.db.get_api() + + def _get_query_params(self, req): + """Extract necessary query parameters from http request.
+ + :param req: the Request object coming from the wsgi layer + :retval dictionary of filters to apply to list of templates + """ + params = { + 'filters': self._get_filters(req), + 'limit': self._get_limit(req), + 'sort_key': [self._get_sort_key(req)], + 'sort_dir': [self._get_sort_dir(req)], + 'marker': self._get_marker(req), + } + + for key, value in params.items(): + if value is None: + del params[key] + + return params + + def _get_filters(self, req): + """Return a dictionary of query param filters from the request + + :param req: the Request object coming from the wsgi layer + :retval a dict of key/value filters + """ + filters = {} + properties = {} + + for param in req.params: + if param in SUPPORTED_FILTERS: + filters[param] = req.params.get(param) + if param.startswith('property-'): + _param = param[9:] + properties[_param] = req.params.get(param) + + if 'changes-since' in filters: + isotime = filters['changes-since'] + try: + filters['changes-since'] = timeutils.parse_isotime(isotime) + except ValueError: + raise exc.HTTPBadRequest(_("Unrecognized changes-since value")) + + if 'protected' in filters: + value = self._get_bool(filters['protected']) + if value is None: + raise exc.HTTPBadRequest(_("protected must be True, or " + "False")) + + filters['protected'] = value + + # only allow admins to filter on 'deleted' + if req.context.is_admin: + deleted_filter = self._parse_deleted_filter(req) + if deleted_filter is not None: + filters['deleted'] = deleted_filter + elif 'changes-since' not in filters: + filters['deleted'] = False + elif 'changes-since' not in filters: + filters['deleted'] = False + + if properties: + filters['properties'] = properties + + return filters + + def _get_limit(self, req): + """Parse a limit query param into something usable.""" + try: + limit = int(req.params.get('limit', CONF.limit_param_default)) + except ValueError: + raise exc.HTTPBadRequest(_("limit param must be an integer")) + + if limit < 0: + raise exc.HTTPBadRequest(_("limit param must be positive")) + + return min(CONF.api_limit_max, limit) + + def _get_marker(self, req): + """Parse a marker query param into something usable.""" + marker = req.params.get('marker', None) + + if marker and not utils.is_uuid_like(marker): + msg = _('Invalid marker format') + raise exc.HTTPBadRequest(explanation=msg) + + return marker + + def _get_sort_key(self, req): + """Parse a sort key query param from the request object.""" + sort_key = req.params.get('sort_key', 'created_at') + if sort_key is not None and sort_key not in SUPPORTED_SORT_KEYS: + _keys = ', '.join(SUPPORTED_SORT_KEYS) + msg = _("Unsupported sort_key. Acceptable values: %s") % (_keys,) + raise exc.HTTPBadRequest(explanation=msg) + return sort_key + + def _get_sort_dir(self, req): + """Parse a sort direction query param from the request object.""" + sort_dir = req.params.get('sort_dir', 'desc') + if sort_dir is not None and sort_dir not in SUPPORTED_SORT_DIRS: + _keys = ', '.join(SUPPORTED_SORT_DIRS) + msg = _("Unsupported sort_dir. 
Acceptable values: %s") % (_keys,) + raise exc.HTTPBadRequest(explanation=msg) + return sort_dir + + def _get_bool(self, value): + value = value.lower() + if value == 'true' or value == '1': + return True + elif value == 'false' or value == '0': + return False + + return None + + def _parse_deleted_filter(self, req): + """Parse deleted into something usable.""" + deleted = req.params.get('deleted') + if deleted is None: + return None + return strutils.bool_from_string(deleted) + + @utils.mutating + def add_hwm(self, req, body): + """Registers a new hwm with the registry. + + :param req: wsgi Request object + :param body: Dictionary of information about the templatae + + :retval Returns the newly-created template information as a mapping, + which will include the newly-created template's internal id + in the 'id' field + """ + hwm_data = body["hwm"] + id = hwm_data.get('id') + + if id and not utils.is_uuid_like(id): + msg = _LI("Rejecting hwm creation request for invalid hwm " + "id '%(bad_id)s'") % {'bad_id': id} + LOG.info(msg) + msg = _("Invalid hwm id format") + return exc.HTTPBadRequest(explanation=msg) + + try: + hwm_data = self.db_api.hwm_add(req.context, hwm_data) + msg = (_LI("Successfully created hwm %s") % + hwm_data["id"]) + LOG.info(msg) + if 'hwm' not in hwm_data: + hwm_data = dict(hwm=hwm_data) + return hwm_data + except exception.Duplicate: + msg = _("hwm with identifier %s already exists!") % id + LOG.warn(msg) + return exc.HTTPConflict(msg) + except exception.Invalid as e: + msg = (_("Failed to add hwm metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except Exception: + LOG.exception(_LE("Unable to create hwm %s"), id) + raise + + @utils.mutating + def update_hwm(self, req, id, body): + """Registers a new hwm with the registry. + + :param req: wsgi Request object + :param body: Dictionary of information about the template + + :retval Returns the newly-created template information as a mapping, + which will include the newly-created template's internal id + in the 'id' field + """ + hwm_data = body["hwm"] + if id and not utils.is_uuid_like(id): + msg = _LI("Rejecting cluster hwm creation request for invalid " + "hwm id '%(bad_id)s'") % {'bad_id': id} + LOG.info(msg) + msg = _("Invalid hwm id format") + return exc.HTTPBadRequest(explanation=msg) + + try: + hwm_data = self.db_api.hwm_update(req.context, id, hwm_data) + msg = (_LI("Successfully updated hwm %s") % + hwm_data["id"]) + LOG.info(msg) + if 'hwm' not in hwm_data: + hwm_data = dict(hwm=hwm_data) + return hwm_data + except exception.Duplicate: + msg = _("hwm with identifier %s already exists!") % id + LOG.warn(msg) + return exc.HTTPConflict(msg) + except exception.Invalid as e: + msg = (_("Failed to update hwm metadata.Got error: %s") % + utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except Exception: + LOG.exception(_LE("Unable to update hwm %s"), id) + raise + + @utils.mutating + def delete_hwm(self, req, id): + """Registers a new hwm with the registry. 
+ + :param req: wsgi Request object + :param id: The id of the hwm to delete + + :retval Returns the deleted hwm information as a mapping + """ + if id and not utils.is_uuid_like(id): + msg = _LI("Rejecting hwm delete request for invalid hwm " + "id '%(bad_id)s'") % {'bad_id': id} + LOG.info(msg) + msg = _("Invalid hwm id format") + return exc.HTTPBadRequest(explanation=msg) + + try: + hwm_data = self.db_api.hwm_destroy(req.context, id) + msg = (_LI("Successfully deleted hwm %s") % id) + LOG.info(msg) + if 'hwm' not in hwm_data: + hwm_data = dict(hwm=hwm_data) + return hwm_data + except exception.Invalid as e: + msg = (_("Failed to delete hwm metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except Exception: + LOG.exception(_LE("Unable to delete hwm %s"), id) + raise + + @utils.mutating + def hwm_list(self, req): + params = self._get_query_params(req) + try: + filters = params.pop('filters') + marker = params.get('marker') + limit = params.get('limit') + sort_key = params.get('sort_key') + sort_dir = params.get('sort_dir') + return self.db_api.hwm_get_all( + req.context, filters=filters, marker=marker, limit=limit, + sort_key=sort_key, sort_dir=sort_dir) + except exception.NotFound: + LOG.warn(_LW("Invalid marker. hwm %(id)s could not be " + "found.") % {'id': params.get('marker')}) + msg = _("Invalid marker. hwm could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except exception.Forbidden: + LOG.warn(_LW("Access denied to hwm %(id)s but returning " + "'not found'") % {'id': params.get('marker')}) + msg = _("Invalid marker. hwm could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except Exception: + LOG.exception(_LE("Unable to list hwm")) + raise + + @utils.mutating + def detail(self, req, id): + """Return data about the given hwm id. + + :param req: wsgi Request object + :param id: The id of the hwm to look up + + :retval Returns the hwm information as a mapping + """ + if id and not utils.is_uuid_like(id): + msg = _LI("Rejecting hwm detail request for invalid hwm " + "id '%(bad_id)s'") % {'bad_id': id} + LOG.info(msg) + msg = _("Invalid hwm id format") + return exc.HTTPBadRequest(explanation=msg) + + try: + hwm_data = self.db_api.hwm_get(req.context, id) + msg = (_LI("Successfully retrieved hwm information: %s") % id) + LOG.info(msg) + if 'hwm' not in hwm_data: + hwm_data = dict(hwm=hwm_data) + return hwm_data + except exception.Invalid as e: + msg = (_("Failed to get hwm metadata. 
Got error: %s") % + utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except Exception: + LOG.exception(_LE("Unable to get hwm %s"), id) + raise + + +def create_resource(): + """Hwms resource factory method.""" + deserializer = wsgi.JSONRequestDeserializer() + serializer = wsgi.JSONResponseSerializer() + return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/code/daisy/daisy/registry/api/v1/images.py b/code/daisy/daisy/registry/api/v1/images.py index d15a8f93..84d943a7 100755 --- a/code/daisy/daisy/registry/api/v1/images.py +++ b/code/daisy/daisy/registry/api/v1/images.py @@ -533,7 +533,7 @@ def make_image_dict(image): def _fetch_attrs(d, attrs): return dict([(a, d[a]) for a in attrs - if a in d.keys()]) + if a in d.keys()]) # TODO(sirp): should this be a dict, or a list of dicts? # A plain dict is more convenient, but list of dicts would provide diff --git a/code/daisy/daisy/registry/api/v1/members.py b/code/daisy/daisy/registry/api/v1/members.py index fb62529b..ffdc1b3a 100755 --- a/code/daisy/daisy/registry/api/v1/members.py +++ b/code/daisy/daisy/registry/api/v1/members.py @@ -30,6 +30,7 @@ _LW = i18n._LW class Controller(object): + def __init__(self): self.db_api = daisy.db.get_api() @@ -47,11 +48,12 @@ class Controller(object): # If it's private and doesn't belong to them, don't let on # that it exists msg = _LW("Access denied to cluster %(id)s but returning" - " 'not found'") % {'id': cluster_id} + " 'not found'") % {'id': cluster_id} LOG.warn(msg) raise webob.exc.HTTPNotFound() - members = self.db_api.cluster_host_member_find(req.context, cluster_id=cluster_id, host_id=host_id) + members = self.db_api.cluster_host_member_find( + req.context, cluster_id=cluster_id, host_id=host_id) msg = "Returning member list for cluster %(id)s" % {'id': cluster_id} LOG.debug(msg) return dict(members=make_member_list(members, @@ -302,7 +304,7 @@ class Controller(object): # Make sure the cluster exists try: - cluster = self.db_api.cluster_get(req.context, cluster_id) + self.db_api.cluster_get(req.context, cluster_id) except exception.NotFound: msg = _("Project %(id)s not found") % {'id': cluster_id} LOG.warn(msg) @@ -314,10 +316,10 @@ class Controller(object): " 'not found'") % {'id': cluster_id} LOG.warn(msg) raise webob.exc.HTTPNotFound() - + # Make sure the host exists try: - host = self.db_api.host_get(req.context, host_id) + self.db_api.host_get(req.context, host_id) except exception.NotFound: msg = _("Host %(id)s not found") % {'id': host_id} LOG.warn(msg) @@ -329,20 +331,22 @@ class Controller(object): " 'not found'") % {'id': host_id} LOG.warn(msg) raise webob.exc.HTTPNotFound() - + # Look up an existing membership... 
members = self.db_api.cluster_host_member_find(req.context, - cluster_id=cluster_id, - host_id=host_id) + cluster_id=cluster_id, + host_id=host_id) if members: - msg = (_LI("Project %(cluster_id)s has host %(id)s membership already!") % - {'cluster_id': image_id,'host_id': host_id}) + msg = (_LI("Project %(cluster_id)s has host %(host_id)s " + "membership already!") % + {'cluster_id': cluster_id, 'host_id': host_id}) else: values = dict(cluster_id=cluster_id, host_id=host_id) self.db_api.cluster_host_member_create(req.context, values) - msg = (_LI("Successfully added a host %(host_id)s to cluster %(cluster_id)s") % - {'host_id':host_id,'cluster_id': cluster_id}) + msg = (_LI("Successfully added a host %(host_id)s to cluster %(" + "cluster_id)s") % + {'host_id': host_id, 'cluster_id': cluster_id}) LOG.info(msg) return webob.exc.HTTPNoContent() @@ -353,7 +357,7 @@ class Controller(object): """ # Make sure the cluster exists try: - cluster = self.db_api.cluster_get(req.context, cluster_id) + self.db_api.cluster_get(req.context, cluster_id) except exception.NotFound: msg = _("Project %(id)s not found") % {'id': cluster_id} LOG.warn(msg) @@ -365,10 +369,10 @@ class Controller(object): " 'not found'") % {'id': cluster_id} LOG.warn(msg) raise webob.exc.HTTPNotFound() - + # Make sure the host exists try: - host = self.db_api.host_get(req.context, host_id) + self.db_api.host_get(req.context, host_id) except exception.NotFound: msg = _("Host %(id)s not found") % {'id': host_id} LOG.warn(msg) @@ -383,10 +387,11 @@ class Controller(object): # Look up an existing membership members = self.db_api.cluster_host_member_find(req.context, - cluster_id=cluster_id, - host_id=host_id) + cluster_id=cluster_id, + host_id=host_id) if members: - self.db_api.cluster_host_member_delete(req.context, members[0]['id']) + self.db_api.cluster_host_member_delete( + req.context, members[0]['id']) else: msg = ("%(host_id)s is not a member of cluster %(cluster_id)s" % {'host_id': host_id, 'cluster_id': cluster_id}) @@ -395,11 +400,12 @@ class Controller(object): raise webob.exc.HTTPNotFound(explanation=msg) # Make an appropriate result - msg = (_LI("Successfully deleted a host %(host_id)s from cluster %(cluster_id)s") % + msg = (_LI("Successfully deleted a host %(host_id)s from cluster %(" + "cluster_id)s") % {'host_id': host_id, 'cluster_id': cluster_id}) LOG.info(msg) return webob.exc.HTTPNoContent() - + def default(self, req, *args, **kwargs): """This will cover the missing 'show' and 'create' actions""" LOG.debug("The method %s is not allowed for this resource" % @@ -412,14 +418,16 @@ class Controller(object): Retrieves clusters shared with the given host. 
""" try: - members = self.db_api.cluster_host_member_find(req.context, host_id=host_id) + members = self.db_api.cluster_host_member_find( + req.context, host_id=host_id) except exception.NotFound: msg = _LW("Host %(id)s not found") % {'id': host_id} LOG.warn(msg) msg = _("Membership could not be found.") raise webob.exc.HTTPBadRequest(explanation=msg) - msg = "Returning list of clusters shared with host %(id)s" % {'id': host_id} + msg = "Returning list of clusters shared with host %(id)s" % { + 'id': host_id} LOG.debug(msg) return dict(multi_clusters=make_member_list(members, cluster_id='cluster_id')) diff --git a/code/daisy/daisy/registry/api/v1/networks.py b/code/daisy/daisy/registry/api/v1/networks.py index df2dfde4..a223a495 100755 --- a/code/daisy/daisy/registry/api/v1/networks.py +++ b/code/daisy/daisy/registry/api/v1/networks.py @@ -44,7 +44,7 @@ DISPLAY_FIELDS_IN_INDEX = ['id', 'name', 'size', SUPPORTED_FILTERS = ['name', 'status', 'container_format', 'disk_format', 'min_ram', 'min_disk', 'size_min', 'size_max', - 'changes-since', 'protected'] + 'changes-since', 'protected', 'type'] SUPPORTED_SORT_KEYS = ('name', 'status', 'container_format', 'disk_format', 'size', 'id', 'created_at', 'updated_at') @@ -59,11 +59,12 @@ class Controller(object): def __init__(self): self.db_api = daisy.db.get_api() - def _get_networks(self, context,cluster_id, filters=None, **params): + def _get_networks(self, context, cluster_id, filters=None, **params): """Get networks, wrapping in exception if necessary.""" try: - return self.db_api.network_get_all(context, cluster_id,filters=filters, - **params) + return self.db_api.network_get_all(context, cluster_id, + filters=filters, + **params) except exception.NotFound: LOG.warn(_LW("Invalid marker. Network %(id)s could not be " "found.") % {'id': params.get('marker')}) @@ -79,23 +80,23 @@ class Controller(object): raise def update_phyname_of_network(self, req, body): - try: + try: self.db_api.update_phyname_of_network(req.context, body) return {} - except exception.NotFound: + except exception.NotFound: raise exc.HTTPServerError( - explanation="Update database for phyname of network table failed!") + explanation="Update database for phyname of network " + "table failed!") def get_all_networks(self, req): params = self._get_query_params(req) try: - networks = self.db_api.network_get_all(req.context,**params) + networks = self.db_api.network_get_all(req.context, **params) except Exception: raise exc.HTTPServerError(explanation="Get all networks failed") return networks - def detail_network(self, req, id): """Return a filtered list of public, non-deleted networks in detail @@ -108,7 +109,7 @@ class Controller(object): all network model fields. 
""" params = self._get_query_params(req) - networks = self._get_networks(req.context, id ,**params) + networks = self._get_networks(req.context, id, **params) return dict(networks=networks) @@ -129,7 +130,7 @@ class Controller(object): for key, value in params.items(): if value is None: del params[key] - + return params def _get_filters(self, req): @@ -245,7 +246,7 @@ class Controller(object): which will include the newly-created network's internal id in the 'id' field """ - + network_data = body["network"] network_id = network_data.get('id') @@ -254,7 +255,7 @@ class Controller(object): # add network_id and role # if role # self.db_api.get_role(req.context,role) - + if network_id and not utils.is_uuid_like(network_id): msg = _LI("Rejecting network creation request for invalid network " "id '%(bad_id)s'") % {'bad_id': network_id} @@ -264,7 +265,7 @@ class Controller(object): try: network_data = self.db_api.network_add(req.context, network_data) - #network_data = dict(network=make_image_dict(network_data)) + # network_data = dict(network=make_image_dict(network_data)) msg = (_LI("Successfully created node %s") % network_data["id"]) LOG.info(msg) @@ -272,7 +273,7 @@ class Controller(object): network_data = dict(network=network_data) return network_data except exception.Duplicate: - msg = _("node with identifier %s already exists!") % image_id + msg = _("node with identifier %s already exists!") % network_id LOG.warn(msg) return exc.HTTPConflict(msg) except exception.Invalid as e: @@ -295,12 +296,15 @@ class Controller(object): success, the body contains the deleted image information as a mapping. """ try: - deleted_network = self.db_api.network_destroy(req.context, network_id) - msg = _LI("Successfully deleted network %(network_id)s") % {'network_id': network_id} + deleted_network = self.db_api.network_destroy( + req.context, network_id) + msg = _LI("Successfully deleted network %(network_id)s") % { + 'network_id': network_id} LOG.info(msg) return dict(network=deleted_network) except exception.ForbiddenPublicImage: - msg = _LI("Delete denied for public network %(network_id)s") % {'network_id': network_id} + msg = _LI("Delete denied for public network %(network_id)s") % { + 'network_id': network_id} LOG.info(msg) raise exc.HTTPForbidden() except exception.Forbidden: @@ -311,7 +315,8 @@ class Controller(object): LOG.info(msg) return exc.HTTPNotFound() except exception.NotFound: - msg = _LI("Network %(network_id)s not found") % {'network_id': network_id} + msg = _LI("Network %(network_id)s not found") % { + 'network_id': network_id} LOG.info(msg) return exc.HTTPNotFound() except Exception: @@ -343,8 +348,6 @@ class Controller(object): network_data = dict(network=network_data) return network_data - - @utils.mutating def update_network(self, req, network_id, body): """Updates an existing network with the registry. 
@@ -357,9 +360,11 @@ class Controller(object): """ network_data = body['network'] try: - updated_network = self.db_api.network_update(req.context, network_id, network_data) + updated_network = self.db_api.network_update( + req.context, network_id, network_data) - msg = _LI("Updating metadata for network %(network_id)s") % {'network_id': network_id} + msg = _LI("Updating metadata for network %(network_id)s") % { + 'network_id': network_id} LOG.info(msg) if 'network' not in updated_network: network_data = dict(network=updated_network) @@ -370,17 +375,20 @@ class Controller(object): LOG.error(msg) return exc.HTTPBadRequest(msg) except exception.NotFound: - msg = _LI("Network %(network_id)s not found") % {'network_id': network_id} + msg = _LI("Network %(network_id)s not found") % { + 'network_id': network_id} LOG.info(msg) raise exc.HTTPNotFound(body='Network not found', request=req, content_type='text/plain') except exception.ForbiddenPublicImage: - msg = _LI("Update denied for public network %(network_id)s") % {'network_id': network_id} + msg = _LI("Update denied for public network %(network_id)s") % { + 'network_id': network_id} LOG.info(msg) raise exc.HTTPForbidden() - except exception.Forbidden: - raise + except exception.Forbidden as e: + LOG.info(e) + raise exc.HTTPForbidden(e) except exception.Conflict as e: LOG.info(utils.exception_to_str(e)) raise exc.HTTPConflict(body='Network operation conflicts', @@ -389,8 +397,7 @@ class Controller(object): except Exception: LOG.exception(_LE("Unable to update network %s") % network_id) raise - - + @utils.mutating def update_cluster(self, req, id, body): """Updates an existing cluster with the registry. @@ -403,7 +410,8 @@ class Controller(object): """ cluster_data = body['cluster'] try: - updated_cluster = self.db_api.cluster_update(req.context, id, cluster_data) + updated_cluster = self.db_api.cluster_update( + req.context, id, cluster_data) msg = _LI("Updating metadata for cluster %(id)s") % {'id': id} LOG.info(msg) @@ -441,7 +449,7 @@ class Controller(object): content_type='text/plain') except Exception: LOG.exception(_LE("Unable to update cluster %s") % id) - raise + raise def _limit_locations(image): @@ -452,7 +460,8 @@ def _limit_locations(image): if loc['status'] == 'active': image['location'] = loc['url'] break - + + def make_image_dict(image): """Create a dict representation of an image which we can use to serialize the image. @@ -460,7 +469,7 @@ def make_image_dict(image): def _fetch_attrs(d, attrs): return dict([(a, d[a]) for a in attrs - if a in d.keys()]) + if a in d.keys()]) # TODO(sirp): should this be a dict, or a list of dicts? 
# A plain dict is more convenient, but list of dicts would provide @@ -474,6 +483,7 @@ def make_image_dict(image): return image_dict + def create_resource(): """Images resource factory method.""" deserializer = wsgi.JSONRequestDeserializer() diff --git a/code/daisy/daisy/registry/api/v1/template.py b/code/daisy/daisy/registry/api/v1/template.py index e094e234..77f954b1 100755 --- a/code/daisy/daisy/registry/api/v1/template.py +++ b/code/daisy/daisy/registry/api/v1/template.py @@ -38,9 +38,24 @@ _LW = i18n._LW CONF = cfg.CONF DISPLAY_FIELDS_IN_INDEX = ['id', 'name', 'type', 'hosts', 'content'] SUPPORTED_FILTERS = ['name', 'type', 'cluster_name', 'hosts', 'content'] -SUPPORTED_SORT_KEYS = ('name', 'type', 'hosts', 'content', 'id', 'created_at', 'updated_at') +SUPPORTED_SORT_KEYS = ( + 'name', + 'type', + 'hosts', + 'content', + 'id', + 'created_at', + 'updated_at') SUPPORTED_SORT_DIRS = ('asc', 'desc') -SUPPORTED_PARAMS = ('limit', 'marker', 'sort_key', 'sort_dir', 'name', 'type', 'cluster_name') +SUPPORTED_PARAMS = ( + 'limit', + 'marker', + 'sort_key', + 'sort_dir', + 'name', + 'type', + 'cluster_name') + class Controller(object): @@ -64,7 +79,7 @@ class Controller(object): for key, value in params.items(): if value is None: del params[key] - + return params def _get_filters(self, req): @@ -188,16 +203,18 @@ class Controller(object): # add id and role # if role # self.db_api.get_role(req.context,role) - + if id and not utils.is_uuid_like(id): - msg = _LI("Rejecting template creation request for invalid template " + msg = _LI("Rejecting template creation request for " + "invalid template " "id '%(bad_id)s'") % {'bad_id': id} LOG.info(msg) msg = _("Invalid template id format") return exc.HTTPBadRequest(explanation=msg) try: - template_data = self.db_api.template_add(req.context, template_data) + template_data = self.db_api.template_add( + req.context, template_data) msg = (_LI("Successfully created template %s") % template_data["id"]) LOG.info(msg) @@ -230,14 +247,16 @@ class Controller(object): """ template_data = body["template"] if template_id and not utils.is_uuid_like(template_id): - msg = _LI("Rejecting cluster template creation request for invalid template " + msg = _LI("Rejecting cluster template creation request for " + "invalid template " "id '%(bad_id)s'") % {'bad_id': template_id} LOG.info(msg) msg = _("Invalid template id format") return exc.HTTPBadRequest(explanation=msg) try: - template_data = self.db_api.template_update(req.context, template_id, template_data) + template_data = self.db_api.template_update( + req.context, template_id, template_data) msg = (_LI("Successfully updated template %s") % template_data["id"]) LOG.info(msg) @@ -245,7 +264,8 @@ class Controller(object): template_data = dict(template=template_data) return template_data except exception.Duplicate: - msg = _("template with identifier %s already exists!") % template_id + msg = _("template with identifier %s already exists!") % \ + template_id LOG.warn(msg) return exc.HTTPConflict(msg) except exception.Invalid as e: @@ -256,7 +276,7 @@ class Controller(object): except Exception: LOG.exception(_LE("Unable to update template %s"), template_id) raise - + @utils.mutating def template_delete(self, req, template_id): """Registers a new template with the registry. 
@@ -276,8 +296,9 @@ class Controller(object): return exc.HTTPBadRequest(explanation=msg) try: - template_data = self.db_api.template_destroy(req.context, template_id) - #template_data = dict(template=make_image_dict(template_data)) + template_data = self.db_api.template_destroy( + req.context, template_id) + # template_data = dict(template=make_image_dict(template_data)) msg = (_LI("Successfully deleted template %s") % template_id) LOG.info(msg) if 'template' not in template_data: @@ -291,18 +312,20 @@ class Controller(object): except Exception: LOG.exception(_LE("Unable to delete template %s"), template_id) raise - + @utils.mutating def template_list(self, req): params = self._get_query_params(req) try: - filters=params.pop('filters') - marker=params.get('marker') - limit=params.get('limit') - sort_key=params.get('sort_key') - sort_dir=params.get('sort_dir') - return self.db_api.template_get_all(req.context, filters=filters,\ - marker=marker,limit=limit,sort_key=sort_key,sort_dir=sort_dir) + filters = params.pop('filters') + marker = params.get('marker') + limit = params.get('limit') + sort_key = params.get('sort_key') + sort_dir = params.get('sort_dir') + return self.db_api.template_get_all(req.context, filters=filters, + marker=marker, limit=limit, + sort_key=sort_key, + sort_dir=sort_dir) except exception.NotFound: LOG.warn(_LW("Invalid marker. template %(id)s could not be " "found.") % {'id': params.get('marker')}) @@ -328,7 +351,7 @@ class Controller(object): which will include the newly-created template's internal id in the 'id' field """ - + if template_id and not utils.is_uuid_like(template_id): msg = _LI("Rejecting template delete request for invalid template " "id '%(bad_id)s'") % {'bad_id': template_id} @@ -338,8 +361,11 @@ class Controller(object): try: template_data = self.db_api.template_get(req.context, template_id) - #service_disk_data = dict(service_disk=make_image_dict(service_disk_data)) - msg = (_LI("Successfully get template information:%s") % template_id) + # service_disk_data = dict(service_disk=make_image_dict( + # service_disk_data)) + msg = ( + _LI("Successfully get template information:%s") % + template_id) LOG.info(msg) if 'template' not in template_data: template_data = dict(template=template_data) @@ -352,7 +378,7 @@ class Controller(object): except Exception: LOG.exception(_LE("Unable to get template %s"), template_id) raise - + @utils.mutating def host_template_add(self, req, body): """Registers a new service_disk with the registry. 
@@ -360,7 +386,8 @@ class Controller(object): :param req: wsgi Request object :param body: Dictionary of information about the service_disk - :retval Returns the newly-created service_disk information as a mapping, + :retval Returns the newly-created service_disk information + as a mapping, which will include the newly-created service_disk's internal id in the 'id' field """ @@ -372,17 +399,20 @@ class Controller(object): # add id and role # if role # self.db_api.get_role(req.context,role) - + if id and not utils.is_uuid_like(id): - msg = _LI("Rejecting service_disk creation request for invalid service_disk " + msg = _LI("Rejecting service_disk creation request for " + "invalid service_disk " "id '%(bad_id)s'") % {'bad_id': id} LOG.info(msg) msg = _("Invalid service_disk id format") return exc.HTTPBadRequest(explanation=msg) try: - template_data = self.db_api.host_template_add(req.context, template_data) - #service_disk_data = dict(service_disk=make_image_dict(service_disk_data)) + template_data = self.db_api.host_template_add( + req.context, template_data) + # service_disk_data = dict(service_disk=make_image_dict( + # service_disk_data)) msg = (_LI("Successfully created node %s") % template_data["id"]) LOG.info(msg) @@ -409,22 +439,26 @@ class Controller(object): :param req: wsgi Request object :param body: Dictionary of information about the service_disk - :retval Returns the newly-created service_disk information as a mapping, + :retval Returns the newly-created service_disk information + as a mapping, which will include the newly-created service_disk's internal id in the 'id' field """ template_data = body["template"] - #template_id = template_data.get('template_id') + # template_id = template_data.get('template_id') if template_id and not utils.is_uuid_like(template_id): - msg = _LI("Rejecting cluster template creation request for invalid template " + msg = _LI("Rejecting cluster template creation request for " + "invalid template " "id '%(bad_id)s'") % {'bad_id': template_id} LOG.info(msg) msg = _("Invalid template id format") return exc.HTTPBadRequest(explanation=msg) try: - template_data = self.db_api.host_template_update(req.context, template_id, template_data) - #service_disk_data = dict(service_disk=make_image_dict(service_disk_data)) + template_data = self.db_api.host_template_update( + req.context, template_id, template_data) + # service_disk_data = dict(service_disk=make_image_dict( + # service_disk_data)) msg = (_LI("Successfully updated template %s") % template_data["id"]) LOG.info(msg) @@ -432,7 +466,8 @@ class Controller(object): template_data = dict(host_template=template_data) return template_data except exception.Duplicate: - msg = _("template with identifier %s already exists!") % template_id + msg = _("template with identifier %s already exists!") % \ + template_id LOG.warn(msg) return exc.HTTPConflict(msg) except exception.Invalid as e: @@ -443,7 +478,7 @@ class Controller(object): except Exception: LOG.exception(_LE("Unable to update template %s"), template_id) raise - + @utils.mutating def host_template_delete(self, req, template_id): """Registers a new service_disk with the registry. 
@@ -451,7 +486,8 @@ class Controller(object): :param req: wsgi Request object :param body: Dictionary of information about the service_disk - :retval Returns the newly-created service_disk information as a mapping, + :retval Returns the newly-created service_disk information + as a mapping, which will include the newly-created service_disk's internal id in the 'id' field """ @@ -463,8 +499,10 @@ class Controller(object): return exc.HTTPBadRequest(explanation=msg) try: - template_data = self.db_api.host_template_destroy(req.context, template_id) - #service_disk_data = dict(service_disk=make_image_dict(service_disk_data)) + template_data = self.db_api.host_template_destroy( + req.context, template_id) + # service_disk_data = dict(service_disk=make_image_dict( + # service_disk_data)) msg = (_LI("Successfully deleted template %s") % template_id) LOG.info(msg) if 'template' not in template_data: @@ -478,18 +516,22 @@ class Controller(object): except Exception: LOG.exception(_LE("Unable to delete template %s"), template_id) raise - + @utils.mutating def host_template_list(self, req): params = self._get_query_params(req) try: - filters=params.pop('filters') - marker=params.get('marker') - limit=params.get('limit') - sort_key=params.get('sort_key') - sort_dir=params.get('sort_dir') - return self.db_api.host_template_get_all(req.context, filters=filters,\ - marker=marker,limit=limit,sort_key=sort_key,sort_dir=sort_dir) + filters = params.pop('filters') + marker = params.get('marker') + limit = params.get('limit') + sort_key = params.get('sort_key') + sort_dir = params.get('sort_dir') + return self.db_api.host_template_get_all(req.context, + filters=filters, + marker=marker, + limit=limit, + sort_key=sort_key, + sort_dir=sort_dir) except exception.NotFound: LOG.warn(_LW("Invalid marker. 
template %(id)s could not be " "found.") % {'id': params.get('marker')}) @@ -511,11 +553,12 @@ class Controller(object): :param req: wsgi Request object :param body: Dictionary of information about the service_disk - :retval Returns the newly-created service_disk information as a mapping, + :retval Returns the newly-created service_disk information + as a mapping, which will include the newly-created service_disk's internal id in the 'id' field """ - + if template_id and not utils.is_uuid_like(template_id): msg = _LI("Rejecting template delete request for invalid template " "id '%(bad_id)s'") % {'bad_id': template_id} @@ -524,9 +567,13 @@ class Controller(object): return exc.HTTPBadRequest(explanation=msg) try: - template_data = self.db_api.host_template_get(req.context, template_id) - #service_disk_data = dict(service_disk=make_image_dict(service_disk_data)) - msg = (_LI("Successfully get template information:%s") % template_id) + template_data = self.db_api.host_template_get( + req.context, template_id) + # service_disk_data = dict(service_disk=make_image_dict( + # service_disk_data)) + msg = ( + _LI("Successfully get template information:%s") % + template_id) LOG.info(msg) if 'template' not in template_data: template_data = dict(host_template=template_data) @@ -540,6 +587,7 @@ class Controller(object): LOG.exception(_LE("Unable to get template %s"), template_id) raise + def create_resource(): """Images resource factory method.""" deserializer = wsgi.JSONRequestDeserializer() diff --git a/code/daisy/daisy/registry/client/v1/api.py b/code/daisy/daisy/registry/client/v1/api.py index 8ebe4054..20188051 100755 --- a/code/daisy/daisy/registry/client/v1/api.py +++ b/code/daisy/daisy/registry/client/v1/api.py @@ -201,64 +201,78 @@ def delete_member(context, image_id, member_id): c = get_registry_client(context) return c.delete_member(image_id, member_id) + def add_host_metadata(context, host_meta): LOG.debug("Adding host...") c = get_registry_client(context) return c.add_host(host_meta) + def delete_host_metadata(context, host_id): LOG.debug("Deleting host metadata for host %s...", host_id) c = get_registry_client(context) return c.delete_host(host_id) + def update_host_metadata(context, host_id, host_meta): LOG.debug("Updating host metadata for host %s...", host_id) c = get_registry_client(context) return c.update_host(host_id, host_meta) + def get_host_metadata(context, host_id): c = get_registry_client(context) return c.get_host(host_id) + def get_host_interface(context, host_meta): c = get_registry_client(context) return c.get_host_interface(host_meta) + def get_all_host_interfaces(context, params): c = get_registry_client(context) return c.get_all_host_interfaces(params) + def get_assigned_network(context, host_interface_id, network_id): c = get_registry_client(context) return c.get_assigned_network(host_interface_id, network_id) + def add_discover_host_metadata(context, discover_host_meta): LOG.debug("Adding discover host...") c = get_registry_client(context) - return c.add_discover_host(discover_host_meta) + return c.add_discover_host(discover_host_meta) + def delete_discover_host_metadata(context, discover_host_id): LOG.debug("Deleting host metadata for host %s...", discover_host_id) c = get_registry_client(context) return c.delete_discover_host(discover_host_id) - + + def get_discover_hosts_detail(context, **kwargs): c = get_registry_client(context) return c.get_discover_hosts_detailed(**kwargs) - + + def update_discover_host_metadata(context, host_id, host_meta): c = 
get_registry_client(context) return c.update_discover_host(host_id, host_meta) - + + def get_discover_host_metadata(context, host_id): c = get_registry_client(context) return c.get_discover_host_metadata(host_id) + def add_cluster_metadata(context, cluster_meta): LOG.debug("Adding cluster...") c = get_registry_client(context) return c.add_cluster(cluster_meta) + def update_cluster_metadata(context, cluster_id, cluster_meta): LOG.debug("Updating cluster metadata for cluster %s...", cluster_id) c = get_registry_client(context) @@ -266,31 +280,38 @@ def update_cluster_metadata(context, cluster_id, cluster_meta): print cluster_meta return c.update_cluster(cluster_id, cluster_meta) + def delete_cluster_metadata(context, cluster_id): LOG.debug("Deleting cluster metadata for cluster %s...", cluster_id) c = get_registry_client(context) return c.delete_cluster(cluster_id) + def get_cluster_metadata(context, cluster_id): c = get_registry_client(context) return c.get_cluster(cluster_id) + def add_cluster_host(context, cluster_id, host_id): c = get_registry_client(context) return c.add_cluster_host(cluster_id, host_id) + def delete_cluster_host(context, cluster_id, host_id): c = get_registry_client(context) return c.delete_cluster_host(cluster_id, host_id) + def get_hosts_detail(context, **kwargs): c = get_registry_client(context) return c.get_hosts_detailed(**kwargs) + def get_clusters_detail(context, **kwargs): c = get_registry_client(context) return c.get_clusters_detailed(**kwargs) + def get_cluster_hosts(context, cluster_id, host_id=None): c = get_registry_client(context) return c.get_cluster_hosts(cluster_id, host_id) @@ -300,283 +321,383 @@ def get_host_clusters(context, host_id): c = get_registry_client(context) return c.get_host_clusters(host_id) + def add_component_metadata(context, component_meta): LOG.debug("Adding component...") c = get_registry_client(context) return c.add_component(component_meta) + +def add_hwm_metadata(context, hwm): + c = get_registry_client(context) + return c.add_hwm(hwm) + + +def update_hwm_metadata(context, hwm_id, hwm): + c = get_registry_client(context) + return c.update_hwm(hwm_id, hwm) + + +def delete_hwm_metadata(context, hwm_id): + c = get_registry_client(context) + return c.delete_hwm(hwm_id) + + +def hwm_list_metadata(context, **kwargs): + c = get_registry_client(context) + return c.list_hwm(**kwargs) + + +def hwm_detail_metadata(context, hwm_id): + c = get_registry_client(context) + return c.get_hwm_detail(hwm_id) + + def add_template_metadata(context, template): c = get_registry_client(context) return c.add_template(template) - + + def update_template_metadata(context, template_id, template): c = get_registry_client(context) return c.update_template(template_id, template) - + + def delete_template_metadata(context, template_id): c = get_registry_client(context) return c.delete_template(template_id) - + def template_lists_metadata(context, **kwargs): c = get_registry_client(context) return c.list_template(**kwargs) + def template_detail_metadata(context, template_id): c = get_registry_client(context) return c.get_template_detail(template_id) + def add_host_template_metadata(context, template): c = get_registry_client(context) return c.add_host_template(template) - + + def update_host_template_metadata(context, template_id, template): c = get_registry_client(context) return c.update_host_template(template_id, template) - + + def delete_host_template_metadata(context, template_id): c = get_registry_client(context) return c.delete_host_template(template_id) 
- + def host_template_lists_metadata(context, **kwargs): c = get_registry_client(context) return c.list_host_template(**kwargs) + def host_template_detail_metadata(context, template_id): c = get_registry_client(context) return c.get_host_template_detail(template_id) - + + def delete_component_metadata(context, component_id): LOG.debug("Deleting component metadata for component %s...", component_id) c = get_registry_client(context) return c.delete_component(component_id) + def get_components_detail(context, **kwargs): c = get_registry_client(context) return c.get_components_detailed(**kwargs) + def get_component_metadata(context, component_id): c = get_registry_client(context) return c.get_component(component_id) + def update_component_metadata(context, component_id, component_meta): LOG.debug("Updating component metadata for component %s...", component_id) c = get_registry_client(context) return c.update_component(component_id, component_meta) + def add_service_metadata(context, service_meta): LOG.debug("Adding service...") c = get_registry_client(context) return c.add_service(service_meta) + def delete_service_metadata(context, service_id): LOG.debug("Deleting service metadata for service %s...", service_id) c = get_registry_client(context) return c.delete_service(service_id) + def get_services_detail(context, **kwargs): c = get_registry_client(context) return c.get_services_detailed(**kwargs) + def get_service_metadata(context, service_id): c = get_registry_client(context) return c.get_service(service_id) + def update_service_metadata(context, service_id, service_meta): LOG.debug("Updating service metadata for service %s...", service_id) c = get_registry_client(context) return c.update_service(service_id, service_meta) + def add_role_metadata(context, role_meta): LOG.debug("Adding role...") c = get_registry_client(context) return c.add_role(role_meta) + def delete_role_metadata(context, role_id): LOG.debug("Deleting role metadata for role %s...", role_id) c = get_registry_client(context) return c.delete_role(role_id) + def get_roles_detail(context, **kwargs): c = get_registry_client(context) return c.get_roles_detailed(**kwargs) + def get_role_metadata(context, role_id): c = get_registry_client(context) return c.get_role(role_id) + def update_role_metadata(context, role_id, role_meta): LOG.debug("Updating role metadata for role %s...", role_id) c = get_registry_client(context) return c.update_role(role_id, role_meta) + def get_role_services(context, role_id): c = get_registry_client(context) return c.get_role_services(role_id) + def get_role_host_metadata(context, role_id): LOG.debug("get role_host metadata for role %s...", role_id) c = get_registry_client(context) return c.get_role_host(role_id) + def delete_role_host_metadata(context, role_id): LOG.debug("delete role_host metadata for role %s...", role_id) c = get_registry_client(context) return c.delete_role_host(role_id) + def update_role_host_metadata(context, role_host_id, role_meta): LOG.debug("update role_host metadata for role %s...", role_host_id) c = get_registry_client(context) return c.update_role_host(role_host_id, role_meta) + def add_config_file_metadata(context, config_file_meta): LOG.debug("Adding config_file...") c = get_registry_client(context) return c.add_config_file(config_file_meta) + def delete_config_file_metadata(context, config_file_id): - LOG.debug("Deleting config_file metadata for config_file %s...", config_file_id) + LOG.debug( + "Deleting config_file metadata for config_file %s...", + config_file_id) c = 
get_registry_client(context) return c.delete_config_file(config_file_id) + def update_config_file_metadata(context, config_file_id, config_file_meta): - LOG.debug("Updating config_file metadata for config_file %s...", config_file_id) + LOG.debug( + "Updating config_file metadata for config_file %s...", + config_file_id) c = get_registry_client(context) return c.update_config_file(config_file_id, config_file_meta) + def get_config_file_metadata(context, config_file_id): c = get_registry_client(context) return c.get_config_file(config_file_id) + def get_config_files_detail(context, **kwargs): c = get_registry_client(context) return c.get_config_files_detailed(**kwargs) + + def add_config_set_metadata(context, config_set_meta): LOG.debug("Adding config_set...") c = get_registry_client(context) return c.add_config_set(config_set_meta) + def delete_config_set_metadata(context, config_set_id): - LOG.debug("Deleting config_set metadata for config_set %s...", config_set_id) + LOG.debug( + "Deleting config_set metadata for config_set %s...", + config_set_id) c = get_registry_client(context) return c.delete_config_set(config_set_id) + def update_config_set_metadata(context, config_set_id, config_set_meta): - LOG.debug("Updating config_set metadata for config_file %s...", config_set_id) + LOG.debug( + "Updating config_set metadata for config_file %s...", + config_set_id) c = get_registry_client(context) return c.update_config_set(config_set_id, config_set_meta) + def get_config_set_metadata(context, config_set_id): c = get_registry_client(context) return c.get_config_set(config_set_id) + def get_config_sets_detail(context, **kwargs): c = get_registry_client(context) return c.get_config_sets_detailed(**kwargs) + def add_config_metadata(context, config_meta): LOG.debug("Adding config...") c = get_registry_client(context) return c.add_config(config_meta) + def delete_config_metadata(context, config_id): LOG.debug("Deleting config metadata for config %s...", config_id) c = get_registry_client(context) return c.delete_config(config_id) + def update_config_metadata(context, config_id, config_meta): LOG.debug("Updating config metadata for config_file %s...", config_id) c = get_registry_client(context) return c.update_config(config_id, config_meta) + def update_configs_metadata_by_role_hosts(context, config_metas): c = get_registry_client(context) return c.update_config_by_role_hosts(config_metas) + def get_config_metadata(context, config_id): c = get_registry_client(context) return c.get_config(config_id) + def get_configs_detail(context, **kwargs): c = get_registry_client(context) return c.get_configs_detailed(**kwargs) + def add_network_metadata(context, network_meta): LOG.debug("Adding network...") c = get_registry_client(context) return c.add_network(network_meta) + def update_phyname_of_network(context, network_phyname_set): c = get_registry_client(context) return c.update_phyname_of_network(network_phyname_set) + def update_network_metadata(context, network_id, network_meta): LOG.debug("Updating cluster metadata for cluster %s...", network_id) c = get_registry_client(context) return c.update_network(network_id, network_meta) + def delete_network_metadata(context, network_id): LOG.debug("Deleting cluster metadata for cluster %s...", network_id) c = get_registry_client(context) return c.delete_network(network_id) + def get_network_metadata(context, network_id): c = get_registry_client(context) return c.get_networks(network_id) + def get_networks_detail(context, cluster_id, **kwargs): c = 
get_registry_client(context) return c.get_networks_detailed(cluster_id, **kwargs) + def get_all_networks(context, **kwargs): c = get_registry_client(context) return c.get_all_networks(**kwargs) + def config_interface_metadata(context, config_interface_meta): c = get_registry_client(context) return c.config_interface(config_interface_meta) + def add_service_disk_metadata(context, service_disk_meta): c = get_registry_client(context) return c.add_service_disk(service_disk_meta) + def delete_service_disk_metadata(context, service_disk_id): LOG.debug("Deleting service_disk metadata %s...", service_disk_id) c = get_registry_client(context) return c.delete_service_disk(service_disk_id) + def update_service_disk_metadata(context, service_disk_id, service_disk_meta): - LOG.debug("Updating config metadata for config_file %s...", service_disk_id) + LOG.debug( + "Updating config metadata for config_file %s...", + service_disk_id) c = get_registry_client(context) return c.update_service_disk(service_disk_id, service_disk_meta) - + + def get_service_disk_detail_metadata(context, service_disk_id): c = get_registry_client(context) return c.get_service_disk_detail(service_disk_id) - + + def list_service_disk_metadata(context, **kwargs): c = get_registry_client(context) return c.list_service_disk(**kwargs) - + + def add_cinder_volume_metadata(context, cinder_volume_meta): c = get_registry_client(context) return c.add_cinder_volume(cinder_volume_meta) - + + def delete_cinder_volume_metadata(context, cinder_volume_id): LOG.debug("Deleting cinder_volume metadata %s...", cinder_volume_id) c = get_registry_client(context) return c.delete_cinder_volume(cinder_volume_id) -def update_cinder_volume_metadata(context, cinder_volume_id, cinder_volume_meta): - LOG.debug("Updating config metadata for cinder_volume %s...", cinder_volume_id) + +def update_cinder_volume_metadata( + context, cinder_volume_id, cinder_volume_meta): + LOG.debug( + "Updating config metadata for cinder_volume %s...", + cinder_volume_id) c = get_registry_client(context) return c.update_cinder_volume(cinder_volume_id, cinder_volume_meta) - + + def get_cinder_volume_detail_metadata(context, cinder_volume_id): c = get_registry_client(context) return c.get_cinder_volume_detail(cinder_volume_id) - + + def list_cinder_volume_metadata(context, **kwargs): c = get_registry_client(context) - return c.list_cinder_volume(**kwargs) \ No newline at end of file + return c.list_cinder_volume(**kwargs) diff --git a/code/daisy/daisy/registry/client/v1/client.py b/code/daisy/daisy/registry/client/v1/client.py index 16d1bf3f..69ab6a48 100755 --- a/code/daisy/daisy/registry/client/v1/client.py +++ b/code/daisy/daisy/registry/client/v1/client.py @@ -32,6 +32,7 @@ from daisy.registry.api.v1 import config_sets from daisy.registry.api.v1 import configs from daisy.registry.api.v1 import networks from daisy.registry.api.v1 import template +from daisy.registry.api.v1 import hwms LOG = logging.getLogger(__name__) _LE = i18n._LE @@ -305,10 +306,14 @@ class RegistryClient(BaseClient): } # if 'host' not in host_metadata: - # host_metadata = dict(host=host_metadata) + # host_metadata = dict(host=host_metadata) body = jsonutils.dumps(host_metadata) - res = self.do_request("GET", "/host-interface", body=body, headers=headers) + res = self.do_request( + "GET", + "/host-interface", + body=body, + headers=headers) host_interface = jsonutils.loads(res.read()) return host_interface @@ -323,15 +328,19 @@ class RegistryClient(BaseClient): filters = dict(filters=kwargs) body = 
jsonutils.dumps(filters) - res = self.do_request("PUT", "/host-interfaces", body=body, headers=headers) + res = self.do_request( + "PUT", + "/host-interfaces", + body=body, + headers=headers) host_interface = jsonutils.loads(res.read()) return host_interface def get_assigned_network(self, host_interface_id, network_id): - """Returns a mapping of host_assigned_network metadata from Registry.""" + """Returns a mapping of host_assigned_network + metadata from Registry.""" - body = None headers = {} @@ -373,19 +382,26 @@ class RegistryClient(BaseClient): body = jsonutils.dumps(discover_host_meta) - res = self.do_request("POST", "/discover/nodes", body=body, headers=headers) + res = self.do_request( + "POST", + "/discover/nodes", + body=body, + headers=headers) # Registry returns a JSONified dict(image=image_info) data = jsonutils.loads(res.read()) return data['discover_host'] - + def delete_discover_host(self, discover_host_id): """ Deletes Registry's information about an host """ - res = self.do_request("DELETE", "/discover/nodes/%s" % discover_host_id) + res = self.do_request( + "DELETE", + "/discover/nodes/%s" % + discover_host_id) data = jsonutils.loads(res.read()) return data - + def get_discover_hosts_detailed(self, **kwargs): """ Returns a list of detailed host data mappings from Registry @@ -400,7 +416,7 @@ class RegistryClient(BaseClient): res = self.do_request("GET", "/discover/nodes", params=params) host_list = jsonutils.loads(res.read())['nodes'] return host_list - + def update_discover_host(self, host_id, discover_host_meta): ''' ''' @@ -413,11 +429,13 @@ class RegistryClient(BaseClient): body = jsonutils.dumps(discover_host_meta) - res = self.do_request("PUT", "/discover/nodes/%s" % host_id, body=body, headers=headers) + res = self.do_request( + "PUT", "/discover/nodes/%s" % + host_id, body=body, headers=headers) # Registry returns a JSONified dict(image=image_info) data = jsonutils.loads(res.read()) return data['discover_host'] - + def get_discover_host_metadata(self, host_id): res = self.do_request("GET", "/discover/nodes/%s" % host_id) data = jsonutils.loads(res.read())['discover_host'] @@ -524,7 +542,9 @@ class RegistryClient(BaseClient): def get_cluster_hosts(self, cluster_id, host_id=None): """Return a list of membership associations from Registry.""" if host_id: - res = self.do_request("GET", "/clusters/%s/nodes/%s" % (cluster_id, host_id)) + res = self.do_request( + "GET", "/clusters/%s/nodes/%s" % + (cluster_id, host_id)) else: res = self.do_request("GET", "/clusters/%s/nodes" % cluster_id) data = jsonutils.loads(res.read())['members'] @@ -551,7 +571,59 @@ class RegistryClient(BaseClient): # Registry returns a JSONified dict(image=image_info) data = jsonutils.loads(res.read()) return data['template'] - + + def add_hwm(self, hwm): + """ """ + headers = { + 'Content-Type': 'application/json', + } + + if 'hwm' not in hwm: + hwm = dict(hwm=hwm) + + body = jsonutils.dumps(hwm) + + res = self.do_request("POST", "/hwm", body=body, headers=headers) + # Registry returns a JSONified dict(image=image_info) + data = jsonutils.loads(res.read()) + return data['hwm'] + + def update_hwm(self, hwm_id, hwm): + headers = { + 'Content-Type': 'application/json', + } + if 'hwm' not in hwm: + hwm = dict(hwm=hwm) + + body = jsonutils.dumps(hwm) + + res = self.do_request( + "PUT", + "/hwm/%s" % + hwm_id, + body=body, + headers=headers) + # Registry returns a JSONified dict(image=image_info) + data = jsonutils.loads(res.read()) + return data['hwm'] + + def delete_hwm(self, hwm_id): + res = 
self.do_request("DELETE", "/hwm/%s" % hwm_id) + data = jsonutils.loads(res.read()) + return data['hwm'] + + def list_hwm(self, **kwargs): + """ """ + params = self._extract_params(kwargs, hwms.SUPPORTED_PARAMS) + res = self.do_request("GET", "/hwm", params=params) + data = jsonutils.loads(res.read()) + return data + + def get_hwm_detail(self, hwm_id): + res = self.do_request("GET", "/hwm/%s" % hwm_id) + data = jsonutils.loads(res.read()) + return data['hwm'] + def add_host_template(self, template): """ """ headers = { @@ -563,11 +635,15 @@ class RegistryClient(BaseClient): body = jsonutils.dumps(template) - res = self.do_request("POST", "/host_template", body=body, headers=headers) + res = self.do_request( + "POST", + "/host_template", + body=body, + headers=headers) # Registry returns a JSONified dict(image=image_info) data = jsonutils.loads(res.read()) return data['host_template'] - + def update_template(self, template_id, template): headers = { 'Content-Type': 'application/json', @@ -577,11 +653,16 @@ class RegistryClient(BaseClient): body = jsonutils.dumps(template) - res = self.do_request("PUT", "/template/%s" % template_id, body=body, headers=headers) + res = self.do_request( + "PUT", + "/template/%s" % + template_id, + body=body, + headers=headers) # Registry returns a JSONified dict(image=image_info) data = jsonutils.loads(res.read()) return data['template'] - + def update_host_template(self, template_id, template): headers = { 'Content-Type': 'application/json', @@ -591,48 +672,50 @@ class RegistryClient(BaseClient): body = jsonutils.dumps(template) - res = self.do_request("PUT", "/host_template/%s" % template_id, body=body, headers=headers) + res = self.do_request( + "PUT", + "/host_template/%s" % + template_id, + body=body, + headers=headers) # Registry returns a JSONified dict(image=image_info) data = jsonutils.loads(res.read()) return data['host_template'] - + def delete_template(self, template_id): res = self.do_request("DELETE", "/template/%s" % template_id) data = jsonutils.loads(res.read()) return data['template'] - + def delete_host_template(self, template_id): res = self.do_request("DELETE", "/host_template/%s" % template_id) data = jsonutils.loads(res.read()) return data['host_template'] - + def list_template(self, **kwargs): """ """ params = self._extract_params(kwargs, template.SUPPORTED_PARAMS) res = self.do_request("GET", "/template/list", params=params) data = jsonutils.loads(res.read()) return data - + def list_host_template(self, **kwargs): """ """ params = self._extract_params(kwargs, template.SUPPORTED_PARAMS) res = self.do_request("GET", "/host_template/list", params=params) data = jsonutils.loads(res.read()) return data - + def get_template_detail(self, template_id): res = self.do_request("GET", "/template/%s" % template_id) data = jsonutils.loads(res.read()) return data['template'] - - + def get_host_template_detail(self, template_id): res = self.do_request("GET", "/host_template/%s" % template_id) data = jsonutils.loads(res.read()) - return data['host_template'] - - - + return data['host_template'] + def get_component(self, component_id): """Returns a mapping of component metadata from Registry.""" res = self.do_request("GET", "/components/%s" % component_id) @@ -652,7 +735,11 @@ class RegistryClient(BaseClient): body = jsonutils.dumps(component_metadata) - res = self.do_request("POST", "/components", body=body, headers=headers) + res = self.do_request( + "POST", + "/components", + body=body, + headers=headers) # Registry returns a JSONified 
dict(image=image_info) data = jsonutils.loads(res.read()) return data['component'] @@ -693,8 +780,8 @@ class RegistryClient(BaseClient): 'Content-Type': 'application/json', } - res = self.do_request("PUT", "/components/%s" % component_id, body=body, - headers=headers) + res = self.do_request("PUT", "/components/%s" % component_id, + body=body, headers=headers) data = jsonutils.loads(res.read()) return data['component'] @@ -847,8 +934,8 @@ class RegistryClient(BaseClient): res = self.do_request("DELETE", "/roles/%s/hosts" % role_id) data = jsonutils.loads(res.read())['role'] return data - - def update_role_host(self, role_host_id,role_host): + + def update_role_host(self, role_host_id, role_host): """Returns a mapping of role_host metadata from Registry.""" if 'role' not in role_host: role_metadata = dict(role=role_host) @@ -859,8 +946,8 @@ class RegistryClient(BaseClient): 'Content-Type': 'application/json', } - res = self.do_request("PUT", "/roles/%s/hosts" % role_host_id, body=body, - headers=headers) + res = self.do_request("PUT", "/roles/%s/hosts" % role_host_id, + body=body, headers=headers) data = jsonutils.loads(res.read()) return data @@ -877,7 +964,11 @@ class RegistryClient(BaseClient): body = jsonutils.dumps(config_file_metadata) - res = self.do_request("POST", "/config_files", body=body, headers=headers) + res = self.do_request( + "POST", + "/config_files", + body=body, + headers=headers) # Registry returns a JSONified dict(image=image_info) data = jsonutils.loads(res.read()) return data['config_file'] @@ -909,8 +1000,8 @@ class RegistryClient(BaseClient): 'Content-Type': 'application/json', } - res = self.do_request("PUT", "/config_files/%s" % config_file_id, body=body, - headers=headers) + res = self.do_request("PUT", "/config_files/%s" % config_file_id, + body=body, headers=headers) data = jsonutils.loads(res.read()) return data['config_file'] @@ -942,7 +1033,11 @@ class RegistryClient(BaseClient): body = jsonutils.dumps(config_set_metadata) - res = self.do_request("POST", "/config_sets", body=body, headers=headers) + res = self.do_request( + "POST", + "/config_sets", + body=body, + headers=headers) # Registry returns a JSONified dict(image=image_info) data = jsonutils.loads(res.read()) return data['config_set'] @@ -974,8 +1069,8 @@ class RegistryClient(BaseClient): 'Content-Type': 'application/json', } - res = self.do_request("PUT", "/config_sets/%s" % config_set_id, body=body, - headers=headers) + res = self.do_request("PUT", "/config_sets/%s" % config_set_id, + body=body, headers=headers) data = jsonutils.loads(res.read()) return data['config_set'] @@ -1057,7 +1152,11 @@ class RegistryClient(BaseClient): 'Content-Type': 'application/json', } - res = self.do_request("POST", "/configs/update_config_by_role_hosts", body=body, headers=headers) + res = self.do_request( + "POST", + "/configs/update_config_by_role_hosts", + body=body, + headers=headers) data = jsonutils.loads(res.read()) return data['configs'] @@ -1110,7 +1209,11 @@ class RegistryClient(BaseClient): 'Content-Type': 'application/json', } - self.do_request("POST", "/networks/update_phyname_of_network", body=body, headers=headers) + self.do_request( + "POST", + "/networks/update_phyname_of_network", + body=body, + headers=headers) def update_network(self, network_id, network_metadata): """ @@ -1128,7 +1231,7 @@ class RegistryClient(BaseClient): res = self.do_request("PUT", "/networks/%s" % network_id, body=body, headers=headers) data = jsonutils.loads(res.read()) - return data['network'] + return data['network'] 
def delete_network(self, network_id): """ @@ -1149,7 +1252,9 @@ class RegistryClient(BaseClient): :param sort_dir: direction in which to order results (asc, desc) """ params = self._extract_params(kwargs, networks.SUPPORTED_PARAMS) - res = self.do_request("GET", "/clusters/%s/networks" % cluster_id, params=params) + res = self.do_request( + "GET", "/clusters/%s/networks" % + cluster_id, params=params) network_list = jsonutils.loads(res.read())['networks'] return network_list @@ -1168,8 +1273,12 @@ class RegistryClient(BaseClient): } body = jsonutils.dumps(config_interface) - res = self.do_request("POST", "/config_interface", body=body, headers=headers) - config_interface= jsonutils.loads(res.read())['config_interface_meta'] + res = self.do_request( + "POST", + "/config_interface", + body=body, + headers=headers) + config_interface = jsonutils.loads(res.read())['config_interface_meta'] return config_interface def add_service_disk(self, service_disk_metadata): @@ -1185,11 +1294,15 @@ class RegistryClient(BaseClient): body = jsonutils.dumps(service_disk_metadata) - res = self.do_request("POST", "/service_disk", body=body, headers=headers) + res = self.do_request( + "POST", + "/service_disk", + body=body, + headers=headers) # Registry returns a JSONified dict(image=image_info) data = jsonutils.loads(res.read()) return data['service_disk'] - + def delete_service_disk(self, service_disk_id): """ Deletes Registry's information about an network @@ -1197,8 +1310,7 @@ class RegistryClient(BaseClient): res = self.do_request("DELETE", "/service_disk/%s" % service_disk_id) data = jsonutils.loads(res.read()) return data['service_disk'] - - + def update_service_disk(self, service_disk_id, service_disk_metadata): """ Updates Registry's information about an service_disk @@ -1212,17 +1324,17 @@ class RegistryClient(BaseClient): 'Content-Type': 'application/json', } - res = self.do_request("PUT", "/service_disk/%s" % service_disk_id, body=body, - headers=headers) + res = self.do_request("PUT", "/service_disk/%s" % service_disk_id, + body=body, headers=headers) data = jsonutils.loads(res.read()) - return data['service_disk'] - + return data['service_disk'] + def get_service_disk_detail(self, service_disk_id): """Return a list of service_disk associations from Registry.""" res = self.do_request("GET", "/service_disk/%s" % service_disk_id) data = jsonutils.loads(res.read())['service_disk'] - return data - + return data + def list_service_disk(self, **kwargs): """ Returns a list of service_disk data mappings from Registry @@ -1231,7 +1343,7 @@ class RegistryClient(BaseClient): res = self.do_request("GET", "/service_disk/list", params=params) service_disk_list = jsonutils.loads(res.read())['service_disks'] return service_disk_list - + def add_cinder_volume(self, cinder_volume_metadata): """ Tells registry about an network's metadata @@ -1245,11 +1357,15 @@ class RegistryClient(BaseClient): body = jsonutils.dumps(cinder_volume_metadata) - res = self.do_request("POST", "/cinder_volume", body=body, headers=headers) + res = self.do_request( + "POST", + "/cinder_volume", + body=body, + headers=headers) # Registry returns a JSONified dict(image=image_info) data = jsonutils.loads(res.read()) return data['cinder_volume'] - + def delete_cinder_volume(self, cinder_volume_id): """ Deletes Registry's information about an network @@ -1257,7 +1373,7 @@ class RegistryClient(BaseClient): res = self.do_request("DELETE", "/cinder_volume/%s" % cinder_volume_id) data = jsonutils.loads(res.read()) return data['cinder_volume'] - + def 
update_cinder_volume(self, cinder_volume_id, cinder_volume_metadata): """ Updates Registry's information about an cinder_volume @@ -1271,18 +1387,17 @@ class RegistryClient(BaseClient): 'Content-Type': 'application/json', } - res = self.do_request("PUT", "/cinder_volume/%s" % cinder_volume_id, body=body, - headers=headers) + res = self.do_request("PUT", "/cinder_volume/%s" % cinder_volume_id, + body=body, headers=headers) data = jsonutils.loads(res.read()) - return data['cinder_volume'] - - + return data['cinder_volume'] + def get_cinder_volume_detail(self, cinder_volume_id): """Return a list of cinder_volume associations from Registry.""" res = self.do_request("GET", "/cinder_volume/%s" % cinder_volume_id) data = jsonutils.loads(res.read())['cinder_volume'] - return data - + return data + def list_cinder_volume(self, **kwargs): """ Returns a list of cinder_volume data mappings from Registry @@ -1290,4 +1405,4 @@ class RegistryClient(BaseClient): params = self._extract_params(kwargs, hosts.SUPPORTED_PARAMS) res = self.do_request("GET", "/cinder_volume/list", params=params) cinder_volume_list = jsonutils.loads(res.read())['cinder_volumes'] - return cinder_volume_list \ No newline at end of file + return cinder_volume_list diff --git a/code/daisy/daisy/tests/api/tecs.conf b/code/daisy/daisy/tests/api/tecs.conf index e9eafcfc..73ad3834 100644 --- a/code/daisy/daisy/tests/api/tecs.conf +++ b/code/daisy/daisy/tests/api/tecs.conf @@ -13,4 +13,4 @@ CONFIG_COMPONENT2_INSTALL = CONFIG_SERVER1_INSTALL_MODE = -CONFIG_SERVER2_INSTALL_MODE = \ No newline at end of file +CONFIG_SERVER2_INSTALL_MODE = diff --git a/code/daisy/daisy/tests/api/test_config.py b/code/daisy/daisy/tests/api/test_config.py old mode 100644 new mode 100755 index 8f30f8c2..9863d059 --- a/code/daisy/daisy/tests/api/test_config.py +++ b/code/daisy/daisy/tests/api/test_config.py @@ -5,39 +5,46 @@ import unittest import os compute_role = {"Compute": - {'services': - {'server1': 'component1', 'server2': 'component2'}, - 'host_interfaces': - [{'management': {'ip': '192.168.1.1'}, - 'deployment': {'ip': '192.168.0.1'}, }, - {'management': {'ip': '192.168.1.2'}, - 'deployment': {'ip': '192.168.0.2'}, }, ], - 'vip': '192.168.4.2', }, } + {'services': + {'server1': 'component1', 'server2': 'component2'}, + 'host_interfaces': + [{'management': {'ip': '192.168.1.1'}, + 'deployment': {'ip': '192.168.0.1'}, }, + {'management': {'ip': '192.168.1.2'}, + 'deployment': {'ip': '192.168.0.2'}, }, ], + 'vip': '192.168.4.2', }, } ha_role = {"CONTROLLER_HA": - {'services': - {'nova-api': 'component3', 'mariadb': 'component4'}, - 'host_interfaces': - [{'management': {'ip': '192.168.1.3', 'netmask': "255.255.255.0", 'name': 'eth0', }, - 'deployment': {'ip': '192.168.0.3'}, - 'storage': {'ip': '192.168.5.3'}, }, - {'management': {'ip': '192.168.1.4', 'netmask': "255.255.255.0", 'name': 'eth0', }, - 'deployment': {'ip': '192.168.0.4'}, - 'storage': {'ip': '192.168.5.3'}, }, ], + {'services': + {'nova-api': 'component3', 'mariadb': 'component4'}, + 'host_interfaces': + [{'management': {'ip': '192.168.1.3', + 'netmask': "255.255.255.0", 'name': 'eth0', }, + 'deployment': {'ip': '192.168.0.3'}, + 'storage': {'ip': '192.168.5.3'}, }, + {'management': {'ip': '192.168.1.4', + 'netmask': "255.255.255.0", + 'name': 'eth0', }, + 'deployment': {'ip': '192.168.0.4'}, + 'storage': {'ip': '192.168.5.3'}, }, ], 'vip': '192.168.4.4', }, } lb_role = {"CONTROLLER_LB": - {'services': - {'nova-api': 'component5', 'mariadb': 'component6'}, - 'host_interfaces': - [{'management': 
{'ip': '192.168.1.5', 'netmask': "255.255.255.0", 'name': 'eth0', }, - 'deployment': {'ip': '192.168.0.5'}, - 'storage': {'ip': '192.168.5.5'}, }, - {'management': {'ip': '192.168.1.6', 'netmask': "255.255.255.0", 'name': 'eth0', }, - 'deployment': {'ip': '192.168.0.6'}, - 'storage': {'ip': '192.168.5.6'}, }, ], + {'services': + {'nova-api': 'component5', 'mariadb': 'component6'}, + 'host_interfaces': + [{'management': {'ip': '192.168.1.5', + 'netmask': "255.255.255.0", 'name': 'eth0', }, + 'deployment': {'ip': '192.168.0.5'}, + 'storage': {'ip': '192.168.5.5'}, }, + {'management': {'ip': '192.168.1.6', + 'netmask': "255.255.255.0", + 'name': 'eth0', }, + 'deployment': {'ip': '192.168.0.6'}, + 'storage': {'ip': '192.168.5.6'}, }, ], 'vip': '192.168.4.6', }, } + def merge_dict(*args): result = dict() for a in args: @@ -48,33 +55,36 @@ mix_roles = merge_dict(compute_role, ha_role, lb_role) class TestTecsConfig(unittest.TestCase): + def setUp(self): - tecs_config.tecs_conf_template_path = os.path.dirname(os.path.realpath(__file__)) + tecs_config.tecs_conf_template_path = os.path.dirname( + os.path.realpath(__file__)) print tecs_config.tecs_conf_template_path def tearDown(self): - tecs_config.tecs_conf_template_path = tecs_config.default_tecs_conf_template_path + tecs_config.tecs_conf_template_path = \ + tecs_config.default_tecs_conf_template_path def test_config_with_nothing(self): tecs, ha = tecs_config.update_tecs_conf("ab-11", {}) self.assertTrue(True) def test_config_with_compute_role(self): - tecs,ha = tecs_config.update_tecs_conf("ab-11", compute_role ) + tecs, ha = tecs_config.update_tecs_conf("ab-11", compute_role) self.assertTrue(True) - print tecs,ha + print tecs, ha def test_config_with_ha_role(self): - tecs, ha = tecs_config.update_tecs_conf("ab-11", ha_role ) + tecs, ha = tecs_config.update_tecs_conf("ab-11", ha_role) self.assertTrue(True) def test_config_with_lb_role(self): - tecs, ha = tecs_config.update_tecs_conf("ab-11", lb_role ) + tecs, ha = tecs_config.update_tecs_conf("ab-11", lb_role) self.assertTrue(True) def test_config_with_all_role(self): - tecs, ha = tecs_config.update_tecs_conf("ab-11", lb_role ) + tecs, ha = tecs_config.update_tecs_conf("ab-11", lb_role) self.assertTrue(True) if __name__ == "__main__": - unittest.main() \ No newline at end of file + unittest.main() diff --git a/code/daisy/daisy/tests/api/test_roles.py b/code/daisy/daisy/tests/api/test_roles.py new file mode 100755 index 00000000..0508b007 --- /dev/null +++ b/code/daisy/daisy/tests/api/test_roles.py @@ -0,0 +1,179 @@ +import mock +import webob +from oslo.serialization import jsonutils +from daisy.api.v1 import roles +from daisy.context import RequestContext +from daisy import test + + +def set_role_meta(): + role_meta = {} + role_meta["name"] = "test_role" + role_meta["description"] = "111" + return role_meta + + +class TestRolesApiConfig(test.TestCase): + + def setUp(self): + super(TestRolesApiConfig, self).setUp() + self.controller = roles.Controller() + + @mock.patch('daisy.registry.client.v1.client.RegistryClient.do_request') + def test_add_role(self, mock_do_request): + role_meta = set_role_meta() + req = webob.Request.blank('/') + req.context = RequestContext(is_admin=True, + user='fake user', + tenant='fake tenamet') + + def fake_do_request(method, path, **params): + res = mock.Mock() + cluster_id = 'test-1234-1234-1244' + if method == "GET": + get_result = {'roles': + [{'cluster_id': None, + 'name': 'HA', + 'description': 'nothing'}, + {'cluster_id': None, + 'name': 'LB', + 'description': 
'nothing'}, + {'cluster_id': cluster_id, + 'name': 'test_role', + 'description': 'nothing'} + ] + } + res.read.return_value = jsonutils.dumps(get_result) + return res + elif method == 'POST': + post_result = {'role': {'db_vip': None}} + res.read.return_value = jsonutils.dumps(post_result) + return res + + mock_do_request.side_effect = fake_do_request + add_role = self.controller.add_role(req, role_meta) + self.assertEqual({'role_meta': {u'db_vip': None}}, add_role) + + @mock.patch('daisy.registry.client.v1.client.RegistryClient.do_request') + def test_add_role_with_repeated_name(self, mock_do_request): + role_meta = set_role_meta() + req = webob.Request.blank('/') + req.context = RequestContext(is_admin=True, + user='fake user', + tenant='fake tenamet') + + def fake_do_request(method, path, **params): + res = mock.Mock() + cluster_id = 'test-1234-1234-1244' + if method == "GET": + get_result = {'roles': + [{'cluster_id': None, + 'name': 'HA', + 'description': 'nothing'}, + {'cluster_id': None, + 'name': 'test_role', + 'description': 'nothing'}, + {'cluster_id': cluster_id, + 'name': 'test_role', + 'description': 'nothing'} + ] + } + res.read.return_value = jsonutils.dumps(get_result) + return res + elif method == 'POST': + post_result = {'role': {'db_vip': None}} + res.read.return_value = jsonutils.dumps(post_result) + return res + + mock_do_request.side_effect = fake_do_request + # "The role %s has already been in the the template role." % role_name + self.assertRaises(webob.exc.HTTPForbidden, + self.controller.add_role, req, role_meta) + + @mock.patch('daisy.registry.client.v1.client.RegistryClient.do_request') + def test_add_template_role_with_cluster_raise_exception(self, + mock_do_request): + role_meta = set_role_meta() + cluster_id = 'test-1234-1234-1244' + role_meta['cluster_id'] = cluster_id + role_meta['type'] = 'template' + req = webob.Request.blank('/') + req.context = RequestContext(is_admin=True, + user='fake user', + tenant='fake tenamet') + + def fake_do_request(method, path, **params): + res = mock.Mock() + print 'path', path + if method == "GET": + if path == '/clusters/%s' % cluster_id: + get_result = { + u'vlan_end': None, + u'networking_parameters': { + u'vni_range': [ + None, + None + ], + u'public_vip': None, + u'net_l23_provider': None, + u'base_mac': u'', + u'gre_id_range': [ + None, + None + ], + u'vlan_range': [ + None, + None + ], + u'segmentation_type': u'vlan' + }, + u'owner': None, + u'gre_id_start': None, + u'deleted_at': None, + u'networks': [], + u'id': cluster_id, + u'base_mac': u'', + u'auto_scale': 0, + u'vni_end': None, + u'gre_id_end': None, + u'nodes': [], + u'description': u'', + u'deleted': False, + u'routers': [], + u'logic_networks': [], + u'net_l23_provider': None, + u'vlan_start': None, + u'name': u'testtt', + u'public_vip': None, + u'use_dns': 1, + u'vni_start': None, + u'segmentation_type': u'vlan' + } + res.read.return_value = jsonutils.dumps(get_result) + return res + else: + get_result = {'roles': + [{'cluster_id': None, + 'name': 'HA', + 'description': 'nothing'}, + {'cluster_id': None, + 'name': 'LB', + 'description': 'nothing'}, + {'cluster_id': cluster_id, + 'name': 'test', + 'description': 'nothing'} + ] + } + res.read.return_value = jsonutils.dumps(get_result) + return res + elif method == 'POST': + post_result = {'role': {'db_vip': None}} + res.read.return_value = jsonutils.dumps(post_result) + return res + + mock_do_request.side_effect = fake_do_request + # add_role = self.controller.add_role(req, role_meta) + # 
webob.exc.HTTPForbidden: + # Tht template role cannot be added to any cluster. + self.assertRaises(webob.exc.HTTPForbidden, + self.controller.add_role, req, role_meta) diff --git a/code/daisy/daisy/tests/common/test_utils.py b/code/daisy/daisy/tests/common/test_utils.py new file mode 100755 index 00000000..ca8f595d --- /dev/null +++ b/code/daisy/daisy/tests/common/test_utils.py @@ -0,0 +1,47 @@ +import unittest +from daisy.common import utils + + +class TestUtils(unittest.TestCase): + + def test_get_numa_node_cpus(self): + host_cpu = {'numa_node0': '0-5,12-17', + 'numa_node1': '6-11,18-23'} + node0_cpus = range(0, 6) + range(12, 18) + node1_cpus = range(6, 12) + range(18, 24) + real_numa_cpus = utils.get_numa_node_cpus(host_cpu) + expect_numa_cpus = {'numa_node0': node0_cpus, + 'numa_node1': node1_cpus, } + self.assertEqual(expect_numa_cpus, real_numa_cpus) + + host_cpu = {'numa_node0': '0-5,12-17'} + node0_cpus = range(0, 6) + range(12, 18) + real_numa_cpus = utils.get_numa_node_cpus(host_cpu) + expect_numa_cpus = {'numa_node0': node0_cpus} + self.assertEqual(expect_numa_cpus, real_numa_cpus) + + host_cpu = {'numa_node1': '6-11,18-23'} + node1_cpus = range(6, 12) + range(18, 24) + real_numa_cpus = utils.get_numa_node_cpus(host_cpu) + expect_numa_cpus = {'numa_node1': node1_cpus, } + self.assertEqual(expect_numa_cpus, real_numa_cpus) + + host_cpu = {} + real_numa_cpus = utils.get_numa_node_cpus(host_cpu) + expect_numa_cpus = {} + self.assertEqual(expect_numa_cpus, real_numa_cpus) + + def test_get_numa_node_from_cpus(self): + node0_cpus = range(0, 6) + range(12, 18) + node1_cpus = range(6, 12) + range(18, 24) + numa_cpus = {'numa_node0': node0_cpus, + 'numa_node1': node1_cpus, } + cpus_str = '1,2,12-17' + real_numas = utils.get_numa_node_from_cpus(numa_cpus, cpus_str) + expect_numas = [0] + self.assertEqual(expect_numas, real_numas) + + cpus_str = '0-5,6-11,12-17,18-23' + real_numas = utils.get_numa_node_from_cpus(numa_cpus, cpus_str) + expect_numas = [0, 1] + self.assertEqual(expect_numas, real_numas) diff --git a/code/daisy/daisy/tests/common/test_vcpu_pin.py b/code/daisy/daisy/tests/common/test_vcpu_pin.py new file mode 100755 index 00000000..1daca00a --- /dev/null +++ b/code/daisy/daisy/tests/common/test_vcpu_pin.py @@ -0,0 +1,32 @@ +import unittest +import mock +from daisy.common import vcpu_pin + + +class TestVcpuPin(unittest.TestCase): + + def test_allocate_cpus_with_dvs_and_pci_on_same_node(self): + host_cpu = {'numa_node0': '0-5,12-17', + 'numa_node1': '6-11,18-23'} + roles = ['CONTROLLER_LB', 'CONTROLLER_HA', 'COMPUTER'] + #interfaces = [{'switch_type': 'dvs', 'name': 'ens33'}] + host_detail = {'id': "host_test_id", + 'name': "host_test_name", + 'cpu': host_cpu, + 'role': roles} + dvs_cpus = [14, 15, 16, 17] + high_cpusets = range(12, 18) + range(0, 6) + low_cpusets = range(18, 24) + range(6, 12) + dvs_cpu_sets = mock.Mock(return_value={'high': high_cpusets, + 'low': low_cpusets, + 'dvs': dvs_cpus}) + vcpu_pin.allocate_dvs_cpus = dvs_cpu_sets + pci_cpu_sets = mock.Mock(return_value={'high': high_cpusets, + 'low': low_cpusets}) + vcpu_pin.allocate_clc_cpus = pci_cpu_sets + real_cpu_sets = vcpu_pin.allocate_cpus(host_detail) + expect_cpu_sets = {'dvs_high_cpuset': '0-5,12-17', + 'pci_high_cpuset': '0-5,12-17', + 'suggest_dvs_cpus': '14-17', + 'suggest_os_cpus': '0,6-8'} + self.assertEqual(expect_cpu_sets, real_cpu_sets) diff --git a/code/daisy/daisy/tests/conf_fixture.py b/code/daisy/daisy/tests/conf_fixture.py new file mode 100755 index 00000000..377cf764 --- /dev/null +++ 
b/code/daisy/daisy/tests/conf_fixture.py @@ -0,0 +1,25 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg + + +CONF = cfg.CONF + + +def set_defaults(conf): + #conf.set_default('default_volume_type', def_vol_type) + pass diff --git a/code/daisy/daisy/tests/fake_notifier.py b/code/daisy/daisy/tests/fake_notifier.py new file mode 100755 index 00000000..9e22a8df --- /dev/null +++ b/code/daisy/daisy/tests/fake_notifier.py @@ -0,0 +1,72 @@ +# Copyright 2014 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import collections +import functools + +import anyjson +import oslo_messaging as messaging + +#from cinder import rpc + +NOTIFICATIONS = [] + + +def reset(): + del NOTIFICATIONS[:] + + +FakeMessage = collections.namedtuple('Message', + ['publisher_id', 'priority', + 'event_type', 'payload']) + + +class FakeNotifier(object): + + def __init__(self, transport, publisher_id, serializer=None, driver=None, + topic=None, retry=None): + self.transport = transport + self.publisher_id = publisher_id + for priority in ['debug', 'info', 'warn', 'error', 'critical']: + setattr(self, priority, + functools.partial(self._notify, priority.upper())) + self._serializer = serializer or messaging.serializer.NoOpSerializer() + self._topic = topic + self.retry = retry + + def prepare(self, publisher_id=None): + if publisher_id is None: + publisher_id = self.publisher_id + return self.__class__(self.transport, publisher_id, self._serializer) + + def _notify(self, priority, ctxt, event_type, payload): + payload = self._serializer.serialize_entity(ctxt, payload) + # NOTE(sileht): simulate the kombu serializer + # this permit to raise an exception if something have not + # been serialized correctly + anyjson.serialize(payload) + msg = dict(publisher_id=self.publisher_id, + priority=priority, + event_type=event_type, + payload=payload) + NOTIFICATIONS.append(msg) + + +# def stub_notifier(stubs): + # stubs.Set(messaging, 'Notifier', FakeNotifier) + # if rpc.NOTIFIER: + # serializer = getattr(rpc.NOTIFIER, '_serializer', None) + # stubs.Set(rpc, 'NOTIFIER', FakeNotifier(rpc.NOTIFIER.transport, + # rpc.NOTIFIER.publisher_id, + # serializer=serializer)) diff --git a/code/daisy/doc/source/conf.py b/code/daisy/doc/source/conf.py index b42fb729..713fd060 100755 --- a/code/daisy/doc/source/conf.py +++ b/code/daisy/doc/source/conf.py 
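Not part of the patch: a minimal usage sketch (editor's illustration, not the author's code) showing how the FakeNotifier added in daisy/tests/fake_notifier.py above could be exercised from a unit test. The import path follows the file location in this diff; the test name, publisher id, event type, and payload are illustrative assumptions.

```python
# Hypothetical test sketch, assuming daisy.tests.fake_notifier is importable
# and that anyjson / oslo_messaging (both imported by that module) are installed.
from daisy.tests import fake_notifier


def test_fake_notifier_records_notifications():
    fake_notifier.reset()  # clear the module-level NOTIFICATIONS list
    notifier = fake_notifier.FakeNotifier(transport=None,
                                          publisher_id='test-publisher')
    # info() is one of the functools.partial wrappers created in __init__,
    # so this call lands in _notify() with priority 'INFO'.
    notifier.info({}, 'image.upload', {'id': 'abc123'})

    assert len(fake_notifier.NOTIFICATIONS) == 1
    msg = fake_notifier.NOTIFICATIONS[0]
    assert msg['publisher_id'] == 'test-publisher'
    assert msg['priority'] == 'INFO'
    assert msg['event_type'] == 'image.upload'
    assert msg['payload'] == {'id': 'abc123'}
```

Because notifications accumulate in the shared module-level NOTIFICATIONS list, a test that uses this helper should call reset() in setUp (or at the start of the test) so assertions are not polluted by earlier cases.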
@@ -29,14 +29,12 @@ import os import sys +from daisy.version import version_info as daisy_version # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path = [ - os.path.abspath('../..'), - os.path.abspath('../../bin') - ] + sys.path +sys.path = [os.path.abspath('../..'), os.path.abspath('../../bin')] + sys.path # -- General configuration --------------------------------------------------- @@ -56,7 +54,7 @@ extensions = ['sphinx.ext.coverage', source_suffix = '.rst' # The encoding of source files. -#source_encoding = 'utf-8' +# source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' @@ -70,7 +68,6 @@ copyright = u'2010-2014, OpenStack Foundation.' # built documents. # # The short X.Y version. -from daisy.version import version_info as daisy_version # The full version, including alpha/beta/rc tags. release = daisy_version.version_string_with_vcs() # The short X.Y version. @@ -78,30 +75,30 @@ version = daisy_version.canonical_version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. -#language = None +# language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: -#today = '' +# today = '' # Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' +# today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. -#unused_docs = [] +# unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ['api'] # The reST default role (for this markup: `text`) to use for all documents. -#default_role = None +# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True +# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). -#add_module_names = True +# add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. @@ -152,26 +149,26 @@ man_pages = [ # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. -#html_theme_options = {} +# html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = ['_theme'] +# html_theme_path = ['_theme'] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. -#html_logo = None +# html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -#html_favicon = None +# html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. 
They are copied after the builtin static files, @@ -180,20 +177,20 @@ html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' +# html_last_updated_fmt = '%b %d, %Y' git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1" html_last_updated_fmt = os.popen(git_cmd).read() # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. -#html_use_smartypants = True +# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. -#html_sidebars = {} +# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. -#html_additional_pages = {} +# html_additional_pages = {} # If false, no module index is generated. html_use_modindex = False @@ -202,18 +199,18 @@ html_use_modindex = False html_use_index = False # If true, the index is split into individual pages for each letter. -#html_split_index = False +# html_split_index = False # If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True +# html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = '' +# html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'glancedoc' @@ -222,10 +219,10 @@ htmlhelp_basename = 'glancedoc' # -- Options for LaTeX output ------------------------------------------------ # The paper size ('letter' or 'a4'). -#latex_paper_size = 'letter' +# latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). -#latex_font_size = '10pt' +# latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, @@ -237,17 +234,17 @@ latex_documents = [ # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # Additional stuff for the LaTeX preamble. -#latex_preamble = '' +# latex_preamble = '' # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. 
-#latex_use_modindex = True +# latex_use_modindex = True diff --git a/code/daisy/doc/source/guide/Version_rollback.md b/code/daisy/doc/source/guide/Version_rollback.md new file mode 100755 index 00000000..3f9b58ec --- /dev/null +++ b/code/daisy/doc/source/guide/Version_rollback.md @@ -0,0 +1,163 @@ +#銆恠print42銆 42242: 鐗堟湰鍥為@daisy1 + +|淇敼鏃ユ湡 |淇敼浜 | 淇敼鍐呭| +|:----|:----|:----:|:----| +|2016-06-17| 闄堥洩鑻 | 鏂板| + +##1銆佺壒鎬х畝浠 +鐗堟湰鍥為鏂规楠岃瘉鑳屾櫙锛歍ECS鍗囩骇澶辫触锛岄渶瑕佸洖閫銆 +鐜淇℃伅 + +| 瑙掕壊 |涓绘帶 |澶囨帶 | 璁$畻鑺傜偣| +|:----|:----|:----:|:----| +|涓绘満 | A | B | C| +|褰撳墠鑰佺増鏈瑋 100 | 100 | 100| +|寰呭崌鏂扮増鏈 | 101 | 101 | 101 | +姝e父鍗囩骇娴佺▼锛 +绗竴姝 HA绂佹鍊掓崲锛屽崌绾/C鐗堟湰鍒版渶鏂帮紝 B 100--->101锛 C 100----->101 +绗簩姝 A--->B锛孉B涓诲鍊掓崲锛孊涓轰富 +绗笁姝 绛夊緟20鍒嗛挓锛岃繘琛屽熀鏈姛鑳介獙璇 +绗洓姝 鍗囩骇A 100----->101 +绗簲姝 鍙栨秷绂佹鍊掓崲 + +濡傛灉鐗堟湰鍗囩骇涔嬪悗锛岀涓夋楠岃瘉鏃跺彂鐜版柊鐗堟湰101鏈夐棶棰橈紝闇瑕佹敮鎸佺幆澧冨洖閫鍒拌佺増鏈100銆 + +##2銆佹柟妗堥獙璇 + +**楠岃瘉缁勭綉**锛氭帶鍒惰妭鐐筭lance/db/dbbackup/monagodb浣跨敤鍏变韩纾佺洏鏂瑰紡+OVS +瑕佹眰姝e父鍗囩骇鐜鏃跺埌浠ヤ笂绗笁姝ュ彂鐜板紓甯稿悗闇瑕佸皢B,C鍥為鍒拌佺増鏈 +### A 鏂扮増鏈娇鐢ㄦ湁闂锛屽洖閫B,C + +褰撳墠鐜淇℃伅 + +| 瑙掕壊 |涓绘帶 |澶囨帶 | 璁$畻鑺傜偣| +|:----|:----|:----:|:----| +|涓绘満 | A澶 | B涓 | C璁 +|褰撳墠鐜鐗堟湰| 100 | 101锛堝紓甯革級 | 101| + +==**鍥為姝ラ**锛== +**1銆佸彇娑堢姝㈠掓崲锛孊--->A锛孉B涓诲鍊掓崲锛孉杞富**锛圓杞富澶辫触鐨勬儏鍐垫殏涓嶈冭檻锛 +鍙栨秷绂佹鍊掓崲锛歱cs resource clear lb_float_ip +ABy 涓诲鍊掓崲 pcs cluster move host_162_160_1_132锛 host_162_160_1_132 涓篈鐨勪富鏈哄悕 +time:10s + +**2銆佸啀娆$姝㈠掓崲** +绂佹鍊掓崲鍛戒护锛歱cs resource ban lb_float_ip host_162_160_1_132 +host_162_160_1_132鍗充负B鐨勪富鏈哄悕 +time:2s + +**3銆佸湪daisy涓繚瀛樺緟鍥為鐗堟湰100** +copy time:10s + +**4銆佸洖閫璁$畻鑺傜偣C锛101---->100锛** +1锛夊鏋滆绠楄妭鐐逛娇鐢ㄧ殑鏄疍VS缃戠粶锛屼繚瀛樿绠楄妭鐐逛笂鐨刵ova.conf鏂囦欢涓簄ova_old.conf锛堥渶瑕佸叧娉ㄨ绠楄妭鐐规槸鍚﹁繕鏈夊叾浠栭厤缃渶瑕佷繚瀛橈紝澶氬悗绔痠scsi_use_multipath鐨勯厤缃級 +save nova.conf time:10s + +2锛夊鏋滆绠楄妭鐐规湁铏氭嫙鏈猴紝闇瑕佽縼绉荤浉鍏宠櫄鎷熸満鍒板叾浠栬绠楄妭鐐 +migrate vm time:unknown, 120 +娉ㄦ剰锛氳櫄鎷熷鏋滀笉杩佺Щ锛岄渶瑕佺敤鎴锋帴鍙椾互涓嬩笁绉嶆儏鍐碉細 +a銆佽绠楄妭鐐规晠闅 +b銆佺敤鎴疯兘鎺ュ彈铏氭嫙鏈轰腑鏂笟鍔 +c銆佺敤鎴蜂笉鑳芥帴鍙楅渶瑕乀ECS淇濊瘉涓嶅悓鐗堟湰涔嬮棿鏀寔杩佺Щ + +3锛夎繍琛屽湪docker鐨刣aisy锛岃繘鍏ュ涓/var/lib/daisy/tecs//f3e0fd0c-512b-43b6-a078-f9c41c0daa8a/鐩綍鑾峰彇tecs.conf锛宮appings.json鏂囦欢锛屽苟鎷疯礉鍒癲aisy鐨/home/tecs_install鐩綍涓 +time:60s + +4锛変慨鏀箃ecs.conf鏂囦欢涓殑EXCLUDE_SERVERS鍜孋ONFIG_NEUTRON_ML2_JSON_PATH閰嶇疆椤 +EXCLUDE_SERVERS=A,B (濉啓涓嶉渶瑕侀噸鏂板畨瑁呯殑鑺傜偣A鍜孊) +CONFIG_NEUTRON_ML2_JSON_PATH = /home/tecs_install/mappings.json (濉啓mappings.json鏂囦欢鎵鍦ㄧ殑璺緞) +time=60s + +5锛夊埌璁$畻鑺傜偣涓夿鐢╞in鍖呮墜鍔ㄥ嵏杞藉紓甯哥増鏈101 +time=120s + +6锛夊湪daisy鑺傜偣鎵ц100.bin鏂囦欢锛屼娇鐢5 conf鏂瑰紡瀹夎璁$畻鑺傜偣TECS鐗堟湰銆 +**娉ㄦ剰锛**瀹夎bin鍓嶉渶瑕佹竻鐞哾aisy涓绘満涓/root/.ssh/known_hosts鏂囦欢涓殑C鐐圭殑鍏挜淇℃伅锛屽惁鍒欏畨瑁呮椂浼氳杈撳叆ssh瀵嗙爜 +Adding post install manifest entries [ DONE ] +root@10.43.203.90's password: +time=200s + +7锛塗ECS瀹夎鎴愬姛涔嬪悗锛 +闇瑕佹墜鍔ㄤ慨鏀筺ova.conf鏂囦欢锛 +compute_manager=nova.compute.manager.ComputeManager閰嶇疆椤逛慨鏀逛负 +compute_manager=nova.compute.tecs_manager.TecsComputeManager锛(宸茬煡闂锛孴ECS2.0_P7B1涓湭鍚堝叆锛屾渶鏂癉EV鐗堟湰NOVA宸茬粡淇敼) +vncserver_proxyclient_address=127.0.0.1閰嶇疆椤逛慨鏀逛负 +vncserver_proxyclient_address=host-162-160-1-12.STORAGE +濡傛灉閰嶇疆鐨勬槸DVS锛屾瘮杈僴ova_old.conf涓庡綋鍓嶈绠楄妭鐐圭殑nova.conf鏂囦欢锛屾墜鍔ㄤ慨鏀筺ova.conf涓殑vcpu_pin_set鍜寁cpupin_policy绛夐厤缃」锛 +鎴栬呯洿鎺ョ敤鍘熸潵鐨刵ova_old.conf鏂囦欢鏇挎崲褰撳墠nova.conf鏂囦欢 + +8)閲嶅惎C鐨勬湇鍔penstack-service restart +娉ㄦ剰:openstack-nova-storage鏈嶅姟涔熻鎵嬪姩閲嶅惎涓涓 + +9)妫鏌ヨ绠楄妭鐐规湇鍔″叏閮ㄦ甯稿惎鍔ㄣ + +**5銆佸洖閫鎺у埗鑺傜偣澶囨澘B锛100----->101锛** +1锛夊弬鑰冨洖閫璁$畻鑺傜偣鏂规硶閰嶇疆鎺у埗鑺傜偣B鐨則ecs.conf鏂囦欢锛孍XCLUDE_SERVERS=A锛孋 +CONFIG_NEUTRON_ML2_JSON_PATH = /home/tecs_install/mappings.json +2锛夊埌鎺у埗鑺傜偣B涓婄敤bin鍖呮墜鍔ㄥ嵏杞藉紓甯哥増鏈101 +time=3'35"=215s + +3锛夊湪B鏉夸笂鎵ц100.bin鏂囦欢锛屼娇鐢5 conf鏂瑰紡瀹夎TECS鐗堟湰銆 +time=11'15"=615s + +娉ㄦ剰锛氬畨瑁卋in鍓嶉渶瑕佹竻鐞哾aisy涓绘満涓/root/.ssh/known_hosts鏂囦欢涓殑B鐐圭殑鍏挜淇℃伅锛屽惁鍒欏畨瑁呮椂浼氳杈撳叆ssh瀵嗙爜 +Adding post install manifest entries [ DONE ] +root@10.43.203.90's 
password: + +4锛塗ECS鐗堟湰瀹夎瀹屾垚涔嬪悗锛屽湪澶囨澘B涓婃墽琛/home/tecs_install/storage_auto_config鐩綍涓嬫墽琛宲ython storage_auto_config.py cinder_conf cinderManageIP锛屽悓姝inder澶氬悗绔厤缃 +娉ㄦ剰锛 濡傛灉鏄嚜鐮擪S3200纾侀樀锛屽浜庡悗绔娇鐢ㄧ殑cinder_zte_conf.xml鏂囦欢闇瑕佹墜鍔ㄤ慨鏀硅繖涓瓧娈碉細`129.0.0.8`锛 +涔熷彲浠ユ墽琛屼笂闈㈠懡浠ゆ椂鍔營P鍦板潃锛屽python storage_auto_config.py cinder_conf cinderManageIP +cinderManageIP涓篶inder缁勪欢鐨勭鐞嗗钩闈㈠湴鍧銆 +time=10s + +5)浠庢甯哥殑鎺у埗鑺傜偣A鎷疯礉/etc/corosync/corosync.conf锛/var/lib/pcsd/ip_name.conf锛/var/lib/pcsd/pcs_settings.conf 鍒癇鐨勭浉鍚岀洰褰曚笅 + + +濡傛灉鏄泦缇ゆ柟寮忥紝杩橀渶瑕佹嫹璐/etc/drdb.d/WebData.res 鏂囦欢鍒扮浉鍚岀洰褰曚笅 +6锛夊湪澶囪妭鐐笲涓婃墽琛宲cs cluster start鍛戒护鍚姩闆嗙兢 +time=10s + +7锛夌敤systemctl status pacemaker.service妫鏌ユ湇鍔$姸鎬丱K锛宑rm_mon -1璧勬簮鍚姩姝e父 +8锛夋竻闄ょ姝A鍊掓崲鎿嶄綔锛宲cs resource clear lb_float_ip +9锛塇A鍙兘浼氳嚜鍔ㄨ缃负缁存姢妯″紡锛岄渶瑕佹墜鍔ㄥ彇娑堢淮鎶ゆā寮忥紝鐩稿叧鍛戒护锛宲cs property set unmanaged=false +10锛夊鏋済lance/db/dbbackup/monagodb鐩樻槸闆嗙兢鏂瑰紡杩橀渶瑕佸皢涓绘澘涓婄殑鏁版嵁鎵嬪姩鍚屾鍒板鏉跨殑纾佺洏涓娿 + + +**6銆佹鏌ュ洖閫鐨勭増鏈槸鍚︽甯** +1锛変富澶囨帶鍒惰妭鐐笻A杩涜鍊掓崲锛屽彲浠ユ甯稿掓崲 +2锛夊垱寤鸿櫄鎷熸満姝e父 + +### B 鍗囩骇鑺傜偣B鏃跺紓甯稿洖閫B +鍙傝冧互涓5銆佸洖閫鎺у埗鑺傜偣澶囨澘B + +| 瑙掕壊 |涓绘帶 |澶囨帶 | 璁$畻鑺傜偣| +|:----|:----|:----:|:----| +|涓绘満 | A澶 | B涓 | C璁 +|褰撳墠鐜鐗堟湰| 100 | 101锛堝紓甯革級 | 100| + +### C 鍗囩骇鑺傜偣C鏃跺紓甯稿洖閫C +| 瑙掕壊 |涓绘帶 |澶囨帶 | 璁$畻鑺傜偣| +|:----|:----|:----:|:----| +|涓绘満 | A澶 | B涓 | C璁 +|褰撳墠鐜鐗堟湰| 100 | 100 | 101(寮傚父)| +鍙傝冧互涓4銆佸洖閫璁$畻鑺傜偣C + +##3銆侀仐鐣欓棶棰 +鏈湴闆嗙兢鍜屽叡浜泦缇ゆ柟寮忛渶瑕佺户缁獙璇 + +##4銆佸悎鍏ョ増鏈 +鏃 + +##5銆佹敞鎰忎簨椤 +LB鐨勮祫婧愪笉鑳芥墜鍔ㄩ厤缃紝閫氳繃瀹夎淇濊瘉 +璁$畻鑺傜偣瀹夎瀹屾垚涔嬪悗闇瑕佹墜鍔ㄤ慨鏀筺ova.conf鏂囦欢涓殑宸ㄩ〉閰嶇疆淇℃伅 +璁$畻鑺傜偣瀹夎瀹屾垚涔嬪悗闇瑕佹墜鍔ㄤ慨鏀筺ova.conf鏂囦欢涓殑澶氳矾寰勯厤缃俊鎭 +鎺у埗鑺傜偣锛宑inder澶氬悗绔厤缃細涓㈠け锛宐in鏂瑰紡瀹夎鍚庨粯璁ゅ彧鏈塋VM鍚庣 +CEPH鍚庣闇瑕佹墜鍔ㄤ慨鏀笴EPH鐩稿叧閰嶇疆銆 +瀵逛簬鏈変簺鏁版嵁搴撲笉鍏煎鐨勬儏鍐甸渶瑕佸崟鐙冭檻 + +##6銆佹枃妗d慨鏀 +鏃 + +##7銆侀獙鏀舵柟娉 +鏃 \ No newline at end of file diff --git a/code/daisy/doc/source/guide/daisy_guide.md b/code/daisy/doc/source/guide/daisy_guide.md new file mode 100755 index 00000000..43639deb --- /dev/null +++ b/code/daisy/doc/source/guide/daisy_guide.md @@ -0,0 +1,979 @@ +# Daisy 瀹夎鎸囧涔 + + + +[TOC] + + + + + + + +## 1 寮曡█ + +### 1.1 缂栧啓鐩殑 + +鏈枃妗d粠Daisy鐨勫熀鏈蹇靛叆鎵嬫棬鍦ㄤ粙缁岲aisy閮ㄧ讲鐨勫熀鏈厤缃拰鐢ㄦ硶锛岀粨鍚堢粍缃戝浘浠嬬粛閮ㄧ讲杩囩▼涓弬鏁伴厤缃拰鐣岄潰浣跨敤銆 + + +### 1.2 鏈鍜岀缉鐣ヨ + + +|鏈嶅姟 |椤圭洰鍚嶇О |鍔熻兘鎻忚堪| +|----|:----:|:----| +|Daisy|Daisy|TECS鐨勮嚜鍔ㄥ寲閮ㄧ讲宸ュ叿| +|Dashboard| Dashboard|Web鐣岄潰锛岀敤鎴烽氳繃Dashboard鑳藉杩涜鐣岄潰鍖栫殑daisy閮ㄧ讲| +|Identity Service| Keystone |鍚戞湇鍔℃彁渚涜璇佸拰閴存潈鍔熻兘| +|Ironic| Ironic| Ironic鏄疧penStack鎻愪緵瑁搁噾灞為儴缃茬殑瑙e喅鏂规,鑰岄潪铏氭嫙鏈恒傞粯璁ゆ儏鍐典笅,瀹冨皢浣跨敤PXE鍜孖PMI鎺ュ彛绠$悊涓绘満,浣咺ronic杩樻敮鎸佺壒瀹氫緵搴斿晢鐨勬彃浠跺彲浠ュ疄鐜伴澶栫殑鍔熻兘| +|TECS | Tulip Elastic Computer System |閮侀噾棣欏脊鎬ц绠楃郴缁焲 +|IPMI| Intelligent Platform Management Interface| 鏅鸿兘骞冲彴绠$悊鎺ュ彛 (IPMI)鏄竴绉嶅紑鏀炬爣鍑嗙殑纭欢绠$悊鎺ュ彛瑙勬牸锛屽畾涔変簡宓屽叆寮忕鐞嗗瓙绯荤粺杩涜閫氫俊鐨勭壒瀹氭柟娉晐 +|BMC| Board Management Controller |鍗曟澘绠$悊妯″潡| + + + +### 1.3 鏀寔IPMI鏈嶅姟鍣ㄥ垪琛 + +Daisy閮ㄧ讲鐨勬湇鍔″櫒闇瑕佹敮鎸両PMI鍗忚锛岀幇鏈夌‖浠舵湇鍔″櫒涓敮鎸両PMI鐨勬湇鍔″櫒绫诲瀷鏈夛細 + +E9000锛孒PC7000锛孌ELL锛孯S4300锛孖8350绛夈 + + + + + +##2 缁勭綉瑙勫垝 + +###2.1 缁勭綉璇存槑 + +鏈瀹夎缁勭綉閲囩敤2涓帶鍒惰妭鐐癸紙HA+LB锛+2涓绠楄妭鐐癸紙compute锛+1涓狵S3200 IPSAN銆 + +![image](image/netplan.JPG) + + + +###2.2 纭欢璇存槑 + +|鏈嶅姟鍣ㄧ被鍨媩 鏁伴噺 | 鐢ㄩ攟 +|----| :----: |:----| +|E9000鍒鐗 | 4 |2鍧楁帶鍒惰妭鐐癸紙鍏朵腑涓鍧楀惈 瀹夎daisy鏈嶅姟鍣級2鍧楄绠楄妭鐐箌 +|KS3200 IPSAN | 1 |IPSAN锛屼綔涓篢ECS鐨勫瓨鍌ㄥ悗绔瘄 + + + +###2.3 缃戠粶瑙勫垝 + +鏈満鏅负鍏稿瀷鐨凞aisy閮ㄧ讲TECS鐜鍦烘櫙锛 瑙勫垝浜嗙鐞嗗钩闈紙MANAGEMENT锛夛紝public骞抽潰锛圥UBLICAPI锛夛紝瀛樺偍绠$悊骞抽潰锛圫TORAGE锛夛紝瀛樺偍涓氬姟闈紙storage_data锛変互鍙婅櫄鎷熸満涓氬姟骞抽潰锛圖ATAPLANE锛夛紱鍏朵腑绠$悊骞抽潰锛圡ANAGEMENT锛変笌瀛樺偍绠$悊骞抽潰锛圫TORAGE锛夊悎涓銆 + +娉細瀹為檯鐗╃悊鏈嶅姟鍣ㄤ笂杩樻湁PXE閮ㄧ讲骞抽潰锛圖EPLOYMENT锛夛紝鐢ㄤ簬涓绘満鍙戠幇鍜屽畨瑁呭叾浠栬妭鐐规搷浣滅郴缁燂紝浣嗙綉缁滄槧灏勪笉闇瑕佸叧娉紝鎵浠ヨ骞抽潰涓嶅垪鍦ㄤ笅闈㈠垪琛ㄤ腑銆 + +E9000鍒鐗囧寘鍚4涓綉鍙o紝缃戝彛涓庡钩闈㈢殑鏄犲皠鍏崇郴濡備笅锛 + +![image](image/netconf.JPG) + + + +Daisy鎵鍦ㄦ湇鍔″櫒鐨勫湴鍧闇瑕佹墜宸ラ厤缃紝缃戠粶瑙勫垝濡備笅锛 + +![image](image/daisynetconf.JPG) + + + +##3 Daisy瀹夎鍜岃鏄 + +###3.1 
OS+DAISY+TECS鐗堟湰璇存槑 + +鏁村鐜瀹夎闇瑕佸彇鐢3涓増鏈紝OS+DAISY+TECS鐗堟湰锛屾湰渚嬬殑daisy鏈嶅姟鍣ㄨ繍琛屽湪docker涓紝鎵浠aisy瀹夎鏃堕渶瑕佸彇鐢ㄥ甫docker鐨勬搷浣滅郴缁熴 + +|绯荤粺 | 鐗堟湰| +|----|:----| +|OS|Mimosa-V02.16.11.P7B1I59-CGSL_VPLAT-5.1-x86_64-KVM-director003-daisy117.iso +|DAISY| daisy-2016.2.11-7.11.119.el7.x86_64.rpm| +|TECS| ZXTECS_V02.16.11_P7B1_I156.zip| + + + +###3.2 绗竴鍙拌8鏈猴紙Daisy鏈嶅姟鍣級鐨勬搷浣滅郴缁熷畨瑁 + +绗竴鍙拌8鏈猴紙鍚庣画鍋歞aisy鏈嶅姟鍣ㄤ娇鐢級鐨勬搷浣滅郴缁熼渶瑕佺敤U鐩樿繘琛屽畨瑁咃紙U鐩樺埗浣滄柟娉曡锛.锛戦檮浠1 U鐩樼殑鍒朵綔鏂规硶锛 + + + +#### 3.2.1鏈嶅姟鍣˙MC鍦板潃璁剧疆 + +閫氳繃鐧诲綍绯荤粺绠$悊鍗曟澘锛圫MM锛夌殑web椤甸潰璁剧疆瀵瑰簲鏈嶅姟鍣ㄧ殑BMC鍦板潃銆 + +浠9000涓轰緥锛孲MM鐨勫垵濮嬬櫥褰曞湴鍧锛屽乏鏉匡細192.168.5.7锛屽彸鏉匡細192.168.5.8銆傚闇瑕佷慨鏀笽P锛屽彲浠ラ氳繃浣跨敤缃戠嚎杩炴帴PC涓嶴MM涓婄殑缃戝彛锛岀櫥褰昐MM鐨勫湴鍧锛屼緥濡傦細https://192.168.5.7锛岀敤鎴峰悕锛歾teroot锛屽瘑鐮侊細superuser銆 + + 閫変腑瀵艰埅鏉′腑鐨勩愭満妗嗙鐞嗐戦〉闈紝鍦ㄥ乏渚у鑸爲涓夈愭満妗嗛厤缃-銆愮綉缁滈厤缃戯紝鍙互淇敼SMM鍜屽垁鐗囩殑BMC IP锛屽瓙缃戞帺鐮佸拰缃戝叧銆 + +![image](image/bmcip.JPG) + + + +娉細 + +1锛 SMM璁剧疆鐨勫湴鍧蹇呴』鐗╃悊鍙氾紝鍚﹀垯淇敼鍚庢棤娉曠櫥褰曘 + +2锛 鍒鐗囩殑BMC鍦板潃蹇呴』涓嶴MM璁剧疆鐨勫湴鍧鍦ㄥ悓涓缃戞锛屽惁鍒橞MC鍦板潃涓嶉氥 + +濡傛灉浣跨敤Daisy鏈嶅姟鍣ㄥ畨瑁呴儴缃茬洰鏍囧垁鐗囷紝蹇呴』纭繚Daisy鏈嶅姟鍣ㄦ墍鍦ㄥ垁鐗囦笌鐩爣鍒鐗囩殑BMC鍦板潃鐗╃悊鍙氥 + + + +#### 3.2.2瀹夎TFG鎿嶄綔绯荤粺 + +鏈珷鎻忚堪濡備綍瀹夎绗竴鍙版湇鍔″櫒鐨凾FG鎿嶄綔绯荤粺銆 + +鍑嗗宸ヤ綔锛 + +1. 璋冭瘯鏈轰笌鍒鐗噑mm绠$悊妯″潡缃戠粶浜掗氾紝閫氳繃kvm閾炬帴 + +2. 瑕佸畨瑁呯殑鍒鐗囦笂鐢 + +3. 鏈嶅姟鍣ㄥ畨瑁呭仛濂絩aid + +4. 灏哢鐩樻帴鍏ュ緟瀹夎鐨勭涓鍙版湇鍔″櫒7妲 + + + +娉細鎵鏈夋湇鍔″櫒瀹夎鎿嶄綔绯荤粺涔嬪墠閮借鍏堣缃ソraid銆傛湇鍔″櫒璁剧疆濂絉AID涔嬪悗锛屽悗缁鏋滄病鏈変慨鏀归渶瑕佹棤闇鍐嶉噸澶嶅仛RAID銆 + + + +#####3.2.2.1鍒鐗囧惎鍔ㄦā寮忎慨鏀 + +閰嶇疆鍓嶆彁锛孲MM鍗曟澘宸茬粡閰嶇疆濂斤紝骞朵笖鍙互鐧婚檰锛屽涓嬪浘锛 + +![image](image/smmlogin1.JPG) + + + +杈撳叆鐢ㄦ埛鍚嶅拰瀵嗙爜锛岄粯璁や负zteroot/superuser锛岀櫥闄嗭細 + +![image](image/smmlogin2.JPG) + + + +鐐瑰嚮銆愭満妗嗙鐞嗐戯紝宸︿晶鐨勩愭満妗嗛厤缃-銆愬崟鏉块厤缃戯紝鍦ㄥ彸杈圭晫闈腑锛岄変腑绗竴鍧楁湇鍔″櫒鎵鍦ㄧ殑7妲藉垁鐗囷紝鐐瑰嚮銆愯缃戯紝閰嶇疆涓篣SB鍚姩銆 + +![image](image/setusb.JPG) + + + +#####3.2.2.2 閫氳繃BMC鍦板潃鐧诲綍鏈嶅姟鍣↘VM + +銆愭満妗嗙鐞嗐戯紝宸︿晶鐨勩愭満妗嗛厤缃-銆愮綉缁滈厤缃戯紝鏌ョ湅璇ユ湇鍔″櫒鐨凚MC鍦板潃涓10.43.203.239銆傞氳繃璇ュ湴鍧锛坔ttps://10.43.203.239/锛岄粯璁ょ敤鎴峰悕涓庡瘑鐮佷负zteroot/superuser锛夌櫥褰曡鏈嶅姟鍣ㄧ殑KVM瑙傚療鏈嶅姟鍣ㄩ噸鍚強鎿嶄綔绯荤粺瀹夎杩囩▼銆 + +![image](image/bmcipcheck.JPG) + + + +#####3.2.2.3 鎿嶄綔绯荤粺瀹夎 + +灏哢鐩樻彃鍦ㄧ涓鍧楁湇鍔″櫒涓婏紝閲嶅惎璇ユ湇鍔″櫒锛岄氳繃鏈嶅姟鍣↘VM鍙湅瑙傚療鍒板畨瑁呰繃绋. + +![image](image/installos.JPG) + + + +瀹夎瀹屾垚鍚庝細鎻愮ず閫鍑洪噸鍚 + +娉ㄦ剰锛氱郴缁熼噸鍚墠鍙互鍏堟洿鏀硅鍒鐗囩殑鍚姩鏂瑰紡涓虹‖鐩樺惎鍔紝骞舵嫈鍑篣鐩樼劧鍚庡啀閲嶅惎鍒鐗囥 + + + +####3.2.4鎿嶄綔绯荤粺璁剧疆 + +鎿嶄綔绯荤粺娑夊強鐨勪富鏈哄悕銆両P鍦板潃銆佺綉鍗¤鍒掕鍙傜収瑙勫垝璇存槑銆 + +鏈妭鎿嶄綔鎵鏈夋帶鍒惰妭鐐瑰拰璁$畻鑺傜偣閮介渶瑕佹墽琛岋紝鍙槸绗竴鍙癲aisy鏈嶅姟闇瑕佹墜鍔ㄩ厤缃富鏈哄悕鍜孖P鍦板潃锛屽叾浠栬妭鐐瑰拰璁$畻鑺傜偣daisy瀹夎鏃朵細鑷姩杩涜閰嶇疆銆 + +#####3.2.4.1 鍦板潃瑙勫垝 + +姝aisy鎵鍦ㄦ湇鍔″櫒鐨勫湴鍧瑙勫垝瑙2.3缃戠粶瑙勫垝銆 + +1銆佺涓鍙版湇鍔″櫒闇瑕佹墜宸ラ厤缃紝鍏朵粬鏈嶅姟鍣ㄩ氳繃Daisy涓殑缃戠粶骞抽潰閰嶇疆浼氳嚜鍔ㄥ垎閰嶃 + +2銆丼TORAGE瀛樺偍鎺у埗闈㈠湴鍧涓嶮ANAGEMENT鍚堜竴銆 + + + +#####3.2.4.2 涓绘満鍚嶈缃 + +[root@localhost]# **vi /etc/hostname** + +==host-10-43-203-132== + +[root@localhost]# **hostnamectl set-hostname host-10-43-203-132** + +[root@localhost]# logout + + logout涔嬪悗閲嶆柊鐧诲綍. 
+ + + +#####3.2.4.3 缃戝彛閰嶇疆 + +1銆乪np132s0f0銆乪np132s0f1缃戝崱鍋氱粦瀹氾紝缁戝畾鍙e悕绉颁负bond0,骞跺垎鍒厤缃甐LAN涓161,162鐨勫瓙鎺ュ彛bond0.161锛宐ond0.162锛 + + + +淇敼/etc/sysconfig/network-scripts鐩綍涓媔fcfg-enp132s0f0銆乮fcfg-enp132s0f1鏂囦欢锛屽浣欑殑閰嶇疆鍒犻櫎銆傘 + +[root@host-10-43-203-132 network-scripts]# **cat ifcfg-enp132s0f0** + +TYPE=Ethernet + +BOOTPROTO=static + +NAME=enp132s0f0 + +DEVICE="enp132s0f0" + +ONBOOT="yes" + +MASTER=bond0 + +SLAVE=yes + +[root@host-10-43-203-132 network-scripts]# **cat ifcfg-enp132s0f1** + +TYPE=Ethernet + +BOOTPROTO=static + +NAME=enp132s0f1 + +DEVICE="enp132s0f1" + +ONBOOT="yes" + +MASTER=bond0 + +SLAVE=yes + +[root@host-10-43-203-132 network-scripts]# **cat ifcfg-bond0** + +BOOTPROTO="static" + +ONBOOT="yes" + +DEVICE="bond0" + +BONDING_OPTS="miimon=100 mode=1" + +[root@host-10-43-203-132 network-scripts]# + +[root@host-10-43-203-132 network-scripts]# **cat ifcfg-bond0.161** + +OTPROTO="static" + +ONBOOT="yes" + +DEVICE="bond0.161" + +IPADDR="162.161.1.132 + +NETMASK="255.255.255.0" + +VLAN=yes + +[root@host-10-43-203-132 network-scripts]# **cat ifcfg-bond0.162** + +OTPROTO="static" + +ONBOOT="yes" + +DEVICE="bond0.162" + +IPADDR="162.162.1.132" + +NETMASK="255.255.255.0" + +VLAN=yes + +[root@host-10-43-203-132 network-scripts]# + + + +2銆乪np129s0f0缃戝彛涓婇厤缃湴鍧鍜孷LAN160鎺ュ彛 + +鍦ㄦ湇鍔″櫒涓婃墽琛宨p link add link enp129s0f0 name enp129s0f0.160 type vlan id 160鍛戒护澧炲姞VLAN160瀛愭帴鍙o紝鐒跺悗淇敼浠ヤ笅閰嶇疆鏂囦欢銆 + +[root@host-10-43-203-132 network-scripts]# **ip link add link enp129s0f0 name enp129s0f0.160 type vlan id 160** + +[root@host-10-43-203-132 network-scripts]# **cat ifcfg-enp129s0f0** + +HWADDR=4C:09:B4:B1:C1:F0 + +TYPE=Ethernet + +BOOTPROTO=static + +NAME=enp129s0f0 + +UUID=56ae2b62-8826-4c11-9a89-290e7ca67071 + +DEVICE="enp129s0f0" + +ONBOOT="yes" + +IPADDR=10.43.203.132 + +NETMASK=255.255.254.0 + +GATEWAY=10.43.202.1 + + [root@host-10-43-203-132 network-scripts]# **cat ifcfg-enp129s0f0.160** + +OTPROTO="static" + +ONBOOT="yes" + +DEVICE="enp129s0f0.160" + +IPADDR="162.160.1.132" + +NETMASK="255.255.255.0" + +VLAN=yes + +鏈緥涓凡缁忚缃ぇ缃戠綉鍏10.43.202.1锛屽鏋滄病鏈夎缃綉鍏筹紝闇瑕佽缃竴涓嬬綉鍏,鍚﹀垯瀹夎TECS浼氬け璐ワ紱 + +璁剧疆缃戝叧鍚庢墽琛宻ervice network restart鐢熸晥閰嶇疆锛岄噸鍚箣鍚巒etwork鏈嶅姟姝e父锛宨fconfig鏌ョ湅閰嶇疆鐨勫湴鍧鍧囧凡鐢熸晥銆 + + + +###3.3 杩愯鍦―ocker涓殑daisy瀹夎 + +鎵ц锛歞aisy_docker_init.sh input1 input2 input3 input4 input5 input6 鏉ヨ捣daisy鍜寁director瀹瑰櫒 + +鍏ュ弬鐨勫惈涔夛細 + +input1 锛歞aisy閲岄潰dhcp鐨勭綉鍙 + +input2 锛歞aisy绠$悊闈p + +input3 锛欼CT缁戝畾鐨勫湴鍧 + +input4 锛歞irector鐨刵ginx瀵瑰鏆撮湶鐨処P锛岀敤鏉ユ浛鎹ndpoints涓殑openstackIP锛坉irector鎻愪緵缁檔fvo璁块棶鐨刬p鍦板潃--------nfvo璁块棶vim鐢級 + +input5 锛 nvfo鐨処P锛孷IM涓诲姩nvfo涓婃姤PM鏁版嵁鐢ㄧ殑 + +input6 : nvfo鐨勭鍙o紝VIM涓诲姩nvfo涓婃姤PM鏁版嵁鐢ㄧ殑 + + + + 鏈枃涓彧璧穌aisy瀹瑰櫒锛屾墍浠ュ彧闇濉啓input1 input2鍙傛暟鍗冲彲銆 鏍规嵁鍓嶉潰瑙勫垝锛宐ond0涓篸aisy閲岄潰dhcp鐨勭綉鍙o紝10.43.203.132涓篸aisy绠$悊鍙P銆 + +[root@host-10-43-203-132 home]# **daisy_docker_init.sh bond0 10.43.203.132** + +daisy docker config file: /var/lib/daisy/scripts/daisy.conf + +ln -s '/usr/lib/systemd/system/nfs-server.service' '/etc/systemd/system/nfs.target.wants/nfs-server.service' + +rsyslogd restart! + +vdirector config file not exist! + +creating daisy ... + +daisy-mysql + +daisy-mysql.backup + +WARNING: IPv4 forwarding is disabled. Networking will not work. + +ccda0709026a7a185f6cf9e28bae39436144d8dafc60684eb79d591572fbba4f + +VOL_pLP3o + +WARNING: IPv4 forwarding is disabled. Networking will not work. 
+ +"daisy" created + +[root@host-10-43-203-132 home]# **docker-manage ps** + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + +==afa9b42d4736 937cb3bd58a4 "/bin/sh -c /bin/sh" 8 seconds ago Up 7 seconds daisy== + +docker-manage ps鍛戒护鏌ョ湅鏈夊涓婇珮浜儴鍒嗕俊鎭椂锛岃〃绀篸aisy瀹瑰櫒宸茶繍琛屻 + +浣跨敤docker-manage enter daisy鍛戒护杩涜daisy瀹瑰櫒璁剧疆hosts銆 + +[root@host-10-43-203-132 home]# **docker-manage enter daisy** + +-bash-4.2#**cat /etc/hosts** + +127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 + +::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 + +==10.43.203.132 localhost== + + + +###3.4 鐧婚檰daisy鐨剋eb椤甸潰 + +Daisy瀹瑰櫒鍒涘缓鍚庣◢绛2鍒嗛挓锛岀瓑寰卍aisy鐩稿叧鏈嶅姟姝e父杩愯鍚庯紝鍗冲彲杩涘叆daisy鐨刣ashboard椤甸潰锛屽嵆http://10.43.203.132:18080/ 锛堝叿浣搃p灏辨槸鍦ㄨ捣daisy鐨勬椂鍊欒緭鍏ョ殑ip鍦板潃锛岀鍙e彿涓18080锛夛紝榛樿甯愭埛admin/keystone + +![image](image/daisylogin1.JPG) + + + +鑷虫daisy鏈嶅姟鍣ㄥ畨瑁呭畬姣曪紝濡傞渶閮ㄧ讲TECS涓绘満锛屽垯鍙互鍙傝冧笅涓绔 4 闆嗙兢閮ㄧ讲 + + + +##4 DAISY鐙珛閮ㄧ讲TECS绀轰緥 + +###4.1鐗堟湰涓婁紶 + +鐧诲綍daisy DASHBOARD鐣岄潰锛岃繘鍏ャ愮増鏈戦〉闈紝鐐瑰嚮銆愰夋嫨鏂囦欢銆戝垎鍒笂浼燨S鍜孴ECS鐗堟湰銆 + +![image](image/versionupload.JPG) + + + +涓婁紶瀹屾垚涔嬪悗锛屽彲浠ユ樉绀篒SO涓巄in鏂囦欢. + +![image](image/versionuploadOK.JPG) + + + +###4.2鍒涘缓闆嗙兢 + +鐧诲綍daisy DASHBOARD鐣岄潰杩涘叆銆愰泦缇ゃ戝垪琛紝绗竴娆$櫥褰曟椂闆嗙兢鏄┖鐨勶紝鐐瑰嚮钃濊壊閮ㄥ垎鍗冲彲杩涜闆嗙兢鍒涘缓. + +![image](image/clustercreate.JPG) + + + +####4.2.1鍩烘湰淇℃伅 + +闆嗙兢鍒涘缓鏃讹紝鍏堥渶瑕侀厤缃熀鏈俊鎭紝鍦ㄥ熀鏈俊鎭腑濉啓闆嗙兢鍚嶇О锛屾槸鍚﹀惎鐢―NS浠ュ強鎻忚堪淇℃伅锛屾弿杩颁俊鎭彲浠ヤ笉濉 + +![image](image/clustercreatebase.JPG) + + + +####4.2.2瑙掕壊閰嶇疆 + +1銆佹帶鍒惰妭鐐笻A閰嶇疆 + +![image](image/createroleha.JPG) + +Public娴姩IP闇瑕佷笌Public骞抽潰鍦ㄥ悓涓缃戞锛屽叾浠栨诞鍔↖P闇瑕佷笌绠$悊骞抽潰鍦ㄥ悓涓缃戞锛屾诞鍔↖P鍙互鎵嬪姩閰嶇疆锛屼篃鍙互閫夋嫨鑷姩鍒嗛厤锛屽鏋滆嚜鍔ㄥ垎閰嶏紝缃戠粶骞抽潰鐨勫湴鍧姹犱腑闇瑕佸鍒嗛厤4涓狪P鍦板潃鐢ㄤ簬鍒嗛厤鍚勬诞鍔↖P鍦板潃銆 + +NTP IP闇瑕佸~涓撴湁鏃堕挓鏈嶅姟鍣ㄥ湴鍧锛屽鏋滄病鏈夛紝鍙互濉獺A娴姩IP鍦板潃 + + + +2銆乬lance/DB/DBBackup/MongoDB鍚庣绫诲瀷閰嶇疆 + +![image](image/createhasharedisk.JPG) + +![image](image/createhasharedisk2.JPG) + +3銆乧inder鍚庣绫诲瀷閰嶇疆 + +鏍规嵁缁勭綉瑕佹眰KS3200 IPSAN纾侀樀鍋氫负cinder鍚庣锛宑inder鐐瑰嚮鏂板涓鏉″悗绔厤缃 + +![image](image/cinderbackend1.JPG) + + + +鏂板鍚庣閰嶇疆 + +![image](image/cinderbackendconf.JPG) + +鎸夌‘瀹氬悗鍗冲彲澧炲姞涓鏉inder鍚庣閰嶇疆銆 + +![image](image/cinderbackendok.JPG) + + + +4銆丩B娴姩IP閰嶇疆 + +![image](image/createlbconf.JPG) + + + +####4.2.3缃戠粶骞抽潰閰嶇疆 + +鍙傝锛.锛撶綉缁滆鍒 + +璇存槑锛氭湰缁勭綉涓璏ANAGEMENT涓嶴TORAGE骞抽潰鍚堜竴銆 + +鏍规嵁缁勭綉鍥惧仛濡備笅閰嶇疆锛 + +1銆丮ANAGEMENT缃戠粶骞抽潰閰嶇疆 + +![image](image/management.JPG) + +2銆丳UBLICAPI缃戠粶骞抽潰閰嶇疆 + +![image](image/public.JPG) + + + +3銆丏ATAPLANE缃戠粶骞抽潰閰嶇疆 + +![image](image/dataplane.JPG) + + + +4銆丼TORAGE缃戠粶骞抽潰閰嶇疆 + +![image](image/storage1.JPG) + +storage澧炲姞涓や釜瀛樺偍涓氬姟闈㈢綉缁滈厤缃 + +鐐瑰嚮澧炲姞缃戠粶骞抽潰鎸夐挳 + +![image](image/storage2.JPG) + +鍒嗗埆澧炲姞storage_data161, storage_data162缃戠粶骞抽潰閰嶇疆(姝ゅ鍙埅鍥161鐨勯厤缃繃绋) + +![image](image/storage3.JPG) + +![image](image/storage4.JPG) + +閰嶇疆瀹屾垚涔嬪悗鎸夊彸涓嬭鍒涘缓鎸夐挳鍗冲彲杩涜闆嗙兢鍒涘缓锛岄泦缇ゅ垱寤烘垚鍔熶箣鍚庤嚜鍔ㄨ烦杞埌闆嗙兢涓绘満閮ㄧ讲椤甸潰杩涜娣诲姞涓绘満銆 + + + +###4.3 闆嗙兢涓绘満閮ㄧ讲 + +####4.3.1 涓绘満鍙戠幇 + +涓绘満鍙戠幇鏈変袱绉嶆柟寮忥細 +涓绉嶆槸瀵规搷浣滅郴缁熷凡缁忓畨瑁呮垚鍔熺殑鏈嶅姟鍣ㄩ氳繃SSH鏂瑰紡鍙戠幇锛屽姝ゅdaisy鎵鍦ㄧ殑鏈嶅姟鍣紝鍥犱负鎿嶄綔绯荤粺宸茬粡瀹夎濂斤紝鍙渶瑕佸畨瑁匱ECS锛屽彲浠ラ氳繃SSH鏂瑰紡鍙戠幇锛 +杩樻湁涓绉嶆槸PXE鏂瑰紡鍙戠幇銆 + + + +#####4.3.1.1 SSH鍙戠幇瀹夸富鏈 + +鐧诲綍daisy闆嗙兢椤甸潰锛屽垱寤轰竴涓泦缇xy1锛岀劧鍚庤繘鍏ラ泦缇xy1锛岀偣鍑婚泦缇や富鏈洪儴缃诧紝鍗冲彲寮濮嬪彂鐜颁富鏈烘搷浣溿 + +![image](image/findhost.JPG) + +鐐瑰嚮鍙戠幇涓绘満锛岀劧鍚庨愪竴杈撳叆鐗╃悊鐩爣涓绘満鐨凷SH璁块棶鍦板潃10.43.203.132锛岀敤鎴峰悕鍜屽瘑鐮侊紙榛樿鏄痳oot/ossdbg1锛夛紝鐐瑰嚮鎻愪氦鍚庡彸渚у嵆鍙樉绀哄緟SSH鍙戠幇鐨勪富鏈哄垪琛 + +![image](image/sshfindhost.JPG) + +鐐瑰嚮寮濮嬪彂鐜帮紝鍙戠幇鎴愬姛涔嬪悗涓绘満鍒楄〃鍙互鏄剧ずSSH鍙戠幇鐨勪富鏈恒 + +![image](image/sshfindhostok.JPG) + + + +#####4.3.1.2 PXE鏂瑰紡鍙戠幇鍏朵粬涓绘満 + +PXE鍙戠幇棣栧厛瑕佷繚璇佷富鏈虹殑PXE缃戠粶鏄氱殑锛屽湪daisy鏈嶅姟鍣ㄤ笂鐢╥pmiping鍛戒护妫鏌ュ悇涓绘満鐨処PMI鏄惁鏄氱殑銆 +涓绘満鐨処PMI鍦板潃鍙傝3.2.1鑺備腑BMC IP鍦板潃鐨勪俊鎭 +[root@host-10-43-203-132 home]# **ipmiping 10.43.203.236** +ipmiping 10.43.203.236 
(10.43.203.236) +*response received from 10.43.203.236: rq_seq=44* + +鐧诲綍SMM锛屽皢鎵鏈夊緟瀹夎绯荤粺鐨勬湇鍔″櫒璁剧疆涓篜XE锛堢綉缁滐級鍚姩锛屽涓嬪浘 3銆4銆12妲戒负寰匬XE鍙戠幇鐨勪富鏈猴紝鐐光滄搷浣溾濅腑鐨勮缃紝鍒嗗埆灏嗗叾璁剧疆涓虹綉缁滃惎鍔ㄣ + +![image](image/pxefindhost1.JPG) + + + +閫氳繃鍗曟澘IPMI鍦板潃鍒嗗埆鐧诲綍鍏禟VM鏌ョ湅绯荤粺鍚姩鎯呭喌锛屽3妲絀PMI鍦板潃涓10.43.203.247锛 + +鍒欑櫥褰昲ttps://10.43.203.247锛岀敤鎴峰悕/瀵嗙爜榛樿涓簔teroot/superuser; 鍦 璁惧鎿嶄綔---鍗曟澘鎿嶄綔 涓鏈嶅姟鍣ㄨ缃噸鍚紝鐒跺悗鐐瑰嚮鍗曟澘鎿嶄綔鏃佽竟鐨 璁剧疆 鎸夐挳锛屾寜纭畾鍚庢湇鍔″櫒鍗冲彲閲嶅惎銆 + +![image](image/pxefindhost2.JPG) + + + +鐧诲綍鏈嶅姟鍣ㄧ殑KVM锛屾煡鐪嬪叾鑳芥甯搁噸鍚紝涓旈氳繃DHCP缃戝彛姝g‘鑾峰彇鍒板皬绯荤粺锛屽苟鑳芥甯歌繘鍏ュ皬绯荤粺銆 + +![image](image/pxefindhost3.JPG) + + + +鍥炲埌daisy椤甸潰杩涜鍙戠幇涓绘満锛屾甯稿嵆鍙甯告樉绀篜XE鍙戠幇鐨勪富鏈哄垪琛ㄣ + +![image](image/pxefindhostok.JPG) + +娉ㄦ剰锛氬鏋10鍒嗛挓鍐呮病鏈夊彂鐜颁富鏈猴紝闇瑕佹鏌XE缃戠粶鏄惁姝g‘銆 +PXE缃戠粶鐨勫嚑绉嶇粍缃戣鏄庯紙闇瑕佽ˉ鍏呯浉鍏崇粍缃戝浘鍜屾枃瀛楋級 +琚畨瑁呯殑鐩爣涓绘満浣跨敤鐨凱XE鍙e鏋滀娇鐢╒LAN闅旂锛岄渶瑕佷娇鐢╪ative鏂瑰紡銆 +1銆丳XE鍙d笉鍋欱OND鐨勬儏鍐 +2銆丳XE鍙e仛BOND鐨勬儏鍐 +涓涓富鏈哄彧鍏佽涓涓綉鍙e姞鍏XE鎵鍦ㄧ殑VLAN锛屾缃戠粶鐨勬姤鏂囦笉甯LAN锛屼娇鐢ㄧ殑鏄痭ative缃戠粶 +3銆佸悓涓鏈烘鍐呮湁澶氫釜PXE鏈嶅姟鍣ㄧ殑鎯呭喌銆 + +#####4.3.1.3 娣诲姞涓绘満 + +鍙戠幇鎴愬姛涔嬪悗閫変腑鎵鏈変富鏈猴紝鎸変笅涓姝ヤ細鑷姩灏嗕富鏈哄姞鍏ュ埌闆嗙兢涓紝骞惰繘鍏ュ垎閰嶈鑹查〉闈€ + +![image](image/addhost.JPG) + + + +#####4.3.1.4纾侀樀閰嶇疆 + +> 璇﹁闄勪欢2 KS3200 IPSAN纾侀樀閰嶇疆daiy瀹夎鎸囧.md + + + + + +####4.3.2鍒嗛厤瑙掕壊 + +Daisy鐨勪富鏈鸿鑹插寘鎷涓3绉嶏細 + + + +|鍚嶇О |鎻忚堪 |涓枃鎻忚堪| +|----|:----|:----| +|CONTROLLER_LB| Controller role锛宐ackup type is loadbalance| 鎺у埗瑙掕壊锛屽浠芥柟寮忎负LB| +|CONTROLLER_HA| Controller role锛宐ackup type is HA锛宎ctive/standby |鎺у埗瑙掕壊锛屽浠芥柟寮忎负HA| +|COMPUTER| Compute role| 璁$畻瑙掕壊| + + + +涓涓富鏈哄彲浠ユ壙杞戒竴绉嶈鑹诧紝涔熷彲浠ユ壙杞藉涓鑹诧紝瑙掕壊鐨勯厤缃紝鏈缁堝奖鍝嶈閮ㄧ讲涓绘満瀹夎鍝簺TECS缁勪欢鏈嶅姟銆傝鑹插拰TECS缁勪欢鐨勫搴斿叧绯籇aisy宸查粯璁ゅ畾涔夈傚涓嶆洿鏀圭敤鎴蜂笉闇瑕佸叧蹇冦 + +瀵逛簬鎺у埗鑺傜偣鍙互鏈塇A/LB涓や腑缁勫悎鐨勮鑹插垎閰嶆柟寮忥紝HA鐨勬柟寮忎负鎺у埗鑺傜偣蹇呴夎鑹诧紝LB涓烘帶鍒惰妭鐐瑰彲閫夎鑹诧紝浣滀负鎺у埗鑺傜偣鐨勪富鏈哄悓鏃朵篃鍙互鎷ユ湁COMPUTE瑙掕壊锛屼互鍚屾椂鍏峰璁$畻鑺傜偣鐨勫姛鑳姐傛帶鍒惰妭鐐规渶灏戣鏈変袱涓富鏈鸿妭鐐癸紝涓旇鑹蹭竴鑷淬傚浜庤绠楄妭鐐瑰彧鏈塁OMPUTE杩欎竴绉嶈鑹层 + +鏍规嵁缁勭綉瑙勫垝锛屽綋鍓4涓妭鐐圭殑瑙掕壊瑙勫垝涓猴細涓や釜鎺у埗鑺傜偣锛圚A+LB锛 +涓や釜璁$畻鑺傜偣锛圕OMPUTER 锛,閫氳繃鎷栨嫿鐨勬柟寮忕粰姣忎釜鐗╃悊涓绘満鍒嗛厤瑙掕壊銆 + +![image](image/addrole.JPG) + +鍒嗛厤瀹岃鑹蹭箣鍚庡嵆鍙寜涓嬩竴姝ヨ繘琛岀粦瀹氱綉鍙c + + + +####4.3.3缁戝畾缃戝彛 + +涓绘満瑙掕壊鍒嗛厤瀹屾垚涔嬪悗锛岀偣鍑讳笅涓姝ワ紝鍗冲彲杩涜缁戝畾缃戝彛鎿嶄綔銆 + +閫変腑鏌愪釜浣犺缁戝畾缃戝崱鐨勪富鏈猴紝鐐瑰嚮鍙充笂瑙掔殑缁戝畾缃戝彛鎸夐挳锛屾寜鐓ц嚜宸辩殑闇瑕佽繘琛岀墿鐞嗙綉鍙g殑缁戝畾銆 + +鏍规嵁鍓嶉潰缁勭綉瑙勫垝锛岄夋嫨enp132s0f0, enp132s0f1鍋歭inux缁戝畾锛岀粦瀹氬彛鍚嶇О涓篵ond0锛岀粦瀹氭ā寮忎负active-backup銆 + +![image](image/bond.JPG) + +鎸夌粦瀹氬悗锛屽嵆鍙樉绀虹粦瀹氭垚鍔燂紱鍐嶉夋嫨鍏朵粬涓绘満锛屽垎鍒仛缁戝畾銆 + +![image](image/bondok.JPG) + +####4.3.4缃戠粶鏄犲皠 + +缁戝畾瀹岀綉鍗″悗锛岀偣鍑讳笅涓姝ワ紝杩涘叆缃戠粶鏄犲皠閰嶇疆椤甸潰锛屽湪杩欓噷闇瑕佸皢鐗╃悊涓绘満鐨勭墿鐞嗙綉鍗★紙鎴栬呯粦瀹氬悗鐨勭綉鍗★級瀵瑰簲鍒颁箣鍓嶅湪闆嗙兢閲岄潰閰嶇疆鐨勭綉缁滃钩闈€ + +娉ㄦ剰锛氬浜庣鐞嗛潰浠ュ強瀛樺偍闈㈢浉鍏冲湴鍧鐨勫垎閰嶈鍒欑洰鍓嶆槸锛屾寜鐓у畬鎴愮綉缁滄槧灏勯厤缃殑椤哄簭浠庡湴鍧姹犲湴鍧涓粠灏忓埌澶ч『搴忓垎閰嶏紝鍥犳闇瑕佹寜鐓у垁鐗囩殑妲戒綅椤哄簭浠庡皬鍒板簳锛岄愪釜璁剧疆缃戠粶鏄犲皠銆 + +![image](image/mapnetcard.JPG) + + + +閫変腑鍏朵腑涓涓富鏈猴紝鐐瑰嚮銆愰厤缃綉缁滃钩闈€ + +![image](image/mapnetcard1.JPG) + +瀵逛簬璁$畻鑺傜偣杩橀渶瑕侀厤缃畃hysnet1 缃戠粶锛岄渶瑕侀夋嫨铏氭嫙浜ゆ崲鏈虹被鍨嬶紝鏈粍缃戜腑浣跨敤OVS缃戠粶銆 + +![image](image/mapnetcard2.JPG) + +渚濇鍒嗛厤濂芥墍鏈変富鏈虹殑缃戠粶骞抽潰骞朵繚瀛樹慨鏀 + +![image](image/mapnetcardok.JPG) + + + +####4.3.5涓绘満閰嶇疆 + +缃戠粶鏄犲皠閰嶇疆瀹屾垚涔嬪悗锛岀偣鍑讳笅涓姝ュ紑濮嬩富鏈洪厤缃 + +閫夋嫨涓涓富鏈猴紝鐐瑰嚮銆愪富鏈洪厤缃戯紝鍗冲彲杩涜閰嶇疆 + +![image](image/hostconf.JPG) + +閰嶇疆鍙傛暟 + +飦 鎿嶄綔绯荤粺鐗堟湰锛氱偣鍑讳笅鎷夋閫夋嫨涔嬪墠涓婁紶鐨凮S鐗堟湰 + +飦 绯荤粺鐩橈細姝e父鎯呭喌涓嬪~鍐檚da + +飦 绯荤粺鐩樺ぇ灏忥細榛樿50G锛屽缓璁牴鎹湇鍔″櫒鐨勫疄闄呯‖鐩樺ぇ灏忓~鍐欙紝姝ゅ垎閰100G銆 + +飦 IPMI User鍜孭assword锛氱墿鐞嗕富鏈虹殑IPMI鐨勭敤鎴峰拰瀵嗙爜锛孍9000鍒鐗囬粯璁よ缃负zteroot/superuser + +鍙互鍦7妲界殑daisy鏈嶅姟鍣ㄤ笂锛岄氳繃鍛戒护瀵瑰悇鍒鐗囩殑IPMI鐢ㄦ埛杩涜楠岃瘉銆 + +鍦ㄤ富鏈---涓绘満鍒楄〃椤甸潰鍙互鏌ヨ鍒板悇涓湇鍔″櫒鐨処PMI鍦板潃锛岀劧鍚庣敤濡備笅鍛戒护杩涜楠岃瘉銆 + +![image](image/hostlist.JPG) + + [root@host-10-43-203-132 /(daisy_admin)]$ ipmitool -I lanplus -H 10.43.203.26 -U zteroot -P superuser chassis power status + +Chassis Power is on + +[root@host-10-43-203-132 /(daisy_admin)]$ ipmitool -I lanplus -H 10.43.203.236 -U zteroot -P superuser chassis power status + +Chassis Power is on + +[root@host-10-43-203-132 
/(daisy_admin)]$ ipmitool -I lanplus -H 10.43.203.247 -U zteroot -P superuser chassis power status + +Chassis Power is on + +楠岃瘉鎴愬姛锛屽氨鍙互閫氳繃ipmi锛岃缃垁鐗囦粠PXE鎴杁isk鏂瑰紡鍚姩锛屽苟鑷姩瀹夎绯荤粺銆 + +飦 宸ㄩ〉澶у皬锛氬湪physnet1缃戠粶浣跨敤DVS浜ゆ崲鏃堕渶瑕侀厤缃法椤靛弬鏁帮紝榛樿1G锛岃繖涓弬鏁板彧鍦ㄨ鑹蹭负璁$畻鑺傜偣鐨勪富鏈洪厤缃 + +飦 宸ㄩ〉涓暟锛氬湪physnet1缃戠粶浣跨敤DVS浜ゆ崲鏃堕渶瑕侀厤缃法椤靛弬鏁, 杩欎釜鍙傛暟鍙湪瑙掕壊涓鸿绠楄妭鐐圭殑涓绘満閰嶇疆锛岃鍒掕姹備负128G鍐呭瓨璁剧疆108锛64G鍐呭瓨璁剧疆44锛涙澶勫洜涓轰娇鐢ㄧ殑鏄疧VS缃戠粶锛屾墍浠ュ彲浠ヤ笉閰嶇疆銆 + +飦 琚殧绂荤殑鏍革細鍦╬hysnet1缃戠粶浣跨敤DVS浜ゆ崲鏃堕渶瑕侀厤缃法椤靛弬鏁, 杩欎釜鍙傛暟鍙湪瑙掕壊涓鸿绠楄妭鐐圭殑涓绘満閰嶇疆銆 + + + +鎵鏈変富鏈洪厤缃畬鎴愪箣鍚庡嵆鍙寜銆愰儴缃层戣繘琛岃嚜鍔ㄩ儴缃 + +![image](image/hostconfok.JPG) + + + +####4.3.6閮ㄧ讲 + +鎸夐儴缃叉寜閽箣鍚庯紝鑷姩璺宠浆鍒伴泦缇ら儴缃蹭俊鎭〉闈㈤氳繃杩涘害鏉℃彁绀哄彲浠ュ疄鏃舵樉绀烘暣涓儴缃茶繘绋嬨傚畨瑁呰繃绋嬩腑鍏堝畨瑁匫S鍐嶅畨瑁匱ECS鐗堟湰銆 + +![image](image/deploy1.JPG) + +OS瀹夎鎴愬姛涔嬪悗寮濮嬪畨瑁匱ECS鐗堟湰 + +![image](image/deploy2.JPG) + +![image](image/deployok.JPG) + +鐘舵佷腑鏄剧ずTECS瀹夎鎴愬姛锛岃繘搴︿负100%鏃惰〃绀篢ECS瀹夎鎴愬姛銆 + + + +##5 Daisy WEB鐣岄潰浠嬬粛 + +###5.1 闆嗙兢 + +####5.1.1 鎴戠殑闆嗙兢鍒楄〃 + + + +####5.1.2 鍒涘缓闆嗙兢 + + + +#####5.1.2.1 鍩烘湰淇℃伅 + + + +#####5.1.2.2 瑙掕壊閰嶇疆 + + + +#####5.1.2.3 缃戠粶骞抽潰閰嶇疆 + + + +####5.1.3闆嗙兢閮ㄧ讲淇℃伅 + +#####5.1.3.1 闆嗙兢涓绘満閮ㄧ讲 + + + +######5.1.3.1.1 娣诲姞涓绘満 + + +1. SSH鍙戠幇 + +2. PXE鍙戠幇 + +3. 娣诲姞涓绘満 + + + +######5.1.3.1.2 鍒嗛厤瑙掕壊 + + + +######5.1.3.1.3 缁戝畾缃戝彛 + + + +######5.1.3.1.4 缃戠粶鏄犲皠 + + + +######5.1.3.1.5 涓绘満閰嶇疆 + + + +#####5.1.3.2 閲嶇疆涓绘満 + +闄ゅ涓绘満涔嬪锛屾墍鏈夊叾浠栦富鏈哄潎鍙繘琛岄噸缃富鏈猴紱 + +閫夋嫨涓鍙颁富鏈猴紝鐒跺悗鐐瑰嚮閲嶇疆涓绘満锛屼富鏈洪噸缃箣鍚庡彲浠ユ搷浣滅郴缁熷彲浠ラ噸鏂拌繘琛屽畨瑁匫S銆 + + + +#####5.1.3.3 鐩存帴閮ㄧ讲 + +鐩存帴閮ㄧ讲鍙互灏嗕富鏈虹殑绯荤粺杩涜閲嶆柊瀹夎鐨勬搷浣 + + + +#####5.1.3.4 绉诲嚭闆嗙兢 + +绉诲嚭闆嗙兢鍙互灏嗕富鏈轰粠褰撳墠闆嗙兢绉诲嚭锛屾鏃惰繖鍙颁富鏈哄皢閲嶆柊杩涘叆涓绘満鍙戠幇鍒楄〃锛岄渶瑕佽繘琛岄噸鏂伴厤缃紝鍔犲叆鍏朵粬闆嗙兢銆 + + + +#####5.1.3.5 鐢熸垚涓绘満妯℃澘 + + + +####5.1.4 闆嗙兢鎿嶄綔 + +#####5.1.4.1 淇敼闆嗙兢 + + + +#####5.1.4.2 鍗囩骇闆嗙兢 +Daisy鏀寔宸插畨瑁呯殑闆嗙兢鐨勫崌绾у姛鑳姐傚湪鍗囩骇鍓嶏紝闇瑕佸皢鍗囩骇鍖呬笂浼犲埌/var/lib/daisy/tecs/(鐗瑰埆璇存槑锛岃鐩綍涓嬪彧鑳藉瓨鏀句竴浠芥渶鏂扮殑TFG鍜孴ECS鐨刡in鍖)锛屽彲浠ラ氳繃5.1鑺傚疄鐜皐eb椤甸潰涓婁紶鐗堟湰銆備笂浼犵殑鐗堟湰浼氳嚜鍔ㄦ浛鎹㈠浐瀹氱洰褰曚笅鐨勫師鏈夌増鏈 + 鐧诲綍web锛岃繘鍏ラ泦缇ら〉闈紝榧犳爣绉诲姩鍒板緟鍗囩骇鐨勯泦缇わ紝浼氭樉绀轰竴浜涘浘鏍囷紝鐐瑰嚮鍚戜笂绠ご鐨勫浘鏍囷紝浼氳烦鍑虹‘璁ゅ璇濇锛岀偣鍑烩滅‘璁も濓紝鍗宠嚜鍔ㄨ烦杞嚦闆嗙兢杩涘害鐣岄潰锛屾樉绀哄崌绾ц繘搴︺ + +#####5.1.4.3 鍗歌浇TECS +Daisy鏀寔宸插畨瑁呯殑闆嗙兢鍦ㄩ儴缃插悗锛屽嵏杞介泦缇や腑鐨勫垁鐗囩殑tecs杞欢銆傝繘鍏ラ泦缇ら〉闈紝榧犳爣绉诲姩鍒板緟鍗歌浇鐨勯泦缇わ紝浼氭樉绀轰竴浜涘浘鏍囷紝鐐瑰嚮鍚戜笅绠ご鐨勫浘鏍囷紝浼氳烦鍑虹‘璁ゅ璇濇锛岀偣鍑烩滅‘璁も濓紝鍗宠嚜鍔ㄨ烦杞嚦闆嗙兢杩涘害鐣岄潰锛屾樉绀哄嵏杞借繘搴︺ + +#####5.1.4.4 鐢熸垚闆嗙兢妯℃澘 + + + + + +###5.2 鐗堟湰 + +####5.2.1 涓婁紶OS鍜孴ECS鐗堟湰 + +涓婁紶OS鍜孴ECS鐗堟湰鏈変袱绉嶆柟寮忥細涓绉嶆槸閫氳繃docker灏嗙増鏈笂浼犲埌daisy鏈嶅姟鍣ㄧ殑/var/lib/daisy/tecs鐩綍锛屼竴绉嶆槸浠巇ashboard涓婁紶銆 + +1銆侀氳繃docker涓婁紶鐗堟湰 + +鎷疯礉鐗堟湰鍒扮涓鍙版湇鍔″櫒锛坉aisy鐨勫涓绘満锛/home/鐩綍涓嬨 + +鎵ц鍛戒护锛歞ocker cp Mimosa-V02.16.10.P6B1I32-CGSL_VPLAT-5.1-x86_64-KVM.iso daisy:/var/lib/daisy/tecs + +鎵ц鍛戒护锛歞ocker cp ZXTECS_V02.16.10_P6B1_I151_installtecs_el7_noarch.bin daisy:/var/lib/daisy/tecs + + + +2銆侀氳繃dashboard涓婁紶 + +杩涘叆web鐣岄潰鍚庯紝鐐瑰嚮宸︿笂瑙掔増鏈紝鐐瑰嚮娴忚锛岄夋嫨闇瑕佷笂浼犵殑OS鍜孴ECS鐗堟湰锛岃繖閲屼笂浼犵殑OS鐗堟湰涓猴細Mimosa-V02.16.10.P6B1I22-CGSL_VPLAT-5.1-x86_64-KVM.iso锛屼笂浼犵殑TECS鐗堟湰涓篫XTECS_V02.16.10_P6B1_I102_installtecs_el7_noarch.bin锛屼笂浼犲悗锛屽彲浠ュ湪鐗堟湰椤甸潰鐪嬪埌涓婁紶鍚庣殑鐗堟湰鏂囦欢銆 + + + +###5.3 涓绘満 + +####5.3.1 涓绘満鍒楄〃 + + + +####5.3.2 鍔犲叆闆嗙兢 + + + +####5.3.3 绉诲嚭闆嗙兢 + + + + + +###5.4 妯℃澘 + + + +###5.5 绯荤粺 + +####5.5.1 澶囦唤鍜屾仮澶 + +#####5.5.1.1 澶囦唤 + +#####5.5.1.2 鎭㈠ + + + +####5.5.2 绯荤粺閰嶇疆 + +##6 daisy瀹夎鍗囩骇鍗歌浇鎿嶄綔鎸囧 + +##7 闄勪欢 + +###7.1 闄勪欢1 U鐩樼殑鍒朵綔 + +###7.3 闄勪欢2 KS3200 IPSAN纾侀樀閰嶇疆 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/code/daisy/doc/source/guide/image/Thumbs.db b/code/daisy/doc/source/guide/image/Thumbs.db new file mode 100755 index 00000000..2ff3be8e Binary files /dev/null and b/code/daisy/doc/source/guide/image/Thumbs.db differ diff --git a/code/daisy/doc/source/guide/image/addhost.JPG 
b/code/daisy/doc/source/guide/image/addhost.JPG new file mode 100755 index 00000000..a9178d3b Binary files /dev/null and b/code/daisy/doc/source/guide/image/addhost.JPG differ diff --git a/code/daisy/doc/source/guide/image/addrole.JPG b/code/daisy/doc/source/guide/image/addrole.JPG new file mode 100755 index 00000000..30d05be8 Binary files /dev/null and b/code/daisy/doc/source/guide/image/addrole.JPG differ diff --git a/code/daisy/doc/source/guide/image/bmcip.JPG b/code/daisy/doc/source/guide/image/bmcip.JPG new file mode 100755 index 00000000..ba4001ee Binary files /dev/null and b/code/daisy/doc/source/guide/image/bmcip.JPG differ diff --git a/code/daisy/doc/source/guide/image/bmcipcheck.JPG b/code/daisy/doc/source/guide/image/bmcipcheck.JPG new file mode 100755 index 00000000..a65b2a69 Binary files /dev/null and b/code/daisy/doc/source/guide/image/bmcipcheck.JPG differ diff --git a/code/daisy/doc/source/guide/image/bond.JPG b/code/daisy/doc/source/guide/image/bond.JPG new file mode 100755 index 00000000..9d742210 Binary files /dev/null and b/code/daisy/doc/source/guide/image/bond.JPG differ diff --git a/code/daisy/doc/source/guide/image/bondok.JPG b/code/daisy/doc/source/guide/image/bondok.JPG new file mode 100755 index 00000000..a51476d0 Binary files /dev/null and b/code/daisy/doc/source/guide/image/bondok.JPG differ diff --git a/code/daisy/doc/source/guide/image/cinderbackend1.JPG b/code/daisy/doc/source/guide/image/cinderbackend1.JPG new file mode 100755 index 00000000..46a2701b Binary files /dev/null and b/code/daisy/doc/source/guide/image/cinderbackend1.JPG differ diff --git a/code/daisy/doc/source/guide/image/cinderbackendconf.JPG b/code/daisy/doc/source/guide/image/cinderbackendconf.JPG new file mode 100755 index 00000000..789d4d55 Binary files /dev/null and b/code/daisy/doc/source/guide/image/cinderbackendconf.JPG differ diff --git a/code/daisy/doc/source/guide/image/cinderbackendok.JPG b/code/daisy/doc/source/guide/image/cinderbackendok.JPG new file mode 100755 index 00000000..210a9fe6 Binary files /dev/null and b/code/daisy/doc/source/guide/image/cinderbackendok.JPG differ diff --git a/code/daisy/doc/source/guide/image/clustercreate.JPG b/code/daisy/doc/source/guide/image/clustercreate.JPG new file mode 100755 index 00000000..8854eef5 Binary files /dev/null and b/code/daisy/doc/source/guide/image/clustercreate.JPG differ diff --git a/code/daisy/doc/source/guide/image/clustercreatebase.JPG b/code/daisy/doc/source/guide/image/clustercreatebase.JPG new file mode 100755 index 00000000..df881d70 Binary files /dev/null and b/code/daisy/doc/source/guide/image/clustercreatebase.JPG differ diff --git a/code/daisy/doc/source/guide/image/createhasharedisk.JPG b/code/daisy/doc/source/guide/image/createhasharedisk.JPG new file mode 100755 index 00000000..ad5c815d Binary files /dev/null and b/code/daisy/doc/source/guide/image/createhasharedisk.JPG differ diff --git a/code/daisy/doc/source/guide/image/createhasharedisk2.JPG b/code/daisy/doc/source/guide/image/createhasharedisk2.JPG new file mode 100755 index 00000000..6a09edd7 Binary files /dev/null and b/code/daisy/doc/source/guide/image/createhasharedisk2.JPG differ diff --git a/code/daisy/doc/source/guide/image/createlbconf.JPG b/code/daisy/doc/source/guide/image/createlbconf.JPG new file mode 100755 index 00000000..c769a0c2 Binary files /dev/null and b/code/daisy/doc/source/guide/image/createlbconf.JPG differ diff --git a/code/daisy/doc/source/guide/image/createroleha.JPG b/code/daisy/doc/source/guide/image/createroleha.JPG new file mode 
100755 index 00000000..2f07e66c Binary files /dev/null and b/code/daisy/doc/source/guide/image/createroleha.JPG differ diff --git a/code/daisy/doc/source/guide/image/daisylogin1.JPG b/code/daisy/doc/source/guide/image/daisylogin1.JPG new file mode 100755 index 00000000..3b731455 Binary files /dev/null and b/code/daisy/doc/source/guide/image/daisylogin1.JPG differ diff --git a/code/daisy/doc/source/guide/image/daisynetconf.JPG b/code/daisy/doc/source/guide/image/daisynetconf.JPG new file mode 100755 index 00000000..383b7bf0 Binary files /dev/null and b/code/daisy/doc/source/guide/image/daisynetconf.JPG differ diff --git a/code/daisy/doc/source/guide/image/dataplane.JPG b/code/daisy/doc/source/guide/image/dataplane.JPG new file mode 100755 index 00000000..15940cc5 Binary files /dev/null and b/code/daisy/doc/source/guide/image/dataplane.JPG differ diff --git a/code/daisy/doc/source/guide/image/deploy1.JPG b/code/daisy/doc/source/guide/image/deploy1.JPG new file mode 100755 index 00000000..0c81cca4 Binary files /dev/null and b/code/daisy/doc/source/guide/image/deploy1.JPG differ diff --git a/code/daisy/doc/source/guide/image/deploy2.JPG b/code/daisy/doc/source/guide/image/deploy2.JPG new file mode 100755 index 00000000..7143f7bc Binary files /dev/null and b/code/daisy/doc/source/guide/image/deploy2.JPG differ diff --git a/code/daisy/doc/source/guide/image/deployok.JPG b/code/daisy/doc/source/guide/image/deployok.JPG new file mode 100755 index 00000000..3a02b56f Binary files /dev/null and b/code/daisy/doc/source/guide/image/deployok.JPG differ diff --git a/code/daisy/doc/source/guide/image/findhost.JPG b/code/daisy/doc/source/guide/image/findhost.JPG new file mode 100755 index 00000000..9d14d6f2 Binary files /dev/null and b/code/daisy/doc/source/guide/image/findhost.JPG differ diff --git a/code/daisy/doc/source/guide/image/hostconf.JPG b/code/daisy/doc/source/guide/image/hostconf.JPG new file mode 100755 index 00000000..7742c2bb Binary files /dev/null and b/code/daisy/doc/source/guide/image/hostconf.JPG differ diff --git a/code/daisy/doc/source/guide/image/hostconfok.JPG b/code/daisy/doc/source/guide/image/hostconfok.JPG new file mode 100755 index 00000000..b3c04b11 Binary files /dev/null and b/code/daisy/doc/source/guide/image/hostconfok.JPG differ diff --git a/code/daisy/doc/source/guide/image/hostlist.JPG b/code/daisy/doc/source/guide/image/hostlist.JPG new file mode 100755 index 00000000..caaea73b Binary files /dev/null and b/code/daisy/doc/source/guide/image/hostlist.JPG differ diff --git a/code/daisy/doc/source/guide/image/installos.JPG b/code/daisy/doc/source/guide/image/installos.JPG new file mode 100755 index 00000000..51bdae26 Binary files /dev/null and b/code/daisy/doc/source/guide/image/installos.JPG differ diff --git a/code/daisy/doc/source/guide/image/management.JPG b/code/daisy/doc/source/guide/image/management.JPG new file mode 100755 index 00000000..67e7e494 Binary files /dev/null and b/code/daisy/doc/source/guide/image/management.JPG differ diff --git a/code/daisy/doc/source/guide/image/mapnetcard.JPG b/code/daisy/doc/source/guide/image/mapnetcard.JPG new file mode 100755 index 00000000..f8d55286 Binary files /dev/null and b/code/daisy/doc/source/guide/image/mapnetcard.JPG differ diff --git a/code/daisy/doc/source/guide/image/mapnetcard1.JPG b/code/daisy/doc/source/guide/image/mapnetcard1.JPG new file mode 100755 index 00000000..ab55aa9f Binary files /dev/null and b/code/daisy/doc/source/guide/image/mapnetcard1.JPG differ diff --git 
a/code/daisy/doc/source/guide/image/mapnetcard2.JPG b/code/daisy/doc/source/guide/image/mapnetcard2.JPG new file mode 100755 index 00000000..a605de8b Binary files /dev/null and b/code/daisy/doc/source/guide/image/mapnetcard2.JPG differ diff --git a/code/daisy/doc/source/guide/image/mapnetcardok.JPG b/code/daisy/doc/source/guide/image/mapnetcardok.JPG new file mode 100755 index 00000000..8ec1331f Binary files /dev/null and b/code/daisy/doc/source/guide/image/mapnetcardok.JPG differ diff --git a/code/daisy/doc/source/guide/image/netconf.JPG b/code/daisy/doc/source/guide/image/netconf.JPG new file mode 100755 index 00000000..155d9aaa Binary files /dev/null and b/code/daisy/doc/source/guide/image/netconf.JPG differ diff --git a/code/daisy/doc/source/guide/image/netplan.JPG b/code/daisy/doc/source/guide/image/netplan.JPG new file mode 100755 index 00000000..e2553d03 Binary files /dev/null and b/code/daisy/doc/source/guide/image/netplan.JPG differ diff --git a/code/daisy/doc/source/guide/image/public.JPG b/code/daisy/doc/source/guide/image/public.JPG new file mode 100755 index 00000000..2e671b62 Binary files /dev/null and b/code/daisy/doc/source/guide/image/public.JPG differ diff --git a/code/daisy/doc/source/guide/image/pxefindhost1.JPG b/code/daisy/doc/source/guide/image/pxefindhost1.JPG new file mode 100755 index 00000000..ee076070 Binary files /dev/null and b/code/daisy/doc/source/guide/image/pxefindhost1.JPG differ diff --git a/code/daisy/doc/source/guide/image/pxefindhost2.JPG b/code/daisy/doc/source/guide/image/pxefindhost2.JPG new file mode 100755 index 00000000..84ed8ee9 Binary files /dev/null and b/code/daisy/doc/source/guide/image/pxefindhost2.JPG differ diff --git a/code/daisy/doc/source/guide/image/pxefindhost3.JPG b/code/daisy/doc/source/guide/image/pxefindhost3.JPG new file mode 100755 index 00000000..75f5b61a Binary files /dev/null and b/code/daisy/doc/source/guide/image/pxefindhost3.JPG differ diff --git a/code/daisy/doc/source/guide/image/pxefindhostok.JPG b/code/daisy/doc/source/guide/image/pxefindhostok.JPG new file mode 100755 index 00000000..c649f634 Binary files /dev/null and b/code/daisy/doc/source/guide/image/pxefindhostok.JPG differ diff --git a/code/daisy/doc/source/guide/image/setusb.JPG b/code/daisy/doc/source/guide/image/setusb.JPG new file mode 100755 index 00000000..f062aa81 Binary files /dev/null and b/code/daisy/doc/source/guide/image/setusb.JPG differ diff --git a/code/daisy/doc/source/guide/image/smmlogin1.JPG b/code/daisy/doc/source/guide/image/smmlogin1.JPG new file mode 100755 index 00000000..25e69a23 Binary files /dev/null and b/code/daisy/doc/source/guide/image/smmlogin1.JPG differ diff --git a/code/daisy/doc/source/guide/image/smmlogin2.JPG b/code/daisy/doc/source/guide/image/smmlogin2.JPG new file mode 100755 index 00000000..7b967d50 Binary files /dev/null and b/code/daisy/doc/source/guide/image/smmlogin2.JPG differ diff --git a/code/daisy/doc/source/guide/image/sshfindhost.JPG b/code/daisy/doc/source/guide/image/sshfindhost.JPG new file mode 100755 index 00000000..14b2a922 Binary files /dev/null and b/code/daisy/doc/source/guide/image/sshfindhost.JPG differ diff --git a/code/daisy/doc/source/guide/image/sshfindhostok.JPG b/code/daisy/doc/source/guide/image/sshfindhostok.JPG new file mode 100755 index 00000000..c8829549 Binary files /dev/null and b/code/daisy/doc/source/guide/image/sshfindhostok.JPG differ diff --git a/code/daisy/doc/source/guide/image/storage1.JPG b/code/daisy/doc/source/guide/image/storage1.JPG new file mode 100755 index 
00000000..7cd2974d Binary files /dev/null and b/code/daisy/doc/source/guide/image/storage1.JPG differ diff --git a/code/daisy/doc/source/guide/image/storage2.JPG b/code/daisy/doc/source/guide/image/storage2.JPG new file mode 100755 index 00000000..97131b8e Binary files /dev/null and b/code/daisy/doc/source/guide/image/storage2.JPG differ diff --git a/code/daisy/doc/source/guide/image/storage3.JPG b/code/daisy/doc/source/guide/image/storage3.JPG new file mode 100755 index 00000000..6e6c9247 Binary files /dev/null and b/code/daisy/doc/source/guide/image/storage3.JPG differ diff --git a/code/daisy/doc/source/guide/image/storage4.JPG b/code/daisy/doc/source/guide/image/storage4.JPG new file mode 100755 index 00000000..d5068f8f Binary files /dev/null and b/code/daisy/doc/source/guide/image/storage4.JPG differ diff --git a/code/daisy/doc/source/guide/image/versionupload.JPG b/code/daisy/doc/source/guide/image/versionupload.JPG new file mode 100755 index 00000000..eea3ceac Binary files /dev/null and b/code/daisy/doc/source/guide/image/versionupload.JPG differ diff --git a/code/daisy/doc/source/guide/image/versionuploadOK.JPG b/code/daisy/doc/source/guide/image/versionuploadOK.JPG new file mode 100755 index 00000000..ce2fe658 Binary files /dev/null and b/code/daisy/doc/source/guide/image/versionuploadOK.JPG differ diff --git a/code/daisy/etc/daisy-api.conf b/code/daisy/etc/daisy-api.conf index c3630d93..e7b29838 100755 --- a/code/daisy/etc/daisy-api.conf +++ b/code/daisy/etc/daisy-api.conf @@ -39,7 +39,7 @@ data_api = daisy.db.sqlalchemy.api # The number of child process workers that will be # created to service API requests. The default will be # equal to the number of CPUs available. (integer value) -#workers = 4 +workers = 4 # Maximum line size of message headers to be accepted. # max_header_line may need to be increased when using large tokens diff --git a/code/daisy/etc/daisy-registry.conf b/code/daisy/etc/daisy-registry.conf index 4cfe2adb..f34c1649 100755 --- a/code/daisy/etc/daisy-registry.conf +++ b/code/daisy/etc/daisy-registry.conf @@ -33,7 +33,7 @@ backlog = 4096 # The number of child process workers that will be # created to service Registry requests. The default will be # equal to the number of CPUs available. (integer value) -#workers = None +workers = 4 # Enable Registry API versions individually or simultaneously #enable_v1_registry = True diff --git a/code/daisy/test-requirements.txt b/code/daisy/test-requirements.txt index 217038a5..237b9f3b 100755 --- a/code/daisy/test-requirements.txt +++ b/code/daisy/test-requirements.txt @@ -32,3 +32,8 @@ oslosphinx>=2.5.0,<2.6.0 # Apache-2.0 # Glance catalog index elasticsearch>=1.3.0 +python-daisyclient +python-ironicclient +ironic +mox + diff --git a/code/daisy/tools/install_venv.py b/code/daisy/tools/install_venv.py index f523f3e1..76030ba6 100755 --- a/code/daisy/tools/install_venv.py +++ b/code/daisy/tools/install_venv.py @@ -18,7 +18,7 @@ # under the License. """ -Installation script for Glance's development virtualenv +Installation script for Daisy's development virtualenv """ from __future__ import print_function @@ -31,12 +31,12 @@ import install_venv_common as install_venv # noqa def print_help(): help = """ - Glance development environment setup is complete. + Daisy development environment setup is complete. - Glance development uses virtualenv to track and manage Python dependencies + Daisy development uses virtualenv to track and manage Python dependencies while in development and testing. 
- To activate the Glance virtualenv for the extent of your current shell session + To activate the Daisy virtualenv for the extent of your current shell session you can run: $ source .venv/bin/activate @@ -57,7 +57,7 @@ def main(argv): pip_requires = os.path.join(root, 'requirements.txt') test_requires = os.path.join(root, 'test-requirements.txt') py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) - project = 'Glance' + project = 'Daisy' install = install_venv.InstallVenv(root, venv, pip_requires, test_requires, py_version, project) options = install.parse_args(argv) diff --git a/code/daisy/tox.ini b/code/daisy/tox.ini index 9b03bf30..0df567e7 100755 --- a/code/daisy/tox.ini +++ b/code/daisy/tox.ini @@ -49,8 +49,8 @@ commands = python setup.py build_sphinx # H404 multi line docstring should start with a summary # H405 multi line docstring summary not separated with an empty line # H904 Wrap long lines in parentheses instead of a backslash -ignore = E711,E712,H302,H402,H404,H405,H904 -exclude = .venv,.git,.tox,dist,doc,etc,*daisy/locale*,*openstack/common*,*lib/python*,*egg,build +ignore = E711,E712,H302,H402,H404,H405,H904,F841,F821,E265,F812,F402,E226,E731 +exclude = .venv,.git,.tox,dist,doc,etc,*daisy/locale*,*openstack/common*,*lib/python*,*egg,build,daisy/db/sqlalchemy/api.py,daisy/i18n.py [hacking] local-check-factory = daisy.hacking.checks.factory diff --git a/code/daisy/unittest_install.sh b/code/daisy/unittest_install.sh new file mode 100755 index 00000000..655279cb --- /dev/null +++ b/code/daisy/unittest_install.sh @@ -0,0 +1,93 @@ +#!/bin/bash +#******** +# This file is used to develop unittest environment +# +# 1 please copy it to the modules you want to +# such as: cp unittest_install.sh ../../openstack/keystone/ +# 2 then run the bash, unittest environment can be developed +# +# note: this bash only support CGSLV5 +#***** +Install_version=`uname -a` +Right_version="3.10" +result=$(echo $Install_version | grep "${Right_version}") +if [[ "$result" == "" ]] +then + echo "only support CGSLV5,please change your version first..." + exit 1 +fi + +pip_ip=10.43.177.17 + +log_path=logs +mkdir -p $log_path + +rm -rf /etc/yum.repos.d/opencos.repo +opencos_repo=/etc/yum.repos.d/opencos.repo +echo "Create $opencos_repo ..." +echo "[opencos]">>$opencos_repo +echo "name=opencos">>$opencos_repo +echo "baseurl=http://$pip_ip/pypi/">>$opencos_repo +echo "enabled=1">>$opencos_repo +echo "gpgcheck=0">>$opencos_repo + +rm -rf ~/.pip/pip.conf +pip_config=~/.pip/pip.conf +echo "Create $pip_config ..." +if [ ! -d `dirname $pip_config` ]; then + mkdir -p `dirname $pip_config` +fi +echo "[global]">$pip_config +echo "find-links = http://$pip_ip/pypi">>$pip_config +echo "no-index = true">>$pip_config +echo "[install]">>$pip_config +echo "trusted-host = $pip_ip">>$pip_config + +rm -rf ~/.pydistutils.cfg +pydistutils_cfg=~/.pydistutils.cfg +echo "Create $pydistutils_cfg ..." +echo "[easy_install]">$pydistutils_cfg +echo "index_url = http://$pip_ip/pypi">>$pydistutils_cfg + + +modules=(virtualenv mariadb-devel postgresql-devel libffi-devel m2crypto openssl-devel + cyrus-sasl-devel sqlite-devel libxslt-devel openldap-devel mongodb-server) + +yum clean all 1>/dev/null 2>/dev/null +# for virtual environment demand pip version>=1.6, so install it whether installed. 
+yum --disablerepo=* --enablerepo=opencos install -y pip 1>$log_path/pip.log 2>$log_path/pip.err +yum --disablerepo=* --enablerepo=opencos install -y swig 1>$log_path/swig.log 2>$log_path/swig.err +yum --disablerepo=* --enablerepo=opencos install -y openstack-ceilometer-api 1>$log_path/ceilometer-api.log \ + 2>$log_path/ceilometer-api.err +# install modules +for mod in ${modules[@]}; do + echo -n "yum install $mod ... " + already_install=`rpm -qa | grep $mod` + if [ "$already_install" == "" ]; then + yum --disablerepo=* --enablerepo=opencos install -y $mod 1>$log_path/$mod.log 2>$log_path/$mod.err + if [ -s $log_path/$mod.err ]; then + echo "fail" + echo "Please contact li.guomin3@zte.com.cn,wu.wei266@zte.com.cn,liang.jingtao@zte.com.cn " + exit 1 + else + echo "ok(install finish)" + fi + else + echo "ok(already exist)" + fi +done + +#modify for heat M2Crypto install error +file_name=/usr/include/openssl/opensslconf.h +action=`sed -i 's/#error "This openssl-devel package does not work your architecture?"/#include "opensslconf-x86_64.h"/g' $file_name` + +echo "install venv ... " +chmod +x tools/* +python tools/install_venv.py 1>$log_path/install_venv.log 2>$log_path/install_venv.err +if grep "development environment setup is complete." $log_path/install_venv.log + then + echo "development environment setup is complete..." +else + echo "development environment setup is fail,please check logs/install_venv.err" + cat $log_path/install_venv.err +fi diff --git a/code/daisyclient/daisyclient/__init__.py b/code/daisyclient/daisyclient/__init__.py index efe3ea6a..55d87e6d 100755 --- a/code/daisyclient/daisyclient/__init__.py +++ b/code/daisyclient/daisyclient/__init__.py @@ -12,7 +12,7 @@ # License for the specific language governing permissions and limitations # under the License. 
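The unittest_install.sh script above only declares success when install_venv.py logs its completion message, and its comment notes that the virtualenv tooling needs pip >= 1.6. A small optional check for that requirement, offered as a hedged convenience sketch rather than part of the script:

    # Hypothetical helper: confirm the pip version the venv tooling expects.
    import pkg_resources

    def pip_is_new_enough(minimum='1.6'):
        installed = pkg_resources.get_distribution('pip').version
        return (pkg_resources.parse_version(installed) >=
                pkg_resources.parse_version(minimum))

    if not pip_is_new_enough():
        raise SystemExit('pip >= 1.6 is required for the virtualenv setup')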
-#NOTE(bcwaldon): this try/except block is needed to run setup.py due to +# NOTE(bcwaldon): this try/except block is needed to run setup.py due to # its need to import local code before installing required dependencies try: import daisyclient.client diff --git a/code/daisyclient/daisyclient/common/http.py b/code/daisyclient/daisyclient/common/http.py index 8a746a65..f3e80f17 100755 --- a/code/daisyclient/daisyclient/common/http.py +++ b/code/daisyclient/daisyclient/common/http.py @@ -16,9 +16,13 @@ import copy import logging import socket - +from oslo_utils import encodeutils +from daisyclient.common import https +from daisyclient.common.utils import safe_header +from daisyclient import exc from oslo_utils import importutils from oslo_utils import netutils + import requests try: from requests.packages.urllib3.exceptions import ProtocolError @@ -37,11 +41,6 @@ if not hasattr(parse, 'parse_qsl'): import cgi parse.parse_qsl = cgi.parse_qsl -from oslo_utils import encodeutils - -from daisyclient.common import https -from daisyclient.common.utils import safe_header -from daisyclient import exc osprofiler_web = importutils.try_import("osprofiler.web") diff --git a/code/daisyclient/daisyclient/common/https.py b/code/daisyclient/daisyclient/common/https.py index d42566f2..c0df710d 100755 --- a/code/daisyclient/daisyclient/common/https.py +++ b/code/daisyclient/daisyclient/common/https.py @@ -247,7 +247,8 @@ class VerifiedHTTPSConnection(HTTPSConnection): # List of exceptions reported by Python3 instead of # SSLConfigurationError if six.PY3: - excp_lst = (TypeError, FileNotFoundError, ssl.SSLError) + excp_lst = (TypeError, IOError, ssl.SSLError) + # https.py:250:36: F821 undefined name 'FileNotFoundError' else: # NOTE(jamespage) # Accomodate changes in behaviour for pep-0467, introduced diff --git a/code/daisyclient/daisyclient/common/utils.py b/code/daisyclient/daisyclient/common/utils.py index c2a41070..2666a740 100755 --- a/code/daisyclient/daisyclient/common/utils.py +++ b/code/daisyclient/daisyclient/common/utils.py @@ -23,21 +23,19 @@ import re import sys import threading import uuid - -from oslo_utils import importutils -import six - -if os.name == 'nt': - import msvcrt -else: - msvcrt = None - from oslo_utils import encodeutils from oslo_utils import strutils import prettytable import six from daisyclient import exc +from oslo_utils import importutils + +if os.name == 'nt': + import msvcrt +else: + msvcrt = None + _memoized_property_lock = threading.Lock() @@ -130,7 +128,8 @@ def pretty_choice_list(l): return ', '.join("'%s'" % i for i in l) -def print_list(objs, fields, formatters=None, field_settings=None): +def print_list(objs, fields, formatters=None, field_settings=None, + conver_field=True): formatters = formatters or {} field_settings = field_settings or {} pt = prettytable.PrettyTable([f for f in fields], caching=False) @@ -147,8 +146,11 @@ def print_list(objs, fields, formatters=None, field_settings=None): if field in formatters: row.append(formatters[field](o)) else: - field_name = field.lower().replace(' ', '_') - data = getattr(o, field_name, None) + if conver_field: + field_name = field.lower().replace(' ', '_') + else: + field_name = field.replace(' ', '_') + data = getattr(o, field_name, None) row.append(data) pt.add_row(row) @@ -162,7 +164,7 @@ def print_dict(d, max_column_width=80): for k, v in six.iteritems(d): if isinstance(v, (dict, list)): v = json.dumps(v) - pt.add_row([k, v ]) + pt.add_row([k, v]) print(encodeutils.safe_decode(pt.get_string(sortby='Property'))) diff 
--git a/code/daisyclient/daisyclient/openstack/common/apiclient/utils.py b/code/daisyclient/daisyclient/openstack/common/apiclient/utils.py index de0a34d4..0f40c76c 100755 --- a/code/daisyclient/daisyclient/openstack/common/apiclient/utils.py +++ b/code/daisyclient/daisyclient/openstack/common/apiclient/utils.py @@ -88,7 +88,7 @@ def find_resource(manager, name_or_id, **find_args): { "name": manager.resource_class.__name__.lower(), "name_or_id": name_or_id - } + } raise exceptions.CommandError(msg) except exceptions.NoUniqueMatch: msg = _("Multiple %(name)s matches found for " @@ -96,5 +96,5 @@ def find_resource(manager, name_or_id, **find_args): { "name": manager.resource_class.__name__.lower(), "name_or_id": name_or_id - } + } raise exceptions.CommandError(msg) diff --git a/code/daisyclient/daisyclient/shell.py b/code/daisyclient/daisyclient/shell.py index 548ecef4..25db76d0 100755 --- a/code/daisyclient/daisyclient/shell.py +++ b/code/daisyclient/daisyclient/shell.py @@ -218,10 +218,11 @@ class DaisyShell(object): action='store_true', help=argparse.SUPPRESS, ) - + ''' parser.add_argument('--version', action='version', version=daisyclient.__version__) + ''' parser.add_argument('-d', '--debug', default=bool(utils.env('GLANCECLIENT_DEBUG')), @@ -453,8 +454,9 @@ class DaisyShell(object): image_url = self._get_image_url(args) auth_token = args.os_auth_token - auth_reqd = force_auth or (utils.is_authentication_required(args.func) - and not (auth_token and image_url)) + auth_reqd = force_auth or\ + (utils.is_authentication_required(args.func) and not + (auth_token and image_url)) if not auth_reqd: endpoint = image_url @@ -545,12 +547,13 @@ class DaisyShell(object): return endpoint, token def _get_versioned_client(self, api_version, args, force_auth=False): - #endpoint, token = self._get_endpoint_and_token(args,force_auth=force_auth) - #endpoint = "http://10.43.175.62:19292" + # ndpoint, token = self._get_endpoint_and_token( + # args,force_auth=force_auth) + # endpoint = "http://10.43.175.62:19292" endpoint = args.os_endpoint - #print endpoint + # print endpoint kwargs = { - #'token': token, + # 'token': token, 'insecure': args.insecure, 'timeout': args.timeout, 'cacert': args.os_cacert, @@ -699,6 +702,7 @@ class DaisyShell(object): class HelpFormatter(argparse.HelpFormatter): + def start_section(self, heading): # Title-case the headings heading = '%s%s' % (heading[0].upper(), heading[1:]) diff --git a/code/daisyclient/daisyclient/v1/backup_restore.py b/code/daisyclient/daisyclient/v1/backup_restore.py new file mode 100755 index 00000000..d35d3366 --- /dev/null +++ b/code/daisyclient/daisyclient/v1/backup_restore.py @@ -0,0 +1,145 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
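The new backup_restore module whose license header appears above (its body follows) maps backup, restore and version queries onto /v1/backup, /v1/restore, /v1/backup_file_version and /v1/version. A hedged usage sketch, assuming the backup_restore attribute wired up in client.py later in this patch and a glanceclient-style Client(endpoint, **kwargs) constructor; the endpoint, file path and 'internal' type value (per the docstring's internal/external wording) are placeholders:

    from daisyclient.v1.client import Client

    client = Client('http://127.0.0.1:19292')               # placeholder daisy-api endpoint

    client.backup_restore.backup()                          # POST /v1/backup
    info = client.backup_restore.backup_file_version(
        backup_file_path='/home/daisy_backup.tar.gz')       # POST /v1/backup_file_version
    print(client.backup_restore.version(type='internal'))   # POST /v1/version
    client.backup_restore.restore(
        backup_file_path='/home/daisy_backup.tar.gz')       # POST /v1/restore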
+ +import copy +import six + +from daisyclient.common import utils +from daisyclient.openstack.common.apiclient import base + +OS_REQ_ID_HDR = 'x-openstack-request-id' + +BACKUP_PARAMS = () +RESTORE_PARAMS = ('backup_file_path',) +VERSION_PARAMS = ('type',) + + +class BackupRestore(base.Resource): + + def __repr__(self): + return "<BackupRestore %s>" % self._info + + def update(self, **fields): + self.manager.update(self, **fields) + + def delete(self, **kwargs): + return self.manager.delete(self) + + def data(self, **kwargs): + return self.manager.data(self, **kwargs) + + +class BackupRestoreManager(base.ManagerWithFind): + resource_class = BackupRestore + + def _backup_meta_to_headers(self, fields): + headers = {} + fields_copy = copy.deepcopy(fields) + + # NOTE(flaper87): Convert to str, headers + # that are not instance of basestring. All + # headers will be encoded later, before the + # request is sent. + + for key, value in six.iteritems(fields_copy): + headers['%s' % key] = utils.to_str(value) + return headers + + def _restore_meta_to_headers(self, fields): + headers = {} + fields_copy = copy.deepcopy(fields) + + # NOTE(flaper87): Convert to str, headers + # that are not instance of basestring. All + # headers will be encoded later, before the + # request is sent. + + for key, value in six.iteritems(fields_copy): + headers['%s' % key] = utils.to_str(value) + return headers + + def list(self, **kwargs): + pass + + def backup(self, **kwargs): + """Backup daisy data. + TODO(bcwaldon): document accepted params + """ + fields = {} + for field in kwargs: + if field in BACKUP_PARAMS: + fields[field] = kwargs[field] + else: + msg = 'backup() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + + url = '/v1/backup' + hdrs = self._backup_meta_to_headers(fields) + resp, body = self.client.post(url, headers=hdrs, data=hdrs) + return BackupRestore(self, body) + + def restore(self, **kwargs): + """Restore daisy data. + + TODO(bcwaldon): document accepted params + """ + fields = {} + for field in kwargs: + if field in RESTORE_PARAMS: + fields[field] = kwargs[field] + else: + msg = 'restore() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + + url = '/v1/restore' + + hdrs = self._restore_meta_to_headers(fields) + resp, body = self.client.post(url, headers=hdrs, data=hdrs) + + def backup_file_version(self, **kwargs): + """Get version of backup file. + + TODO(bcwaldon): document accepted params + """ + fields = {} + for field in kwargs: + if field in RESTORE_PARAMS: + fields[field] = kwargs[field] + else: + msg = 'backup_file_version() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + + url = '/v1/backup_file_version' + hdrs = self._restore_meta_to_headers(fields) + resp, body = self.client.post(url, headers=hdrs, data=hdrs) + return BackupRestore(self, body) + + def version(self, **kwargs): + """Get internal or external version of daisy.
+ + TODO(bcwaldon): document accepted params + """ + fields = {} + for field in kwargs: + if field in VERSION_PARAMS: + fields[field] = kwargs[field] + else: + msg = 'install() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + + url = '/v1/version' + hdrs = self._restore_meta_to_headers(fields) + resp, body = self.client.post(url, headers=hdrs, data=hdrs) + return BackupRestore(self, body) diff --git a/code/daisyclient/daisyclient/v1/client.py b/code/daisyclient/daisyclient/v1/client.py index 3c065b8c..60bd195e 100755 --- a/code/daisyclient/daisyclient/v1/client.py +++ b/code/daisyclient/daisyclient/v1/client.py @@ -32,6 +32,11 @@ from daisyclient.v1.uninstall import UninstallManager from daisyclient.v1.update import UpdateManager from daisyclient.v1.disk_array import DiskArrayManager from daisyclient.v1.template import TemplateManager +from daisyclient.v1.hwm_nodes import NodeManager +from daisyclient.v1.hwms import HwmManager +from daisyclient.v1.backup_restore import BackupRestoreManager + + class Client(object): """Client for the OpenStack Images v1 API. @@ -64,3 +69,6 @@ class Client(object): self.update = UpdateManager(self.http_client) self.disk_array = DiskArrayManager(self.http_client) self.template = TemplateManager(self.http_client) + self.node = NodeManager(self.http_client) + self.hwm = HwmManager(self.http_client) + self.backup_restore = BackupRestoreManager(self.http_client) diff --git a/code/daisyclient/daisyclient/v1/cluster_hosts.py b/code/daisyclient/daisyclient/v1/cluster_hosts.py index 7064774f..85bf2077 100755 --- a/code/daisyclient/daisyclient/v1/cluster_hosts.py +++ b/code/daisyclient/daisyclient/v1/cluster_hosts.py @@ -17,6 +17,7 @@ from daisyclient.openstack.common.apiclient import base class ClusterHost(base.Resource): + def __repr__(self): return "" % self._info @@ -36,28 +37,30 @@ class ClusterHostManager(base.ManagerWithFind): def list(self, cluster=None, host=None): pass - # out = [] - # if cluster and host: - # out.extend(self._list_by_cluster_and_host(cluster, host)) - # elif cluster: - # out.extend(self._list_by_cluster(cluster)) - # elif host: - # out.extend(self._list_by_host(host)) - # else: - # pass - # return out +# out = [] +# if cluster and host: +# out.extend(self._list_by_cluster_and_host(cluster, host)) +# elif cluster: +# out.extend(self._list_by_cluster(cluster)) +# elif host: +# out.extend(self._list_by_host(host)) +# else: +# pass +# return out + +# def _list_by_cluster_and_host(self, cluster, host): +# url = '/v1/clusters/%s/nodes/%s' % (cluster, host) +# resp, body = self.client.get(url) - # def _list_by_cluster_and_host(self, cluster, host): - # url = '/v1/clusters/%s/nodes/%s' % (cluster, host) - # resp, body = self.client.get(url) # out = [] # for member in body['members']: # member['cluster'] = cluster - # out.append(ClusterHost(self, member, loaded=True)) - # return out - # - # def _list_by_cluster(self, cluster): - # url = '/v1/clusters/%s/nodes' % cluster +# out.append(ClusterHost(self, member, loaded=True)) +# return out + +# def _list_by_cluster(self, cluster): +# url = '/v1/clusters/%s/nodes' % cluster + # resp, body = self.client.get(url) # out = [] # for member in body['members']: @@ -66,15 +69,13 @@ class ClusterHostManager(base.ManagerWithFind): # return out # def _list_by_host(self, host): - # url = '/v1/multi-clusters/nodes/%s' % host - # resp, body = self.client.get(url) - # out = [] - # for member in body['multi-clusters']: - # member['host_id'] = host - # out.append(ClusterHost(self, member, 
loaded=True)) - # return out +# url = '/v1/multi-clusters/nodes/%s' % host +# resp, body = self.client.get(url) +# out = [] +# for member in body['multi-clusters']: +# member['host_id'] = host +# out.append(ClusterHost(self, member, loaded=True)) +# return out def delete(self, cluster_id, host_id): self._delete("/v1/clusters/%s/nodes/%s" % (cluster_id, host_id)) - - diff --git a/code/daisyclient/daisyclient/v1/clusters.py b/code/daisyclient/daisyclient/v1/clusters.py index 530e9ccb..a57595b7 100755 --- a/code/daisyclient/daisyclient/v1/clusters.py +++ b/code/daisyclient/daisyclient/v1/clusters.py @@ -24,30 +24,33 @@ from daisyclient.common import utils from daisyclient.openstack.common.apiclient import base UPDATE_PARAMS = ( - 'name', 'description', 'networks', 'deleted', 'nodes','floating_ranges', - 'dns_nameservers','net_l23_provider','base_mac','internal_gateway', - 'internal_cidr', 'external_cidr','gre_id_range', 'vlan_range', + 'name', 'description', 'networks', 'deleted', 'nodes', 'floating_ranges', + 'dns_nameservers', 'net_l23_provider', 'base_mac', 'internal_gateway', + 'internal_cidr', 'external_cidr', 'gre_id_range', 'vlan_range', 'vni_range', 'segmentation_type', 'public_vip', 'logic_networks', - 'networking_parameters', 'routers', 'auto_scale', 'use_dns' + 'networking_parameters', 'routers', 'auto_scale', 'use_dns', + 'hwm_ip' ) CREATE_PARAMS = ( - 'id', 'name', 'nodes', 'description', 'networks','floating_ranges', - 'dns_nameservers','net_l23_provider','base_mac','internal_gateway', + 'id', 'name', 'nodes', 'description', 'networks', 'floating_ranges', + 'dns_nameservers', 'net_l23_provider', 'base_mac', 'internal_gateway', 'internal_cidr', 'external_cidr', 'gre_id_range', 'vlan_range', 'vni_range', 'segmentation_type', 'public_vip', 'logic_networks', - 'networking_parameters', 'routers', 'auto_scale', 'use_dns' + 'networking_parameters', 'routers', 'auto_scale', 'use_dns', + 'hwm_ip' ) DEFAULT_PAGE_SIZE = 20 SORT_DIR_VALUES = ('asc', 'desc') -SORT_KEY_VALUES = ('name','auto_scale', 'id', 'created_at', 'updated_at') +SORT_KEY_VALUES = ('name', 'auto_scale', 'id', 'created_at', 'updated_at') OS_REQ_ID_HDR = 'x-openstack-request-id' class Cluster(base.Resource): + def __repr__(self): return "" % self._info @@ -91,7 +94,7 @@ class ClusterManager(base.ManagerWithFind): meta[key] = strutils.bool_from_string(meta[key]) return self._format_cluster_meta_for_user(meta) - + def _cluster_meta_to_headers(self, fields): headers = {} fields_copy = copy.deepcopy(fields) @@ -104,7 +107,7 @@ class ClusterManager(base.ManagerWithFind): for key, value in six.iteritems(fields_copy): headers['%s' % key] = utils.to_str(value) return headers - + @staticmethod def _format_image_meta_for_user(meta): for key in ['size', 'min_ram', 'min_disk']: @@ -133,13 +136,14 @@ class ClusterManager(base.ManagerWithFind): """ cluster_id = base.getid(cluster) resp, body = self.client.get('/v1/clusters/%s' - % urlparse.quote(str(cluster_id))) - #meta = self._cluster_meta_from_headers(resp.headers) + % urlparse.quote(str(cluster_id))) + # meta = self._cluster_meta_from_headers(resp.headers) return_request_id = kwargs.get('return_req_id', None) if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - #return Host(self, meta) - return Cluster(self, self._format_cluster_meta_for_user(body['cluster'])) + # return Host(self, meta) + return Cluster(self, self._format_cluster_meta_for_user( + body['cluster'])) def data(self, image, do_checksum=True, **kwargs): """Get the raw data 
for a specific image. @@ -193,7 +197,8 @@ class ClusterManager(base.ManagerWithFind): :param page_size: number of items to request in each paginated request :param limit: maximum number of clusters to return - :param marker: begin returning clusters that appear later in the cluster + :param marker: begin returning clusters that + appear later in the cluster list than that represented by this cluster id :param filters: dict of direct comparison filters that mimics the structure of an cluster object @@ -267,7 +272,7 @@ class ClusterManager(base.ManagerWithFind): TODO(bcwaldon): document accepted params """ - + fields = {} for field in kwargs: if field in CREATE_PARAMS: @@ -277,7 +282,7 @@ class ClusterManager(base.ManagerWithFind): else: msg = 'create() got an unexpected keyword argument \'%s\'' raise TypeError(msg % field) - + hdrs = self._cluster_meta_to_headers(fields) resp, body = self.client.post('/v1/clusters', headers=hdrs, @@ -286,7 +291,8 @@ class ClusterManager(base.ManagerWithFind): if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - return Cluster(self, self._format_cluster_meta_for_user(body['cluster'])) + return Cluster(self, self._format_cluster_meta_for_user( + body['cluster'])) def delete(self, cluster, **kwargs): """Delete an cluster.""" @@ -295,7 +301,7 @@ class ClusterManager(base.ManagerWithFind): return_request_id = kwargs.get('return_req_id', None) if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - + def update(self, cluster, **kwargs): """Update an cluster @@ -308,7 +314,7 @@ class ClusterManager(base.ManagerWithFind): fields[field] = kwargs[field] elif field == 'return_req_id': continue - #else: + # else: # msg = 'update() got an unexpected keyword argument \'%s\'' # raise TypeError(msg % field) @@ -319,4 +325,5 @@ class ClusterManager(base.ManagerWithFind): if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - return Cluster(self, self._format_cluster_meta_for_user(body['cluster'])) + return Cluster(self, self._format_cluster_meta_for_user( + body['cluster'])) diff --git a/code/daisyclient/daisyclient/v1/components.py b/code/daisyclient/daisyclient/v1/components.py index fe51b095..97a6125f 100755 --- a/code/daisyclient/daisyclient/v1/components.py +++ b/code/daisyclient/daisyclient/v1/components.py @@ -23,13 +23,13 @@ import six.moves.urllib.parse as urlparse from daisyclient.common import utils from daisyclient.openstack.common.apiclient import base -UPDATE_PARAMS = ('name', 'description', - #NOTE(bcwaldon: an attempt to update 'deleted' will be +UPDATE_PARAMS = ('name', 'description', + # NOTE(bcwaldon: an attempt to update 'deleted' will be # ignored, but we need to support it for backwards- # compatibility with the legacy client library 'deleted') -CREATE_PARAMS = ('id', 'name','description') +CREATE_PARAMS = ('id', 'name', 'description') DEFAULT_PAGE_SIZE = 20 @@ -40,6 +40,7 @@ OS_REQ_ID_HDR = 'x-openstack-request-id' class Component(base.Resource): + def __repr__(self): return "" % self._info @@ -83,7 +84,7 @@ class ComponentManager(base.ManagerWithFind): meta[key] = strutils.bool_from_string(meta[key]) return self._format_component_meta_for_user(meta) - + def _component_meta_to_headers(self, fields): headers = {} fields_copy = copy.deepcopy(fields) @@ -96,7 +97,7 @@ class ComponentManager(base.ManagerWithFind): for key, value in six.iteritems(fields_copy): headers['%s' % key] = utils.to_str(value) return headers - + 
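The cluster and component managers above, like the config_file, config_set and config managers that follow, all serialize request metadata the same way: the field dict is deep-copied and every value is stringified before being posted as headers and data. A standalone illustration of that shared helper pattern (example values only; the real helpers use utils.to_str):

    import copy
    import six

    def meta_to_headers(fields, to_str=str):
        # Mirrors the _*_meta_to_headers helpers: copy the dict and
        # stringify every value so it is safe to send as an HTTP header.
        headers = {}
        fields_copy = copy.deepcopy(fields)
        for key, value in six.iteritems(fields_copy):
            headers['%s' % key] = to_str(value)
        return headers

    print(meta_to_headers({'name': 'cluster01', 'auto_scale': 1}))
    # {'name': 'cluster01', 'auto_scale': '1'}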
@staticmethod def _format_image_meta_for_user(meta): for key in ['size', 'min_ram', 'min_disk']: @@ -125,13 +126,14 @@ class ComponentManager(base.ManagerWithFind): """ component_id = base.getid(component) resp, body = self.client.get('/v1/components/%s' - % urlparse.quote(str(component_id))) - #meta = self._component_meta_from_headers(resp.headers) + % urlparse.quote(str(component_id))) + # meta = self._component_meta_from_headers(resp.headers) return_request_id = kwargs.get('return_req_id', None) if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - #return Host(self, meta) - return Component(self, self._format_component_meta_for_user(body['component'])) + # return Host(self, meta) + return Component(self, self._format_component_meta_for_user( + body['component'])) def data(self, image, do_checksum=True, **kwargs): """Get the raw data for a specific image. @@ -185,7 +187,8 @@ class ComponentManager(base.ManagerWithFind): :param page_size: number of items to request in each paginated request :param limit: maximum number of components to return - :param marker: begin returning components that appear later in the component + :param marker: begin returning components that + appear later in the component list than that represented by this component id :param filters: dict of direct comparison filters that mimics the structure of an component object @@ -259,7 +262,7 @@ class ComponentManager(base.ManagerWithFind): TODO(bcwaldon): document accepted params """ - + fields = {} for field in kwargs: if field in CREATE_PARAMS: @@ -269,7 +272,7 @@ class ComponentManager(base.ManagerWithFind): else: msg = 'create() got an unexpected keyword argument \'%s\'' raise TypeError(msg % field) - + hdrs = self._component_meta_to_headers(fields) resp, body = self.client.post('/v1/components', headers=hdrs, @@ -278,7 +281,8 @@ class ComponentManager(base.ManagerWithFind): if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - return Component(self, self._format_component_meta_for_user(body['component'])) + return Component(self, self._format_component_meta_for_user( + body['component'])) def delete(self, component, **kwargs): """Delete an component.""" @@ -287,7 +291,7 @@ class ComponentManager(base.ManagerWithFind): return_request_id = kwargs.get('return_req_id', None) if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - + def update(self, component, **kwargs): """Update an component @@ -312,4 +316,5 @@ class ComponentManager(base.ManagerWithFind): if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - return Component(self, self._format_component_meta_for_user(body['component_meta'])) + return Component(self, self._format_component_meta_for_user( + body['component_meta'])) diff --git a/code/daisyclient/daisyclient/v1/config_files.py b/code/daisyclient/daisyclient/v1/config_files.py index 5862a3c6..eec99fcd 100755 --- a/code/daisyclient/daisyclient/v1/config_files.py +++ b/code/daisyclient/daisyclient/v1/config_files.py @@ -36,6 +36,7 @@ OS_REQ_ID_HDR = 'x-openstack-request-id' class Config_file(base.Resource): + def __repr__(self): return "" % self._info @@ -79,7 +80,7 @@ class Config_fileManager(base.ManagerWithFind): meta[key] = strutils.bool_from_string(meta[key]) return self._format_config_file_meta_for_user(meta) - + def _config_file_meta_to_headers(self, fields): headers = {} fields_copy = copy.deepcopy(fields) @@ -111,13 
+112,14 @@ class Config_fileManager(base.ManagerWithFind): """ config_file_id = base.getid(config_file) resp, body = self.client.get('/v1/config_files/%s' - % urlparse.quote(str(config_file_id))) - #meta = self._config_file_meta_from_headers(resp.headers) + % urlparse.quote(str(config_file_id))) + # meta = self._config_file_meta_from_headers(resp.headers) return_request_id = kwargs.get('return_req_id', None) if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - #return Config_file(self, meta) - return Config_file(self, self._format_config_file_meta_for_user(body['config_file'])) + # return Config_file(self, meta) + return Config_file(self, self._format_config_file_meta_for_user( + body['config_file'])) def _build_params(self, parameters): params = {'limit': parameters.get('page_size', DEFAULT_PAGE_SIZE)} @@ -151,7 +153,8 @@ class Config_fileManager(base.ManagerWithFind): :param page_size: number of items to request in each paginated request :param limit: maximum number of config_files to return - :param marker: begin returning config_files that appear later in the config_file + :param marker: begin returning config_files that + appear later in the config_file list than that represented by this config_file id :param filters: dict of direct comparison filters that mimics the structure of an config_file object @@ -225,7 +228,7 @@ class Config_fileManager(base.ManagerWithFind): TODO(bcwaldon): document accepted params """ - + fields = {} for field in kwargs: if field in CREATE_PARAMS: @@ -235,7 +238,7 @@ class Config_fileManager(base.ManagerWithFind): else: msg = 'create() got an unexpected keyword argument \'%s\'' raise TypeError(msg % field) - + hdrs = self._config_file_meta_to_headers(fields) resp, body = self.client.post('/v1/config_files', headers=hdrs, @@ -244,7 +247,8 @@ class Config_fileManager(base.ManagerWithFind): if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - return Config_file(self, self._format_config_file_meta_for_user(body['config_file'])) + return Config_file(self, self._format_config_file_meta_for_user( + body['config_file'])) def delete(self, config_file, **kwargs): """Delete an config_file.""" @@ -253,7 +257,7 @@ class Config_fileManager(base.ManagerWithFind): return_request_id = kwargs.get('return_req_id', None) if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - + def update(self, config_file, **kwargs): """Update an config_file @@ -278,5 +282,5 @@ class Config_fileManager(base.ManagerWithFind): if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - return Config_file(self, self._format_config_file_meta_for_user(body['config_file_meta'])) - + return Config_file(self, self._format_config_file_meta_for_user( + body['config_file_meta'])) diff --git a/code/daisyclient/daisyclient/v1/config_sets.py b/code/daisyclient/daisyclient/v1/config_sets.py index 84629313..747486f2 100755 --- a/code/daisyclient/daisyclient/v1/config_sets.py +++ b/code/daisyclient/daisyclient/v1/config_sets.py @@ -23,9 +23,9 @@ import six.moves.urllib.parse as urlparse from daisyclient.common import utils from daisyclient.openstack.common.apiclient import base -UPDATE_PARAMS = ('name', 'description', 'deleted','cluster','role') +UPDATE_PARAMS = ('name', 'description', 'deleted', 'cluster', 'role') -CREATE_PARAMS = ('id', 'name', 'description','cluster','role') +CREATE_PARAMS = ('id', 'name', 'description', 'cluster', 
'role') DEFAULT_PAGE_SIZE = 20 @@ -36,6 +36,7 @@ OS_REQ_ID_HDR = 'x-openstack-request-id' class Config_set(base.Resource): + def __repr__(self): return "" % self._info @@ -79,7 +80,7 @@ class Config_setManager(base.ManagerWithFind): meta[key] = strutils.bool_from_string(meta[key]) return self._format_config_set_meta_for_user(meta) - + def _config_set_meta_to_headers(self, fields): headers = {} fields_copy = copy.deepcopy(fields) @@ -111,13 +112,14 @@ class Config_setManager(base.ManagerWithFind): """ config_set_id = base.getid(config_set) resp, body = self.client.get('/v1/config_sets/%s' - % urlparse.quote(str(config_set_id))) - #meta = self._config_set_meta_from_headers(resp.headers) + % urlparse.quote(str(config_set_id))) + # meta = self._config_set_meta_from_headers(resp.headers) return_request_id = kwargs.get('return_req_id', None) if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - #return Config_set(self, meta) - return Config_set(self, self._format_config_set_meta_for_user(body['config_set'])) + # return Config_set(self, meta) + return Config_set(self, self._format_config_set_meta_for_user( + body['config_set'])) def _build_params(self, parameters): params = {'limit': parameters.get('page_size', DEFAULT_PAGE_SIZE)} @@ -151,7 +153,8 @@ class Config_setManager(base.ManagerWithFind): :param page_size: number of items to request in each paginated request :param limit: maximum number of config_sets to return - :param marker: begin returning config_sets that appear later in the config_set + :param marker: begin returning config_sets that + appear later in the config_set list than that represented by this config_set id :param filters: dict of direct comparison filters that mimics the structure of an config_set object @@ -225,7 +228,7 @@ class Config_setManager(base.ManagerWithFind): TODO(bcwaldon): document accepted params """ - + fields = {} for field in kwargs: if field in CREATE_PARAMS: @@ -235,7 +238,7 @@ class Config_setManager(base.ManagerWithFind): else: msg = 'create() got an unexpected keyword argument \'%s\'' raise TypeError(msg % field) - + hdrs = self._config_set_meta_to_headers(fields) resp, body = self.client.post('/v1/config_sets', headers=hdrs, @@ -244,7 +247,8 @@ class Config_setManager(base.ManagerWithFind): if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - return Config_set(self, self._format_config_set_meta_for_user(body['config_set'])) + return Config_set(self, self._format_config_set_meta_for_user( + body['config_set'])) def delete(self, config_set, **kwargs): """Delete an config_set.""" @@ -253,7 +257,7 @@ class Config_setManager(base.ManagerWithFind): return_request_id = kwargs.get('return_req_id', None) if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - + def update(self, config_set, **kwargs): """Update an config_set @@ -278,7 +282,8 @@ class Config_setManager(base.ManagerWithFind): if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - return Config_set(self, self._format_config_set_meta_for_user(body['config_set_meta'])) + return Config_set(self, self._format_config_set_meta_for_user( + body['config_set_meta'])) def cluster_config_set_update(self, **kwargs): """config_interface effect @@ -293,12 +298,13 @@ class Config_setManager(base.ManagerWithFind): else: msg = 'create() got an unexpected keyword argument \'%s\'' raise TypeError(msg % field) - + hdrs = 
self._config_set_meta_to_headers(fields) resp, body = self.client.post('/v1/cluster_config_set_update', headers=hdrs, data=hdrs) - return Config_set(self, self._format_config_set_meta_for_user(body['config_set'])) + return Config_set(self, self._format_config_set_meta_for_user( + body['config_set'])) def cluster_config_set_progress(self, **kwargs): """config_interface effect @@ -313,7 +319,7 @@ class Config_setManager(base.ManagerWithFind): else: msg = 'create() got an unexpected keyword argument \'%s\'' raise TypeError(msg % field) - + hdrs = self._config_set_meta_to_headers(fields) resp, body = self.client.post('/v1/cluster_config_set_progress', headers=hdrs, diff --git a/code/daisyclient/daisyclient/v1/configs.py b/code/daisyclient/daisyclient/v1/configs.py index f9954a96..ff899aa0 100755 --- a/code/daisyclient/daisyclient/v1/configs.py +++ b/code/daisyclient/daisyclient/v1/configs.py @@ -23,9 +23,13 @@ import six.moves.urllib.parse as urlparse from daisyclient.common import utils from daisyclient.openstack.common.apiclient import base -UPDATE_PARAMS = ('section', 'description', 'deleted','config_set_id','config_file_id','key','value','cluster','role','config_set','config') +UPDATE_PARAMS = ('section', 'description', 'deleted', 'config_set_id', + 'config_file_id', 'key', 'value', 'cluster', 'role', + 'config_set', 'config', 'host_id') -CREATE_PARAMS = ('id', 'section', 'description','config_set_id','config_file_id','key','value','cluster','role','config_set','config') +CREATE_PARAMS = ('id', 'section', 'description', 'config_set_id', + 'config_file_id', 'key', 'value', 'cluster', 'role', + 'config_set', 'config', 'host_id') DEFAULT_PAGE_SIZE = 20 @@ -36,6 +40,7 @@ OS_REQ_ID_HDR = 'x-openstack-request-id' class Config(base.Resource): + def __repr__(self): return "" % self._info @@ -79,7 +84,7 @@ class ConfigManager(base.ManagerWithFind): meta[key] = strutils.bool_from_string(meta[key]) return self._format_config_meta_for_user(meta) - + def _config_meta_to_headers(self, fields): headers = {} fields_copy = copy.deepcopy(fields) @@ -111,12 +116,12 @@ class ConfigManager(base.ManagerWithFind): """ config_id = base.getid(config) resp, body = self.client.get('/v1/configs/%s' - % urlparse.quote(str(config_id))) - #meta = self._config_meta_from_headers(resp.headers) + % urlparse.quote(str(config_id))) + # meta = self._config_meta_from_headers(resp.headers) return_request_id = kwargs.get('return_req_id', None) if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - #return Config(self, meta) + # return Config(self, meta) return Config(self, self._format_config_meta_for_user(body['config'])) def _build_params(self, parameters): @@ -225,7 +230,7 @@ class ConfigManager(base.ManagerWithFind): TODO(bcwaldon): document accepted params """ - + fields = {} for field in kwargs: if field in CREATE_PARAMS: @@ -235,7 +240,7 @@ class ConfigManager(base.ManagerWithFind): # else: # msg = 'create() got an unexpected keyword argument \'%s\'' # raise TypeError(msg % field) - + hdrs = self._config_meta_to_headers(fields) resp, body = self.client.post('/v1/configs', headers=hdrs, @@ -255,7 +260,7 @@ class ConfigManager(base.ManagerWithFind): continue hdrs = self._config_meta_to_headers(fields) url = "/v1/configs_delete" - resp, body = self.client.delete(url,headers=hdrs,data=hdrs) + resp, body = self.client.delete(url, headers=hdrs, data=hdrs) return_request_id = kwargs.get('return_req_id', None) if return_request_id is not None: 
return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) diff --git a/code/daisyclient/daisyclient/v1/disk_array.py b/code/daisyclient/daisyclient/v1/disk_array.py index 3d8795c2..eaa9e1c5 100755 --- a/code/daisyclient/daisyclient/v1/disk_array.py +++ b/code/daisyclient/daisyclient/v1/disk_array.py @@ -16,31 +16,30 @@ import copy from oslo_utils import encodeutils -from oslo_utils import strutils import six import six.moves.urllib.parse as urlparse -from webob.exc import HTTPBadRequest from daisyclient.common import utils from daisyclient.openstack.common.apiclient import base CREATE_SERVICE_DISK_PARAMS = ('service', 'data_ips', 'size', - 'disk_location', 'role_id','lun') + 'disk_location', 'role_id', 'lun', + 'protocol_type') CREATE_CINDER_BACKEND_PARAMS = ('disk_array', 'role_id') CREATE_CINDER_BACKEND_INTER_PARAMS = ('management_ips', 'data_ips', 'pools', 'volume_driver', 'volume_type', 'role_id', - 'user_name','user_pwd') + 'user_name', 'user_pwd') UPDATE_CINDER_BACKEND_PARAMS = ('id', 'disk_array', 'role_id') DEFAULT_PAGE_SIZE = 20 SORT_DIR_VALUES = ('asc', 'desc') SORT_KEY_VALUES = ('id', 'role_id', 'created_at', 'updated_at', 'status') -SERVICE_DISK_UPDATE_PARAMS = CREATE_SERVICE_DISK_PARAMS +SERVICE_DISK_UPDATE_PARAMS = CREATE_SERVICE_DISK_PARAMS OS_REQ_ID_HDR = 'x-openstack-request-id' - class Disk_array(base.Resource): + def __repr__(self): return "" % self._info @@ -67,7 +66,6 @@ class DiskArrayManager(base.ManagerWithFind): return ([obj_class(self, res, loaded=True) for res in data if res], resp) - def _service_disk_meta_to_headers(self, fields): headers = {} fields_copy = copy.deepcopy(fields) @@ -80,7 +78,7 @@ class DiskArrayManager(base.ManagerWithFind): for key, value in six.iteritems(fields_copy): headers['%s' % key] = utils.to_str(value) return headers - + def _cinder_volume_meta_to_headers(self, fields): headers = {} fields_copy = copy.deepcopy(fields) @@ -115,12 +113,13 @@ class DiskArrayManager(base.ManagerWithFind): """ service_disk_id = base.getid(service_disk) resp, body = self.client.get('/v1/service_disk/%s' - % urlparse.quote(str(service_disk_id))) + % urlparse.quote(str(service_disk_id))) return_request_id = kwargs.get('return_req_id', None) if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - #return Host(self, meta) - return Disk_array(self, self._format_service_disk_meta_for_user(body['disk_meta'])) + # return Host(self, meta) + return Disk_array(self, self._format_service_disk_meta_for_user( + body['disk_meta'])) def data(self, image, do_checksum=True, **kwargs): """Get the raw data for a specific image. 
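CREATE_SERVICE_DISK_PARAMS above gains 'protocol_type' alongside 'lun', and configs.py now also accepts 'host_id'. A hedged call sketch for adding a service disk through the disk_array manager (the add method name is assumed to be service_disk_add, by analogy with the service_disk_delete/detail/list methods shown below; every value is a placeholder):

    from daisyclient.v1.client import Client

    client = Client('http://127.0.0.1:19292')     # placeholder daisy-api endpoint
    client.disk_array.service_disk_add(
        role_id='<role-uuid>',
        service='db',
        disk_location='share',
        data_ips='192.168.1.10',
        size=100,
        lun=1,
        protocol_type='ISCSI')                    # field added by this patch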
@@ -181,12 +180,13 @@ class DiskArrayManager(base.ManagerWithFind): else: msg = 'Disk_array() got an unexpected keyword argument \'%s\'' raise TypeError(msg % field) - + url = '/v1/service_disk' - + hdrs = self._service_disk_meta_to_headers(fields) - resp, body = self.client.post(url,headers=hdrs,data=hdrs) - return Disk_array(self, self._format_service_disk_meta_for_user(body['disk_meta'])) + resp, body = self.client.post(url, headers=hdrs, data=hdrs) + return Disk_array(self, self._format_service_disk_meta_for_user( + body['disk_meta'])) def service_disk_delete(self, id, **kwargs): """Delete an service_disk.""" @@ -219,7 +219,8 @@ class DiskArrayManager(base.ManagerWithFind): if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - return Disk_array(self, self._format_service_disk_meta_for_user(body['disk_meta'])) + return Disk_array(self, self._format_service_disk_meta_for_user( + body['disk_meta'])) def service_disk_detail(self, id, **kwargs): """Get the metadata for a specific service_disk. @@ -229,20 +230,22 @@ class DiskArrayManager(base.ManagerWithFind): """ service_disk_id = base.getid(id) resp, body = self.client.get('/v1/service_disk/%s' - % urlparse.quote(str(service_disk_id))) + % urlparse.quote(str(service_disk_id))) return_request_id = kwargs.get('return_req_id', None) if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - return Disk_array(self, self._format_service_disk_meta_for_user(body['disk_meta'])) - + return Disk_array(self, self._format_service_disk_meta_for_user( + body['disk_meta'])) + def service_disk_list(self, **kwargs): """Get a list of service_disks. :param page_size: number of items to request in each paginated request :param limit: maximum number of service_disks to return - :param marker: begin returning service_disks that appear later in the service_disk + :param marker: begin returning service_disks that + appear later in the service_disk list than that represented by this service_disk id :param filters: dict of direct comparison filters that mimics the structure of an service_disk object @@ -296,7 +299,8 @@ class DiskArrayManager(base.ManagerWithFind): seen += seen_last_page if seen_last_page + filtered == 0: - # Note(kragniz): we didn't get any service_disks in the last page + # Note(kragniz): we didn't get any service_disks in the last + # page return if absolute_limit is not None and seen >= absolute_limit: @@ -304,14 +308,14 @@ class DiskArrayManager(base.ManagerWithFind): return if page_size and seen_last_page + filtered < page_size: - # Note(kragniz): we've reached the last page of the service_disks + # Note(kragniz): we've reached the last page of the + # service_disks return # Note(kragniz): there are more service_disks to come params['marker'] = last_service_disk seen_last_page = 0 - def cinder_volume_add(self, **kwargs): """Disk_array a cluster @@ -325,10 +329,11 @@ class DiskArrayManager(base.ManagerWithFind): msg = 'Disk_array() got an unexpected keyword argument \'%s\'' raise TypeError(msg % field) url = '/v1/cinder_volume' - + hdrs = self._service_disk_meta_to_headers(fields) - resp, body = self.client.post(url,headers=hdrs,data=hdrs) - return Disk_array(self, self._format_service_disk_meta_for_user(body['disk_meta'])) + resp, body = self.client.post(url, headers=hdrs, data=hdrs) + return Disk_array(self, self._format_service_disk_meta_for_user( + body['disk_meta'])) def cinder_volume_delete(self, id, **kwargs): """Delete an cinder_volume.""" @@ -353,7 
+358,7 @@ class DiskArrayManager(base.ManagerWithFind): else: msg = 'update() got an unexpected keyword argument \'%s\'' raise TypeError(msg % field) - + hdrs.update(self._cinder_volume_meta_to_headers(fields)) url = '/v1/cinder_volume/%s' % base.getid(id) @@ -362,7 +367,8 @@ class DiskArrayManager(base.ManagerWithFind): if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - return Disk_array(self, self._format_service_disk_meta_for_user(body['disk_meta'])) + return Disk_array(self, self._format_service_disk_meta_for_user( + body['disk_meta'])) def cinder_volume_detail(self, id, **kwargs): """Get the metadata for a specific cinder_volume. @@ -373,20 +379,22 @@ class DiskArrayManager(base.ManagerWithFind): cinder_volume_id = base.getid(id) resp, body = self.client.get('/v1/cinder_volume/%s' - % urlparse.quote(str(cinder_volume_id))) + % urlparse.quote(str(cinder_volume_id))) return_request_id = kwargs.get('return_req_id', None) if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - return Disk_array(self, self._format_service_disk_meta_for_user(body['disk_meta'])) - + return Disk_array(self, self._format_service_disk_meta_for_user( + body['disk_meta'])) + def cinder_volume_list(self, **kwargs): """Get a list of cinder_volumes. :param page_size: number of items to request in each paginated request :param limit: maximum number of cinder_volumes to return - :param marker: begin returning cinder_volumes that appear later in the cinder_volume + :param marker: begin returning cinder_volumes that appear later in + the cinder_volume list than that represented by this cinder_volume id :param filters: dict of direct comparison filters that mimics the structure of an cinder_volume object @@ -440,7 +448,8 @@ class DiskArrayManager(base.ManagerWithFind): seen += seen_last_page if seen_last_page + filtered == 0: - # Note(kragniz): we didn't get any service_disks in the last page + # Note(kragniz): we didn't get any service_disks in the last + # page return if absolute_limit is not None and seen >= absolute_limit: @@ -448,9 +457,10 @@ class DiskArrayManager(base.ManagerWithFind): return if page_size and seen_last_page + filtered < page_size: - # Note(kragniz): we've reached the last page of the service_disks + # Note(kragniz): we've reached the last page of the + # service_disks return # Note(kragniz): there are more service_disks to come params['marker'] = last_cinder_volume - seen_last_page = 0 \ No newline at end of file + seen_last_page = 0 diff --git a/code/daisyclient/daisyclient/v1/hosts.py b/code/daisyclient/daisyclient/v1/hosts.py index f85a33ad..6c7d057f 100755 --- a/code/daisyclient/daisyclient/v1/hosts.py +++ b/code/daisyclient/daisyclient/v1/hosts.py @@ -23,19 +23,35 @@ import six.moves.urllib.parse as urlparse from daisyclient.common import utils from daisyclient.openstack.common.apiclient import base -UPDATE_PARAMS = ('name', 'resource_type', 'dmi_uuid', 'role', 'cluster', 'root_disk','root_lv_size','swap_lv_size','isolcpus','hugepagesize','hugepages','root_pwd','os_version', 'os_status', 'interfaces', 'is_deployment', 'description', 'deleted', 'status','ipmi_user','ipmi_passwd','ipmi_addr', 'ip', 'status', 'user', 'passwd') +UPDATE_PARAMS = ('name', 'resource_type', 'dmi_uuid', 'role', 'cluster', + 'root_disk', 'root_lv_size', 'swap_lv_size', 'isolcpus', + 'hugepagesize', 'hugepages', 'root_pwd', 'os_version', + 'os_status', 'interfaces', 'is_deployment', + 'description', 'deleted', 'status', 
'ipmi_user', + 'ipmi_passwd', 'ipmi_addr', 'ip', 'status', 'user', + 'passwd', 'hwm_id', 'hwm_ip', 'cluster_id', + 'vcpu_pin_set', 'dvs_high_cpuset', 'pci_high_cpuset', + 'os_cpus', 'dvs_cpus', 'config_set_id') -CREATE_PARAMS = ('id', 'name', 'description', 'resource_type', 'dmi_uuid','role', 'cluster', 'os_version', 'os_status', 'interfaces', 'is_deployment','status','ipmi_user','ipmi_passwd','ipmi_addr', 'ip', 'status', 'user', 'passwd') +CREATE_PARAMS = ('id', 'name', 'description', 'resource_type', 'dmi_uuid', + 'role', 'cluster', 'os_version', 'os_status', + 'interfaces', 'is_deployment', 'status', 'ipmi_user', + 'ipmi_passwd', 'ipmi_addr', 'ip', 'status', 'user', + 'passwd', 'hwm_id', 'hwm_ip', 'cluster_id', + 'vcpu_pin_set', 'dvs_high_cpuset', 'pci_high_cpuset', + 'os_cpus', 'dvs_cpus', 'config_set_id') -DEFAULT_PAGE_SIZE = 20 +DEFAULT_PAGE_SIZE = 200 SORT_DIR_VALUES = ('asc', 'desc') -SORT_KEY_VALUES = ('name', 'id', 'cluster_id', 'created_at', 'updated_at', 'status') +SORT_KEY_VALUES = ( + 'name', 'id', 'cluster_id', 'created_at', 'updated_at', 'status') OS_REQ_ID_HDR = 'x-openstack-request-id' class Host(base.Resource): + def __repr__(self): return "" % self._info @@ -79,7 +95,7 @@ class HostManager(base.ManagerWithFind): meta[key] = strutils.bool_from_string(meta[key]) return self._format_host_meta_for_user(meta) - + def _host_meta_to_headers(self, fields): headers = {} fields_copy = copy.deepcopy(fields) @@ -111,12 +127,12 @@ class HostManager(base.ManagerWithFind): """ host_id = base.getid(host) resp, body = self.client.get('/v1/nodes/%s' - % urlparse.quote(str(host_id))) - #meta = self._host_meta_from_headers(resp.headers) + % urlparse.quote(str(host_id))) + # meta = self._host_meta_from_headers(resp.headers) return_request_id = kwargs.get('return_req_id', None) if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - #return Host(self, meta) + # return Host(self, meta) return Host(self, self._format_host_meta_for_user(body['host'])) def _build_params(self, parameters): @@ -225,7 +241,7 @@ class HostManager(base.ManagerWithFind): TODO(bcwaldon): document accepted params """ - + fields = {} for field in kwargs: if field in CREATE_PARAMS: @@ -235,7 +251,7 @@ class HostManager(base.ManagerWithFind): else: msg = 'create() got an unexpected keyword argument \'%s\'' raise TypeError(msg % field) - + hdrs = self._host_meta_to_headers(fields) resp, body = self.client.post('/v1/nodes', @@ -254,7 +270,7 @@ class HostManager(base.ManagerWithFind): return_request_id = kwargs.get('return_req_id', None) if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - + def update(self, host, **kwargs): """Update an host @@ -267,7 +283,7 @@ class HostManager(base.ManagerWithFind): fields[field] = kwargs[field] elif field == 'return_req_id': continue - #else: + # else: # msg = 'update() got an unexpected keyword argument \'%s\'' # raise TypeError(msg % field) @@ -280,7 +296,7 @@ class HostManager(base.ManagerWithFind): return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) return Host(self, self._format_host_meta_for_user(body['host_meta'])) - + def discover_host(self, **kwargs): """discovery host TODO(bcwaldon): document accepted params @@ -292,18 +308,35 @@ class HostManager(base.ManagerWithFind): fields[field] = kwargs[field] elif field == 'return_req_id': continue - + hdrs.update(self._host_meta_to_headers(fields)) url = '/v1/discover_host/' resp, body = self.client.post(url, headers=hdrs, data=hdrs) 
return Host(self, self._format_host_meta_for_user(body)) - + + def get_min_mac(self, hwm_id): + params = dict() + resp, body = self.client.get('/v1/nodes') + hosts = body.get('nodes') + if hosts: + for host in hosts: + if hwm_id == host.get('hwm_id'): + resp, host_body = self.client.get('/v1/nodes/%s' % + host['id']) + interfaces = host_body['host'].get('interfaces') + if interfaces: + mac_list = [interface['mac'] for interface in + interfaces if interface.get('mac')] + if mac_list: + params['mac'] = min(mac_list) + return params + def add_discover_host(self, **kwargs): """Add a discover host TODO(bcwaldon): document accepted params """ - + fields = {} for field in kwargs: if field in CREATE_PARAMS: @@ -313,19 +346,22 @@ class HostManager(base.ManagerWithFind): else: msg = 'create() got an unexpected keyword argument \'%s\'' raise TypeError(msg % field) - + + hwm_id = fields.get('hwm_id') + params = self.get_min_mac(hwm_id) + fields['mac'] = params.get('mac') hdrs = self._host_meta_to_headers(fields) resp, body = self.client.post('/v1/discover/nodes', headers=hdrs, data=hdrs) - + return_request_id = kwargs.get('return_req_id', None) if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) return Host(self, self._format_host_meta_for_user(body['host'])) - + def delete_discover_host(self, host, **kwargs): """Delete a discover host.""" url = "/v1/discover/nodes/%s" % base.getid(host) @@ -333,7 +369,7 @@ class HostManager(base.ManagerWithFind): return_request_id = kwargs.get('return_req_id', None) if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - + def list_discover_host(self, **kwargs): """Get a list of hosts. @@ -420,7 +456,7 @@ class HostManager(base.ManagerWithFind): fields[field] = kwargs[field] elif field == 'return_req_id': continue - #else: + # else: # msg = 'update() got an unexpected keyword argument \'%s\'' # raise TypeError(msg % field) @@ -433,14 +469,14 @@ class HostManager(base.ManagerWithFind): return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) return Host(self, self._format_host_meta_for_user(body['host'])) - + def get_discover_host_detail(self, host_id, **kwargs): ''' ''' resp, body = self.client.get('/v1/discover/nodes/%s' % host_id) - #meta = self._host_meta_from_headers(resp.headers) + # meta = self._host_meta_from_headers(resp.headers) return_request_id = kwargs.get('return_req_id', None) if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - #return Host(self, meta) - return Host(self, self._format_host_meta_for_user(body['host'])) \ No newline at end of file + # return Host(self, meta) + return Host(self, self._format_host_meta_for_user(body['host'])) diff --git a/code/daisyclient/daisyclient/v1/hwm_nodes.py b/code/daisyclient/daisyclient/v1/hwm_nodes.py new file mode 100755 index 00000000..c701ab6a --- /dev/null +++ b/code/daisyclient/daisyclient/v1/hwm_nodes.py @@ -0,0 +1,407 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import sys +import copy + +from oslo_utils import encodeutils +from oslo_utils import strutils +import six + +from daisyclient.common import utils +from daisyclient.openstack.common.apiclient import base +from daisyclient.common.http import HTTPClient + +reload(sys) +sys.setdefaultencoding('utf-8') + +DEFAULT_PAGE_SIZE = 200 + +SORT_DIR_VALUES = ('asc', 'desc') +SORT_KEY_VALUES = ('serialNo', 'created_at', 'updated_at', 'status') + +OS_REQ_ID_HDR = 'x-openstack-request-id' + + +class Node(base.Resource): + def __repr__(self): + return "" % self._info + + def update(self, **fields): + self.manager.update(self, **fields) + + def delete(self, **kwargs): + return self.manager.delete(self) + + def data(self, **kwargs): + return self.manager.data(self, **kwargs) + + +class NodeManager(base.ManagerWithFind): + resource_class = Node + + def get_hwm_client(self, hwm_ip): + if hwm_ip: + endpoint = "http://" + hwm_ip + ":8089" + client = HTTPClient(endpoint) + else: + client = self.client + + return client + + def _list(self, url, hwm_ip, response_key, obj_class=None, body=None): + hwm_client = self.get_hwm_client(hwm_ip) + resp, body = hwm_client.get(url) + + if obj_class is None: + obj_class = self.resource_class + + data = body[response_key] + return ([obj_class(self, res, loaded=True) for res in data if res], + resp) + + def _host_meta_from_headers(self, headers): + meta = {'properties': {}} + safe_decode = encodeutils.safe_decode + for key, value in six.iteritems(headers): + value = safe_decode(value, incoming='utf-8') + if key.startswith('x-image-meta-property-'): + _key = safe_decode(key[22:], incoming='utf-8') + meta['properties'][_key] = value + elif key.startswith('x-image-meta-'): + _key = safe_decode(key[13:], incoming='utf-8') + meta[_key] = value + + for key in ['is_public', 'protected', 'deleted']: + if key in meta: + meta[key] = strutils.bool_from_string(meta[key]) + + return self._format_host_meta_for_user(meta) + + def _host_meta_to_headers(self, fields): + headers = {} + fields_copy = copy.deepcopy(fields) + + # NOTE(flaper87): Convert to str, headers + # that are not instance of basestring. All + # headers will be encoded later, before the + # request is sent. + + for key, value in six.iteritems(fields_copy): + headers['%s' % key] = utils.to_str(value) + return headers + + @staticmethod + def _format_host_meta_for_user(meta): + for key in ['size', 'min_ram', 'min_disk']: + if key in meta: + try: + meta[key] = int(meta[key]) if meta[key] else 0 + except ValueError: + pass + return meta + + def _build_params(self, parameters): + params = {'limit': parameters.get('page_size', DEFAULT_PAGE_SIZE)} + + if 'marker' in parameters: + params['marker'] = parameters['marker'] + + sort_key = parameters.get('sort_key') + if sort_key is not None: + if sort_key in SORT_KEY_VALUES: + params['sort_key'] = sort_key + else: + raise ValueError('sort_key must be one of the following: %s.' + % ', '.join(SORT_KEY_VALUES)) + + sort_dir = parameters.get('sort_dir') + if sort_dir is not None: + if sort_dir in SORT_DIR_VALUES: + params['sort_dir'] = sort_dir + else: + raise ValueError('sort_dir must be one of the following: %s.' + % ', '.join(SORT_DIR_VALUES)) + + filters = parameters.get('filters', {}) + params.update(filters) + + return params + + def list(self, **kwargs): + """Get a list of nodes. 
+ :param page_size: number of items to request in each paginated request + :param limit: maximum number of hosts to return + :param marker: begin returning hosts that appear later in the host + list than that represented by this host id + :param filters: dict of direct comparison filters that mimics the + structure of an host object + :param return_request_id: If an empty list is provided, populate this + list with the request ID value from the header + x-openstack-request-id + :rtype: list of :class:`Host` + """ + absolute_limit = kwargs.get('limit') + page_size = kwargs.get('page_size', DEFAULT_PAGE_SIZE) + + def paginate(qp, return_request_id=None): + for param, value in six.iteritems(qp): + if isinstance(value, six.string_types): + # Note(flaper87) Url encoding should + # be moved inside http utils, at least + # shouldn't be here. + # + # Making sure all params are str before + # trying to encode them + qp[param] = encodeutils.safe_decode(value) + + hwm_ip = kwargs.get('hwm_ip') + url = '/api/v1.0/hardware/nodes' + nodes, resp = self._list(url, hwm_ip, "nodes") + + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + for node in nodes: + yield node + + return_request_id = kwargs.get('return_req_id', None) + + params = self._build_params(kwargs) + + seen = 0 + while True: + seen_last_page = 0 + filtered = 0 + for host in paginate(params, return_request_id): + last_host = host.serialNo + + if (absolute_limit is not None and + seen + seen_last_page >= absolute_limit): + # Note(kragniz): we've seen enough images + return + else: + seen_last_page += 1 + yield host + + seen += seen_last_page + + if seen_last_page + filtered == 0: + # Note(kragniz): we didn't get any hosts in the last page + return + + if absolute_limit is not None and seen >= absolute_limit: + # Note(kragniz): reached the limit of hosts to return + return + + if page_size and seen_last_page + filtered < page_size: + # Note(kragniz): we've reached the last page of the hosts + return + + # Note(kragniz): there are more hosts to come + params['marker'] = last_host + seen_last_page = 0 + + def location(self, **kwargs): + """Get location of node.""" + hwm_ip = kwargs.get('hwm_ip') + hwm_id = kwargs.get('hwm_id') + hwm_client = self.get_hwm_client(hwm_ip) + url = '/api/v1.0/hardware/nodes/%s/location' % hwm_id + resp, body = hwm_client.get(url) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return Node(self, self._format_host_meta_for_user(body)) + + def restart(self, **kwargs): + """Restart node.""" + hdrs = {} + hwm_ip = kwargs.get('hwm_ip') + hwm_id = kwargs.get('hwm_id') + hwm_client = self.get_hwm_client(hwm_ip) + url = '/api/v1.0/hardware/nodes/%s/restart_actions' % hwm_id + resp, body = hwm_client.post(url, headers=hdrs, data=hdrs) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return Node(self, self._format_host_meta_for_user(body)) + + def restart_state(self, **kwargs): + """Get restart state of node.""" + hwm_ip = kwargs.get('hwm_ip') + action_id = kwargs.get('action_id') + hwm_client = self.get_hwm_client(hwm_ip) + url = '/api/v1.0/hardware/nodes/restart_actions/%s' % action_id + resp, body = hwm_client.get(url) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + 
return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return Node(self, self._format_host_meta_for_user(body)) + + def set_boot(self, **kwargs): + """Set boot type of node.""" + hdrs = {} + hwm_ip = kwargs.get('hwm_ip') + hwm_id = kwargs.get('hwm_id') + boot_type = kwargs.get('boot_type') + hwm_client = self.get_hwm_client(hwm_ip) + url = '/api/v1.0/hardware/nodes/%s/one_time_boot?from=%s' % \ + (hwm_id, boot_type) + resp, body = hwm_client.post(url, headers=hdrs, data=hdrs) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return Node(self, self._format_host_meta_for_user(body)) + + def update(self, **kwargs): + """Update hosts.""" + absolute_limit = kwargs.get('limit') + page_size = kwargs.get('page_size', DEFAULT_PAGE_SIZE) + + hwm_ip = kwargs.get('hwm_ip') + hwm_client = self.get_hwm_client(hwm_ip) + hwm_url = '/api/v1.0/hardware/nodes' + hwm_resp, hwm_body = hwm_client.get(hwm_url) + hwm_body['hwm_ip'] = hwm_ip + + def paginate(qp, return_request_id=None): + for param, value in six.iteritems(qp): + if isinstance(value, six.string_types): + # Note(flaper87) Url encoding should + # be moved inside http utils, at least + # shouldn't be here. + # + # Making sure all params are str before + # trying to encode them + qp[param] = encodeutils.safe_decode(value) + + hdrs = self._host_meta_to_headers(hwm_body) + url = '/v1/hwm_nodes' + resp, body = self.client.post(url, headers={}, data=hdrs) + obj_class = self.resource_class + hosts = [obj_class(self, res, loaded=True) for res in body['nodes'] + if res] + + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + for host in hosts: + yield host + + return_request_id = kwargs.get('return_req_id', None) + + params = self._build_params(kwargs) + + seen = 0 + while True: + seen_last_page = 0 + filtered = 0 + for host in paginate(params, return_request_id): + last_host = host.id + + if (absolute_limit is not None and + seen + seen_last_page >= absolute_limit): + # Note(kragniz): we've seen enough images + return + else: + seen_last_page += 1 + yield host + + seen += seen_last_page + + if seen_last_page + filtered == 0: + # Note(kragniz): we didn't get any hosts in the last page + return + + if absolute_limit is not None and seen >= absolute_limit: + # Note(kragniz): reached the limit of hosts to return + return + + if page_size and seen_last_page + filtered < page_size: + # Note(kragniz): we've reached the last page of the hosts + return + + # Note(kragniz): there are more hosts to come + params['marker'] = last_host + seen_last_page = 0 + + def cloud_state(self, **kwargs): + """To inform provider the cloud state.""" + hdrs = dict() + fields = dict() + provider_ip = kwargs.pop('provider_ip') + operation = kwargs.get('operation') + fields["envName"] = kwargs.get('name') + fields["envUrl"] = kwargs.get('url') + hwm_url = '/v1/hwm' + resp, hwm_body = self.client.get(hwm_url) + hwms_ip = [hwm['hwm_ip'] for hwm in hwm_body['hwm']] + if provider_ip in hwms_ip: + url = '/api/envChangeNotification' + provider_client = self.get_hwm_client(provider_ip) + if operation == "add": + hdrs = {"add_environment": fields} + if operation == "delete": + hdrs = {"delete_environment": fields} + + resp, body = provider_client.post(url, data=hdrs) + else: + return + + def get_min_mac(self, hwm_id): + params = dict() + resp, body = self.client.get('/v1/nodes') + hosts = body.get('nodes') + if hosts: + 
for host in hosts: + if hwm_id == host.get('hwm_id'): + params['host_id'] = host['id'] + resp, host_body = self.client.get('/v1/nodes/%s' % + host['id']) + interfaces = host_body['host'].get('interfaces') + if interfaces: + mac_list = [interface['mac'] for interface in + interfaces if interface.get('mac')] + if mac_list: + params['mac'] = min(mac_list) + return params + + def pxe_host_discover(self, **kwargs): + """Pxe host discover.""" + hdrs = dict() + hwm_ip = kwargs.get('hwm_ip') + hwm_id = kwargs.get('hwm_id') + hwm_client = self.get_hwm_client(hwm_ip) + pxe_url = '/api/v1.0/hardware/nodes/%s/one_time_boot?from=pxe' % \ + hwm_id + resp, pxe_body = hwm_client.post(pxe_url, headers=hdrs, data=hdrs) + params = self.get_min_mac(hwm_id) + params['status'] = "DISCOVERING" + resp, body = self.client.post( + '/v1/pxe_discover/nodes', headers=params, data=params) + restart_url = '/api/v1.0/hardware/nodes/%s/restart_actions' % \ + hwm_id + resp, restart_body = hwm_client.post(restart_url, headers=hdrs, + data=hdrs) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return Node(self, self._format_host_meta_for_user(restart_body)) diff --git a/code/daisyclient/daisyclient/v1/hwms.py b/code/daisyclient/daisyclient/v1/hwms.py new file mode 100755 index 00000000..8b597ab7 --- /dev/null +++ b/code/daisyclient/daisyclient/v1/hwms.py @@ -0,0 +1,248 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
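# ---------------------------------------------------------------------------
# Illustrative sketch for the hardware-manager (hwm) control methods defined
# in the NodeManager above: set one-time PXE boot, power-cycle the node and,
# given an action id, poll the restart action.  'manager', the hwm address
# and the ids are hypothetical placeholders.
def pxe_boot_and_restart(manager, hwm_ip, hwm_id, action_id=None):
    manager.set_boot(hwm_ip=hwm_ip, hwm_id=hwm_id, boot_type='pxe')
    restarted = manager.restart(hwm_ip=hwm_ip, hwm_id=hwm_id)
    if action_id is not None:
        # .../hardware/nodes/restart_actions/<action_id> reports progress
        return manager.restart_state(hwm_ip=hwm_ip, action_id=action_id)
    return restarted
# ---------------------------------------------------------------------------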
+ +import copy + +from oslo_utils import encodeutils +from oslo_utils import strutils +import six +import six.moves.urllib.parse as urlparse + +from daisyclient.common import utils +from daisyclient.openstack.common.apiclient import base + +UPDATE_PARAMS = ('hwm_ip', 'description') + +CREATE_PARAMS = ('id', 'hwm_ip', 'description') + +DEFAULT_PAGE_SIZE = 20 + +SORT_DIR_VALUES = ('asc', 'desc') +SORT_KEY_VALUES = ('name', 'created_at', 'updated_at') + +OS_REQ_ID_HDR = 'x-openstack-request-id' + + +class Hwm(base.Resource): + def __repr__(self): + return "" % self._info + + def update(self, **fields): + self.manager.update(self, **fields) + + def delete(self, **kwargs): + return self.manager.delete(self) + + def data(self, **kwargs): + return self.manager.data(self, **kwargs) + + +class HwmManager(base.ManagerWithFind): + resource_class = Hwm + + def _list(self, url, response_key, obj_class=None, body=None): + resp, body = self.client.get(url) + + if obj_class is None: + obj_class = self.resource_class + + data = body[response_key] + return ([obj_class(self, res, loaded=True) for res in data if res], + resp) + + def _service_meta_from_headers(self, headers): + meta = {'properties': {}} + safe_decode = encodeutils.safe_decode + for key, value in six.iteritems(headers): + value = safe_decode(value, incoming='utf-8') + if key.startswith('x-image-meta-property-'): + _key = safe_decode(key[22:], incoming='utf-8') + meta['properties'][_key] = value + elif key.startswith('x-image-meta-'): + _key = safe_decode(key[13:], incoming='utf-8') + meta[_key] = value + + for key in ['is_public', 'protected', 'deleted']: + if key in meta: + meta[key] = strutils.bool_from_string(meta[key]) + + return self._format_template_meta_for_user(meta) + + def _template_meta_to_headers(self, fields): + headers = {} + fields_copy = copy.deepcopy(fields) + + # NOTE(flaper87): Convert to str, headers + # that are not instance of basestring. All + # headers will be encoded later, before the + # request is sent. + + for key, value in six.iteritems(fields_copy): + headers['%s' % key] = utils.to_str(value) + return headers + + @staticmethod + def _format_image_meta_for_user(meta): + for key in ['size', 'min_ram', 'min_disk']: + if key in meta: + try: + meta[key] = int(meta[key]) if meta[key] else 0 + except ValueError: + pass + return meta + + @staticmethod + def _format_template_meta_for_user(meta): + for key in ['size', 'min_ram', 'min_disk']: + if key in meta: + try: + meta[key] = int(meta[key]) if meta[key] else 0 + except ValueError: + pass + return meta + + def _build_params(self, parameters): + params = {'limit': parameters.get('page_size', DEFAULT_PAGE_SIZE)} + + if 'marker' in parameters: + params['marker'] = parameters['marker'] + + sort_key = parameters.get('sort_key') + if sort_key is not None: + if sort_key in SORT_KEY_VALUES: + params['sort_key'] = sort_key + else: + raise ValueError('sort_key must be one of the following: %s.' + % ', '.join(SORT_KEY_VALUES)) + + sort_dir = parameters.get('sort_dir') + if sort_dir is not None: + if sort_dir in SORT_DIR_VALUES: + params['sort_dir'] = sort_dir + else: + raise ValueError('sort_dir must be one of the following: %s.' 
+ % ', '.join(SORT_DIR_VALUES)) + + filters = parameters.get('filters', {}) + params.update(filters) + + return params + + def get(self, hwm_id): + """get hwm information by id.""" + url = "/v1/hwm/%s" % base.getid(hwm_id) + resp, body = self.client.get(url) + return Hwm(self, self._format_template_meta_for_user(body['hwm'])) + + def list(self, **kwargs): + """Get a list of hwm. + + :param page_size: number of items to request in each paginated request + :param limit: maximum number of services to return + :param marker: begin returning services that appear later in the + service ist than that represented by this service id + :param filters: dict of direct comparison filters that mimics the + structure of an service object + :param return_request_id: If an empty list is provided, populate this + list with the request ID value from the header + x-openstack-request-id + :rtype: list of :class:`Service` + """ + absolute_limit = kwargs.get('limit') + + def paginate(qp, return_request_id=None): + for param, value in six.iteritems(qp): + if isinstance(value, six.string_types): + # Note(flaper87) Url encoding should + # be moved inside http utils, at least + # shouldn't be here. + # + # Making sure all params are str before + # trying to encode them + qp[param] = encodeutils.safe_decode(value) + url = '/v1/hwm?%s' % urlparse.urlencode(qp) + hwms, resp = self._list(url, "hwm") + + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + for hwm in hwms: + yield hwm + + return_request_id = kwargs.get('return_req_id', None) + + params = self._build_params(kwargs) + + seen = 0 + seen_last_page = 0 + + for hwm in paginate(params, return_request_id): + if (absolute_limit is not None and + seen + seen_last_page >= absolute_limit): + # Note(kragniz): we've seen enough images + return + else: + seen_last_page += 1 + yield hwm + + def add(self, **kwargs): + """Add a hwm. 
+ + TODO(bcwaldon): document accepted params + """ + fields = {} + for field in kwargs: + if field in CREATE_PARAMS: + fields[field] = kwargs[field] + elif field == 'return_req_id': + continue + else: + msg = 'create() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + hdrs = self._template_meta_to_headers(fields) + + resp, body = self.client.post('/v1/hwm', headers=hdrs, data=hdrs) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return Hwm(self, self._format_template_meta_for_user(body['hwm'])) + + def delete(self, hwm_id): + """Delete a hwm.""" + url = "/v1/hwm/%s" % base.getid(hwm_id) + resp, body = self.client.delete(url) + + def update(self, hwm_id, **kwargs): + """Update an hwm""" + hdrs = {} + fields = {} + for field in kwargs: + if field in UPDATE_PARAMS: + fields[field] = kwargs[field] + elif field == 'return_req_id': + continue + else: + msg = 'update() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + + hdrs.update(self._template_meta_to_headers(fields)) + url = '/v1/hwm/%s' % base.getid(hwm_id) + resp, body = self.client.put(url, headers=None, data=hdrs) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return Hwm(self, self._format_template_meta_for_user(body['hwm'])) diff --git a/code/daisyclient/daisyclient/v1/image_members.py b/code/daisyclient/daisyclient/v1/image_members.py index f78fe513..a9ed37b0 100755 --- a/code/daisyclient/daisyclient/v1/image_members.py +++ b/code/daisyclient/daisyclient/v1/image_members.py @@ -17,6 +17,7 @@ from daisyclient.openstack.common.apiclient import base class ImageMember(base.Resource): + def __repr__(self): return "" % self._info @@ -44,7 +45,7 @@ class ImageMemberManager(base.ManagerWithFind): if image and member: try: out.append(self.get(image, member)) - #TODO(bcwaldon): narrow this down to 404 + # TODO(bcwaldon): narrow this down to 404 except Exception: pass elif image: @@ -52,7 +53,7 @@ class ImageMemberManager(base.ManagerWithFind): elif member: out.extend(self._list_by_member(member)) else: - #TODO(bcwaldon): figure out what is appropriate to do here as we + # TODO(bcwaldon): figure out what is appropriate to do here as we # are unable to provide the requested response pass return out diff --git a/code/daisyclient/daisyclient/v1/images.py b/code/daisyclient/daisyclient/v1/images.py index 39ee24aa..9016a57f 100755 --- a/code/daisyclient/daisyclient/v1/images.py +++ b/code/daisyclient/daisyclient/v1/images.py @@ -26,7 +26,7 @@ from daisyclient.openstack.common.apiclient import base UPDATE_PARAMS = ('name', 'disk_format', 'container_format', 'min_disk', 'min_ram', 'owner', 'size', 'is_public', 'protected', 'location', 'checksum', 'copy_from', 'properties', - #NOTE(bcwaldon: an attempt to update 'deleted' will be + # NOTE(bcwaldon: an attempt to update 'deleted' will be # ignored, but we need to support it for backwards- # compatibility with the legacy client library 'deleted') @@ -43,6 +43,7 @@ OS_REQ_ID_HDR = 'x-openstack-request-id' class Image(base.Resource): + def __repr__(self): return "" % self._info diff --git a/code/daisyclient/daisyclient/v1/install.py b/code/daisyclient/daisyclient/v1/install.py index bac60a8a..9df6add5 100755 --- a/code/daisyclient/daisyclient/v1/install.py +++ b/code/daisyclient/daisyclient/v1/install.py @@ -14,23 +14,20 @@ # under 
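# ---------------------------------------------------------------------------
# Illustrative sketch for the HwmManager added above (assumptions: the
# manager can be constructed directly from an HTTPClient, the way the daisy
# v1 client builds its other managers; the endpoint and field values are
# placeholders).
from daisyclient.common.http import HTTPClient
from daisyclient.v1.hwms import HwmManager

def register_hwm(daisy_endpoint='http://127.0.0.1:19292',
                 hwm_ip='192.168.0.10', description='lab hwm'):
    manager = HwmManager(HTTPClient(daisy_endpoint))
    hwm = manager.add(hwm_ip=hwm_ip, description=description)    # CREATE_PARAMS only
    known = [h.hwm_ip for h in manager.list(sort_key='created_at',
                                            sort_dir='desc')]    # list() yields lazily
    manager.update(hwm, description=description + ' (updated)')  # UPDATE_PARAMS only
    return hwm, known
# ---------------------------------------------------------------------------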
the License. import copy -import os -from oslo_utils import encodeutils -from oslo_utils import strutils import six -import six.moves.urllib.parse as urlparse from daisyclient.common import utils from daisyclient.openstack.common.apiclient import base -#import daisy.queue_process as queue -#from daisy.queue_process import exec_cmd +# import daisy.queue_process as queue +# from daisy.queue_process import exec_cmd -CREATE_PARAMS = ('cluster_id', 'version_id','deployment_interface') +CREATE_PARAMS = ('cluster_id', 'version_id', 'deployment_interface') OS_REQ_ID_HDR = 'x-openstack-request-id' class Install(base.Resource): + def __repr__(self): return "" % self._info @@ -46,7 +43,7 @@ class Install(base.Resource): class InstallManager(base.ManagerWithFind): resource_class = Install - + def _install_meta_to_headers(self, fields): headers = {} fields_copy = copy.deepcopy(fields) @@ -72,7 +69,7 @@ class InstallManager(base.ManagerWithFind): def list(self, **kwargs): pass - + def install(self, **kwargs): """Install a cluster @@ -87,12 +84,13 @@ class InstallManager(base.ManagerWithFind): raise TypeError(msg % field) # if fields.has_key("version_id"): - # url = '/v1/install/%s/version/%s' % (fields['cluster_id'], fields['version_id']) + # url = '/v1/install/%s/version/%s' % (fields['cluster_id'], + # fields['version_id']) # else: url = '/v1/install' - + hdrs = self._install_meta_to_headers(fields) - resp, body = self.client.post(url,headers=hdrs,data=hdrs) + resp, body = self.client.post(url, headers=hdrs, data=hdrs) return Install(self, self._format_install_meta_for_user(body)) def export_db(self, **kwargs): @@ -107,12 +105,12 @@ class InstallManager(base.ManagerWithFind): else: msg = 'export_db() got an unexpected keyword argument \'%s\'' raise TypeError(msg % field) - + url = '/v1/export_db' hdrs = self._install_meta_to_headers(fields) - resp, body = self.client.post(url,headers=hdrs,data=hdrs) + resp, body = self.client.post(url, headers=hdrs, data=hdrs) return Install(self, self._format_install_meta_for_user(body)) - + def disk_array_update(self, cluster, **kwargs): UPDATE_DISK_ARRAY_PARAMS = [] fields = {} @@ -120,10 +118,11 @@ class InstallManager(base.ManagerWithFind): if field in UPDATE_DISK_ARRAY_PARAMS: fields[field] = kwargs[field] else: - msg = 'disk_array_update() got an unexpected keyword argument \'%s\'' + msg = 'disk_array_update() \ + got an unexpected keyword argument \'%s\'' raise TypeError(msg % field) - + url = '/v1/disk_array/%s' % base.getid(cluster) hdrs = self._install_meta_to_headers(fields) - resp, body = self.client.post(url,headers=hdrs,data=hdrs) - return Install(self, self._format_install_meta_for_user(body)) \ No newline at end of file + resp, body = self.client.post(url, headers=hdrs, data=hdrs) + return Install(self, self._format_install_meta_for_user(body)) diff --git a/code/daisyclient/daisyclient/v1/networks.py b/code/daisyclient/daisyclient/v1/networks.py index 49f0be9c..a453fabe 100755 --- a/code/daisyclient/daisyclient/v1/networks.py +++ b/code/daisyclient/daisyclient/v1/networks.py @@ -23,9 +23,19 @@ import six.moves.urllib.parse as urlparse from daisyclient.common import utils from daisyclient.openstack.common.apiclient import base -UPDATE_PARAMS = ('alias', 'mtu', 'vlan_id', 'ip', 'name', 'cluster_id','ip_ranges', 'vlan_start','vlan_end','gateway','cidr', 'description', 'type','ml2_type','network_type','physnet_name','capability') +UPDATE_PARAMS = ('alias', 'mtu', 'vlan_id', 'ip', 'name', 'cluster_id', + 'ip_ranges', 'vlan_start', 'vlan_end', + 'gateway', 
'cidr', 'description', 'type', 'ml2_type', + 'network_type', 'physnet_name', 'capability', + 'segmentation_type', 'vni_start', 'vni_end', + 'gre_id_start', 'gre_id_end') -CREATE_PARAMS = ('alias', 'mtu', 'vlan_id', 'ip', 'id', 'name', 'cluster_id','ip_ranges', 'vlan_start','vlan_end','gateway','cidr', 'description', 'type', 'ml2_type','network_type','physnet_name','capability') +CREATE_PARAMS = ('alias', 'mtu', 'vlan_id', 'ip', 'id', 'name', 'cluster_id', + 'ip_ranges', 'vlan_start', + 'vlan_end', 'gateway', 'cidr', 'description', 'type', + 'ml2_type', 'network_type', 'physnet_name', 'capability', + 'segmentation_type', 'vni_start', 'vni_end', 'gre_id_start', + 'gre_id_end') DEFAULT_PAGE_SIZE = 20 @@ -36,6 +46,7 @@ OS_REQ_ID_HDR = 'x-openstack-request-id' class Network(base.Resource): + def __repr__(self): return "" % self._info @@ -79,7 +90,7 @@ class NetworkManager(base.ManagerWithFind): meta[key] = strutils.bool_from_string(meta[key]) return self._format_network_meta_for_user(meta) - + def _network_meta_to_headers(self, fields): headers = {} fields_copy = copy.deepcopy(fields) @@ -92,7 +103,7 @@ class NetworkManager(base.ManagerWithFind): for key, value in six.iteritems(fields_copy): headers['%s' % key] = utils.to_str(value) return headers - + @staticmethod def _format_image_meta_for_user(meta): for key in ['size', 'min_ram', 'min_disk']: @@ -121,12 +132,13 @@ class NetworkManager(base.ManagerWithFind): """ network_id = base.getid(network) resp, body = self.client.get('/v1/networks/%s' - % urlparse.quote(str(network_id))) - #meta = self._network_meta_from_headers(resp.headers) + % urlparse.quote(str(network_id))) + # meta = self._network_meta_from_headers(resp.headers) return_request_id = kwargs.get('return_req_id', None) if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - return Network(self, self._format_network_meta_for_user(body['network'])) + return Network(self, self._format_network_meta_for_user( + body['network'])) def data(self, image, do_checksum=True, **kwargs): """Get the raw data for a specific image. 
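# ---------------------------------------------------------------------------
# Illustrative sketch: the segmentation fields newly accepted above
# ('segmentation_type', 'vni_start'/'vni_end', 'gre_id_start'/'gre_id_end')
# are ordinary create/update keys passed through CREATE_PARAMS/UPDATE_PARAMS
# like the existing ones.  Field values are placeholders; the filter mirrors
# the pattern the daisy shell uses for its *-add commands.
from daisyclient.v1 import networks

def vxlan_network_fields(cluster_id):
    raw = {'name': 'DATAPLANE', 'cluster_id': cluster_id,
           'segmentation_type': 'vxlan', 'vni_start': 2, 'vni_end': 4094,
           'unknown_key': 'dropped by the filter below'}
    # keep only keys the create API accepts
    return dict((k, v) for k, v in raw.items() if k in networks.CREATE_PARAMS)
# ---------------------------------------------------------------------------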
@@ -180,7 +192,8 @@ class NetworkManager(base.ManagerWithFind): :param page_size: number of items to request in each paginated request :param limit: maximum number of networks to return - :param marker: begin returning networks that appear later in the network + :param marker: begin returning networks that + appear later in the network list than that represented by this network id :param filters: dict of direct comparison filters that mimics the structure of an network object @@ -202,8 +215,10 @@ class NetworkManager(base.ManagerWithFind): # Making sure all params are str before # trying to encode them qp[param] = encodeutils.safe_decode(value) - url = '/v1/clusters/%s/networks?%s' % (qp['cluster_id'], urlparse.urlencode(qp)) \ - if qp.get('cluster_id', None) else '/v1/networks?%s' % urlparse.urlencode(qp) + url = '/v1/clusters/%s/networks?%s' % ( + qp['cluster_id'], urlparse.urlencode(qp)) \ + if qp.get('cluster_id', None) else\ + '/v1/networks?%s' % urlparse.urlencode(qp) networks, resp = self._list(url, "networks") if return_request_id is not None: @@ -254,7 +269,7 @@ class NetworkManager(base.ManagerWithFind): TODO(bcwaldon): document accepted params """ - + fields = {} for field in kwargs: if field in CREATE_PARAMS: @@ -264,7 +279,7 @@ class NetworkManager(base.ManagerWithFind): else: msg = 'create() got an unexpected keyword argument \'%s\'' raise TypeError(msg % field) - + hdrs = self._network_meta_to_headers(fields) resp, body = self.client.post('/v1/networks', headers=hdrs, @@ -273,7 +288,8 @@ class NetworkManager(base.ManagerWithFind): if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - return Network(self, self._format_network_meta_for_user(body['network'])) + return Network(self, self._format_network_meta_for_user( + body['network'])) def delete(self, network, **kwargs): """Delete an network.""" @@ -282,7 +298,7 @@ class NetworkManager(base.ManagerWithFind): return_request_id = kwargs.get('return_req_id', None) if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - + def update(self, network, **kwargs): """Update an network @@ -306,4 +322,5 @@ class NetworkManager(base.ManagerWithFind): if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - return Network(self, self._format_network_meta_for_user(body['network_meta'])) + return Network(self, self._format_network_meta_for_user(body[ + 'network_meta'])) diff --git a/code/daisyclient/daisyclient/v1/param_helper.py b/code/daisyclient/daisyclient/v1/param_helper.py index 3e860db5..362cd90c 100755 --- a/code/daisyclient/daisyclient/v1/param_helper.py +++ b/code/daisyclient/daisyclient/v1/param_helper.py @@ -2,13 +2,15 @@ import os + def _read_template_file(args): template_file = args.params_file_path if not os.path.exists(template_file): print("Params_file not exist or permission deiny.") return with open(template_file) as tfp: - params = ''.join(tfp.read().replace("\\'", "").split(" ")).replace("\n", "") + params = ''.join( + tfp.read().replace("\\'", "").split(" ")).replace("\n", "") return dict(eval(params)) CLUSTER_ADD_PARAMS_FILE = { @@ -22,48 +24,48 @@ CLUSTER_ADD_PARAMS_FILE = { 'networks': [], 'nodes': [], 'logic_networks': [{ - 'name': 'internal1', - 'physnet_name': 'PRIVATE1', - 'segmentation_id': 200, - 'segmentation_type': 'vlan', - 'shared': True, - 'subnets': [{'cidr': '192.168.1.0/24', - 'dns_nameservers': ['8.8.4.4', - '8.8.8.8'], - 'floating_ranges': [['192.168.1.2', - 
'192.168.1.200']], - 'gateway': '192.168.1.1', - 'name': 'subnet2'}, - {'cidr': '172.16.1.0/24', - 'dns_nameservers': ['8.8.4.4', - '8.8.8.8'], - 'floating_ranges': [['172.16.1.130', - '172.16.1.150'], - ['172.16.1.151', - '172.16.1.254']], - 'gateway': '172.16.1.1', - 'name': 'subnet10'}], - 'type': 'internal'}, - {'name': 'flat1', - 'physnet_name': 'physnet1', - 'segmentation_type': 'flat', - 'segmentation_id': -1, - 'shared': True, - 'subnets': [{'cidr': '192.168.2.0/24', - 'dns_nameservers': ['8.8.4.4', - '8.8.8.8'], - 'floating_ranges': [['192.168.2.130', - '192.168.2.254']], - 'gateway': '192.168.2.1', - 'name': 'subnet123'}], - 'type': 'external'} - ], - 'networking_parameters':{ - 'base_mac': 'fa:16:3e:00:00:00', - 'gre_id_range': [2, 4094], - 'net_l23_provider': 'ovs', - 'public_vip': '172.16.0.3', - 'segmentation_type': 'vlan,flat,vxlan,gre', - 'vlan_range': [2, 4094], - 'vni_range': [2, 4094]} + 'name': 'internal1', + 'physnet_name': 'PRIVATE1', + 'segmentation_id': 200, + 'segmentation_type': 'vlan', + 'shared': True, + 'subnets': [{'cidr': '192.168.1.0/24', + 'dns_nameservers': ['8.8.4.4', + '8.8.8.8'], + 'floating_ranges': [['192.168.1.2', + '192.168.1.200']], + 'gateway': '192.168.1.1', + 'name': 'subnet2'}, + {'cidr': '172.16.1.0/24', + 'dns_nameservers': ['8.8.4.4', + '8.8.8.8'], + 'floating_ranges': [['172.16.1.130', + '172.16.1.150'], + ['172.16.1.151', + '172.16.1.254']], + 'gateway': '172.16.1.1', + 'name': 'subnet10'}], + 'type': 'internal'}, + {'name': 'flat1', + 'physnet_name': 'physnet1', + 'segmentation_type': 'flat', + 'segmentation_id': -1, + 'shared': True, + 'subnets': [{'cidr': '192.168.2.0/24', + 'dns_nameservers': ['8.8.4.4', + '8.8.8.8'], + 'floating_ranges': [['192.168.2.130', + '192.168.2.254']], + 'gateway': '192.168.2.1', + 'name': 'subnet123'}], + 'type': 'external'} + ], + 'networking_parameters': { + 'base_mac': 'fa:16:3e:00:00:00', + 'gre_id_range': [2, 4094], + 'net_l23_provider': 'ovs', + 'public_vip': '172.16.0.3', + 'segmentation_type': 'vlan,flat,vxlan,gre', + 'vlan_range': [2, 4094], + 'vni_range': [2, 4094]} } diff --git a/code/daisyclient/daisyclient/v1/roles.py b/code/daisyclient/daisyclient/v1/roles.py index 4bd708f0..53b51286 100755 --- a/code/daisyclient/daisyclient/v1/roles.py +++ b/code/daisyclient/daisyclient/v1/roles.py @@ -19,22 +19,23 @@ from oslo_utils import encodeutils from oslo_utils import strutils import six import six.moves.urllib.parse as urlparse -from webob.exc import HTTPBadRequest from daisyclient.common import utils from daisyclient.openstack.common.apiclient import base -UPDATE_PARAMS = ('name', 'description','status','progress','config_set_id', - 'nodes','services', 'cluster_id','type','vip', 'glance_lv_size', - 'deployment_backend', - #NOTE(bcwaldon: an attempt to update 'deleted' will be +UPDATE_PARAMS = ('name', 'description', 'status', 'progress', 'config_set_id', + 'nodes', 'services', 'cluster_id', 'type', 'vip', + 'glance_lv_size', 'deployment_backend', + # NOTE(bcwaldon: an attempt to update 'deleted' will be # ignored, but we need to support it for backwards- # compatibility with the legacy client library 'deleted', 'db_lv_size', 'nova_lv_size', 'disk_location', - 'ntp_server', 'role_type', 'db_vip', 'glance_vip', 'public_vip', 'mongodb_vip') + 'ntp_server', 'role_type', 'db_vip', 'glance_vip', + 'public_vip', 'mongodb_vip') -CREATE_PARAMS = ('id', 'name','description','status','progress','config_set_id', - 'nodes', 'services', 'cluster_id', 'type', 'vip', - 'glance_lv_size', 'db_vip', 'glance_vip', 
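# ---------------------------------------------------------------------------
# Illustrative sketch (assumptions: a writable /tmp path; the stand-in _Args
# object mimics the parsed CLI namespace).  do_cluster_add/do_cluster_update
# in shell.py consume a params file whose content evaluates to a dict shaped
# like CLUSTER_ADD_PARAMS_FILE above; _read_template_file turns it back into
# that dict.
import pprint
from daisyclient.v1 import param_helper

class _Args(object):
    params_file_path = '/tmp/cluster_params.txt'     # placeholder path

with open(_Args.params_file_path, 'w') as fp:
    pprint.pprint(param_helper.CLUSTER_ADD_PARAMS_FILE, stream=fp)

cluster_fields = param_helper._read_template_file(_Args)   # plain dict again
assert 'networking_parameters' in cluster_fields
# ---------------------------------------------------------------------------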
'public_vip', 'mongodb_vip', 'deployment_backend', +CREATE_PARAMS = ('id', 'name', 'description', 'status', 'progress', + 'config_set_id', 'nodes', 'services', 'cluster_id', 'type', + 'vip', 'glance_lv_size', 'db_vip', 'glance_vip', 'public_vip', + 'mongodb_vip', 'deployment_backend', 'db_lv_size', 'nova_lv_size', 'disk_location', 'role_type') DEFAULT_PAGE_SIZE = 20 @@ -46,6 +47,7 @@ OS_REQ_ID_HDR = 'x-openstack-request-id' class Role(base.Resource): + def __repr__(self): return "" % self._info @@ -89,7 +91,7 @@ class RoleManager(base.ManagerWithFind): meta[key] = strutils.bool_from_string(meta[key]) return self._format_role_meta_for_user(meta) - + def _role_meta_to_headers(self, fields): headers = {} fields_copy = copy.deepcopy(fields) @@ -102,7 +104,7 @@ class RoleManager(base.ManagerWithFind): for key, value in six.iteritems(fields_copy): headers['%s' % key] = utils.to_str(value) return headers - + @staticmethod def _format_image_meta_for_user(meta): for key in ['size', 'min_ram', 'min_disk']: @@ -131,12 +133,12 @@ class RoleManager(base.ManagerWithFind): """ role_id = base.getid(role) resp, body = self.client.get('/v1/roles/%s' - % urlparse.quote(str(role_id))) - #meta = self._role_meta_from_headers(resp.headers) + % urlparse.quote(str(role_id))) + # meta = self._role_meta_from_headers(resp.headers) return_request_id = kwargs.get('return_req_id', None) if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - #return Host(self, meta) + # return Host(self, meta) return Role(self, self._format_role_meta_for_user(body['role'])) def data(self, image, do_checksum=True, **kwargs): @@ -265,7 +267,7 @@ class RoleManager(base.ManagerWithFind): TODO(bcwaldon): document accepted params """ - + fields = {} for field in kwargs: if field in CREATE_PARAMS: @@ -275,7 +277,7 @@ class RoleManager(base.ManagerWithFind): else: msg = 'create() got an unexpected keyword argument \'%s\'' raise TypeError(msg % field) - + hdrs = self._role_meta_to_headers(fields) resp, body = self.client.post('/v1/roles', headers=hdrs, @@ -293,7 +295,7 @@ class RoleManager(base.ManagerWithFind): return_request_id = kwargs.get('return_req_id', None) if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - + def update(self, role, **kwargs): """Update an role diff --git a/code/daisyclient/daisyclient/v1/services.py b/code/daisyclient/daisyclient/v1/services.py index 9ad0015c..fb55dba3 100755 --- a/code/daisyclient/daisyclient/v1/services.py +++ b/code/daisyclient/daisyclient/v1/services.py @@ -23,13 +23,13 @@ import six.moves.urllib.parse as urlparse from daisyclient.common import utils from daisyclient.openstack.common.apiclient import base -UPDATE_PARAMS = ('name', 'description','component_id','backup_type', - #NOTE(bcwaldon: an attempt to update 'deleted' will be +UPDATE_PARAMS = ('name', 'description', 'component_id', 'backup_type', + # NOTE(bcwaldon: an attempt to update 'deleted' will be # ignored, but we need to support it for backwards- # compatibility with the legacy client library 'deleted') -CREATE_PARAMS = ('id', 'name','description','component_id','backup_type') +CREATE_PARAMS = ('id', 'name', 'description', 'component_id', 'backup_type') DEFAULT_PAGE_SIZE = 20 @@ -40,6 +40,7 @@ OS_REQ_ID_HDR = 'x-openstack-request-id' class Service(base.Resource): + def __repr__(self): return "" % self._info @@ -83,7 +84,7 @@ class ServiceManager(base.ManagerWithFind): meta[key] = strutils.bool_from_string(meta[key]) return 
self._format_service_meta_for_user(meta) - + def _service_meta_to_headers(self, fields): headers = {} fields_copy = copy.deepcopy(fields) @@ -96,7 +97,7 @@ class ServiceManager(base.ManagerWithFind): for key, value in six.iteritems(fields_copy): headers['%s' % key] = utils.to_str(value) return headers - + @staticmethod def _format_image_meta_for_user(meta): for key in ['size', 'min_ram', 'min_disk']: @@ -125,13 +126,14 @@ class ServiceManager(base.ManagerWithFind): """ service_id = base.getid(service) resp, body = self.client.get('/v1/services/%s' - % urlparse.quote(str(service_id))) - #meta = self._service_meta_from_headers(resp.headers) + % urlparse.quote(str(service_id))) + # meta = self._service_meta_from_headers(resp.headers) return_request_id = kwargs.get('return_req_id', None) if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - #return Host(self, meta) - return Service(self, self._format_service_meta_for_user(body['service'])) + # return Host(self, meta) + return Service(self, self._format_service_meta_for_user( + body['service'])) def data(self, image, do_checksum=True, **kwargs): """Get the raw data for a specific image. @@ -185,7 +187,8 @@ class ServiceManager(base.ManagerWithFind): :param page_size: number of items to request in each paginated request :param limit: maximum number of services to return - :param marker: begin returning services that appear later in the service + :param marker: begin returning services that \ + appear later in the service list than that represented by this service id :param filters: dict of direct comparison filters that mimics the structure of an service object @@ -259,7 +262,7 @@ class ServiceManager(base.ManagerWithFind): TODO(bcwaldon): document accepted params """ - + fields = {} for field in kwargs: if field in CREATE_PARAMS: @@ -269,7 +272,7 @@ class ServiceManager(base.ManagerWithFind): else: msg = 'create() got an unexpected keyword argument \'%s\'' raise TypeError(msg % field) - + hdrs = self._service_meta_to_headers(fields) resp, body = self.client.post('/v1/services', headers=hdrs, @@ -278,7 +281,8 @@ class ServiceManager(base.ManagerWithFind): if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - return Service(self, self._format_service_meta_for_user(body['service'])) + return Service(self, self._format_service_meta_for_user( + body['service'])) def delete(self, service, **kwargs): """Delete an service.""" @@ -287,7 +291,7 @@ class ServiceManager(base.ManagerWithFind): return_request_id = kwargs.get('return_req_id', None) if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - + def update(self, service, **kwargs): """Update an service @@ -312,4 +316,5 @@ class ServiceManager(base.ManagerWithFind): if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - return Service(self, self._format_service_meta_for_user(body['service_meta'])) + return Service(self, self._format_service_meta_for_user( + body['service_meta'])) diff --git a/code/daisyclient/daisyclient/v1/shell.py b/code/daisyclient/daisyclient/v1/shell.py index 739a171f..a2413092 100755 --- a/code/daisyclient/daisyclient/v1/shell.py +++ b/code/daisyclient/daisyclient/v1/shell.py @@ -19,14 +19,11 @@ import copy import functools import pprint import os -import six -import sys import json from oslo_utils import encodeutils from oslo_utils import strutils -from daisyclient.common import progressbar from 
daisyclient.common import utils from daisyclient import exc import daisyclient.v1.hosts @@ -44,7 +41,9 @@ import daisyclient.v1.uninstall import daisyclient.v1.update import daisyclient.v1.disk_array import daisyclient.v1.template +import daisyclient.v1.hwms from daisyclient.v1 import param_helper +import daisyclient.v1.backup_restore _bool_strict = functools.partial(strutils.bool_from_string, strict=True) @@ -53,7 +52,7 @@ def _daisy_show(daisy, max_column_width=80): info = copy.deepcopy(daisy._info) exclusive_field = ('deleted', 'deleted_at') for field in exclusive_field: - if info.has_key(field): + if field in info: info.pop(field) utils.print_dict(info, max_column_width=max_column_width) @@ -63,7 +62,8 @@ def _daisy_show(daisy, max_column_width=80): @utils.arg('description', metavar='', help='node description to be added.') @utils.arg('--resource-type', metavar='', - help='node resource type to be added, supported type are "baremetal", "server" and "docker".\ + help='node resource type to be added, supported type are \ + "baremetal", "server" and "docker".\ "baremetal" is traditional physical server ,\ "server" is virtual machine and \ "docker" is container created by docker.') @@ -75,20 +75,44 @@ def _daisy_show(daisy, max_column_width=80): help='ipmi user of password to be added.') @utils.arg('--ipmi-addr', metavar='', help='ipmi ip to be added.') -@utils.arg('--role', metavar='',nargs='+', +@utils.arg('--role', metavar='', nargs='+', help='name of node role to be added.') -#@utils.arg('--status', metavar='', -# help='node status to be added.') +# @utils.arg('--status', metavar='', +# help='node status to be added.') @utils.arg('--cluster', metavar='', help='id of cluster that the node will be added.') @utils.arg('--os-version', metavar='', help='os version of the host.') @utils.arg('--os-status', metavar='', help='os status of the host.') -@utils.arg('--interfaces', metavar='', +@utils.arg('--interfaces', metavar='', nargs='+', - help='node network interface detail, ip must be given if assigned_networks is empty,\ + help='node network interface detail, \ + ip must be given if assigned_networks is empty,\ and cluster must be given if assigned_networks is not empty.') +@utils.arg('--hwm-id', metavar='', + help='The id of hwm host.') +@utils.arg('--hwm-ip', metavar='', + help='The ip of hwm.') +@utils.arg('--vcpu-pin-set', metavar='', + help='Set the vcpu pin.') +@utils.arg('--dvs-high-cpuset', metavar='', + help='Set the dvs high cpu cores.') +@utils.arg('--pci-high-cpuset', metavar='', + help='Set the pci high cpu cores.') +@utils.arg('--os-cpus', metavar='', + help='Set the os cpu cores.') +@utils.arg('--dvs-cpus', metavar='', + help='Set the dvs cpu cores.') +@utils.arg('--config-set-id', metavar='', + help='Set host config set id.') def do_host_add(gc, args): """Add a host.""" if args.cluster: @@ -102,24 +126,29 @@ def do_host_add(gc, args): # msg = "No role with an ID of '%s' exists." 
% role.id # raise exc.CommandError(msg) interface_list = [] - if args.interfaces: + if args.interfaces: for interface in args.interfaces: - interface_info = {"pci":"", "mode":"", "gateway":"", "type": "", "name": "", "mac": "", "ip": "", "netmask": "", "assigned_networks": "", "slaves":"", "is_deployment":"", "vswitch_type":""} - for kv_str in interface.split(","): - try: + interface_info = {"pci": "", "mode": "", "gateway": "", + "type": "", "name": "", "mac": "", "ip": "", + "netmask": "", "assigned_networks": "", + "slaves": "", "is_deployment": "", + "vswitch_type": ""} + for kv_str in interface.split(","): + try: k, v = kv_str.split("=", 1) - except ValueError: + except ValueError: raise exc.CommandError("interface error") - if k in interface_info: + if k in interface_info: interface_info[k] = v if k == "assigned_networks": - networks_list_obj = interface_info['assigned_networks'].split("_") - networks_list=[] + networks_list_obj = interface_info[ + 'assigned_networks'].split("_") + networks_list = [] for network in networks_list_obj: - network_dict={} + network_dict = {} name, ip = network.split(":", 1) - network_dict={'name':name,'ip':ip} + network_dict = {'name': name, 'ip': ip} networks_list.append(network_dict) interface_info['assigned_networks'] = networks_list if k == "slaves": @@ -127,9 +156,9 @@ def do_host_add(gc, args): interface_info['slaves'] = slaves_list interface_list.append(interface_info) args.interfaces = interface_list - + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) - + # Filter out values we can't use CREATE_PARAMS = daisyclient.v1.hosts.CREATE_PARAMS fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) @@ -137,7 +166,8 @@ def do_host_add(gc, args): host = gc.hosts.add(**fields) _daisy_show(host) - + + @utils.arg('hosts', metavar='', nargs='+', help='ID of host(s) to delete.') def do_host_delete(gc, args): @@ -162,11 +192,13 @@ def do_host_delete(gc, args): print('[Fail]') print('%s: Unable to delete host %s' % (e, args_host)) + @utils.arg('host', metavar='', help='ID of host to modify.') @utils.arg('--name', metavar='', help='Name of host.') @utils.arg('--resource-type', metavar='', - help='node resource type to be added, supported type are "baremetal", "server" and "docker".\ + help='node resource type to be added, \ + supported type are "baremetal", "server" and "docker".\ "baremetal" is traditional physical server ,\ "server" is virtual machine and \ "docker" is container created by docker.') @@ -178,10 +210,10 @@ def do_host_delete(gc, args): help='ipmi user of password for the host.') @utils.arg('--ipmi-addr', metavar='', help='ipmi ip for the host.') -@utils.arg('--description', metavar='', +@utils.arg('--description', metavar='', help='Description of host.') @utils.arg('--root-disk', metavar='', - help='the disk used to install OS.') + help='the disk used to install OS.') @utils.arg('--root-lv-size', metavar='', help='the size of root_lv(M).') @utils.arg('--swap-lv-size', metavar='', @@ -196,18 +228,43 @@ def do_host_delete(gc, args): help='os version for the host.') @utils.arg('--os-status', metavar='', help='os status for the host.') -#@utils.arg('--status', metavar='', -# help='node status for the host.') -@utils.arg('--role', metavar='',nargs='+', +# s@utils.arg('--status', metavar='', +# help='node status for the host.') +@utils.arg('--role', metavar='', nargs='+', help='name of node role for the host.') -@utils.arg('--interfaces', metavar='', +@utils.arg('--interfaces', metavar='', nargs='+', - help='node 
network interface detail, ip must be given if assigned_networks is empty,\ + help='node network interface detail,\ + ip must be given if assigned_networks is empty,\ and cluster must be given if assigned_networks is not empty.') @utils.arg('--hugepagesize', metavar='', help='size of hugepage.') @utils.arg('--hugepages', metavar='', help='number of hugepages.') +@utils.arg('--hwm-id', metavar='', + help='The id of hwm host.') +@utils.arg('--hwm-ip', metavar='', + help='The ip of hwm.') +@utils.arg('--vcpu-pin-set', metavar='', + help='Set the vcpu pin.') +@utils.arg('--dvs-high-cpuset', metavar='', + help='Set the dvs high cpu cores.') +@utils.arg('--pci-high-cpuset', metavar='', + help='Set the pci high cpu cores.') +@utils.arg('--os-cpus', metavar='', + help='Set the os cpu cores.') +@utils.arg('--dvs-cpus', metavar='', + help='Set the dvs cpu cores.') +@utils.arg('--config-set-id', metavar='', + help='Update host config set id.') def do_host_update(gc, args): """Update a specific host.""" # Filter out None values @@ -217,30 +274,35 @@ def do_host_update(gc, args): msg = "No cluster with an ID of '%s' exists." % cluster.id raise exc.CommandError(msg) interface_list = [] - if args.interfaces: + if args.interfaces: for interfaces in args.interfaces: - interface_info = {"pci":"", "mode":"", "gateway":"", "type": "", "name": "", "mac": "", "ip": "", "netmask": "", "mode": "","assigned_networks": "", "slaves":"", "is_deployment":"", "vswitch_type":""} - for kv_str in interfaces.split(","): - try: + interface_info = {"pci": "", "mode": "", "gateway": "", + "type": "", "name": "", "mac": "", "ip": "", + "netmask": "", "mode": "", + "assigned_networks": "", "slaves": "", + "is_deployment": "", "vswitch_type": ""} + for kv_str in interfaces.split(","): + try: k, v = kv_str.split("=", 1) - except ValueError: + except ValueError: raise exc.CommandError("interface error") - if k in interface_info: + if k in interface_info: interface_info[k] = v if k == "assigned_networks": - networks_list_obj = interface_info['assigned_networks'].split("_") - networks_list=[] + networks_list_obj = interface_info[ + 'assigned_networks'].split("_") + networks_list = [] for network in networks_list_obj: - network_dict={} + network_dict = {} name, ip = network.split(":", 1) - network_dict={'name':name,'ip':ip} + network_dict = {'name': name, 'ip': ip} networks_list.append(network_dict) interface_info['assigned_networks'] = networks_list if k == "slaves": slaves_list = interface_info['slaves'].split("_", 1) interface_info['slaves'] = slaves_list interface_list.append(interface_info) - args.interfaces = interface_list + args.interfaces = interface_list fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) host_arg = fields.pop('host') @@ -283,17 +345,21 @@ def do_host_list(gc, args): hosts = gc.hosts.list(**kwargs) - columns = ['ID', 'Name','Description', 'Resource_type', 'Status', 'Os_progress','Os_status','Messages'] - if filters.has_key('cluster_id'): - role_columns = ['Role_progress','Role_status', 'Role_messages'] + columns = ['ID', 'Hwm_id', 'Name', 'Description', 'Resource_type', + 'Status', 'Os_progress', 'Os_status', 'Discover_state', + 'Messages', 'Hwm_ip'] +# if filters.has_key('cluster_id'): + if 'cluster_id' in filters: + role_columns = ['Role_progress', 'Role_status', 'Role_messages'] columns += role_columns utils.print_list(hosts, columns) - + + @utils.arg('id', metavar='', help='Filter host to those that have this id.') def do_host_detail(gc, args): - """List host you can access.""" + 
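# ---------------------------------------------------------------------------
# Illustrative sketch of the --interfaces argument format parsed by
# do_host_add/do_host_update above: comma-separated k=v pairs, with
# assigned_networks given as '_'-joined name:ip entries and slaves as
# '_'-joined names.  This is a simplified standalone mirror of that loop;
# the interface values are placeholders.
def parse_interface(arg):
    info = {"pci": "", "mode": "", "gateway": "", "type": "", "name": "",
            "mac": "", "ip": "", "netmask": "", "assigned_networks": "",
            "slaves": "", "is_deployment": "", "vswitch_type": ""}
    for kv_str in arg.split(","):
        key, value = kv_str.split("=", 1)
        if key not in info:
            continue
        if key == "assigned_networks":
            info[key] = [dict(zip(("name", "ip"), net.split(":", 1)))
                         for net in value.split("_")]
        elif key == "slaves":
            info[key] = value.split("_", 1)
        else:
            info[key] = value
    return info

example_interface = parse_interface(
    "name=eth0,ip=192.168.1.10,netmask=255.255.255.0,"
    "assigned_networks=MANAGEMENT:192.168.1.10_PUBLIC:10.0.0.10")
# ---------------------------------------------------------------------------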
"""List host you can access.""" host = utils.find_resource(gc.hosts, args.id) _daisy_show(host) @@ -330,24 +396,28 @@ def do_host_detail(gc, args): # @utils.arg('--public_vip', metavar='', # help='Cluster public vip.') + @utils.arg('ip', metavar='', help='ip of the host will be discovered.') @utils.arg('passwd', metavar='', help='passwd of the host.') @utils.arg('--user', metavar='', help='user name of the host.') +@utils.arg('--cluster-id', metavar='', + help='id of cluster that the node will be added.') def do_discover_host_add(gc, args): """Add a discover host.""" - + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) - + # Filter out values we can't use CREATE_PARAMS = daisyclient.v1.hosts.CREATE_PARAMS fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) - + host = gc.hosts.add_discover_host(**fields) _daisy_show(host) + @utils.arg('id', metavar='', nargs='+', help='ID of discover host(s) to delete.') def do_discover_host_delete(gc, args): @@ -369,30 +439,37 @@ def do_discover_host_delete(gc, args): if args.verbose: print('[Fail]') print('%s: Unable to delete host %s' % (e, args_host)) - + + @utils.arg('--ip', metavar='', help='Filter hosts to those that have this ip.') @utils.arg('--user', metavar='', help='Filter by user.') +@utils.arg('--cluster-id', metavar='', + help='Filter by cluster_id.') def do_discover_host_list(gc, args): """List hosts you can access.""" - filter_keys = ['ip', 'user'] + filter_keys = ['ip', 'user', 'cluster_id'] filter_items = [(key, getattr(args, key)) for key in filter_keys] filters = dict([item for item in filter_items if item[1] is not None]) kwargs = {'filters': filters} discover_hosts = gc.hosts.list_discover_host(**kwargs) - columns = ['Id', 'Ip','User', 'Passwd', 'Status', 'Message', 'Host_id'] + columns = ['Id', 'Mac', 'Ip', 'User', 'Passwd', 'Status', 'Message', + 'Host_id', 'Cluster_id'] utils.print_list(discover_hosts, columns) - + + @utils.arg('id', metavar='', - help='id of the host.') + help='id of the host.') @utils.arg('--ip', metavar='', help='ip of the host.') @utils.arg('--passwd', metavar='', help='passwd of the host.') @utils.arg('--user', metavar='', help='user name of the host.') +@utils.arg('--cluster-id', metavar='', + help='id of cluster that the node will be added.') def do_discover_host_update(gc, args): """Add a discover host.""" @@ -401,10 +478,11 @@ def do_discover_host_update(gc, args): # Filter out values we can't use CREATE_PARAMS = daisyclient.v1.hosts.CREATE_PARAMS fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) - + host = gc.hosts.update_discover_host(host, **fields) _daisy_show(host) - + + @utils.arg('id', metavar='', help='ID of discover host.') def do_discover_host_detail(gc, args): @@ -417,10 +495,13 @@ def do_discover_host_detail(gc, args): discover_host = gc.hosts.get_discover_host_detail(host_id, **fields) _daisy_show(discover_host) + @utils.arg('params_file_path', metavar='', help="""Template file path. - Run \"daisy params-helper params_file_path\" for the template content. - Then save the output to a template file.Just use this path.""") + Run \"daisy params-helper params_file_path\" for \ + the template content. 
+ Then save the output to a template file.\ + Just use this path.""") def do_cluster_add(gc, args): """Add a cluster.""" fields = None @@ -454,6 +535,7 @@ def do_cluster_add(gc, args): cluster = gc.clusters.add(**fields) _daisy_show(cluster) + @utils.arg('cluster', metavar='', help='ID of cluster to modify.') # @utils.arg('--name', metavar='', # help='Name of host.') @@ -489,8 +571,10 @@ def do_cluster_add(gc, args): # help='Cluster public vip.') @utils.arg('params_file_path', metavar='', help="""Template file path. - Run \"daisy params-helper params_file_path\" for the template content. - Then save the output to a template file.Just use this path.""") + Run \"daisy params-helper params_file_path\" for \ + the template content. + Then save the output to a template file.\ + Just use this path.""") def do_cluster_update(gc, args): """Update a specific cluster.""" # Filter out None values @@ -532,6 +616,7 @@ def do_cluster_update(gc, args): cluster = gc.clusters.update(cluster, **fields) _daisy_show(cluster) + @utils.arg('subcommand_param', nargs='+', metavar='', help='Subcommand param, [\'params_file_path\', \'test\'].') @@ -544,7 +629,8 @@ def do_params_helper(gc, args): for valid_param in valid_params_list: if 0 == cmp(valid_param, u"params_file_path"): print("------------------------------------------") - print("Cluster \'name\' and \'description\' segment must be supportted.Template:") + print("Cluster \'name\' and \'description\' segment must " + "be supportted.Template:") pprint.pprint(param_helper.CLUSTER_ADD_PARAMS_FILE) print("------------------------------------------") elif 0 == cmp(valid_param, u"test"): @@ -552,6 +638,7 @@ def do_params_helper(gc, args): print("test") print("------------------------------------------") + @utils.arg('clusters', metavar='', nargs='+', help=' ID of cluster(s) to delete.') def do_cluster_delete(gc, args): @@ -576,9 +663,9 @@ def do_cluster_delete(gc, args): print('[Fail]') print('%s: Unable to delete cluster %s' % (e, args_cluster)) + @utils.arg('--name', metavar='', help='Filter clusters to those that have this name.') - @utils.arg('--auto-scale', metavar='', help='auto-scale:1 or 0.') @utils.arg('--page-size', metavar='', default=None, type=int, @@ -591,7 +678,7 @@ def do_cluster_delete(gc, args): help='Sort cluster list in specified direction.') def do_cluster_list(gc, args): """List clusters you can access.""" - filter_keys = ['name','auto_scale'] + filter_keys = ['name', 'auto_scale'] filter_items = [(key, getattr(args, key)) for key in filter_keys] filters = dict([item for item in filter_items if item[1] is not None]) @@ -604,9 +691,11 @@ def do_cluster_list(gc, args): clusters = gc.clusters.list(**kwargs) - columns = ['ID', 'Name', 'Description', 'Nodes', 'Networks', 'Auto_scale', 'Use_dns'] + columns = ['ID', 'Name', 'Description', 'Nodes', 'Networks', + 'Auto_scale', 'Use_dns', 'Hwm_ip', 'Status'] utils.print_list(clusters, columns) + @utils.arg('id', metavar='', help='Filter cluster to those that have this id.') def do_cluster_detail(gc, args): @@ -621,21 +710,22 @@ def do_cluster_detail(gc, args): _daisy_show(cluster) else: cluster = gc.clusters.list(**kwargs) - columns = ['ID', 'Name','Description','Nodes', 'Networks','Auto_scale', 'Use_dns'] - utils.print_list(cluster, columns) + columns = ['ID', 'Name', 'Description', 'Nodes', + 'Networks', 'Auto_scale', 'Use_dns'] + utils.print_list(cluster, columns) -#@utils.arg('cluster', metavar='', +# @utils.arg('cluster', metavar='', # help='Filter results by an cluster ID.') -#def 
do_cluster_host_list(gc, args): +# def do_cluster_host_list(gc, args): # """Show cluster host membership by cluster or host.""" - # if not args.cluster: - # utils.exit('Unable to list all members. Specify cluster-id') - # if args.cluster: - # kwargs = {'cluster': args.cluster} +# if not args.cluster: +# utils.exit('Unable to list all members. Specify cluster-id') +# if args.cluster: +# kwargs = {'cluster': args.cluster} # - # members = gc.cluster_hosts.list(**kwargs) - # columns = ['Cluster_ID', 'Host_ID'] - # utils.print_list(members, columns) +# members = gc.cluster_hosts.list(**kwargs) +# columns = ['Cluster_ID', 'Host_ID'] +# utils.print_list(members, columns) @utils.arg('cluster', metavar='', @@ -644,12 +734,11 @@ def do_cluster_detail(gc, args): help='id of host to remove as member.') def do_cluster_host_del(gc, args): """Remove a host from cluster.""" - #cluster_id = utils.find_resource(gc.clusters, args.cluster).id - #host_id = utils.find_resource(gc.hosts, args.node).id +# cluster_id = utils.find_resource(gc.clusters, args.cluster).id +# host_id = utils.find_resource(gc.hosts, args.node).id cluster_id = args.cluster host_id = args.node gc.cluster_hosts.delete(cluster_id, host_id) - @utils.arg('name', metavar='', @@ -658,9 +747,9 @@ def do_cluster_host_del(gc, args): help='Component description to be added.') def do_component_add(gc, args): """Add a component.""" - + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) - + # Filter out values we can't use CREATE_PARAMS = daisyclient.v1.components.CREATE_PARAMS fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) @@ -669,6 +758,7 @@ def do_component_add(gc, args): _daisy_show(component) + @utils.arg('components', metavar='', nargs='+', help='ID of component(s) to delete.') def do_component_delete(gc, args): @@ -693,6 +783,7 @@ def do_component_delete(gc, args): print('[Fail]') print('%s: Unable to delete component %s' % (e, args_component)) + @utils.arg('--id', metavar='', help='Filter components to those that have this name.') def do_component_list(gc, args): @@ -707,13 +798,15 @@ def do_component_list(gc, args): _daisy_show(component) else: components = gc.components.list(**kwargs) - columns = ['ID', 'Name','Description'] + columns = ['ID', 'Name', 'Description'] utils.print_list(components, columns) -@utils.arg('component', metavar='', help='ID of component to modify.') + +@utils.arg('component', metavar='', + help='ID of component to modify.') @utils.arg('--name', metavar='', help='Name of component.') -@utils.arg('--description', metavar='', +@utils.arg('--description', metavar='', help='Description of component.') def do_component_update(gc, args): """Update a specific component.""" @@ -730,19 +823,20 @@ def do_component_update(gc, args): component = gc.components.update(component, **fields) _daisy_show(component) + @utils.arg('name', metavar='', help='Service name to be added.') @utils.arg('description', metavar='', help='Service description to be added.') @utils.arg('--component-id', metavar='', - help='Services that belong to the component of the ID.') + help='Services that belong to the component of the ID.') @utils.arg('--backup-type', metavar='', - help='The backup-type mybe lb or ha.') + help='The backup-type mybe lb or ha.') def do_service_add(gc, args): """Add a service.""" - + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) - + # Filter out values we can't use CREATE_PARAMS = daisyclient.v1.services.CREATE_PARAMS fields = dict(filter(lambda x: x[0] in 
CREATE_PARAMS, fields.items())) @@ -751,6 +845,7 @@ def do_service_add(gc, args): _daisy_show(service) + @utils.arg('services', metavar='', nargs='+', help='ID of service(s) to delete.') def do_service_delete(gc, args): @@ -775,6 +870,7 @@ def do_service_delete(gc, args): print('[Fail]') print('%s: Unable to delete service %s' % (e, args_service)) + @utils.arg('--id', metavar='', help='Filter services to those that have this name.') def do_service_list(gc, args): @@ -789,18 +885,19 @@ def do_service_list(gc, args): _daisy_show(service) else: services = gc.services.list(**kwargs) - columns = ['ID', 'Name','Description', 'Component_ID', 'Backup_Type'] + columns = ['ID', 'Name', 'Description', 'Component_ID', 'Backup_Type'] utils.print_list(services, columns) + @utils.arg('service', metavar='', help='ID of service to modify.') @utils.arg('--name', metavar='', help='Name of service.') -@utils.arg('--description', metavar='', +@utils.arg('--description', metavar='', help='Description of service.') -@utils.arg('--component-id', metavar='', +@utils.arg('--component-id', metavar='', help='Services that belong to the component of the ID.') @utils.arg('--backup-type', metavar='', - help='The backup-type mybe lb or ha.') + help='The backup-type mybe lb or ha.') def do_service_update(gc, args): """Update a specific service.""" # Filter out None values @@ -816,19 +913,22 @@ def do_service_update(gc, args): service = gc.services.update(service, **fields) _daisy_show(service) + @utils.arg('name', metavar='', help='Role name to be added.') @utils.arg('description', metavar='', help='Role description to be added.') -#@utils.arg('--progress', metavar='', -# help='The role of the progress.') +# @utils.arg('--progress', metavar='', +# help='The role of the progress.') @utils.arg('--config-set-id', metavar='', help='Roles that belong to the config-set of the ID.') @utils.arg('--nodes', metavar='', nargs='+', - help='Roles that belong to the host of the id,host id can be more than one') + help='Roles that belong to the host of the id,\ + host id can be more than one') @utils.arg('--services', metavar='', nargs='+', - help='Roles that belong to the service of the id, service id can be more than one') -#@utils.arg('--status', metavar='', + help='Roles that belong to the service of the id, \ + service id can be more than one') +# @utils.arg('--status', metavar='', # help='The role of the status.') @utils.arg('--cluster-id', metavar='', help='Roles that belong to cluster of id.') @@ -845,23 +945,26 @@ def do_service_update(gc, args): @utils.arg('--mongodb-vip', metavar='', help='float ip of mongodb.') @utils.arg('--glance-lv-size', metavar='', - help='the size of logic volume disk for storaging image, and the unit is M.') + help='the size of logic volume disk for storaging image,\ + and the unit is M.') @utils.arg('--deployment-backend', metavar='', - help="deployment backend, supported bacends are 'tecs' and 'zenic' now.") + help="deployment backend, supported bacends are \ + 'tecs' and 'zenic' now.") @utils.arg('--db-lv-size', metavar='', help='the size of database disk(M).') @utils.arg('--nova-lv-size', metavar='', help='the size of logic volume disk for nvoa, and the unit is MB.') @utils.arg('--disk-location', metavar='', help='where disks used by backends application from, default is "local". 
\ - "local" means disks come from local host, "share" means disks come from share storage devices') + "local" means disks come from local host, \ + "share" means disks come from share storage devices') @utils.arg('--role-type', metavar='', help='type of role') def do_role_add(gc, args): """Add a role.""" - + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) - + # Filter out values we can't use CREATE_PARAMS = daisyclient.v1.roles.CREATE_PARAMS fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) @@ -870,6 +973,7 @@ def do_role_add(gc, args): _daisy_show(role) + @utils.arg('roles', metavar='', nargs='+', help='ID of role(s) to delete.') def do_role_delete(gc, args): @@ -894,6 +998,7 @@ def do_role_delete(gc, args): print('[Fail]') print('%s: Unable to delete role %s' % (e, args_role)) + @utils.arg('--cluster-id', metavar='', help='Roles that belong to cluster.') def do_role_list(gc, args): @@ -901,13 +1006,16 @@ def do_role_list(gc, args): filter_keys = ['cluster_id'] filter_items = [(key, getattr(args, key)) for key in filter_keys] filters = dict([item for item in filter_items if item[1] is not None]) - fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + # fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) kwargs = {'filters': filters} roles = gc.roles.list(**kwargs) - columns = ['ID', 'Name','Description','Status','Progress','Config_Set_ID','CLUSTER_ID','TYPE','VIP','Deployment_Backend'] + columns = ['ID', 'Name', 'Description', 'Status', 'Progress', + 'Config_Set_ID', 'CLUSTER_ID', 'TYPE', 'VIP', + 'Deployment_Backend'] utils.print_list(roles, columns) + @utils.arg('id', metavar='', help='Filter roles to those that have this name.') def do_role_detail(gc, args): @@ -922,24 +1030,28 @@ def do_role_detail(gc, args): _daisy_show(role) else: roles = gc.roles.list(**kwargs) - columns = ['ID', 'Name','Description','Status','Progress','Config_Set_ID','CLUSTER_ID','TYPE','VIP'] + columns = ['ID', 'Name', 'Description', 'Status', + 'Progress', 'Config_Set_ID', 'CLUSTER_ID', 'TYPE', 'VIP'] utils.print_list(roles, columns) + @utils.arg('role', metavar='', help='ID of role to modify.') @utils.arg('--name', metavar='', help='Name of role.') -@utils.arg('--description', metavar='', +@utils.arg('--description', metavar='', help='Description of role.') -@utils.arg('--config-set-id', metavar='', +@utils.arg('--config-set-id', metavar='', help='Roles that belong to the config-set of the ID.') @utils.arg('--nodes', metavar='', nargs='+', - help='Roles that belong to the host of the id,host id can be more than one') + help='Roles that belong to the host of the id,\ + host id can be more than one') @utils.arg('--services', metavar='', nargs='+', - help='Roles that belong to the service of the id, service id can be more than one') -#@utils.arg('--status', metavar='', -# help='The role of the status.') -#@utils.arg('--progress', metavar='', -# help='The role of the progress.') + help='Roles that belong to the service of the id, \ + service id can be more than one') +# @utils.arg('--status', metavar='', +# help='The role of the status.') +# @utils.arg('--progress', metavar='', +# help='The role of the progress.') @utils.arg('--cluster-id', metavar='', help='Roles that belong to cluster of id.') @utils.arg('--type', metavar='', @@ -947,16 +1059,19 @@ def do_role_detail(gc, args): @utils.arg('--vip', metavar='', help='float ip.') @utils.arg('--glance-lv-size', metavar='', - help='the size of logic volume disk for storaging image, and the 
unit is M.') + help='the size of logic volume disk for storaging image,\ + and the unit is M.') @utils.arg('--deployment-backend', metavar='', - help="deployment backend, supported bacends are 'tecs' and 'zenic' now.") + help="deployment backend,\ + supported bacends are 'tecs' and 'zenic' now.") @utils.arg('--db-lv-size', metavar='', help='the size of database disk(M).') @utils.arg('--nova-lv-size', metavar='', help='the size of logic volume disk for nvoa, and the unit is MB.') @utils.arg('--disk-location', metavar='', help='where disks used by backends application from, default is "local". \ - "local" means disks come from local host, "share" means disks come from share storage devices') + "local" means disks come from local host, \ + "share" means disks come from share storage devices') @utils.arg('--ntp-server', metavar='', help='ip of ntp server') @utils.arg('--role-type', metavar='', @@ -966,7 +1081,7 @@ def do_role_detail(gc, args): @utils.arg('--glance-vip', metavar='', help='float ip of glance') @utils.arg('--public-vip', metavar='', - help='float ip of public') + help='float ip of public.') @utils.arg('--mongodb-vip', metavar='', help='float ip of mongodb') def do_role_update(gc, args): @@ -983,7 +1098,7 @@ def do_role_update(gc, args): role = gc.roles.update(role, **fields) _daisy_show(role) - + @utils.arg('name', metavar='', help='config_file name to be added.') @@ -991,9 +1106,9 @@ def do_role_update(gc, args): help='config_file description to be added.') def do_config_file_add(gc, args): """Add a config_file.""" - + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) - + # Filter out values we can't use CREATE_PARAMS = daisyclient.v1.config_files.CREATE_PARAMS fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) @@ -1001,7 +1116,8 @@ def do_config_file_add(gc, args): config_file = gc.config_files.add(**fields) _daisy_show(config_file) - + + @utils.arg('config_files', metavar='', nargs='+', help='ID of config_file(s) to delete.') def do_config_file_delete(gc, args): @@ -1024,12 +1140,15 @@ def do_config_file_delete(gc, args): except exc.HTTPException as e: if args.verbose: print('[Fail]') - print('%s: Unable to delete config_file %s' % (e, args_config_file)) + print('%s: Unable to delete config_file %s' % + (e, args_config_file)) -@utils.arg('config_file', metavar='', help='ID of config_file to modify.') + +@utils.arg('config_file', metavar='', + help='ID of config_file to modify.') @utils.arg('--name', metavar='', help='Name of config_file.') -@utils.arg('--description', metavar='', +@utils.arg('--description', metavar='', help='Description of config_file.') def do_config_file_update(gc, args): """Update a specific config_file.""" @@ -1059,9 +1178,10 @@ def do_config_file_list(gc, args): _daisy_show(config_file) else: config_files = gc.config_files.list(**kwargs) - columns = ['ID', 'Name','Description'] - utils.print_list(config_files, columns) - + columns = ['ID', 'Name', 'Description'] + utils.print_list(config_files, columns) + + @utils.arg('id', metavar='', help='Filter config_file to those that have this id.') def do_config_file_detail(gc, args): @@ -1076,18 +1196,19 @@ def do_config_file_detail(gc, args): _daisy_show(config_file) else: config_files = gc.config_files.list(**kwargs) - columns = ['ID', 'Name','Description'] - utils.print_list(config_files, columns) - + columns = ['ID', 'Name', 'Description'] + utils.print_list(config_files, columns) + + @utils.arg('name', metavar='', help='config_set name to be added.') 
@utils.arg('description', metavar='', help='config_set description to be added.') def do_config_set_add(gc, args): """Add a config_set.""" - + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) - + # Filter out values we can't use CREATE_PARAMS = daisyclient.v1.config_sets.CREATE_PARAMS fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) @@ -1095,7 +1216,8 @@ def do_config_set_add(gc, args): config_set = gc.config_sets.add(**fields) _daisy_show(config_set) - + + @utils.arg('config_sets', metavar='', nargs='+', help='ID of config_set(s) to delete.') def do_config_set_delete(gc, args): @@ -1120,10 +1242,12 @@ def do_config_set_delete(gc, args): print('[Fail]') print('%s: Unable to delete config_set %s' % (e, args_config_set)) -@utils.arg('config_set', metavar='', help=' ID of config_set to modify.') + +@utils.arg('config_set', metavar='', + help=' ID of config_set to modify.') @utils.arg('--name', metavar='', help='Name of config_set.') -@utils.arg('--description', metavar='', +@utils.arg('--description', metavar='', help='Description of config_set.') def do_config_set_update(gc, args): """Update a specific config_set.""" @@ -1141,7 +1265,6 @@ def do_config_set_update(gc, args): _daisy_show(config_set) - def do_config_set_list(gc, args): """List config_sets you can access.""" filter_keys = '' @@ -1154,8 +1277,9 @@ def do_config_set_list(gc, args): _daisy_show(config_set) else: config_sets = gc.config_sets.list(**kwargs) - columns = ['ID', 'Name','Description'] - utils.print_list(config_sets, columns) + columns = ['ID', 'Name', 'Description'] + utils.print_list(config_sets, columns) + @utils.arg('id', metavar='', help='Filter components to those that have this name.') @@ -1171,8 +1295,9 @@ def do_config_set_detail(gc, args): _daisy_show(config_set) else: config_sets = gc.config_sets.list(**kwargs) - columns = ['ID', 'Name','Description'] - utils.print_list(config_sets, columns) + columns = ['ID', 'Name', 'Description'] + utils.print_list(config_sets, columns) + @utils.arg('config', metavar='', nargs='+', help='ID of config(s) to delete.') @@ -1184,6 +1309,7 @@ def do_config_delete(gc, args): fields = dict(filter(lambda x: x[0] in UPDATE_PARAMS, fields.items())) gc.configs.delete(**fields) + def do_config_list(gc, args): """List configs you can access.""" filter_keys = '' @@ -1196,8 +1322,10 @@ def do_config_list(gc, args): _daisy_show(config) else: configs = gc.configs.list(**kwargs) - columns = ['ID','Section' ,'Key','Value','Description', 'Config_file_id','Config_version','Running_version'] - utils.print_list(configs, columns) + columns = ['ID', 'Section', 'Key', 'Value', 'Description', + 'Config_file_id', 'Config_version', 'Running_version'] + utils.print_list(configs, columns) + @utils.arg('id', metavar='', help='Filter configs to those that have this id.') @@ -1213,46 +1341,68 @@ def do_config_detail(gc, args): _daisy_show(config) else: configs = gc.configs.list(**kwargs) - columns = ['ID','Section' ,'Key','Value','Description', 'Config_file_id','Config_version','Running_version'] + columns = ['ID', 'Section', 'Key', 'Value', 'Description', + 'Config_file_id', 'Config_version', 'Running_version'] utils.print_list(configs, columns) + @utils.arg('name', metavar='', help='NAME of network.') -@utils.arg('description', metavar='', +@utils.arg('description', metavar='', help='Description of network.') -@utils.arg('network_type', metavar='' , - help='type of network:PUBLIC,PRIVATE,STORAGE,MANAGEMENT,EXTERNAL,DEPLOYMENT') -@utils.arg('--cluster-id', 
metavar='', help='ID of cluster, must be given.') -@utils.arg('--vlan-start', metavar='', - help='vlan start of network.it should be a integer in "1~4096", and it must be appeared with vlan end') -@utils.arg('--vlan-end', metavar='', - help='vlan end of network.it should be a integer in "1~4096", and it must be appeared with vlan start') -@utils.arg('--cidr', metavar='', +@utils.arg('network_type', metavar='', + help='type of network:PUBLICAPI,DATAPLANE,STORAGE,\ + MANAGEMENT,EXTERNAL,DEPLOYMENT,HEARTBEAT') +@utils.arg('--cluster-id', metavar='', + help='ID of cluster, must be given.') +@utils.arg('--vlan-start', metavar='', + help='vlan start of network.it should be a integer in "1~4096",\ + and it must be appeared with vlan end') +@utils.arg('--vlan-end', metavar='', + help='vlan end of network.it should be a integer in "1~4096",\ + and it must be appeared with vlan start') +@utils.arg('--gre-id-start', metavar='', + help='gre-id start of network.it should be a integer in "1~4096",\ + and it must be appeared with gre-id end') +@utils.arg('--gre-id-end', metavar='', + help='gre-id end of network.it should be a integer in "1~4096",\ + and it must be appeared with gre-id start') +@utils.arg('--vni-start', metavar='', + help='vni start of network.it should be a integer in "1~16777216",\ + and it must be appeared with vni end') +@utils.arg('--vni-end', metavar='', + help='vni end of network.it should be a integer in "1~16777216",\ + and it must be appeared with vni start') +@utils.arg('--cidr', metavar='', help='specifying ip range of network. eg:192.168.1.1/24') @utils.arg('--ip', metavar='', - help='network ip') -@utils.arg('--ip-ranges', metavar='' ,nargs='+', - help='ip ranges of network. for example:"start":"172.16.0.2","end":"172.16.0.126"') -@utils.arg('--gateway', metavar='' , - help='gate way of network') -@utils.arg('--type', metavar='' , + help='ip of build pxe server') +@utils.arg('--ip-ranges', metavar='', nargs='+', + help='ip ranges of network. 
\ + for example:"start":"172.16.0.2","end":"172.16.0.126"') +@utils.arg('--gateway', metavar='', + help='gate way of network') +@utils.arg('--type', metavar='', help='type of network:custom or template') -@utils.arg('--ml2-type', metavar='' , - help='ml2 type:"ovs", "sriov(direct)", "sriov(macvtap)", "ovs,sriov(direct)" or "ovs,sriov(macvtap)".\ - when network-type is PRIVATE, ml2-type must be given') -@utils.arg('--physnet-name', metavar='' , +@utils.arg('--ml2-type', metavar='', + help='ml2 type:"ovs", "sriov(direct)", "sriov(macvtap)", \ + "ovs,sriov(direct)" or "ovs,sriov(macvtap)".\ + when network-type is DATAPLANE, ml2-type must be given') +@utils.arg('--physnet-name', metavar='', help='physnet name,eg:physnet_eth0') -@utils.arg('--capability', metavar='' , +@utils.arg('--capability', metavar='', help='CAPABILITY of network:high or low') -@utils.arg('--vlan-id', metavar='' , +@utils.arg('--vlan-id', metavar='', help='Vlan Tag.') -@utils.arg('--mtu', metavar='' , +@utils.arg('--mtu', metavar='', help='Private plane mtu.eg.:1600.') +@utils.arg('--segmentation-type', metavar='', + help='network plane segmentation type.') def do_network_add(gc, args): """Add a network.""" ip_range_list = [] - if args.ip_ranges: + if args.ip_ranges: for ip_range in args.ip_ranges: - ip_range_ref={} + ip_range_ref = {} for range_value in ip_range.split(","): try: k, v = range_value.split(":", 1) @@ -1260,13 +1410,13 @@ def do_network_add(gc, args): ip_range_ref['start'] = str(v) if str(k) == "end": ip_range_ref['end'] = str(v) - except ValueError: - raise exc.CommandError("ip_ranges error") + except ValueError: + raise exc.CommandError("ip_ranges error") ip_range_list.append(ip_range_ref) args.ip_ranges = ip_range_list - + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) - + # Filter out values we can't use CREATE_PARAMS = daisyclient.v1.networks.CREATE_PARAMS fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) @@ -1275,47 +1425,69 @@ def do_network_add(gc, args): _daisy_show(network) + @utils.arg('network', metavar='', help='ID of network.') -@utils.arg('--network-type', metavar='' , - help='type of network:PUBLIC,PRIVATE,STORAGE,MANAGEMENT,EXTERNAL,DEPLOYMENT') +@utils.arg('--network-type', metavar='', + help='type of network:PUBLICAPI,DATAPLANE,\ + STORAGE,MANAGEMENT,EXTERNAL,DEPLOYMENT,HEARTBEAT') @utils.arg('--name', metavar='', help='Name of network.') -@utils.arg('--description', metavar='', +@utils.arg('--description', metavar='', help='Description of network.') -@utils.arg('--vlan-start', metavar='', - help='vlan start of network.it should be a integer in "1~4096", and it must be appeared with vlan end') -@utils.arg('--vlan-end', metavar='', - help='vlan end of network.it should be a integer in "1~4096", and it must be appeared with vlan start') -@utils.arg('--cidr', metavar='', +@utils.arg('--vlan-start', metavar='', + help='vlan start of network.it should be a integer in "1~4096", \ + and it must be appeared with vlan end') +@utils.arg('--vlan-end', metavar='', + help='vlan end of network.it should be a integer in "1~4096",\ + and it must be appeared with vlan start') +@utils.arg('--gre-id-start', metavar='', + help='gre-id start of network.it should be a integer in "1~4096",\ + and it must be appeared with gre-id end') +@utils.arg('--gre-id-end', metavar='', + help='gre-id end of network.it should be a integer in "1~4096",\ + and it must be appeared with gre-id start') +@utils.arg('--vni-start', metavar='', + help='vni start of network.it should be a 
integer in "1~16777216",\ + and it must be appeared with vni end') +@utils.arg('--vni-end', metavar='', + help='vni end of network.it should be a integer in "1~16777216",\ + and it must be appeared with vni start') +@utils.arg('--cidr', metavar='', help='specifying ip range of network. eg:192.168.1.1/24') -@utils.arg('--ip-ranges', metavar='' ,nargs='+', - help='ip ranges of network,for example:"start":"172.16.0.2","end":"172.16.0.126"') -@utils.arg('--gateway', metavar='' , - help='gate way of network') -@utils.arg('--type', metavar='' , +@utils.arg('--ip', metavar='', + help='ip of build pxe server') +@utils.arg('--ip-ranges', metavar='', nargs='+', + help='ip ranges of network,for example:"start":\ + "172.16.0.2","end":"172.16.0.126"') +@utils.arg('--gateway', metavar='', + help='gate way of network') +@utils.arg('--type', metavar='', help='type of network:custom or template') -@utils.arg('--ml2-type', metavar='' , - help='ml2 type:"ovs", "sriov(direct)", "sriov(macvtap)", "ovs,sriov(direct)" or "ovs,sriov(macvtap)".\ - when network-type is PRIVATE, ml2-type must be given') -@utils.arg('--physnet-name', metavar='' , +@utils.arg('--ml2-type', metavar='', + help='ml2 type:"ovs", "sriov(direct)", "sriov(macvtap)", \ + "ovs,sriov(direct)" or "ovs,sriov(macvtap)".\ + when network-type is DATAPLANE, ml2-type must be given') +@utils.arg('--physnet-name', metavar='', help='physnet name,eg:physnet_eth0') -@utils.arg('--capability', metavar='' , +@utils.arg('--capability', metavar='', help='CAPABILITY of network:high or low') -@utils.arg('--vlan-id', metavar='' , +@utils.arg('--vlan-id', metavar='', help='Vlan Tag.') -@utils.arg('--mtu', metavar='' , +@utils.arg('--mtu', metavar='', help='Private plane mtu.eg.:1600.') -@utils.arg('--alias', metavar='' , +@utils.arg('--alias', metavar='', help='alias of network') +@utils.arg('--segmentation-type', metavar='', + help='network plane segmentation type.') def do_network_update(gc, args): """Update a specific network.""" # Filter out None values - + ip_range_list = [] - if args.ip_ranges: + if args.ip_ranges: for ip_range in args.ip_ranges: - ip_range_ref={} + ip_range_ref = {} for range_value in ip_range.split(","): try: k, v = range_value.split(":", 1) @@ -1323,8 +1495,8 @@ def do_network_update(gc, args): ip_range_ref['start'] = str(v) if str(k) == "end": ip_range_ref['end'] = str(v) - except ValueError: - raise exc.CommandError("ip_ranges error") + except ValueError: + raise exc.CommandError("ip_ranges error") ip_range_list.append(ip_range_ref) args.ip_ranges = ip_range_list fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) @@ -1335,14 +1507,13 @@ def do_network_update(gc, args): # Filter out values we can't use UPDATE_PARAMS = daisyclient.v1.networks.UPDATE_PARAMS fields = dict(filter(lambda x: x[0] in UPDATE_PARAMS, fields.items())) - network = gc.networks.update(network, **fields) _daisy_show(network) - + @utils.arg('networks', metavar='', nargs='+', help='ID of network.') -@utils.arg('--cluster-id', metavar='', help='ID of cluster .') +@utils.arg('--cluster-id', metavar='', help='ID of cluster .') def do_network_delete(gc, args): """Delete specified network(s).""" @@ -1365,8 +1536,12 @@ def do_network_delete(gc, args): print('[Fail]') print('%s: Unable to delete network %s' % (e, args_network)) + @utils.arg('--cluster-id', metavar='', help='Filter networks to those that have this name.') +@utils.arg('--type', metavar='', + help='Filter networks by type, ' + 'support "custom", "default", "template" and "system".') 
@utils.arg('--page-size', metavar='', default=None, type=int, help='Number of networks to request in each paginated request.') @utils.arg('--sort-key', default='name', @@ -1377,7 +1552,7 @@ def do_network_delete(gc, args): help='Sort networks list in specified direction.') def do_network_list(gc, args): """List networks you can access.""" - filter_keys = ['cluster_id'] + filter_keys = ['cluster_id', 'type'] filter_items = [(key, getattr(args, key)) for key in filter_keys] filters = dict([item for item in filter_items if item[1] is not None]) kwargs = {'id': args.cluster_id, 'filters': filters} @@ -1388,8 +1563,10 @@ def do_network_list(gc, args): kwargs['sort_dir'] = args.sort_dir networks = gc.networks.list(**kwargs) - - columns = ['ID', 'Name', 'Cluster_id', 'Description', 'Vlan_start','Vlan_end','Gateway','Cidr','Type', 'Ip_ranges'] + + columns = ['ID', 'Name', 'Cluster_id', 'Description', + 'Vlan_start', 'Vlan_end', 'Gateway', 'Cidr', + 'Type', 'Ip_ranges', 'Segmentation_type'] utils.print_list(networks, columns) @@ -1407,12 +1584,14 @@ def do_network_detail(gc, args): _daisy_show(network) else: network = gc.networks.list(**kwargs) - columns = ['ID', 'Name', 'Cluster_id', 'Description', 'Vlan_start','Vlan_end','Gateway','Cidr','Type', 'Ip_ranges'] - utils.print_list(network, columns) + columns = ['ID', 'Name', 'Cluster_id', 'Description', + 'Vlan_start', 'Vlan_end', 'Gateway', 'Cidr', + 'Type', 'Ip_ranges', 'Segmentation_type'] + utils.print_list(network, columns) -@utils.arg('cluster_id', metavar='', - help='ID of cluster to install TECS.') +@utils.arg('cluster_id', metavar='', + help='ID of cluster to install TECS.') @utils.arg('--version-id', metavar='', help='Version of TECS.') @utils.arg('--deployment-interface', metavar='', @@ -1424,14 +1603,14 @@ def do_install(dc, args): # Filter out values we can't use CREATE_PARAMS = daisyclient.v1.install.CREATE_PARAMS fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) - + install = dc.install.install(**fields) _daisy_show(install) -@utils.arg('cluster_id', metavar='', - help='The cluster ID to uninstall TECS.') +@utils.arg('cluster_id', metavar='', + help='The cluster ID to uninstall TECS.') def do_uninstall(gc, args): """Uninstall TECS.""" fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) @@ -1439,26 +1618,27 @@ def do_uninstall(gc, args): # Filter out values we can't use CREATE_PARAMS = daisyclient.v1.uninstall.CREATE_PARAMS fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) - + uninstall = gc.uninstall.uninstall(**fields) _daisy_show(uninstall) -@utils.arg('cluster_id', metavar='', - help='The cluster ID to query progress of uninstall TECS .') + +@utils.arg('cluster_id', metavar='', + help='The cluster ID to query progress of uninstall TECS .') def do_query_uninstall_progress(gc, args): """Query uninstall progress.""" fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) CREATE_PARAMS = daisyclient.v1.uninstall.CREATE_PARAMS fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) - + query_progress = gc.uninstall.query_progress(**fields) _daisy_show(query_progress) -@utils.arg('cluster_id', metavar='', - help='The cluster ID to update os and TECS.') +@utils.arg('cluster_id', metavar='', + help='The cluster ID to update os and TECS.') def do_update(gc, args): """update TECS.""" fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) @@ -1466,24 +1646,27 @@ def do_update(gc, args): # Filter out values we can't use CREATE_PARAMS = 
daisyclient.v1.update.CREATE_PARAMS fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) - + update = gc.update.update(**fields) _daisy_show(update) -@utils.arg('cluster_id', metavar='', - help='The cluster ID to query progress of update os and TECS .') + +@utils.arg('cluster_id', metavar='', + help='The cluster ID to query progress of update os and TECS .') def do_query_update_progress(gc, args): """Query update progress.""" fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) CREATE_PARAMS = daisyclient.v1.update.CREATE_PARAMS fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) - - query_progress = gc.update.query_progress(**fields) - _daisy_show(query_progress) -@utils.arg('cluster_id', metavar='', - help='The cluster ID on which to export tecs and HA config file from database.') + query_progress = gc.update.query_progress(**fields) + _daisy_show(query_progress) + + +@utils.arg('cluster_id', metavar='', + help='The cluster ID on which to export tecs \ + and HA config file from database.') def do_export_db(gc, args): """export database.""" fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) @@ -1491,30 +1674,54 @@ def do_export_db(gc, args): # Filter out values we can't use CREATE_PARAMS = daisyclient.v1.install.CREATE_PARAMS fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) - + config_file = gc.install.export_db(**fields) _daisy_show(config_file) - -@utils.arg('--cluster', metavar='', - help='ID of cluster to config file.') + +@utils.arg('--cluster', metavar='', + help='cluster id to add config. ' + 'when add config to role, cluster must be given.') @utils.arg('--role', metavar='', - help=' role name.') + help='add config to role, this is the first way to add config.') +@utils.arg('--host-id', metavar='', + help='add config to host, ' + 'this is the second way to add config.') @utils.arg('--config-set', metavar='', - help='id of the config-set.') -@utils.arg('--config', metavar='', + help='add config by config set id, ' + 'this is the third way to add config.') +@utils.arg('--config', metavar='', nargs='+', - help='file-name must take full path.such as:file-name=/etc/nova/nova.conf,section=DEFAULT,key=port,value=5661,description=description') + help='file-name must take full path.such as:\ + file-name=/etc/nova/nova.conf,section=DEFAULT,\ + key=port,value=5661,description=description') def do_config_add(gc, args): """add and update config interfaces.""" config_interface_list = [] - if args.config: + if args.config: for interfaces in args.config: - interface_info = {"file-name":"", "section":"", "key":"", "value": "","description": ""} - for kv_str in interfaces.split(","): - try: + interface_info = { + "file-name": "", "section": "", "key": "", + "value": "", "description": ""} + # if ',' in value of a confit item, merge them. 
+ config_items = interfaces.split(",") + real_config_items = [] + for item in config_items: + try: + if len(item.split("=")) == 1: + real_config_items[ + len(real_config_items) - 1] += (',' + item) + else: + real_config_items.append(item) + except ValueError: + raise exc.CommandError("config arguments error") + + # get key and value of config item + for kv_str in real_config_items: + try: k, v = kv_str.split("=", 1) - except ValueError: + except ValueError: raise exc.CommandError("config-interface error") if k in interface_info: interface_info[k] = v @@ -1525,9 +1732,10 @@ def do_config_add(gc, args): fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) config_interface_info = gc.configs.add(**fields) _daisy_show(config_interface_info) - -@utils.arg('cluster', metavar='', - help='ID of cluster to config file.') + + +@utils.arg('cluster', metavar='', + help='ID of cluster to config file.') @utils.arg('--role', metavar='', nargs='+', help=' role name.') @@ -1538,9 +1746,10 @@ def do_cluster_config_set_update(gc, args): fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) config_interface_info = gc.config_sets.cluster_config_set_update(**fields) _daisy_show(config_interface_info) - -@utils.arg('cluster', metavar='', - help='ID of cluster to config file.') + + +@utils.arg('cluster', metavar='', + help='ID of cluster to config file.') @utils.arg('--role', metavar='', nargs='+', help=' role name.') @@ -1551,46 +1760,57 @@ def do_cluster_config_set_progress(gc, args): fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) config_set_progress = gc.config_sets.cluster_config_set_progress(**fields) _daisy_show(config_set_progress) - + + +@utils.arg('--cluster-id', metavar='', + help='the host that will discover for cluster.') def do_discover_host(gc, args): - filter_keys = '' + filter_keys = ['cluster_id'] filter_items = [(key, getattr(args, key)) for key in filter_keys] filters = dict([item for item in filter_items if item[1] is not None]) - kwargs = {'filters': filters} - discover_host = gc.hosts.discover_host(**kwargs) + discover_host = gc.hosts.discover_host(**filters) _daisy_show(discover_host) -@utils.arg('service', metavar='', - help='service name who will use disk storage, suport db, glance and dbbackup.') -@utils.arg('role_id', metavar='', - help='which role service come from.') + +@utils.arg('service', metavar='', + help='service name who will use disk storage, suport db, \ + glance and dbbackup.') +@utils.arg('role_id', metavar='', + help='which role service come from.') @utils.arg('--disk-location', metavar='', help='where disks from, default is "local". 
\ "local" means disks come from local host,\ - "share" means disks come from share storage devices') -@utils.arg('--data-ips', metavar='', - help='data interfaces ip of Disk Array device, separate by ",", \ + "share" means disks come from share storage devices,\ + "share cluster" means disks come from share cluster \ + storage devices.') +@utils.arg('--data-ips', metavar='', + help='data interfaces ip of Disk Array device, separate by ",", \ when DISK_LOCATION is share, DATA_IPS cannot be empty') -@utils.arg('--size', metavar='', - help='unit is G, and default is -1, it means to use all of the disk.') -@utils.arg('--lun', metavar='', - help='mark which volume is used for glance sharing disk.') +@utils.arg('--size', metavar='', + help='unit is G, and default is -1,\ + it means to use all of the disk.') +@utils.arg('--lun', metavar='', + help='mark which volume is used for glance sharing disk.') +@utils.arg('--protocol-type', metavar='', + help='protocol type of share disks') def do_service_disk_add(dc, args): """ config services share disk. """ - + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) - + # Filter out values we can't use SERVICE_DISK_PARAMS = daisyclient.v1.disk_array.CREATE_SERVICE_DISK_PARAMS - fields = dict(filter(lambda x: x[0] in SERVICE_DISK_PARAMS, fields.items())) - #if fields.has_key('data_ips'): + fields = dict( + filter(lambda x: x[0] in SERVICE_DISK_PARAMS, fields.items())) + # if fields.has_key('data_ips'): # fields['data_ips'] = fields['data_ips'].split(",") - + service_disk_info = dc.disk_array.service_disk_add(**fields) _daisy_show(service_disk_info) + @utils.arg('service_disks', metavar='', nargs='+', help='ID(s) of service_disk to delete.') def do_service_disk_delete(dc, args): @@ -1613,57 +1833,67 @@ def do_service_disk_delete(dc, args): except exc.HTTPException as e: if args.verbose: print('[Fail]') - print('%s: Unable to delete service_disk %s' % (e, service_disk_id)) - -@utils.arg('id', metavar='', - help='ID of service_disk.') -@utils.arg('--service', metavar='', - help='service name who will use Disk Array device, suport db, glance and dbbackup.') -@utils.arg('--role-id', metavar='', - help='which role service come from.') + print('%s: Unable to delete service_disk %s' % + (e, service_disk_id)) + + +@utils.arg('id', metavar='', + help='ID of service_disk.') +@utils.arg('--service', metavar='', + help='service name who will use Disk Array device, ' + 'suport db, glance and dbbackup.') +@utils.arg('--role-id', metavar='', + help='which role service come from.') @utils.arg('--disk-location', metavar='', help='where disks from, default is "local". 
\ "local" means disks come from local host,\ - "share" means disks come from Disk Array device') -@utils.arg('--data-ips', metavar='', - help='data interfaces ip of Disk Array device, separate by ",", \ + "share" means disks come from Disk Array device') +@utils.arg('--data-ips', metavar='', + help='data interfaces ip of Disk Array device, separate by ",", \ when DISK_LOCATION is share, DATA_IPS cannot be empty') -@utils.arg('--size', metavar='', - help='unit is G, and default is -1, it means to use all of the disk.') -@utils.arg('--lun', metavar='', - help='mark which lun is used for Disk Array device,default is 0.') +@utils.arg('--size', metavar='', + help='unit is G, and default is -1, ' + 'it means to use all of the disk.') +@utils.arg('--lun', metavar='', + help='mark which lun is used for Disk Array device,' + 'default is 0.') +@utils.arg('--protocol-type', metavar='', + help='protocol type of share disks') def do_service_disk_update(dc, args): """Update a specific service_disk.""" # Filter out None values fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) id = fields.pop('id') - service_disk = utils.find_resource(dc.disk_array, id) + utils.find_resource(dc.disk_array, id) # Filter out values we can't use SERVICE_DISK_PARAMS = daisyclient.v1.disk_array.CREATE_SERVICE_DISK_PARAMS - fields = dict(filter(lambda x: x[0] in SERVICE_DISK_PARAMS, fields.items())) + fields = dict( + filter(lambda x: x[0] in SERVICE_DISK_PARAMS, fields.items())) service_disk_info = dc.disk_array.service_disk_update(id, **fields) _daisy_show(service_disk_info) - -@utils.arg('--role-id', metavar='', - help='filter service_disks by role id.') + + +@utils.arg('--role-id', metavar='', + help='filter service_disks by role id.') def do_service_disk_list(dc, args): """List service_disk you can access.""" filter_keys = ['role_id'] filter_items = [(key, getattr(args, key)) for key in filter_keys] filters = dict([item for item in filter_items if item[1] is not None]) - fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + # fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) kwargs = {'filters': filters} disk_array_list = dc.disk_array.service_disk_list(**kwargs) - columns = ['ID', 'SERVICE','ROLE_ID','DISK_LOCATION','DATA_IPS','SIZE', 'LUN'] + columns = ['ID', 'SERVICE', 'ROLE_ID', + 'DISK_LOCATION', 'DATA_IPS', 'SIZE', 'LUN'] utils.print_list(disk_array_list, columns) - - -@utils.arg('id', metavar='', - help='get service_disk detail by its id.') + + +@utils.arg('id', metavar='', + help='get service_disk detail by its id.') def do_service_disk_detail(dc, args): """detail service_disk you can access.""" filter_keys = ['id'] @@ -1672,36 +1902,40 @@ def do_service_disk_detail(dc, args): fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) kwargs = {'filters': filters} if filters: - #service_disk = utils.find_resource(dc.disk_array, fields.pop('id')) - service_disk_info = dc.disk_array.service_disk_detail(fields.pop('id'), **fields) + # service_disk = utils.find_resource(dc.disk_array, fields.pop('id')) + service_disk_info = dc.disk_array.service_disk_detail( + fields.pop('id'), **fields) _daisy_show(service_disk_info) else: service_disk = dc.disk_array.service_disk_list(**kwargs) - columns = ['ID', 'SERVICE','ROLE_ID','DISK_LOCATION','DATA_IPS','SIZE', 'LUN'] + columns = ['ID', 'SERVICE', 'ROLE_ID', + 'DISK_LOCATION', 'DATA_IPS', 'SIZE', 'LUN'] utils.print_list(service_disk, columns) - + + def _paraser_disk_array(disk_array): disk_arrays = [] 
CINDER_VOLUME_BACKEND_PARAMS =\ daisyclient.v1.disk_array.CREATE_CINDER_BACKEND_INTER_PARAMS - if disk_array: + if disk_array: for array in disk_array: disk_array_info = {} for kv_str in array.split(","): - try: + try: k, v = kv_str.split("=", 1) except ValueError: raise exc.CommandError("disk_array error") if k in CINDER_VOLUME_BACKEND_PARAMS: if (k == 'pools' or - k == 'data_ips' or - k == 'management_ips'): + k == 'data_ips' or + k == 'management_ips'): disk_array_info[k] = ','.join(v.split("_")) else: disk_array_info[k] = v disk_arrays.append(disk_array_info) return disk_arrays - + + @utils.arg('disk_array', metavar='', @@ -1710,34 +1944,37 @@ def _paraser_disk_array(disk_array): device, separate by "_";\ data_ips:data interfaces ip of Disk Array device,\ separate by ",", \ - when using FUJISTU Disk Array, DATA_IPS cannot be empty;\ + when using FUJITSU Disk Array, DATA_IPS cannot be empty;\ pools: pools name which are configed in Disk Array device;\ user_name: user name to login Disk Array device;\ user_pwd: user password to login Disk Array device;\ volume_driver: supports "KS3200_FCSAN", "KS3200_IPSAN"\ - and "FUJISTU_ETERNUS" according by Disk Array device type,\ + and "FUJITSU_ETERNUS" according by Disk Array device type,\ separate by "_";\ volume_type: maybe same in two backends.') -@utils.arg('role_id', metavar='', - help='filter cinder_volumes by role id.') +@utils.arg('role_id', metavar='', + help='filter cinder_volumes by role id.') def do_cinder_volume_add(dc, args): """config cinder volume backend.""" args.disk_array = _paraser_disk_array(args.disk_array) fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) # Filter out values we can't use - CINDER_BACKEND_PARAMS = daisyclient.v1.disk_array.CREATE_CINDER_BACKEND_PARAMS - fields = dict(filter(lambda x: x[0] in CINDER_BACKEND_PARAMS, fields.items())) + CINDER_BACKEND_PARAMS = (daisyclient.v1.disk_array. + CREATE_CINDER_BACKEND_PARAMS) + fields = dict( + filter(lambda x: x[0] in CINDER_BACKEND_PARAMS, fields.items())) cinder_volume_info = dc.disk_array.cinder_volume_add(**fields) - + _daisy_show(cinder_volume_info) - + + @utils.arg('cinder_volumes', metavar='', nargs='+', help='ID(s) of cinder volumes to delete.') def do_cinder_volume_delete(dc, args): """delete specified cinder_volume backend.""" for cinder_volume_id in args.cinder_volumes: - #cinder_volume = utils.find_resource(dc.disk_array, cinder_volume_id) - #if cinder_volume and cinder_volume.deleted: + # cinder_volume = utils.find_resource(dc.disk_array, cinder_volume_id) + # if cinder_volume and cinder_volume.deleted: # msg = "No cinder_volume with ID '%s' exists." 
% cinder_volume_id # raise exc.CommandError(msg) try: @@ -1752,28 +1989,31 @@ def do_cinder_volume_delete(dc, args): except exc.HTTPException as e: if args.verbose: print('[Fail]') - print('%s: Unable to delete cinder volume %s' % (e, cinder_volume_id)) + print('%s: Unable to delete cinder volume %s' % + (e, cinder_volume_id)) -@utils.arg('id', metavar='', - help='ID of cinder_volume.') -@utils.arg('--management-ips', metavar='', - help='management interfaces ip of Disk Array device, separate by ","') -@utils.arg('--data-ips', metavar='', - help='data interfaces ip of Disk Array device, separate by ",", \ - when using FUJISTU Disk Array, DATA_IPS cannot be empty') -@utils.arg('--pools', metavar='', - help='pools name which are configed in Disk Array device') -@utils.arg('--volume-driver', metavar='', - help='supports "KS3200_FCSAN", "KS3200_IPSAN" and "FUJISTU_ETERNUS"\ + +@utils.arg('id', metavar='', + help='ID of cinder_volume.') +@utils.arg('--management-ips', metavar='', + help='management interfaces ip of Disk Array device, \ + separate by ","') +@utils.arg('--data-ips', metavar='', + help='data interfaces ip of Disk Array device, separate by ",", \ + when using FUJITSU Disk Array, DATA_IPS cannot be empty') +@utils.arg('--pools', metavar='', + help='pools name which are configed in Disk Array device') +@utils.arg('--volume-driver', metavar='', + help='supports "KS3200_FCSAN", "KS3200_IPSAN" and "FUJITSU_ETERNUS"\ according by Disk Array device type, separate by ","') -@utils.arg('--volume-type', metavar='', - help='it maybe same in two backends, supprot "" and ""') -@utils.arg('--role-id', metavar='', - help='which role cinder_volume come from.') -@utils.arg('--user-name', metavar='', - help='user name of disk array') -@utils.arg('--user-pwd', metavar='', - help='user password of disk arry') +@utils.arg('--volume-type', metavar='', + help='it may be the same in two backends, support "" and ""') +@utils.arg('--role-id', metavar='', + help='which role cinder_volume come from.') +@utils.arg('--user-name', metavar='', + help='user name of disk array') +@utils.arg('--user-pwd', metavar='', + help='user password of disk array') def do_cinder_volume_update(dc, args): """Update a specific cinder_volume.""" # Filter out None values @@ -1782,38 +2022,44 @@ def do_cinder_volume_update(dc, args): # Filter out values we can't use CINDER_VOLUME_PARAMS =\ daisyclient.v1.disk_array.CREATE_CINDER_BACKEND_INTER_PARAMS - fields = dict(filter(lambda x: x[0] in CINDER_VOLUME_PARAMS, fields.items())) - - if fields.has_key('management_ips'): - fields['management_ips'] = ','.join(fields['management_ips'].split("_")) - if fields.has_key('data_ips'): + fields = dict( + filter(lambda x: x[0] in CINDER_VOLUME_PARAMS, fields.items())) + +# if fields.has_key('management_ips'): + if 'management_ips' in fields: + fields['management_ips'] = ','.join( + fields['management_ips'].split("_")) +# if fields.has_key(): + if 'data_ips' in fields: fields['data_ips'] = ','.join(fields['data_ips'].split("_")) - if fields.has_key('pools'): +# if fields.has_key(): + if 'pools' in fields: fields['pools'] = ','.join(fields['pools'].split("_")) - + cinder_volume_info = dc.disk_array.cinder_volume_update(id, **fields) _daisy_show(cinder_volume_info) - -@utils.arg('--role-id', metavar='', - help='filter cinder_volumes by role id.') + +@utils.arg('--role-id', metavar='', + help='filter cinder_volumes by role id.') def do_cinder_volume_list(dc, args): """List cinder_volume you can access.""" filter_keys = ['role_id'] filter_items = [(key,
getattr(args, key)) for key in filter_keys] filters = dict([item for item in filter_items if item[1] is not None]) - fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + # fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) kwargs = {'filters': filters} disk_array_list = dc.disk_array.cinder_volume_list(**kwargs) - columns = ['ID', 'MANAGEMENT_IPS','DATA_IPS','POOLS', - 'VOLUME_DRIVER','VOLUME_TYPE','BACKEND_INDEX', - 'USER_NAME','USER_PWD', 'ROLE_ID'] + columns = ['ID', 'MANAGEMENT_IPS', 'DATA_IPS', 'POOLS', + 'VOLUME_DRIVER', 'VOLUME_TYPE', 'BACKEND_INDEX', + 'USER_NAME', 'USER_PWD', 'ROLE_ID'] utils.print_list(disk_array_list, columns) - -@utils.arg('id', metavar='', - help='get cinder_volume detail by its id.') + + +@utils.arg('id', metavar='', + help='get cinder_volume detail by its id.') def do_cinder_volume_detail(dc, args): """detail cinder_volume you can access.""" filter_keys = ['id'] @@ -1822,17 +2068,19 @@ def do_cinder_volume_detail(dc, args): fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) kwargs = {'filters': filters} if filters: - cinder_volume_info = dc.disk_array.cinder_volume_detail(fields.pop('id'), **fields) + cinder_volume_info = dc.disk_array.cinder_volume_detail( + fields.pop('id'), **fields) _daisy_show(cinder_volume_info) else: cinder_volume = dc.disk_array.service_disk_list(**kwargs) - columns = ['ID', 'MANAGEMENT_IPS','DATA_IPS','POOLS', - 'VOLUME_DRIVER','VOLUME_TYPE','BACKEND_INDEX', - 'USER_NAME','USER_PWD', 'ROLE_ID'] + columns = ['ID', 'MANAGEMENT_IPS', 'DATA_IPS', 'POOLS', + 'VOLUME_DRIVER', 'VOLUME_TYPE', 'BACKEND_INDEX', + 'USER_NAME', 'USER_PWD', 'ROLE_ID'] utils.print_list(cinder_volume, columns) -@utils.arg('cluster', metavar='', - help='ID of cluster to update disk array.') + +@utils.arg('cluster', metavar='', + help='ID of cluster to update disk array.') def do_disk_array_update(dc, args): """update cluster disk array configuration for tecs backend only.""" # Filter out None values @@ -1841,10 +2089,11 @@ def do_disk_array_update(dc, args): # Filter out values we can't use DISK_ARRAY_PARAMS = [] fields = dict(filter(lambda x: x[0] in DISK_ARRAY_PARAMS, fields.items())) - + update_result = dc.install.disk_array_update(cluster_id, **fields) _daisy_show(update_result) + @utils.arg('name', metavar='', help='Template name of the cluster.') @utils.arg('--description', metavar='', @@ -1863,8 +2112,9 @@ def do_template_add(gc, args): template = gc.template.add(**fields) _daisy_show(template) + @utils.arg('id', metavar='', - help='Id of the cluster template.') + help='Id of the cluster template.') @utils.arg('--name', metavar='', help='Template name of the cluster.') @utils.arg('--description', metavar='', @@ -1884,7 +2134,8 @@ def do_template_update(gc, args): fields = dict(filter(lambda x: x[0] in UPDATE_PARAMS, fields.items())) template = gc.template.update(template_id, **fields) _daisy_show(template) - + + @utils.arg('id', metavar='', nargs='+', help='ID of templates.') def do_template_delete(gc, args): @@ -1903,7 +2154,8 @@ def do_template_delete(gc, args): if args.verbose: print('[Fail]') print('%s: Unable to delete cluster template %s' % (e, template)) - + + @utils.arg('--name', metavar='', help='Filter cluster templates to those that have this name.') @utils.arg('--type', metavar='', @@ -1923,9 +2175,10 @@ def do_template_list(gc, args): kwargs['sort_key'] = args.sort_key kwargs['sort_dir'] = args.sort_dir templates = gc.template.list(**kwargs) - columns = ['ID', 'Name','Type', 'Hosts', 
'Content'] + columns = ['ID', 'Name', 'Type', 'Hosts', 'Content'] utils.print_list(templates, columns) - + + @utils.arg('id', metavar='', help='ID of template.') def do_template_detail(gc, args): @@ -1946,14 +2199,16 @@ def do_template_detail(gc, args): print('[Fail]') print('%s: Unable to get template infomation %s' % (e, template_id)) -@utils.arg('cluster_name', metavar = '', - help = 'Name of cluster to create template.') -@utils.arg('template_name', metavar = '', - help = 'the name of json.') -@utils.arg('--description', metavar = '', - help = 'Description of the template.') -@utils.arg('--type', metavar = '', - help = 'Export backend database based on type,for example:tecs,zenic') + +@utils.arg('cluster_name', metavar='', + help='Name of cluster to create template.') +@utils.arg('template_name', metavar='', + help='the name of json.') +@utils.arg('--description', metavar='', + help='Description of the template.') +@utils.arg('--type', metavar='', + help='Export backend database based on type,' + 'for example:tecs,zenic') def do_export_db_to_json(dc, args): """export db to json.""" fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) @@ -1962,8 +2217,9 @@ def do_export_db_to_json(dc, args): export_db_to_json = dc.template.export_db_to_json(**fields) _daisy_show(export_db_to_json) -@utils.arg('json_file_path', metavar = '', - help = 'The json file of path') + +@utils.arg('json_file_path', metavar='', + help='The json file of path') def do_import_json_to_template(dc, args): """import json to tempalte""" json_file = args.json_file_path @@ -1972,14 +2228,17 @@ def do_import_json_to_template(dc, args): return with open(json_file) as tfp: params_json = tfp.read() - dict_params = {'template':params_json} - import_json_to_template = dc.template.import_json_to_template(**dict_params) + params_json = json.dumps((json.loads(params_json))) + dict_params = {'template': params_json} + import_json_to_template = dc.template.import_json_to_template( + **dict_params) _daisy_show(import_json_to_template) - -@utils.arg('template_name', metavar = '', - help = 'the name of json.') -@utils.arg('cluster', metavar = '', - help = 'The name of create cluster') + + +@utils.arg('template_name', metavar='', + help='the name of json.') +@utils.arg('cluster', metavar='', + help='The name of create cluster') def do_import_template_to_db(dc, args): """import template to db""" fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) @@ -1987,15 +2246,15 @@ def do_import_template_to_db(dc, args): fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) import_template_to_db = dc.template.import_template_to_db(**fields) _daisy_show(import_template_to_db) - -@utils.arg('cluster_name', metavar='', - help='name of template.') -@utils.arg('host_id', metavar='', - help='host id.') -@utils.arg('host_template_name', metavar='', - help='host template name.') -@utils.arg('--description', metavar='', - help='host template description.') + + +@utils.arg('cluster_name', metavar='', + help='name of template.') +@utils.arg('host_id', metavar='', help='host id.') +@utils.arg('host_template_name', metavar='', + help='host template name.') +@utils.arg('--description', metavar='', + help='host template description.') def do_host_to_template(dc, args): """HOST TO TEMPLATE.""" fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) @@ -2005,12 +2264,13 @@ def do_host_to_template(dc, args): host_to_template = dc.template.host_to_template(**fields) _daisy_show(host_to_template) 
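Note on the do_config_add change earlier in this patch (illustrative only, not part of the applied diff): the new parsing first splits the --config string on "," and then re-attaches any fragment that carries no "=" to the previous key=value pair, so commas embedded in a value survive the split. A minimal standalone Python sketch of that logic, with a hypothetical helper name and sample value:

def parse_config_item(item_str):
    # Split on "," first, then glue any fragment without "=" back onto the
    # previous key=value pair so commas inside a value are preserved
    # (assumes the first fragment is itself a key=value pair).
    tokens = item_str.split(",")
    merged = []
    for token in tokens:
        if "=" in token:
            merged.append(token)
        else:
            merged[-1] += "," + token
    info = {"file-name": "", "section": "", "key": "",
            "value": "", "description": ""}
    for kv in merged:
        key, value = kv.split("=", 1)
        if key in info:
            info[key] = value
    return info

print(parse_config_item("file-name=/etc/nova/nova.conf,section=DEFAULT,"
                        "key=osapi_compute_listen_port,value=5661,"
                        "description=compute api port, keep the default"))
# -> the description keeps its embedded comma: 'compute api port, keep the default'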
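A similar convention appears in _paraser_disk_array earlier in this patch (again illustrative, not part of the applied diff): since "," already separates the key=value pairs of a --disk-array argument, multi-valued fields (pools, data_ips, management_ips) are written with "_" between items and converted back to "," after parsing. A standalone sketch of that convention, with hypothetical names:

MULTI_VALUE_KEYS = ("pools", "data_ips", "management_ips")

def parse_disk_array(array_str):
    # "," separates key=value pairs, so multi-valued fields are passed with
    # "_" between items and converted back to "," here.
    info = {}
    for kv in array_str.split(","):
        key, value = kv.split("=", 1)
        if key in MULTI_VALUE_KEYS:
            value = ",".join(value.split("_"))
        info[key] = value
    return info

print(parse_disk_array("management_ips=10.43.0.1_10.43.0.2,pools=pool1_pool2,"
                       "volume_driver=FUJITSU_ETERNUS,user_name=admin"))
# -> {'management_ips': '10.43.0.1,10.43.0.2', 'pools': 'pool1,pool2',
#     'volume_driver': 'FUJITSU_ETERNUS', 'user_name': 'admin'}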
-@utils.arg('cluster_name', metavar='', - help='name of cluster to config file.') -@utils.arg('host_template_name', metavar='', - help='host template name.') -@utils.arg('host_id', metavar='', - help='host id list') + +@utils.arg('cluster_name', metavar='', + help='name of cluster to config file.') +@utils.arg('host_template_name', metavar='', + help='host template name.') +@utils.arg('host_id', metavar='', + help='host id list') def do_template_to_host(dc, args): """TEMPLATE TO HOST.""" fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) @@ -2020,8 +2280,9 @@ def do_template_to_host(dc, args): template_to_host = dc.template.template_to_host(**fields) _daisy_show(template_to_host) -@utils.arg('cluster_name', metavar='', - help='name of cluster.') + +@utils.arg('cluster_name', metavar='', + help='name of cluster.') def do_host_template_list(dc, args): """GET ALL HOST TEMPLATE.""" filter_keys = ['cluster_name'] @@ -2029,13 +2290,15 @@ def do_host_template_list(dc, args): filters = dict([item for item in filter_items if item[1] is not None]) kwargs = {'filters': filters} get_all_host_template = dc.template.host_template_list(**kwargs) - columns = ['name','description','os_version_file','role','interfaces'] + columns = ['name', 'description', 'os_version_file', 'role', + 'interfaces'] utils.print_list(get_all_host_template, columns) -@utils.arg('cluster_name', metavar='', - help='name of cluster to config file.') -@utils.arg('host_template_name', metavar='', - help='host template name.') + +@utils.arg('cluster_name', metavar='', + help='name of cluster to config file.') +@utils.arg('host_template_name', metavar='', + help='host template name.') def do_delete_host_template(dc, args): """DELETE HOST TEMPLATE.""" fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) @@ -2043,4 +2306,223 @@ def do_delete_host_template(dc, args): fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) host_template = dc.template.delete_host_template(**fields) _daisy_show(host_template) - + + +@utils.arg('--hwm-ip', metavar='', help='The ip of hwm') +def do_node_list(gc, args): + """Get all nodes from hwm.""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + result = gc.node.list(**fields) + columns = ['id', 'cpuCore', 'cpuFrequency', 'memory', 'disk', + 'hardwareType', 'hardwareStatus', 'interfaces'] + utils.print_list(result, columns, conver_field=False) + + +@utils.arg('--hwm-ip', metavar='', help='The ip of hwm') +@utils.arg('hwm_id', metavar='', help='The id of hwm') +def do_node_location(gc, args): + """Get node location from hwm.""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + location = gc.node.location(**fields) + _daisy_show(location) + + +@utils.arg('--hwm-ip', metavar='', help='The ip of hwm') +@utils.arg('hwm_id', metavar='', help='The id of hwm') +def do_node_restart(gc, args): + """Restart node.""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + location = gc.node.restart(**fields) + _daisy_show(location) + + +@utils.arg('--hwm-ip', metavar='', help='The ip of hwm') +@utils.arg('action_id', metavar='', + help='The action id of nodes') +def do_restart_state(gc, args): + """Get restart state of node.""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + location = gc.node.restart_state(**fields) + _daisy_show(location) + + +@utils.arg('--hwm-ip', metavar='', help='The ip of hwm') +@utils.arg('--boot-type', metavar='', help='The node boot type') 
+@utils.arg('hwm_id', metavar='', help='The id of hwm') +def do_set_boot(gc, args): + """Set boot type of node.""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + location = gc.node.set_boot(**fields) + _daisy_show(location) + + +@utils.arg('--hwm-ip', metavar='', help='The ip of hwm') +def do_node_update(gc, args): + """Update hosts.""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + result = gc.node.update(**fields) + columns = ['ID', 'Hwm_id', 'Hwm_ip', 'Name', 'Description', + 'Resource_type', 'Status', 'Os_progress', 'Os_status', + 'Messages'] + utils.print_list(result, columns) + + +@utils.arg('hwm_id', metavar='', help='The id of hwm') +@utils.arg('--hwm-ip', metavar='', help='The ip of hwm') +def do_pxe_host_discover(gc, args): + """Discover host with pxe.""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + host = gc.node.pxe_host_discover(**fields) + _daisy_show(host) + + +@utils.arg('hwm_ip', metavar='', + help='Hwm ip to be added.') +@utils.arg('--description', metavar='', + help='Hwm description to be added.') +def do_hwm_add(gc, args): + """Add a hwm.""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + # Filter out values we can't use + CREATE_PARAMS = daisyclient.v1.hwms.CREATE_PARAMS + fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) + + hwm = gc.hwm.add(**fields) + _daisy_show(hwm) + + +@utils.arg('hwm', metavar='', help='ID of hwm to modify.') +@utils.arg('--hwm-ip', metavar='', help='The ip of hwm.') +@utils.arg('--description', metavar='', + help='Description of hwm.') +def do_hwm_update(gc, args): + """Update a specific hwm.""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + hwm_arg = fields.pop('hwm') + hwm = utils.find_resource(gc.hwm, hwm_arg) + + # Filter out values we can't use + UPDATE_PARAMS = daisyclient.v1.hwms.UPDATE_PARAMS + fields = dict(filter(lambda x: x[0] in UPDATE_PARAMS, fields.items())) + hwm = gc.hwm.update(hwm, **fields) + _daisy_show(hwm) + + +@utils.arg('--page-size', metavar='', default=None, type=int, + help='Number of hwms to request in each paginated request.') +@utils.arg('--sort-key', default=None, + choices=daisyclient.v1.hwms.SORT_KEY_VALUES, + help='Sort hwm list by specified field.') +@utils.arg('--sort-dir', default='asc', + choices=daisyclient.v1.hwms.SORT_DIR_VALUES, + help='Sort hwm list in specified direction.') +def do_hwm_list(gc, args): + """List hwms you can access.""" + kwargs = {'filters': {}} + if args.page_size is not None: + kwargs['page_size'] = args.page_size + + kwargs['sort_key'] = args.sort_key + kwargs['sort_dir'] = args.sort_dir + + hwms = gc.hwm.list(**kwargs) + columns = ['ID', 'Hwm_ip', 'Description'] + utils.print_list(hwms, columns) + + +@utils.arg('id', metavar='', + help='Filter hwm to those that have this id.') +def do_hwm_detail(gc, args): + """List hwm you can access.""" + host = utils.find_resource(gc.hwm, args.id) + _daisy_show(host) + + +@utils.arg('hwms', metavar='', nargs='+', + help='ID of hwm(s) to delete.') +def do_hwm_delete(gc, args): + """Delete specified hwm(s).""" + for args_hwm in args.hwms: + hwm = utils.find_resource(gc.hwm, args_hwm) + if hwm and hwm.deleted: + msg = "No hwm with an ID of '%s' exists." % hwm.id + raise exc.CommandError(msg) + try: + if args.verbose: + print('Requesting hwm delete for %s ...' 
% + encodeutils.safe_decode(args_hwm), end=' ') + gc.hwm.delete(hwm) + + if args.verbose: + print('[Done]') + + except exc.HTTPException as e: + if args.verbose: + print('[Fail]') + print('%s: Unable to delete hwm %s' % (e, args_hwm)) + + +@utils.arg('--provider-ip', metavar='', + help='The ip of provider.') +@utils.arg('--operation', metavar='', + help='The operation of cloud.') +@utils.arg('--name', metavar='', help='The name of cloud.') +@utils.arg('--url', metavar='', + help='The url of cloud.') +def do_inform_cloud_state(gc, args): + """To inform provider the cloud state.""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + try: + gc.node.cloud_state(**fields) + except exc.HTTPException as e: + print('%s: Unable to inform provider' % e) + + +def do_backup(dc, args): + """Backup daisy data.""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + BACKUP_PARAMS = daisyclient.v1.backup_restore.BACKUP_PARAMS + fields = dict(filter(lambda x: x[0] in BACKUP_PARAMS, fields.items())) + backup = dc.backup_restore.backup(**fields) + _daisy_show(backup) + + +@utils.arg('backup_file_path', metavar='', + help='The full path of backup file.') +def do_restore(dc, args): + """Restore daisy data.""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + # Filter out values we can't use + RESTORE_PARAMS = daisyclient.v1.backup_restore.RESTORE_PARAMS + fields = dict(filter(lambda x: x[0] in RESTORE_PARAMS, fields.items())) + dc.backup_restore.restore(**fields) + + +@utils.arg('backup_file_path', metavar='', + help='The full path of backup file.') +def do_backup_file_version(dc, args): + """Get version of backup file.""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + # Filter out values we can't use + RESTORE_PARAMS = daisyclient.v1.backup_restore.RESTORE_PARAMS + fields = dict(filter(lambda x: x[0] in RESTORE_PARAMS, fields.items())) + file_version = dc.backup_restore.backup_file_version(**fields) + _daisy_show(file_version) + + +@utils.arg('--type', metavar='', + help='Type of daisy version, supported type are "internal": ' + 'the internal version of daisy.') +def do_version(dc, args): + """Get version of daisy.""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + # Filter out values we can't use + VERSION_PARAMS = daisyclient.v1.backup_restore.VERSION_PARAMS + fields = dict(filter(lambda x: x[0] in VERSION_PARAMS, fields.items())) + version = dc.backup_restore.version(**fields) + _daisy_show(version) diff --git a/code/daisyclient/daisyclient/v1/template.py b/code/daisyclient/daisyclient/v1/template.py index 5a105f9b..e1457a64 100755 --- a/code/daisyclient/daisyclient/v1/template.py +++ b/code/daisyclient/daisyclient/v1/template.py @@ -14,8 +14,6 @@ # under the License. 
import copy -import os - from oslo_utils import encodeutils from oslo_utils import strutils import six @@ -24,9 +22,12 @@ import six.moves.urllib.parse as urlparse from daisyclient.common import utils from daisyclient.openstack.common.apiclient import base -UPDATE_PARAMS = ('name','description', 'type', 'hosts', 'content','cluster_name','template_name', 'template') +UPDATE_PARAMS = ('name', 'description', 'type', 'hosts', 'content', + 'cluster_name', 'template_name', 'template') -CREATE_PARAMS = ('id', 'cluster_id','name', 'description', 'cluster_name','host_id', 'host_template_name', 'type', 'hosts', 'content','cluster','template_name', 'template') +CREATE_PARAMS = ('id', 'cluster_id', 'name', 'description', 'cluster_name', + 'host_id', 'host_template_name', 'type', 'hosts', 'content', + 'cluster', 'template_name', 'template') DEFAULT_PAGE_SIZE = 20 @@ -80,7 +81,7 @@ class TemplateManager(base.ManagerWithFind): meta[key] = strutils.bool_from_string(meta[key]) return self._format_template_meta_for_user(meta) - + def _template_meta_to_headers(self, fields): headers = {} fields_copy = copy.deepcopy(fields) @@ -93,7 +94,7 @@ class TemplateManager(base.ManagerWithFind): for key, value in six.iteritems(fields_copy): headers['%s' % key] = utils.to_str(value) return headers - + @staticmethod def _format_image_meta_for_user(meta): for key in ['size', 'min_ram', 'min_disk']: @@ -140,20 +141,22 @@ class TemplateManager(base.ManagerWithFind): params.update(filters) return params - + def get(self, template_id): """get template information by id.""" url = "/v1/template/%s" % base.getid(template_id) resp, body = self.client.get(url) - return Template(self, self._format_template_meta_for_user(body['template'])) + return Template(self, self._format_template_meta_for_user( + body['template'])) def list(self, **kwargs): """Get a list of cluster template. 
:param page_size: number of items to request in each paginated request :param limit: maximum number of services to return - :param marker: begin returning services that appear later in the service - list than that represented by this service id + :param marker: begin returning services that appear later + in the service list than that represented + by this service id :param filters: dict of direct comparison filters that mimics the structure of an service object :param return_request_id: If an empty list is provided, populate this @@ -162,7 +165,7 @@ class TemplateManager(base.ManagerWithFind): :rtype: list of :class:`Service` """ absolute_limit = kwargs.get('limit') - page_size = kwargs.get('page_size', DEFAULT_PAGE_SIZE) + # page_size = kwargs.get('page_size', DEFAULT_PAGE_SIZE) def paginate(qp, return_request_id=None): for param, value in six.iteritems(qp): @@ -189,8 +192,8 @@ class TemplateManager(base.ManagerWithFind): seen = 0 seen_last_page = 0 - filtered = 0 - + # filtered = 0 + for template in paginate(params, return_request_id): if (absolute_limit is not None and seen + seen_last_page >= absolute_limit): @@ -205,7 +208,7 @@ class TemplateManager(base.ManagerWithFind): TODO(bcwaldon): document accepted params """ - + fields = {} for field in kwargs: if field in CREATE_PARAMS: @@ -216,7 +219,7 @@ class TemplateManager(base.ManagerWithFind): msg = 'create() got an unexpected keyword argument \'%s\'' raise TypeError(msg % field) hdrs = self._template_meta_to_headers(fields) - + resp, body = self.client.post('/v1/template', headers=hdrs, data=hdrs) @@ -224,14 +227,14 @@ class TemplateManager(base.ManagerWithFind): if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - return Template(self, self._format_template_meta_for_user(body['template'])) + return Template(self, self._format_template_meta_for_user( + body['template'])) def delete(self, template_id): """Delete a cluster template.""" url = "/v1/template/%s" % base.getid(template_id) resp, body = self.client.delete(url) - def update(self, template_id, **kwargs): """Update an service @@ -255,8 +258,9 @@ class TemplateManager(base.ManagerWithFind): if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - return Template(self, self._format_template_meta_for_user(body['template'])) - + return Template(self, self._format_template_meta_for_user( + body['template'])) + def export_db_to_json(self, **kwargs): """Add a template record, export a JSON file TODO(bcwaldon): document accepted params @@ -271,9 +275,10 @@ class TemplateManager(base.ManagerWithFind): url = '/v1/export_db_to_json' hdrs = self._template_meta_to_headers(fields) - resp, body = self.client.post(url,headers=hdrs,data=hdrs) - return Template(self, self._format_template_meta_for_user(body['template'])) - + resp, body = self.client.post(url, headers=hdrs, data=hdrs) + return Template( + self, self._format_template_meta_for_user(body['template'])) + def import_json_to_template(self, **kwargs): """import json to tempalte TODO(bcwaldon): document accepted params @@ -288,9 +293,10 @@ class TemplateManager(base.ManagerWithFind): url = '/v1/import_json_to_template' hdrs = self._template_meta_to_headers(fields) - resp, body = self.client.post(url,headers=hdrs,data=hdrs) - return Template(self, self._format_template_meta_for_user(body['template'])) - + resp, body = self.client.post(url, headers=hdrs, data=hdrs) + return Template( + self, self._format_template_meta_for_user(body['template'])) + def 
import_template_to_db(self, **kwargs): """Create a cluster with a template record TODO(bcwaldon): document accepted params @@ -305,9 +311,9 @@ class TemplateManager(base.ManagerWithFind): url = '/v1/import_template_to_db' hdrs = self._template_meta_to_headers(fields) - resp, body = self.client.post(url,headers=hdrs,data=hdrs) - return Template(self, self._format_template_meta_for_user(body['template'])) - + resp, body = self.client.post(url, headers=hdrs, data=hdrs) + return Template( + self, self._format_template_meta_for_user(body['template'])) def host_to_template(self, **kwargs): fields = {} @@ -315,14 +321,16 @@ class TemplateManager(base.ManagerWithFind): if field in CREATE_PARAMS: fields[field] = kwargs[field] else: - msg = 'host_to_template() got an unexpected keyword argument \'%s\'' + msg = 'host_to_template() \ + got an unexpected keyword argument \'%s\'' raise TypeError(msg % field) - + url = '/v1/host_to_template' hdrs = self._template_meta_to_headers(fields) - resp, body = self.client.post(url,headers=hdrs,data=hdrs) - return Template(self, self._format_template_meta_for_user(body['host_template'])) - + resp, body = self.client.post(url, headers=hdrs, data=hdrs) + return Template( + self, self._format_template_meta_for_user(body['host_template'])) + def template_to_host(self, **kwargs): """Update host with template""" hdrs = {} @@ -341,11 +349,12 @@ class TemplateManager(base.ManagerWithFind): return_request_id = kwargs.get('return_req_id', None) if return_request_id is not None: return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) - return Template(self, self._format_template_meta_for_user(body['host_template'])) - + return Template( + self, self._format_template_meta_for_user(body['host_template'])) + def host_template_list(self, **kwargs): absolute_limit = kwargs.get('limit') - page_size = kwargs.get('page_size', DEFAULT_PAGE_SIZE) + # page_size = kwargs.get('page_size', DEFAULT_PAGE_SIZE) def paginate(qp, return_request_id=None): for param, value in six.iteritems(qp): @@ -366,13 +375,13 @@ class TemplateManager(base.ManagerWithFind): for template in host_templates: yield template return_request_id = kwargs.get('return_req_id', None) - + params = self._build_params(kwargs) seen = 0 seen_last_page = 0 - filtered = 0 - + # filtered = 0 + for template in paginate(params, return_request_id): if (absolute_limit is not None and seen + seen_last_page >= absolute_limit): @@ -388,10 +397,12 @@ class TemplateManager(base.ManagerWithFind): if field in CREATE_PARAMS: fields[field] = kwargs[field] else: - msg = 'host_to_template() got an unexpected keyword argument \'%s\'' + msg = 'host_to_template()\ + got an unexpected keyword argument \'%s\'' raise TypeError(msg % field) - + url = '/v1/host_template' hdrs = self._template_meta_to_headers(fields) - resp, body = self.client.put(url,headers=hdrs,data=hdrs) - return Template(self, self._format_template_meta_for_user(body['host_template'])) \ No newline at end of file + resp, body = self.client.put(url, headers=hdrs, data=hdrs) + return Template( + self, self._format_template_meta_for_user(body['host_template'])) diff --git a/code/daisyclient/daisyclient/v1/uninstall.py b/code/daisyclient/daisyclient/v1/uninstall.py index 04920ee6..935cbd9d 100755 --- a/code/daisyclient/daisyclient/v1/uninstall.py +++ b/code/daisyclient/daisyclient/v1/uninstall.py @@ -14,17 +14,11 @@ # under the License. 
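Before the uninstall.py changes below, a brief sketch of how the TemplateManager calls touched above are typically combined; it is not part of the patch. It assumes the v1 client exposes the manager as client.template, and the endpoint, cluster name, template name and host id are placeholders.

    # Hypothetical usage sketch (not part of this patch).
    from daisyclient.v1 import client as daisy_client

    client = daisy_client.Client('http://127.0.0.1:19292')   # assumed endpoint

    # Export the cluster configuration to a JSON-backed template record, then
    # re-import it under a new name (export_db_to_json / import_json_to_template).
    exported = client.template.export_db_to_json(cluster_name='cluster01',
                                                 description='pre-upgrade copy')
    client.template.import_json_to_template(template=exported.template,  # field name assumed
                                            template_name='cluster01-copy')

    # Capture an installed host as a reusable host template (host_to_template).
    client.template.host_to_template(host_id='HOST-UUID',
                                     host_template_name='compute-node')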
import copy -import os - -from oslo_utils import encodeutils -from oslo_utils import strutils import six -import six.moves.urllib.parse as urlparse - from daisyclient.common import utils from daisyclient.openstack.common.apiclient import base -#import daisy.queue_process as queue -#from daisy.queue_process import exec_cmd +# import daisy.queue_process as queue +# from daisy.queue_process import exec_cmd CREATE_PARAMS = ('cluster_id') @@ -45,10 +39,9 @@ class Uninstall(base.Resource): return self.manager.data(self, **kwargs) - class UninstallManager(base.ManagerWithFind): resource_class = Uninstall - + def _uninstall_meta_to_headers(self, fields): headers = {} fields_copy = copy.deepcopy(fields) @@ -74,6 +67,7 @@ class UninstallManager(base.ManagerWithFind): def list(self, **kwargs): pass + def query_progress(self, **kwargs): fields = {} for field in kwargs: @@ -83,13 +77,13 @@ class UninstallManager(base.ManagerWithFind): msg = 'uninstall() got an unexpected keyword argument \'%s\'' raise TypeError(msg % field) - if fields.has_key("cluster_id"): + if "cluster_id" in fields: url = '/v1/uninstall/%s' % fields['cluster_id'] resp, body = self.client.get(url) return Uninstall(self, self._format_uninstall_meta_for_user(body)) - + def uninstall(self, **kwargs): """Uninstall a cluster @@ -103,9 +97,7 @@ class UninstallManager(base.ManagerWithFind): else: msg = 'uninstall() got an unexpected keyword argument \'%s\'' raise TypeError(msg % field) - if fields.has_key("cluster_id"): + if "cluster_id" in fields: url = '/v1/uninstall/%s' % fields['cluster_id'] resp, body = self.client.post(url) return Uninstall(self, self._format_uninstall_meta_for_user(body)) - - diff --git a/code/daisyclient/daisyclient/v1/update.py b/code/daisyclient/daisyclient/v1/update.py index a67c7b2a..3702ada5 100755 --- a/code/daisyclient/daisyclient/v1/update.py +++ b/code/daisyclient/daisyclient/v1/update.py @@ -14,17 +14,12 @@ # under the License. 
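Similarly, a minimal sketch of the UninstallManager shown above and the UpdateManager that follows; it is not part of the patch. The manager attribute names (client.uninstall, client.update), the endpoint and the cluster id are assumptions used only for illustration.

    # Hypothetical usage sketch (not part of this patch).
    from daisyclient.v1 import client as daisy_client

    client = daisy_client.Client('http://127.0.0.1:19292')   # assumed endpoint
    cluster_id = 'CLUSTER-UUID'                               # placeholder

    # POST /v1/uninstall/<cluster_id> to start, then GET the same path for
    # progress, matching uninstall() and query_progress() above.
    client.uninstall.uninstall(cluster_id=cluster_id)
    progress = client.uninstall.query_progress(cluster_id=cluster_id)

    # The update manager below follows the same pattern against /v1/update/.
    client.update.update(cluster_id=cluster_id)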
import copy -import os - -from oslo_utils import encodeutils -from oslo_utils import strutils import six -import six.moves.urllib.parse as urlparse from daisyclient.common import utils from daisyclient.openstack.common.apiclient import base -#import daisy.queue_process as queue -#from daisy.queue_process import exec_cmd +# import daisy.queue_process as queue +# from daisy.queue_process import exec_cmd CREATE_PARAMS = ('cluster_id') @@ -45,10 +40,9 @@ class Update(base.Resource): return self.manager.data(self, **kwargs) - class UpdateManager(base.ManagerWithFind): resource_class = Update - + def _Update_meta_to_headers(self, fields): headers = {} fields_copy = copy.deepcopy(fields) @@ -74,6 +68,7 @@ class UpdateManager(base.ManagerWithFind): def list(self, **kwargs): pass + def query_progress(self, **kwargs): fields = {} for field in kwargs: @@ -83,12 +78,12 @@ class UpdateManager(base.ManagerWithFind): msg = 'update() got an unexpected keyword argument \'%s\'' raise TypeError(msg % field) - if fields.has_key("cluster_id"): - url = '/v1/update/%s' % fields['cluster_id'] + if "cluster_id" in fields: + url = '/v1/update/%s' % fields['cluster_id'] resp, body = self.client.get(url) return Update(self, self._format_update_meta_for_user(body)) - + def update(self, **kwargs): """Update a cluster @@ -99,18 +94,15 @@ class UpdateManager(base.ManagerWithFind): for field in kwargs: if field in CREATE_PARAMS: fields[field] = kwargs[field] - #elif field == 'return_req_id': + # elif field == 'return_req_id': # continue else: msg = 'update() got an unexpected keyword argument \'%s\'' raise TypeError(msg % field) - if fields.has_key("cluster_id"): + if "cluster_id" in fields: url = '/v1/update/%s' % fields['cluster_id'] - - #hdrs = self._install_meta_to_headers(fields) + + # hdrs = self._install_meta_to_headers(fields) resp, body = self.client.post(url) return Update(self, self._format_update_meta_for_user(body)) - - - diff --git a/code/daisyclient/daisyclient/v2/images.py b/code/daisyclient/daisyclient/v2/images.py index 2591743e..b3f8fc70 100755 --- a/code/daisyclient/daisyclient/v2/images.py +++ b/code/daisyclient/daisyclient/v2/images.py @@ -32,6 +32,7 @@ SORT_KEY_VALUES = ('name', 'status', 'container_format', 'disk_format', class Controller(object): + def __init__(self, http_client, schema_client): self.http_client = http_client self.schema_client = schema_client @@ -171,7 +172,7 @@ class Controller(object): def get(self, image_id): url = '/v2/images/%s' % image_id resp, body = self.http_client.get(url) - #NOTE(bcwaldon): remove 'self' for now until we have an elegant + # NOTE(bcwaldon): remove 'self' for now until we have an elegant # way to pass it into the model constructor without conflict body.pop('self', None) return self.model(**body) @@ -227,7 +228,7 @@ class Controller(object): raise TypeError(utils.exception_to_str(e)) resp, body = self.http_client.post(url, data=image) - #NOTE(esheffield): remove 'self' for now until we have an elegant + # NOTE(esheffield): remove 'self' for now until we have an elegant # way to pass it into the model constructor without conflict body.pop('self', None) return self.model(**body) @@ -250,7 +251,7 @@ class Controller(object): if remove_props is not None: cur_props = image.keys() new_props = kwargs.keys() - #NOTE(esheffield): Only remove props that currently exist on the + # NOTE(esheffield): Only remove props that currently exist on the # image and are NOT in the properties being updated / added props_to_remove = set(cur_props).intersection(
set(remove_props).difference(new_props)) @@ -262,7 +263,7 @@ class Controller(object): hdrs = {'Content-Type': 'application/openstack-images-v2.1-json-patch'} self.http_client.patch(url, headers=hdrs, data=image.patch) - #NOTE(bcwaldon): calling image.patch doesn't clear the changes, so + # NOTE(bcwaldon): calling image.patch doesn't clear the changes, so # we need to fetch the image again to get a clean history. This is # an obvious optimization for warlock return self.get(image_id) diff --git a/code/daisyclient/daisyclient/v2/schemas.py b/code/daisyclient/daisyclient/v2/schemas.py index 5c31741e..77f49c41 100755 --- a/code/daisyclient/daisyclient/v2/schemas.py +++ b/code/daisyclient/daisyclient/v2/schemas.py @@ -35,8 +35,8 @@ class SchemaBasedModel(warlock.Model): tags_patch = [] else: tags_patch = [{"path": "/tags", - "value": self.get('tags'), - "op": "replace"}] + "value": self.get('tags'), + "op": "replace"}] patch_string = jsonpatch.make_patch(original, new).to_string() patch = json.loads(patch_string) diff --git a/code/daisyclient/daisyclient/v2/tasks.py b/code/daisyclient/daisyclient/v2/tasks.py index 74fe6204..b0189c44 100755 --- a/code/daisyclient/daisyclient/v2/tasks.py +++ b/code/daisyclient/daisyclient/v2/tasks.py @@ -28,6 +28,7 @@ SORT_KEY_VALUES = ('id', 'type', 'status') class Controller(object): + def __init__(self, http_client, schema_client): self.http_client = http_client self.schema_client = schema_client @@ -87,7 +88,7 @@ class Controller(object): url = '/v2/tasks?%s' % six.moves.urllib.parse.urlencode(filters) for task in paginate(url): - #NOTE(flwang): remove 'self' for now until we have an elegant + # NOTE(flwang): remove 'self' for now until we have an elegant # way to pass it into the model constructor without conflict task.pop('self', None) yield self.model(**task) @@ -96,7 +97,7 @@ class Controller(object): """Get a task based on given task id.""" url = '/v2/tasks/%s' % task_id resp, body = self.http_client.get(url) - #NOTE(flwang): remove 'self' for now until we have an elegant + # NOTE(flwang): remove 'self' for now until we have an elegant # way to pass it into the model constructor without conflict body.pop('self', None) return self.model(**body) @@ -113,7 +114,7 @@ class Controller(object): raise TypeError(unicode(e)) resp, body = self.http_client.post(url, data=task) - #NOTE(flwang): remove 'self' for now until we have an elegant + # NOTE(flwang): remove 'self' for now until we have an elegant # way to pass it into the model constructor without conflict body.pop('self', None) return self.model(**body) diff --git a/code/daisyclient/doc/source/conf.py b/code/daisyclient/doc/source/conf.py index 77162fa1..8c0861e8 100755 --- a/code/daisyclient/doc/source/conf.py +++ b/code/daisyclient/doc/source/conf.py @@ -52,7 +52,7 @@ man_pages = [ # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. -#html_theme = 'nature' +# html_theme = 'nature' # Output file base name for HTML help builder. 
htmlhelp_basename = '%sdoc' % project diff --git a/code/daisyclient/tests/test_http.py b/code/daisyclient/tests/test_http.py index daab8057..b0fffd0a 100755 --- a/code/daisyclient/tests/test_http.py +++ b/code/daisyclient/tests/test_http.py @@ -52,7 +52,7 @@ class TestClient(testtools.TestCase): 'X-Identity-Status': 'Confirmed', 'X-Service-Catalog': 'service_catalog', } - #with token + # with token kwargs = {'token': u'fake-token', 'identity_headers': identity_headers} http_client_object = http.HTTPClient(self.endpoint, **kwargs) @@ -68,7 +68,7 @@ class TestClient(testtools.TestCase): 'X-Identity-Status': 'Confirmed', 'X-Service-Catalog': 'service_catalog', } - #without X-Auth-Token in identity headers + # without X-Auth-Token in identity headers kwargs = {'token': u'fake-token', 'identity_headers': identity_headers} http_client_object = http.HTTPClient(self.endpoint, **kwargs) @@ -133,14 +133,14 @@ class TestClient(testtools.TestCase): self.mock.ReplayAll() try: self.client.get('/v1/images/detail?limit=20') - #NOTE(alaski) We expect exc.CommunicationError to be raised + # NOTE(alaski) We expect exc.CommunicationError to be raised # so we should never reach this point. try/except is used here # rather than assertRaises() so that we can check the body of # the exception. self.fail('An exception should have bypassed this line.') except glanceclient.exc.CommunicationError as comm_err: fail_msg = ("Exception message '%s' should contain '%s'" % - (comm_err.message, self.endpoint)) + (comm_err.message, self.endpoint)) self.assertTrue(self.endpoint in comm_err.message, fail_msg) def test_http_encoding(self): @@ -284,6 +284,7 @@ class TestClient(testtools.TestCase): class TestVerifiedHTTPSConnection(testtools.TestCase): + """Test fixture for glanceclient.common.http.VerifiedHTTPSConnection.""" def test_setcontext_unable_to_load_cacert(self): diff --git a/code/daisyclient/tests/test_shell.py b/code/daisyclient/tests/test_shell.py index e961fe23..a440a536 100755 --- a/code/daisyclient/tests/test_shell.py +++ b/code/daisyclient/tests/test_shell.py @@ -25,7 +25,7 @@ import six from glanceclient import exc from glanceclient import shell as openstack_shell -#NOTE (esheffield) Used for the schema caching tests +# NOTE (esheffield) Used for the schema caching tests from glanceclient.v2 import schemas as schemas import json from tests import keystone_client_fixtures @@ -427,6 +427,7 @@ class ShellTestWithKeystoneV3Auth(ShellTest): class ShellCacheSchemaTest(utils.TestCase): + def setUp(self): super(ShellCacheSchemaTest, self).setUp() self._mock_client_setup() @@ -457,6 +458,7 @@ class ShellCacheSchemaTest(utils.TestCase): def _make_args(self, args): class Args(): + def __init__(self, entries): self.__dict__.update(entries) diff --git a/code/daisyclient/tests/v1/test_shell.py b/code/daisyclient/tests/v1/test_shell.py index fb02c391..15a8e3d3 100755 --- a/code/daisyclient/tests/v1/test_shell.py +++ b/code/daisyclient/tests/v1/test_shell.py @@ -432,8 +432,8 @@ class ShellStdinHandlingTests(testtools.TestCase): self._do_update() self.assertTrue( - 'data' not in self.collected_args[1] - or self.collected_args[1]['data'] is None + 'data' not in self.collected_args[1] or + self.collected_args[1]['data'] is None ) def test_image_update_data_is_read_from_file(self): diff --git a/code/daisyclient/tests/v2/test_images.py b/code/daisyclient/tests/v2/test_images.py index 00b06e34..25cd9fb1 100755 --- a/code/daisyclient/tests/v2/test_images.py +++ b/code/daisyclient/tests/v2/test_images.py @@ -1,1092 +1,1093 @@ -# 
Copyright 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import errno - -import testtools - -from glanceclient import exc -from glanceclient.v2 import images -from tests import utils - -_CHKSUM = '93264c3edf5972c9f1cb309543d38a5c' -_CHKSUM1 = '54264c3edf5972c9f1cb309453d38a46' - -_TAG1 = 'power' -_TAG2 = '64bit' - -_BOGUS_ID = '63e7f218-29de-4477-abdc-8db7c9533188' -_EVERYTHING_ID = '802cbbb7-0379-4c38-853f-37302b5e3d29' -_OWNED_IMAGE_ID = 'a4963502-acc7-42ba-ad60-5aa0962b7faf' -_OWNER_ID = '6bd473f0-79ae-40ad-a927-e07ec37b642f' -_PRIVATE_ID = 'e33560a7-3964-4de5-8339-5a24559f99ab' -_PUBLIC_ID = '857806e7-05b6-48e0-9d40-cb0e6fb727b9' -_SHARED_ID = '331ac905-2a38-44c5-a83d-653db8f08313' -_STATUS_REJECTED_ID = 'f3ea56ff-d7e4-4451-998c-1e3d33539c8e' - -data_fixtures = { - '/v2/schemas/image': { - 'GET': ( - {}, - { - 'name': 'image', - 'properties': { - 'id': {}, - 'name': {}, - 'locations': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'metadata': {'type': 'object'}, - 'url': {'type': 'string'}, - }, - 'required': ['url', 'metadata'], - }, - }, - 'color': {'type': 'string', 'is_base': False}, - }, - 'additionalProperties': {'type': 'string'}, - }, - ), - }, - '/v2/images?limit=%d' % images.DEFAULT_PAGE_SIZE: { - 'GET': ( - {}, - {'images': [ - { - 'id': '3a4560a1-e585-443e-9b39-553b46ec92d1', - 'name': 'image-1', - }, - { - 'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', - 'name': 'image-2', - }, - ]}, - ), - }, - '/v2/images?limit=2': { - 'GET': ( - {}, - { - 'images': [ - { - 'id': '3a4560a1-e585-443e-9b39-553b46ec92d1', - 'name': 'image-1', - }, - { - 'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', - 'name': 'image-2', - }, - ], - 'next': ('/v2/images?limit=2&' - 'marker=6f99bf80-2ee6-47cf-acfe-1f1fabb7e810'), - }, - ), - }, - '/v2/images?limit=1': { - 'GET': ( - {}, - { - 'images': [ - { - 'id': '3a4560a1-e585-443e-9b39-553b46ec92d1', - 'name': 'image-1', - }, - ], - 'next': ('/v2/images?limit=1&' - 'marker=3a4560a1-e585-443e-9b39-553b46ec92d1'), - }, - ), - }, - ('/v2/images?limit=1&marker=3a4560a1-e585-443e-9b39-553b46ec92d1'): { - 'GET': ( - {}, - {'images': [ - { - 'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', - 'name': 'image-2', - }, - ]}, - ), - }, - ('/v2/images?limit=1&marker=6f99bf80-2ee6-47cf-acfe-1f1fabb7e810'): { - 'GET': ( - {}, - {'images': [ - { - 'id': '3f99bf80-2ee6-47cf-acfe-1f1fabb7e811', - 'name': 'image-3', - }, - ]}, - ), - }, - '/v2/images/3a4560a1-e585-443e-9b39-553b46ec92d1': { - 'GET': ( - {}, - { - 'id': '3a4560a1-e585-443e-9b39-553b46ec92d1', - 'name': 'image-1', - }, - ), - 'PATCH': ( - {}, - '', - ), - }, - '/v2/images/e7e59ff6-fa2e-4075-87d3-1a1398a07dc3': { - 'GET': ( - {}, - { - 'id': 'e7e59ff6-fa2e-4075-87d3-1a1398a07dc3', - 'name': 'image-3', - 'barney': 'rubble', - 'george': 'jetson', - 'color': 'red', - }, - ), - 'PATCH': ( - {}, - '', - ), - }, - '/v2/images': { - 'POST': ( - {}, - { - 'id': '3a4560a1-e585-443e-9b39-553b46ec92d1', - 'name': 'image-1', - }, - ), - }, - 
'/v2/images/87b634c1-f893-33c9-28a9-e5673c99239a': { - 'DELETE': ( - {}, - { - 'id': '87b634c1-f893-33c9-28a9-e5673c99239a', - }, - ), - }, - '/v2/images/606b0e88-7c5a-4d54-b5bb-046105d4de6f/file': { - 'PUT': ( - {}, - '', - ), - }, - '/v2/images/5cc4bebc-db27-11e1-a1eb-080027cbe205/file': { - 'GET': ( - {}, - 'A', - ), - }, - '/v2/images/66fb18d6-db27-11e1-a1eb-080027cbe205/file': { - 'GET': ( - { - 'content-md5': 'wrong' - }, - 'BB', - ), - }, - '/v2/images/1b1c6366-dd57-11e1-af0f-02163e68b1d8/file': { - 'GET': ( - { - 'content-md5': 'defb99e69a9f1f6e06f15006b1f166ae' - }, - 'CCC', - ), - }, - '/v2/images?limit=%d&visibility=public' % images.DEFAULT_PAGE_SIZE: { - 'GET': ( - {}, - {'images': [ - { - 'id': _PUBLIC_ID, - 'harvey': 'lipshitz', - }, - ]}, - ), - }, - '/v2/images?limit=%d&visibility=private' % images.DEFAULT_PAGE_SIZE: { - 'GET': ( - {}, - {'images': [ - { - 'id': _PRIVATE_ID, - }, - ]}, - ), - }, - '/v2/images?limit=%d&visibility=shared' % images.DEFAULT_PAGE_SIZE: { - 'GET': ( - {}, - {'images': [ - { - 'id': _SHARED_ID, - }, - ]}, - ), - }, - '/v2/images?limit=%d&member_status=rejected' % images.DEFAULT_PAGE_SIZE: { - 'GET': ( - {}, - {'images': [ - { - 'id': _STATUS_REJECTED_ID, - }, - ]}, - ), - }, - '/v2/images?limit=%d&member_status=pending' % images.DEFAULT_PAGE_SIZE: { - 'GET': ( - {}, - {'images': []}, - ), - }, - '/v2/images?limit=%d&owner=%s' % (images.DEFAULT_PAGE_SIZE, _OWNER_ID): { - 'GET': ( - {}, - {'images': [ - { - 'id': _OWNED_IMAGE_ID, - }, - ]}, - ), - }, - '/v2/images?limit=%d&owner=%s' % (images.DEFAULT_PAGE_SIZE, _BOGUS_ID): { - 'GET': ( - {}, - {'images': []}, - ), - }, - '/v2/images?limit=%d&member_status=pending&owner=%s&visibility=shared' - % (images.DEFAULT_PAGE_SIZE, _BOGUS_ID): { - 'GET': ( - {}, - {'images': [ - { - 'id': _EVERYTHING_ID, - }, - ]}, - ), - }, - '/v2/images?checksum=%s&limit=%d' % (_CHKSUM, images.DEFAULT_PAGE_SIZE): { - 'GET': ( - {}, - {'images': [ - { - 'id': '3a4560a1-e585-443e-9b39-553b46ec92d1', - 'name': 'image-1', - } - ]}, - ), - }, - '/v2/images?checksum=%s&limit=%d' % (_CHKSUM1, images.DEFAULT_PAGE_SIZE): { - 'GET': ( - {}, - {'images': [ - { - 'id': '2a4560b2-e585-443e-9b39-553b46ec92d1', - 'name': 'image-1', - }, - { - 'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', - 'name': 'image-2', - }, - ]}, - ), - }, - '/v2/images?checksum=wrong&limit=%d' % images.DEFAULT_PAGE_SIZE: { - 'GET': ( - {}, - {'images': []}, - ), - }, - '/v2/images?limit=%d&tag=%s' % (images.DEFAULT_PAGE_SIZE, _TAG1): { - 'GET': ( - {}, - {'images': [ - { - 'id': '3a4560a1-e585-443e-9b39-553b46ec92d1', - 'name': 'image-1', - } - ]}, - ), - }, - '/v2/images?limit=%d&tag=%s' % (images.DEFAULT_PAGE_SIZE, _TAG2): { - 'GET': ( - {}, - {'images': [ - { - 'id': '2a4560b2-e585-443e-9b39-553b46ec92d1', - 'name': 'image-1', - }, - { - 'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', - 'name': 'image-2', - }, - ]}, - ), - }, - '/v2/images?limit=%d&tag=%s&tag=%s' % (images.DEFAULT_PAGE_SIZE, - _TAG1, _TAG2): - { - 'GET': ( - {}, - {'images': [ - { - 'id': '2a4560b2-e585-443e-9b39-553b46ec92d1', - 'name': 'image-1', - } - ]}, - ), - }, - '/v2/images?limit=%d&tag=fake' % images.DEFAULT_PAGE_SIZE: { - 'GET': ( - {}, - {'images': []}, - ), - }, - '/v2/images/a2b83adc-888e-11e3-8872-78acc0b951d8': { - 'GET': ( - {}, - { - 'id': 'a2b83adc-888e-11e3-8872-78acc0b951d8', - 'name': 'image-location-tests', - 'locations': [{u'url': u'http://foo.com/', - u'metadata': {u'foo': u'foometa'}}, - {u'url': u'http://bar.com/', - u'metadata': {u'bar': u'barmeta'}}], - }, - ), - 
'PATCH': ( - {}, - '', - ) - }, - '/v2/images?limit=%d&os_distro=NixOS' % images.DEFAULT_PAGE_SIZE: { - 'GET': ( - {}, - {'images': [ - { - 'id': '8b052954-c76c-4e02-8e90-be89a70183a8', - 'name': 'image-5', - 'os_distro': 'NixOS', - }, - ]}, - ), - }, - '/v2/images?limit=%d&my_little_property=cant_be_this_cute' % - images.DEFAULT_PAGE_SIZE: { - 'GET': ( - {}, - {'images': []}, - ), - }, - '/v2/images?limit=%d&sort_key=name' % images.DEFAULT_PAGE_SIZE: { - 'GET': ( - {}, - {'images': [ - { - 'id': '2a4560b2-e585-443e-9b39-553b46ec92d1', - 'name': 'image-1', - }, - { - 'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', - 'name': 'image-2', - }, - ]}, - ), - }, - '/v2/images?limit=%d&sort_key=name&sort_key=id' - % images.DEFAULT_PAGE_SIZE: { - 'GET': ( - {}, - {'images': [ - { - 'id': '2a4560b2-e585-443e-9b39-553b46ec92d1', - 'name': 'image', - }, - { - 'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', - 'name': 'image', - }, - ]}, - ), - }, - '/v2/images?limit=%d&sort_dir=desc&sort_key=id' - % images.DEFAULT_PAGE_SIZE: { - 'GET': ( - {}, - {'images': [ - { - 'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', - 'name': 'image-2', - }, - { - 'id': '2a4560b2-e585-443e-9b39-553b46ec92d1', - 'name': 'image-1', - }, - ]}, - ), - }, - '/v2/images?limit=%d&sort_dir=desc&sort_key=name&sort_key=id' - % images.DEFAULT_PAGE_SIZE: { - 'GET': ( - {}, - {'images': [ - { - 'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', - 'name': 'image-2', - }, - { - 'id': '2a4560b2-e585-443e-9b39-553b46ec92d1', - 'name': 'image-1', - }, - ]}, - ), - }, - '/v2/images?limit=%d&sort_dir=desc&sort_dir=asc&sort_key=name&sort_key=id' - % images.DEFAULT_PAGE_SIZE: { - 'GET': ( - {}, - {'images': [ - { - 'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', - 'name': 'image-2', - }, - { - 'id': '2a4560b2-e585-443e-9b39-553b46ec92d1', - 'name': 'image-1', - }, - ]}, - ), - }, - '/v2/images?limit=%d&sort=name%%3Adesc%%2Csize%%3Aasc' - % images.DEFAULT_PAGE_SIZE: { - 'GET': ( - {}, - {'images': [ - { - 'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', - 'name': 'image-2', - }, - { - 'id': '2a4560b2-e585-443e-9b39-553b46ec92d1', - 'name': 'image-1', - }, - ]}, - ), - }, -} - -schema_fixtures = { - 'image': { - 'GET': ( - {}, - { - 'name': 'image', - 'properties': { - 'id': {}, - 'name': {}, - 'locations': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'metadata': {'type': 'object'}, - 'url': {'type': 'string'}, - }, - 'required': ['url', 'metadata'], - } - }, - 'color': {'type': 'string', 'is_base': False}, - 'tags': {'type': 'array'}, - }, - 'additionalProperties': {'type': 'string'}, - } - ) - } -} - - -class TestController(testtools.TestCase): - def setUp(self): - super(TestController, self).setUp() - self.api = utils.FakeAPI(data_fixtures) - self.schema_api = utils.FakeSchemaAPI(schema_fixtures) - self.controller = images.Controller(self.api, self.schema_api) - - def test_list_images(self): - # NOTE(bcwaldon):cast to list since the controller returns a generator - images = list(self.controller.list()) - self.assertEqual('3a4560a1-e585-443e-9b39-553b46ec92d1', images[0].id) - self.assertEqual('image-1', images[0].name) - self.assertEqual('6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', images[1].id) - self.assertEqual('image-2', images[1].name) - - def test_list_images_paginated(self): - # NOTE(bcwaldon):cast to list since the controller returns a generator - images = list(self.controller.list(page_size=1)) - self.assertEqual('3a4560a1-e585-443e-9b39-553b46ec92d1', images[0].id) - self.assertEqual('image-1', images[0].name) - 
self.assertEqual('6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', images[1].id) - self.assertEqual('image-2', images[1].name) - - def test_list_images_paginated_with_limit(self): - # NOTE(bcwaldon):cast to list since the controller returns a generator - images = list(self.controller.list(limit=3, page_size=2)) - self.assertEqual('3a4560a1-e585-443e-9b39-553b46ec92d1', images[0].id) - self.assertEqual('image-1', images[0].name) - self.assertEqual('6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', images[1].id) - self.assertEqual('image-2', images[1].name) - self.assertEqual('3f99bf80-2ee6-47cf-acfe-1f1fabb7e811', images[2].id) - self.assertEqual('image-3', images[2].name) - self.assertEqual(3, len(images)) - - def test_list_images_visibility_public(self): - filters = {'filters': {'visibility': 'public'}} - images = list(self.controller.list(**filters)) - self.assertEqual(_PUBLIC_ID, images[0].id) - - def test_list_images_visibility_private(self): - filters = {'filters': {'visibility': 'private'}} - images = list(self.controller.list(**filters)) - self.assertEqual(_PRIVATE_ID, images[0].id) - - def test_list_images_visibility_shared(self): - filters = {'filters': {'visibility': 'shared'}} - images = list(self.controller.list(**filters)) - self.assertEqual(_SHARED_ID, images[0].id) - - def test_list_images_member_status_rejected(self): - filters = {'filters': {'member_status': 'rejected'}} - images = list(self.controller.list(**filters)) - self.assertEqual(_STATUS_REJECTED_ID, images[0].id) - - def test_list_images_for_owner(self): - filters = {'filters': {'owner': _OWNER_ID}} - images = list(self.controller.list(**filters)) - self.assertEqual(_OWNED_IMAGE_ID, images[0].id) - - def test_list_images_for_checksum_single_image(self): - fake_id = '3a4560a1-e585-443e-9b39-553b46ec92d1' - filters = {'filters': {'checksum': _CHKSUM}} - images = list(self.controller.list(**filters)) - self.assertEqual(1, len(images)) - self.assertEqual('%s' % fake_id, images[0].id) - - def test_list_images_for_checksum_multiple_images(self): - fake_id1 = '2a4560b2-e585-443e-9b39-553b46ec92d1' - fake_id2 = '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810' - filters = {'filters': {'checksum': _CHKSUM1}} - images = list(self.controller.list(**filters)) - self.assertEqual(2, len(images)) - self.assertEqual('%s' % fake_id1, images[0].id) - self.assertEqual('%s' % fake_id2, images[1].id) - - def test_list_images_for_wrong_checksum(self): - filters = {'filters': {'checksum': 'wrong'}} - images = list(self.controller.list(**filters)) - self.assertEqual(0, len(images)) - - def test_list_images_for_bogus_owner(self): - filters = {'filters': {'owner': _BOGUS_ID}} - images = list(self.controller.list(**filters)) - self.assertEqual([], images) - - def test_list_images_for_bunch_of_filters(self): - filters = {'filters': {'owner': _BOGUS_ID, - 'visibility': 'shared', - 'member_status': 'pending'}} - images = list(self.controller.list(**filters)) - self.assertEqual(_EVERYTHING_ID, images[0].id) - - def test_list_images_filters_encoding(self): - filters = {"owner": u"ni\xf1o"} - try: - list(self.controller.list(filters=filters)) - except KeyError: - # NOTE(flaper87): It raises KeyError because there's - # no fixture supporting this query: - # /v2/images?owner=ni%C3%B1o&limit=20 - # We just want to make sure filters are correctly encoded. 
- pass - self.assertEqual(b"ni\xc3\xb1o", filters["owner"]) - - def test_list_images_for_tag_single_image(self): - img_id = '3a4560a1-e585-443e-9b39-553b46ec92d1' - filters = {'filters': {'tag': [_TAG1]}} - images = list(self.controller.list(**filters)) - self.assertEqual(1, len(images)) - self.assertEqual('%s' % img_id, images[0].id) - pass - - def test_list_images_for_tag_multiple_images(self): - img_id1 = '2a4560b2-e585-443e-9b39-553b46ec92d1' - img_id2 = '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810' - filters = {'filters': {'tag': [_TAG2]}} - images = list(self.controller.list(**filters)) - self.assertEqual(2, len(images)) - self.assertEqual('%s' % img_id1, images[0].id) - self.assertEqual('%s' % img_id2, images[1].id) - - def test_list_images_for_multi_tags(self): - img_id1 = '2a4560b2-e585-443e-9b39-553b46ec92d1' - filters = {'filters': {'tag': [_TAG1, _TAG2]}} - images = list(self.controller.list(**filters)) - self.assertEqual(1, len(images)) - self.assertEqual('%s' % img_id1, images[0].id) - - def test_list_images_for_non_existent_tag(self): - filters = {'filters': {'tag': ['fake']}} - images = list(self.controller.list(**filters)) - self.assertEqual(0, len(images)) - - def test_list_images_with_single_sort_key(self): - img_id1 = '2a4560b2-e585-443e-9b39-553b46ec92d1' - sort_key = 'name' - images = list(self.controller.list(sort_key=sort_key)) - self.assertEqual(2, len(images)) - self.assertEqual('%s' % img_id1, images[0].id) - - def test_list_with_multiple_sort_keys(self): - img_id1 = '2a4560b2-e585-443e-9b39-553b46ec92d1' - sort_key = ['name', 'id'] - images = list(self.controller.list(sort_key=sort_key)) - self.assertEqual(2, len(images)) - self.assertEqual('%s' % img_id1, images[0].id) - - def test_list_images_with_desc_sort_dir(self): - img_id1 = '2a4560b2-e585-443e-9b39-553b46ec92d1' - sort_key = 'id' - sort_dir = 'desc' - images = list(self.controller.list(sort_key=sort_key, - sort_dir=sort_dir)) - self.assertEqual(2, len(images)) - self.assertEqual('%s' % img_id1, images[1].id) - - def test_list_images_with_multiple_sort_keys_and_one_sort_dir(self): - img_id1 = '2a4560b2-e585-443e-9b39-553b46ec92d1' - sort_key = ['name', 'id'] - sort_dir = 'desc' - images = list(self.controller.list(sort_key=sort_key, - sort_dir=sort_dir)) - self.assertEqual(2, len(images)) - self.assertEqual('%s' % img_id1, images[1].id) - - def test_list_images_with_multiple_sort_dirs(self): - img_id1 = '2a4560b2-e585-443e-9b39-553b46ec92d1' - sort_key = ['name', 'id'] - sort_dir = ['desc', 'asc'] - images = list(self.controller.list(sort_key=sort_key, - sort_dir=sort_dir)) - self.assertEqual(2, len(images)) - self.assertEqual('%s' % img_id1, images[1].id) - - def test_list_images_with_new_sorting_syntax(self): - img_id1 = '2a4560b2-e585-443e-9b39-553b46ec92d1' - sort = 'name:desc,size:asc' - images = list(self.controller.list(sort=sort)) - self.assertEqual(2, len(images)) - self.assertEqual('%s' % img_id1, images[1].id) - - def test_list_images_sort_dirs_fewer_than_keys(self): - sort_key = ['name', 'id', 'created_at'] - sort_dir = ['desc', 'asc'] - self.assertRaises(exc.HTTPBadRequest, - list, - self.controller.list( - sort_key=sort_key, - sort_dir=sort_dir)) - - def test_list_images_combined_syntax(self): - sort_key = ['name', 'id'] - sort_dir = ['desc', 'asc'] - sort = 'name:asc' - self.assertRaises(exc.HTTPBadRequest, - list, - self.controller.list( - sort=sort, - sort_key=sort_key, - sort_dir=sort_dir)) - - def test_list_images_new_sorting_syntax_invalid_key(self): - sort = 'INVALID:asc' - 
self.assertRaises(exc.HTTPBadRequest, - list, - self.controller.list( - sort=sort)) - - def test_list_images_new_sorting_syntax_invalid_direction(self): - sort = 'name:INVALID' - self.assertRaises(exc.HTTPBadRequest, - list, - self.controller.list( - sort=sort)) - - def test_list_images_for_property(self): - filters = {'filters': dict([('os_distro', 'NixOS')])} - images = list(self.controller.list(**filters)) - self.assertEqual(1, len(images)) - - def test_list_images_for_non_existent_property(self): - filters = {'filters': dict([('my_little_property', - 'cant_be_this_cute')])} - images = list(self.controller.list(**filters)) - self.assertEqual(0, len(images)) - - def test_get_image(self): - image = self.controller.get('3a4560a1-e585-443e-9b39-553b46ec92d1') - self.assertEqual('3a4560a1-e585-443e-9b39-553b46ec92d1', image.id) - self.assertEqual('image-1', image.name) - - def test_create_image(self): - properties = { - 'name': 'image-1' - } - image = self.controller.create(**properties) - self.assertEqual('3a4560a1-e585-443e-9b39-553b46ec92d1', image.id) - self.assertEqual('image-1', image.name) - - def test_create_bad_additionalProperty_type(self): - properties = { - 'name': 'image-1', - 'bad_prop': True, - } - with testtools.ExpectedException(TypeError): - self.controller.create(**properties) - - def test_delete_image(self): - self.controller.delete('87b634c1-f893-33c9-28a9-e5673c99239a') - expect = [ - ('DELETE', - '/v2/images/87b634c1-f893-33c9-28a9-e5673c99239a', - {}, - None)] - self.assertEqual(expect, self.api.calls) - - def test_data_upload(self): - image_data = 'CCC' - image_id = '606b0e88-7c5a-4d54-b5bb-046105d4de6f' - self.controller.upload(image_id, image_data) - expect = [('PUT', '/v2/images/%s/file' % image_id, - {'Content-Type': 'application/octet-stream'}, - image_data)] - self.assertEqual(expect, self.api.calls) - - def test_data_upload_w_size(self): - image_data = 'CCC' - image_id = '606b0e88-7c5a-4d54-b5bb-046105d4de6f' - self.controller.upload(image_id, image_data, image_size=3) - body = {'image_data': image_data, - 'image_size': 3} - expect = [('PUT', '/v2/images/%s/file' % image_id, - {'Content-Type': 'application/octet-stream'}, - sorted(body.items()))] - self.assertEqual(expect, self.api.calls) - - def test_data_without_checksum(self): - body = self.controller.data('5cc4bebc-db27-11e1-a1eb-080027cbe205', - do_checksum=False) - body = ''.join([b for b in body]) - self.assertEqual('A', body) - - body = self.controller.data('5cc4bebc-db27-11e1-a1eb-080027cbe205') - body = ''.join([b for b in body]) - self.assertEqual('A', body) - - def test_data_with_wrong_checksum(self): - body = self.controller.data('66fb18d6-db27-11e1-a1eb-080027cbe205', - do_checksum=False) - body = ''.join([b for b in body]) - self.assertEqual('BB', body) - - body = self.controller.data('66fb18d6-db27-11e1-a1eb-080027cbe205') - try: - body = ''.join([b for b in body]) - self.fail('data did not raise an error.') - except IOError as e: - self.assertEqual(errno.EPIPE, e.errno) - msg = 'was 9d3d9048db16a7eee539e93e3618cbe7 expected wrong' - self.assertTrue(msg in str(e)) - - def test_data_with_checksum(self): - body = self.controller.data('1b1c6366-dd57-11e1-af0f-02163e68b1d8', - do_checksum=False) - body = ''.join([b for b in body]) - self.assertEqual('CCC', body) - - body = self.controller.data('1b1c6366-dd57-11e1-af0f-02163e68b1d8') - body = ''.join([b for b in body]) - self.assertEqual('CCC', body) - - def test_update_replace_prop(self): - image_id = '3a4560a1-e585-443e-9b39-553b46ec92d1' - params 
= {'name': 'pong'} - image = self.controller.update(image_id, **params) - expect_hdrs = { - 'Content-Type': 'application/openstack-images-v2.1-json-patch', - } - expect_body = [[('op', 'replace'), ('path', '/name'), - ('value', 'pong')]] - expect = [ - ('GET', '/v2/images/%s' % image_id, {}, None), - ('PATCH', '/v2/images/%s' % image_id, expect_hdrs, expect_body), - ('GET', '/v2/images/%s' % image_id, {}, None), - ] - self.assertEqual(expect, self.api.calls) - self.assertEqual(image_id, image.id) - # NOTE(bcwaldon):due to limitations of our fake api framework, the name - # will not actually change - yet in real life it will... - self.assertEqual('image-1', image.name) - - def test_update_add_prop(self): - image_id = '3a4560a1-e585-443e-9b39-553b46ec92d1' - params = {'finn': 'human'} - image = self.controller.update(image_id, **params) - expect_hdrs = { - 'Content-Type': 'application/openstack-images-v2.1-json-patch', - } - expect_body = [[('op', 'add'), ('path', '/finn'), ('value', 'human')]] - expect = [ - ('GET', '/v2/images/%s' % image_id, {}, None), - ('PATCH', '/v2/images/%s' % image_id, expect_hdrs, expect_body), - ('GET', '/v2/images/%s' % image_id, {}, None), - ] - self.assertEqual(expect, self.api.calls) - self.assertEqual(image_id, image.id) - # NOTE(bcwaldon):due to limitations of our fake api framework, the name - # will not actually change - yet in real life it will... - self.assertEqual('image-1', image.name) - - def test_update_remove_prop(self): - image_id = 'e7e59ff6-fa2e-4075-87d3-1a1398a07dc3' - remove_props = ['barney'] - image = self.controller.update(image_id, remove_props) - expect_hdrs = { - 'Content-Type': 'application/openstack-images-v2.1-json-patch', - } - expect_body = [[('op', 'remove'), ('path', '/barney')]] - expect = [ - ('GET', '/v2/images/%s' % image_id, {}, None), - ('PATCH', '/v2/images/%s' % image_id, expect_hdrs, expect_body), - ('GET', '/v2/images/%s' % image_id, {}, None), - ] - self.assertEqual(expect, self.api.calls) - self.assertEqual(image_id, image.id) - # NOTE(bcwaldon):due to limitations of our fake api framework, the name - # will not actually change - yet in real life it will... - self.assertEqual('image-3', image.name) - - def test_update_replace_remove_same_prop(self): - image_id = 'e7e59ff6-fa2e-4075-87d3-1a1398a07dc3' - # Updating a property takes precedence over removing a property - params = {'barney': 'miller'} - remove_props = ['barney'] - image = self.controller.update(image_id, remove_props, **params) - expect_hdrs = { - 'Content-Type': 'application/openstack-images-v2.1-json-patch', - } - expect_body = ([[('op', 'replace'), ('path', '/barney'), - ('value', 'miller')]]) - expect = [ - ('GET', '/v2/images/%s' % image_id, {}, None), - ('PATCH', '/v2/images/%s' % image_id, expect_hdrs, expect_body), - ('GET', '/v2/images/%s' % image_id, {}, None), - ] - self.assertEqual(expect, self.api.calls) - self.assertEqual(image_id, image.id) - # NOTE(bcwaldon):due to limitations of our fake api framework, the name - # will not actually change - yet in real life it will... 
- self.assertEqual('image-3', image.name) - - def test_update_add_remove_same_prop(self): - image_id = 'e7e59ff6-fa2e-4075-87d3-1a1398a07dc3' - # Adding a property takes precedence over removing a property - params = {'finn': 'human'} - remove_props = ['finn'] - image = self.controller.update(image_id, remove_props, **params) - expect_hdrs = { - 'Content-Type': 'application/openstack-images-v2.1-json-patch', - } - expect_body = [[('op', 'add'), ('path', '/finn'), ('value', 'human')]] - expect = [ - ('GET', '/v2/images/%s' % image_id, {}, None), - ('PATCH', '/v2/images/%s' % image_id, expect_hdrs, expect_body), - ('GET', '/v2/images/%s' % image_id, {}, None), - ] - self.assertEqual(expect, self.api.calls) - self.assertEqual(image_id, image.id) - # NOTE(bcwaldon):due to limitations of our fake api framework, the name - # will not actually change - yet in real life it will... - self.assertEqual('image-3', image.name) - - def test_update_bad_additionalProperty_type(self): - image_id = 'e7e59ff6-fa2e-4075-87d3-1a1398a07dc3' - params = {'name': 'pong', 'bad_prop': False} - with testtools.ExpectedException(TypeError): - self.controller.update(image_id, **params) - - def test_update_add_custom_property(self): - image_id = '3a4560a1-e585-443e-9b39-553b46ec92d1' - params = {'color': 'red'} - image = self.controller.update(image_id, **params) - expect_hdrs = { - 'Content-Type': 'application/openstack-images-v2.1-json-patch', - } - expect_body = [[('op', 'add'), ('path', '/color'), ('value', 'red')]] - expect = [ - ('GET', '/v2/images/%s' % image_id, {}, None), - ('PATCH', '/v2/images/%s' % image_id, expect_hdrs, expect_body), - ('GET', '/v2/images/%s' % image_id, {}, None), - ] - self.assertEqual(expect, self.api.calls) - self.assertEqual(image_id, image.id) - - def test_update_replace_custom_property(self): - image_id = 'e7e59ff6-fa2e-4075-87d3-1a1398a07dc3' - params = {'color': 'blue'} - image = self.controller.update(image_id, **params) - expect_hdrs = { - 'Content-Type': 'application/openstack-images-v2.1-json-patch', - } - expect_body = [[('op', 'replace'), ('path', '/color'), - ('value', 'blue')]] - expect = [ - ('GET', '/v2/images/%s' % image_id, {}, None), - ('PATCH', '/v2/images/%s' % image_id, expect_hdrs, expect_body), - ('GET', '/v2/images/%s' % image_id, {}, None), - ] - self.assertEqual(expect, self.api.calls) - self.assertEqual(image_id, image.id) - - def test_location_ops_when_server_disabled_location_ops(self): - # Location operations should not be allowed if server has not - # enabled location related operations - image_id = '3a4560a1-e585-443e-9b39-553b46ec92d1' - estr = 'The administrator has disabled API access to image locations' - url = 'http://bar.com/' - meta = {'bar': 'barmeta'} - - e = self.assertRaises(exc.HTTPBadRequest, - self.controller.add_location, - image_id, url, meta) - self.assertTrue(estr in str(e)) - - e = self.assertRaises(exc.HTTPBadRequest, - self.controller.delete_locations, - image_id, set([url])) - self.assertTrue(estr in str(e)) - - e = self.assertRaises(exc.HTTPBadRequest, - self.controller.update_location, - image_id, url, meta) - self.assertTrue(estr in str(e)) - - def _empty_get(self, image_id): - return ('GET', '/v2/images/%s' % image_id, {}, None) - - def _patch_req(self, image_id, patch_body): - c_type = 'application/openstack-images-v2.1-json-patch' - data = [sorted(d.items()) for d in patch_body] - return ('PATCH', - '/v2/images/%s' % image_id, - {'Content-Type': c_type}, - data) - - def test_add_location(self): - image_id = 
'a2b83adc-888e-11e3-8872-78acc0b951d8' - new_loc = {'url': 'http://spam.com/', 'metadata': {'spam': 'ham'}} - add_patch = {'path': '/locations/-', 'value': new_loc, 'op': 'add'} - self.controller.add_location(image_id, **new_loc) - self.assertEqual(self.api.calls, [ - self._empty_get(image_id), - self._patch_req(image_id, [add_patch]), - self._empty_get(image_id) - ]) - - def test_add_duplicate_location(self): - image_id = 'a2b83adc-888e-11e3-8872-78acc0b951d8' - new_loc = {'url': 'http://foo.com/', 'metadata': {'foo': 'newfoo'}} - err_str = 'A location entry at %s already exists' % new_loc['url'] - - err = self.assertRaises(exc.HTTPConflict, - self.controller.add_location, - image_id, **new_loc) - self.assertIn(err_str, str(err)) - - def test_remove_location(self): - image_id = 'a2b83adc-888e-11e3-8872-78acc0b951d8' - url_set = set(['http://foo.com/', 'http://bar.com/']) - del_patches = [{'path': '/locations/1', 'op': 'remove'}, - {'path': '/locations/0', 'op': 'remove'}] - self.controller.delete_locations(image_id, url_set) - self.assertEqual(self.api.calls, [ - self._empty_get(image_id), - self._patch_req(image_id, del_patches) - ]) - - def test_remove_missing_location(self): - image_id = 'a2b83adc-888e-11e3-8872-78acc0b951d8' - url_set = set(['http://spam.ham/']) - err_str = 'Unknown URL(s): %s' % list(url_set) - - err = self.assertRaises(exc.HTTPNotFound, - self.controller.delete_locations, - image_id, url_set) - self.assertTrue(err_str in str(err)) - - def test_update_location(self): - image_id = 'a2b83adc-888e-11e3-8872-78acc0b951d8' - new_loc = {'url': 'http://foo.com/', 'metadata': {'spam': 'ham'}} - fixture_idx = '/v2/images/%s' % (image_id) - orig_locations = data_fixtures[fixture_idx]['GET'][1]['locations'] - loc_map = dict([(l['url'], l) for l in orig_locations]) - loc_map[new_loc['url']] = new_loc - mod_patch = [{'path': '/locations', 'op': 'replace', - 'value': []}, - {'path': '/locations', 'op': 'replace', - 'value': list(loc_map.values())}] - self.controller.update_location(image_id, **new_loc) - self.assertEqual(self.api.calls, [ - self._empty_get(image_id), - self._patch_req(image_id, mod_patch), - self._empty_get(image_id) - ]) - - def test_update_tags(self): - image_id = 'a2b83adc-888e-11e3-8872-78acc0b951d8' - tag_map = {'tags': ['tag01', 'tag02', 'tag03']} - - image = self.controller.update(image_id, **tag_map) - - expected_body = [{'path': '/tags', 'op': 'replace', - 'value': tag_map['tags']}] - expected = [ - self._empty_get(image_id), - self._patch_req(image_id, expected_body), - self._empty_get(image_id) - ] - self.assertEqual(expected, self.api.calls) - self.assertEqual(image_id, image.id) - - def test_update_missing_location(self): - image_id = 'a2b83adc-888e-11e3-8872-78acc0b951d8' - new_loc = {'url': 'http://spam.com/', 'metadata': {'spam': 'ham'}} - err_str = 'Unknown URL: %s' % new_loc['url'] - err = self.assertRaises(exc.HTTPNotFound, - self.controller.update_location, - image_id, **new_loc) - self.assertTrue(err_str in str(err)) +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import errno + +import testtools + +from glanceclient import exc +from glanceclient.v2 import images +from tests import utils + +_CHKSUM = '93264c3edf5972c9f1cb309543d38a5c' +_CHKSUM1 = '54264c3edf5972c9f1cb309453d38a46' + +_TAG1 = 'power' +_TAG2 = '64bit' + +_BOGUS_ID = '63e7f218-29de-4477-abdc-8db7c9533188' +_EVERYTHING_ID = '802cbbb7-0379-4c38-853f-37302b5e3d29' +_OWNED_IMAGE_ID = 'a4963502-acc7-42ba-ad60-5aa0962b7faf' +_OWNER_ID = '6bd473f0-79ae-40ad-a927-e07ec37b642f' +_PRIVATE_ID = 'e33560a7-3964-4de5-8339-5a24559f99ab' +_PUBLIC_ID = '857806e7-05b6-48e0-9d40-cb0e6fb727b9' +_SHARED_ID = '331ac905-2a38-44c5-a83d-653db8f08313' +_STATUS_REJECTED_ID = 'f3ea56ff-d7e4-4451-998c-1e3d33539c8e' + +data_fixtures = { + '/v2/schemas/image': { + 'GET': ( + {}, + { + 'name': 'image', + 'properties': { + 'id': {}, + 'name': {}, + 'locations': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'metadata': {'type': 'object'}, + 'url': {'type': 'string'}, + }, + 'required': ['url', 'metadata'], + }, + }, + 'color': {'type': 'string', 'is_base': False}, + }, + 'additionalProperties': {'type': 'string'}, + }, + ), + }, + '/v2/images?limit=%d' % images.DEFAULT_PAGE_SIZE: { + 'GET': ( + {}, + {'images': [ + { + 'id': '3a4560a1-e585-443e-9b39-553b46ec92d1', + 'name': 'image-1', + }, + { + 'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', + 'name': 'image-2', + }, + ]}, + ), + }, + '/v2/images?limit=2': { + 'GET': ( + {}, + { + 'images': [ + { + 'id': '3a4560a1-e585-443e-9b39-553b46ec92d1', + 'name': 'image-1', + }, + { + 'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', + 'name': 'image-2', + }, + ], + 'next': ('/v2/images?limit=2&' + 'marker=6f99bf80-2ee6-47cf-acfe-1f1fabb7e810'), + }, + ), + }, + '/v2/images?limit=1': { + 'GET': ( + {}, + { + 'images': [ + { + 'id': '3a4560a1-e585-443e-9b39-553b46ec92d1', + 'name': 'image-1', + }, + ], + 'next': ('/v2/images?limit=1&' + 'marker=3a4560a1-e585-443e-9b39-553b46ec92d1'), + }, + ), + }, + ('/v2/images?limit=1&marker=3a4560a1-e585-443e-9b39-553b46ec92d1'): { + 'GET': ( + {}, + {'images': [ + { + 'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', + 'name': 'image-2', + }, + ]}, + ), + }, + ('/v2/images?limit=1&marker=6f99bf80-2ee6-47cf-acfe-1f1fabb7e810'): { + 'GET': ( + {}, + {'images': [ + { + 'id': '3f99bf80-2ee6-47cf-acfe-1f1fabb7e811', + 'name': 'image-3', + }, + ]}, + ), + }, + '/v2/images/3a4560a1-e585-443e-9b39-553b46ec92d1': { + 'GET': ( + {}, + { + 'id': '3a4560a1-e585-443e-9b39-553b46ec92d1', + 'name': 'image-1', + }, + ), + 'PATCH': ( + {}, + '', + ), + }, + '/v2/images/e7e59ff6-fa2e-4075-87d3-1a1398a07dc3': { + 'GET': ( + {}, + { + 'id': 'e7e59ff6-fa2e-4075-87d3-1a1398a07dc3', + 'name': 'image-3', + 'barney': 'rubble', + 'george': 'jetson', + 'color': 'red', + }, + ), + 'PATCH': ( + {}, + '', + ), + }, + '/v2/images': { + 'POST': ( + {}, + { + 'id': '3a4560a1-e585-443e-9b39-553b46ec92d1', + 'name': 'image-1', + }, + ), + }, + '/v2/images/87b634c1-f893-33c9-28a9-e5673c99239a': { + 'DELETE': ( + {}, + { + 'id': '87b634c1-f893-33c9-28a9-e5673c99239a', + }, + ), + }, + '/v2/images/606b0e88-7c5a-4d54-b5bb-046105d4de6f/file': { + 'PUT': ( + {}, + '', + ), + }, + '/v2/images/5cc4bebc-db27-11e1-a1eb-080027cbe205/file': { + 'GET': ( + {}, + 'A', + ), + }, + '/v2/images/66fb18d6-db27-11e1-a1eb-080027cbe205/file': { + 'GET': ( + { + 'content-md5': 'wrong' + }, + 'BB', + ), + }, + '/v2/images/1b1c6366-dd57-11e1-af0f-02163e68b1d8/file': { + 
'GET': ( + { + 'content-md5': 'defb99e69a9f1f6e06f15006b1f166ae' + }, + 'CCC', + ), + }, + '/v2/images?limit=%d&visibility=public' % images.DEFAULT_PAGE_SIZE: { + 'GET': ( + {}, + {'images': [ + { + 'id': _PUBLIC_ID, + 'harvey': 'lipshitz', + }, + ]}, + ), + }, + '/v2/images?limit=%d&visibility=private' % images.DEFAULT_PAGE_SIZE: { + 'GET': ( + {}, + {'images': [ + { + 'id': _PRIVATE_ID, + }, + ]}, + ), + }, + '/v2/images?limit=%d&visibility=shared' % images.DEFAULT_PAGE_SIZE: { + 'GET': ( + {}, + {'images': [ + { + 'id': _SHARED_ID, + }, + ]}, + ), + }, + '/v2/images?limit=%d&member_status=rejected' % images.DEFAULT_PAGE_SIZE: { + 'GET': ( + {}, + {'images': [ + { + 'id': _STATUS_REJECTED_ID, + }, + ]}, + ), + }, + '/v2/images?limit=%d&member_status=pending' % images.DEFAULT_PAGE_SIZE: { + 'GET': ( + {}, + {'images': []}, + ), + }, + '/v2/images?limit=%d&owner=%s' % (images.DEFAULT_PAGE_SIZE, _OWNER_ID): { + 'GET': ( + {}, + {'images': [ + { + 'id': _OWNED_IMAGE_ID, + }, + ]}, + ), + }, + '/v2/images?limit=%d&owner=%s' % (images.DEFAULT_PAGE_SIZE, _BOGUS_ID): { + 'GET': ( + {}, + {'images': []}, + ), + }, + '/v2/images?limit=%d&member_status=pending&owner=%s&visibility=shared' + % (images.DEFAULT_PAGE_SIZE, _BOGUS_ID): { + 'GET': ( + {}, + {'images': [ + { + 'id': _EVERYTHING_ID, + }, + ]}, + ), + }, + '/v2/images?checksum=%s&limit=%d' % (_CHKSUM, images.DEFAULT_PAGE_SIZE): { + 'GET': ( + {}, + {'images': [ + { + 'id': '3a4560a1-e585-443e-9b39-553b46ec92d1', + 'name': 'image-1', + } + ]}, + ), + }, + '/v2/images?checksum=%s&limit=%d' % (_CHKSUM1, images.DEFAULT_PAGE_SIZE): { + 'GET': ( + {}, + {'images': [ + { + 'id': '2a4560b2-e585-443e-9b39-553b46ec92d1', + 'name': 'image-1', + }, + { + 'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', + 'name': 'image-2', + }, + ]}, + ), + }, + '/v2/images?checksum=wrong&limit=%d' % images.DEFAULT_PAGE_SIZE: { + 'GET': ( + {}, + {'images': []}, + ), + }, + '/v2/images?limit=%d&tag=%s' % (images.DEFAULT_PAGE_SIZE, _TAG1): { + 'GET': ( + {}, + {'images': [ + { + 'id': '3a4560a1-e585-443e-9b39-553b46ec92d1', + 'name': 'image-1', + } + ]}, + ), + }, + '/v2/images?limit=%d&tag=%s' % (images.DEFAULT_PAGE_SIZE, _TAG2): { + 'GET': ( + {}, + {'images': [ + { + 'id': '2a4560b2-e585-443e-9b39-553b46ec92d1', + 'name': 'image-1', + }, + { + 'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', + 'name': 'image-2', + }, + ]}, + ), + }, + '/v2/images?limit=%d&tag=%s&tag=%s' % (images.DEFAULT_PAGE_SIZE, + _TAG1, _TAG2): + { + 'GET': ( + {}, + {'images': [ + { + 'id': '2a4560b2-e585-443e-9b39-553b46ec92d1', + 'name': 'image-1', + } + ]}, + ), + }, + '/v2/images?limit=%d&tag=fake' % images.DEFAULT_PAGE_SIZE: { + 'GET': ( + {}, + {'images': []}, + ), + }, + '/v2/images/a2b83adc-888e-11e3-8872-78acc0b951d8': { + 'GET': ( + {}, + { + 'id': 'a2b83adc-888e-11e3-8872-78acc0b951d8', + 'name': 'image-location-tests', + 'locations': [{u'url': u'http://foo.com/', + u'metadata': {u'foo': u'foometa'}}, + {u'url': u'http://bar.com/', + u'metadata': {u'bar': u'barmeta'}}], + }, + ), + 'PATCH': ( + {}, + '', + ) + }, + '/v2/images?limit=%d&os_distro=NixOS' % images.DEFAULT_PAGE_SIZE: { + 'GET': ( + {}, + {'images': [ + { + 'id': '8b052954-c76c-4e02-8e90-be89a70183a8', + 'name': 'image-5', + 'os_distro': 'NixOS', + }, + ]}, + ), + }, + '/v2/images?limit=%d&my_little_property=cant_be_this_cute' % + images.DEFAULT_PAGE_SIZE: { + 'GET': ( + {}, + {'images': []}, + ), + }, + '/v2/images?limit=%d&sort_key=name' % images.DEFAULT_PAGE_SIZE: { + 'GET': ( + {}, + {'images': [ + { + 'id': 
'2a4560b2-e585-443e-9b39-553b46ec92d1', + 'name': 'image-1', + }, + { + 'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', + 'name': 'image-2', + }, + ]}, + ), + }, + '/v2/images?limit=%d&sort_key=name&sort_key=id' + % images.DEFAULT_PAGE_SIZE: { + 'GET': ( + {}, + {'images': [ + { + 'id': '2a4560b2-e585-443e-9b39-553b46ec92d1', + 'name': 'image', + }, + { + 'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', + 'name': 'image', + }, + ]}, + ), + }, + '/v2/images?limit=%d&sort_dir=desc&sort_key=id' + % images.DEFAULT_PAGE_SIZE: { + 'GET': ( + {}, + {'images': [ + { + 'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', + 'name': 'image-2', + }, + { + 'id': '2a4560b2-e585-443e-9b39-553b46ec92d1', + 'name': 'image-1', + }, + ]}, + ), + }, + '/v2/images?limit=%d&sort_dir=desc&sort_key=name&sort_key=id' + % images.DEFAULT_PAGE_SIZE: { + 'GET': ( + {}, + {'images': [ + { + 'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', + 'name': 'image-2', + }, + { + 'id': '2a4560b2-e585-443e-9b39-553b46ec92d1', + 'name': 'image-1', + }, + ]}, + ), + }, + '/v2/images?limit=%d&sort_dir=desc&sort_dir=asc&sort_key=name&sort_key=id' + % images.DEFAULT_PAGE_SIZE: { + 'GET': ( + {}, + {'images': [ + { + 'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', + 'name': 'image-2', + }, + { + 'id': '2a4560b2-e585-443e-9b39-553b46ec92d1', + 'name': 'image-1', + }, + ]}, + ), + }, + '/v2/images?limit=%d&sort=name%%3Adesc%%2Csize%%3Aasc' + % images.DEFAULT_PAGE_SIZE: { + 'GET': ( + {}, + {'images': [ + { + 'id': '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', + 'name': 'image-2', + }, + { + 'id': '2a4560b2-e585-443e-9b39-553b46ec92d1', + 'name': 'image-1', + }, + ]}, + ), + }, +} + +schema_fixtures = { + 'image': { + 'GET': ( + {}, + { + 'name': 'image', + 'properties': { + 'id': {}, + 'name': {}, + 'locations': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'metadata': {'type': 'object'}, + 'url': {'type': 'string'}, + }, + 'required': ['url', 'metadata'], + } + }, + 'color': {'type': 'string', 'is_base': False}, + 'tags': {'type': 'array'}, + }, + 'additionalProperties': {'type': 'string'}, + } + ) + } +} + + +class TestController(testtools.TestCase): + + def setUp(self): + super(TestController, self).setUp() + self.api = utils.FakeAPI(data_fixtures) + self.schema_api = utils.FakeSchemaAPI(schema_fixtures) + self.controller = images.Controller(self.api, self.schema_api) + + def test_list_images(self): + # NOTE(bcwaldon):cast to list since the controller returns a generator + images = list(self.controller.list()) + self.assertEqual('3a4560a1-e585-443e-9b39-553b46ec92d1', images[0].id) + self.assertEqual('image-1', images[0].name) + self.assertEqual('6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', images[1].id) + self.assertEqual('image-2', images[1].name) + + def test_list_images_paginated(self): + # NOTE(bcwaldon):cast to list since the controller returns a generator + images = list(self.controller.list(page_size=1)) + self.assertEqual('3a4560a1-e585-443e-9b39-553b46ec92d1', images[0].id) + self.assertEqual('image-1', images[0].name) + self.assertEqual('6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', images[1].id) + self.assertEqual('image-2', images[1].name) + + def test_list_images_paginated_with_limit(self): + # NOTE(bcwaldon):cast to list since the controller returns a generator + images = list(self.controller.list(limit=3, page_size=2)) + self.assertEqual('3a4560a1-e585-443e-9b39-553b46ec92d1', images[0].id) + self.assertEqual('image-1', images[0].name) + self.assertEqual('6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', images[1].id) + 
self.assertEqual('image-2', images[1].name) + self.assertEqual('3f99bf80-2ee6-47cf-acfe-1f1fabb7e811', images[2].id) + self.assertEqual('image-3', images[2].name) + self.assertEqual(3, len(images)) + + def test_list_images_visibility_public(self): + filters = {'filters': {'visibility': 'public'}} + images = list(self.controller.list(**filters)) + self.assertEqual(_PUBLIC_ID, images[0].id) + + def test_list_images_visibility_private(self): + filters = {'filters': {'visibility': 'private'}} + images = list(self.controller.list(**filters)) + self.assertEqual(_PRIVATE_ID, images[0].id) + + def test_list_images_visibility_shared(self): + filters = {'filters': {'visibility': 'shared'}} + images = list(self.controller.list(**filters)) + self.assertEqual(_SHARED_ID, images[0].id) + + def test_list_images_member_status_rejected(self): + filters = {'filters': {'member_status': 'rejected'}} + images = list(self.controller.list(**filters)) + self.assertEqual(_STATUS_REJECTED_ID, images[0].id) + + def test_list_images_for_owner(self): + filters = {'filters': {'owner': _OWNER_ID}} + images = list(self.controller.list(**filters)) + self.assertEqual(_OWNED_IMAGE_ID, images[0].id) + + def test_list_images_for_checksum_single_image(self): + fake_id = '3a4560a1-e585-443e-9b39-553b46ec92d1' + filters = {'filters': {'checksum': _CHKSUM}} + images = list(self.controller.list(**filters)) + self.assertEqual(1, len(images)) + self.assertEqual('%s' % fake_id, images[0].id) + + def test_list_images_for_checksum_multiple_images(self): + fake_id1 = '2a4560b2-e585-443e-9b39-553b46ec92d1' + fake_id2 = '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810' + filters = {'filters': {'checksum': _CHKSUM1}} + images = list(self.controller.list(**filters)) + self.assertEqual(2, len(images)) + self.assertEqual('%s' % fake_id1, images[0].id) + self.assertEqual('%s' % fake_id2, images[1].id) + + def test_list_images_for_wrong_checksum(self): + filters = {'filters': {'checksum': 'wrong'}} + images = list(self.controller.list(**filters)) + self.assertEqual(0, len(images)) + + def test_list_images_for_bogus_owner(self): + filters = {'filters': {'owner': _BOGUS_ID}} + images = list(self.controller.list(**filters)) + self.assertEqual([], images) + + def test_list_images_for_bunch_of_filters(self): + filters = {'filters': {'owner': _BOGUS_ID, + 'visibility': 'shared', + 'member_status': 'pending'}} + images = list(self.controller.list(**filters)) + self.assertEqual(_EVERYTHING_ID, images[0].id) + + def test_list_images_filters_encoding(self): + filters = {"owner": u"ni\xf1o"} + try: + list(self.controller.list(filters=filters)) + except KeyError: + # NOTE(flaper87): It raises KeyError because there's + # no fixture supporting this query: + # /v2/images?owner=ni%C3%B1o&limit=20 + # We just want to make sure filters are correctly encoded. 
+ pass + self.assertEqual(b"ni\xc3\xb1o", filters["owner"]) + + def test_list_images_for_tag_single_image(self): + img_id = '3a4560a1-e585-443e-9b39-553b46ec92d1' + filters = {'filters': {'tag': [_TAG1]}} + images = list(self.controller.list(**filters)) + self.assertEqual(1, len(images)) + self.assertEqual('%s' % img_id, images[0].id) + pass + + def test_list_images_for_tag_multiple_images(self): + img_id1 = '2a4560b2-e585-443e-9b39-553b46ec92d1' + img_id2 = '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810' + filters = {'filters': {'tag': [_TAG2]}} + images = list(self.controller.list(**filters)) + self.assertEqual(2, len(images)) + self.assertEqual('%s' % img_id1, images[0].id) + self.assertEqual('%s' % img_id2, images[1].id) + + def test_list_images_for_multi_tags(self): + img_id1 = '2a4560b2-e585-443e-9b39-553b46ec92d1' + filters = {'filters': {'tag': [_TAG1, _TAG2]}} + images = list(self.controller.list(**filters)) + self.assertEqual(1, len(images)) + self.assertEqual('%s' % img_id1, images[0].id) + + def test_list_images_for_non_existent_tag(self): + filters = {'filters': {'tag': ['fake']}} + images = list(self.controller.list(**filters)) + self.assertEqual(0, len(images)) + + def test_list_images_with_single_sort_key(self): + img_id1 = '2a4560b2-e585-443e-9b39-553b46ec92d1' + sort_key = 'name' + images = list(self.controller.list(sort_key=sort_key)) + self.assertEqual(2, len(images)) + self.assertEqual('%s' % img_id1, images[0].id) + + def test_list_with_multiple_sort_keys(self): + img_id1 = '2a4560b2-e585-443e-9b39-553b46ec92d1' + sort_key = ['name', 'id'] + images = list(self.controller.list(sort_key=sort_key)) + self.assertEqual(2, len(images)) + self.assertEqual('%s' % img_id1, images[0].id) + + def test_list_images_with_desc_sort_dir(self): + img_id1 = '2a4560b2-e585-443e-9b39-553b46ec92d1' + sort_key = 'id' + sort_dir = 'desc' + images = list(self.controller.list(sort_key=sort_key, + sort_dir=sort_dir)) + self.assertEqual(2, len(images)) + self.assertEqual('%s' % img_id1, images[1].id) + + def test_list_images_with_multiple_sort_keys_and_one_sort_dir(self): + img_id1 = '2a4560b2-e585-443e-9b39-553b46ec92d1' + sort_key = ['name', 'id'] + sort_dir = 'desc' + images = list(self.controller.list(sort_key=sort_key, + sort_dir=sort_dir)) + self.assertEqual(2, len(images)) + self.assertEqual('%s' % img_id1, images[1].id) + + def test_list_images_with_multiple_sort_dirs(self): + img_id1 = '2a4560b2-e585-443e-9b39-553b46ec92d1' + sort_key = ['name', 'id'] + sort_dir = ['desc', 'asc'] + images = list(self.controller.list(sort_key=sort_key, + sort_dir=sort_dir)) + self.assertEqual(2, len(images)) + self.assertEqual('%s' % img_id1, images[1].id) + + def test_list_images_with_new_sorting_syntax(self): + img_id1 = '2a4560b2-e585-443e-9b39-553b46ec92d1' + sort = 'name:desc,size:asc' + images = list(self.controller.list(sort=sort)) + self.assertEqual(2, len(images)) + self.assertEqual('%s' % img_id1, images[1].id) + + def test_list_images_sort_dirs_fewer_than_keys(self): + sort_key = ['name', 'id', 'created_at'] + sort_dir = ['desc', 'asc'] + self.assertRaises(exc.HTTPBadRequest, + list, + self.controller.list( + sort_key=sort_key, + sort_dir=sort_dir)) + + def test_list_images_combined_syntax(self): + sort_key = ['name', 'id'] + sort_dir = ['desc', 'asc'] + sort = 'name:asc' + self.assertRaises(exc.HTTPBadRequest, + list, + self.controller.list( + sort=sort, + sort_key=sort_key, + sort_dir=sort_dir)) + + def test_list_images_new_sorting_syntax_invalid_key(self): + sort = 'INVALID:asc' + 
self.assertRaises(exc.HTTPBadRequest, + list, + self.controller.list( + sort=sort)) + + def test_list_images_new_sorting_syntax_invalid_direction(self): + sort = 'name:INVALID' + self.assertRaises(exc.HTTPBadRequest, + list, + self.controller.list( + sort=sort)) + + def test_list_images_for_property(self): + filters = {'filters': dict([('os_distro', 'NixOS')])} + images = list(self.controller.list(**filters)) + self.assertEqual(1, len(images)) + + def test_list_images_for_non_existent_property(self): + filters = {'filters': dict([('my_little_property', + 'cant_be_this_cute')])} + images = list(self.controller.list(**filters)) + self.assertEqual(0, len(images)) + + def test_get_image(self): + image = self.controller.get('3a4560a1-e585-443e-9b39-553b46ec92d1') + self.assertEqual('3a4560a1-e585-443e-9b39-553b46ec92d1', image.id) + self.assertEqual('image-1', image.name) + + def test_create_image(self): + properties = { + 'name': 'image-1' + } + image = self.controller.create(**properties) + self.assertEqual('3a4560a1-e585-443e-9b39-553b46ec92d1', image.id) + self.assertEqual('image-1', image.name) + + def test_create_bad_additionalProperty_type(self): + properties = { + 'name': 'image-1', + 'bad_prop': True, + } + with testtools.ExpectedException(TypeError): + self.controller.create(**properties) + + def test_delete_image(self): + self.controller.delete('87b634c1-f893-33c9-28a9-e5673c99239a') + expect = [ + ('DELETE', + '/v2/images/87b634c1-f893-33c9-28a9-e5673c99239a', + {}, + None)] + self.assertEqual(expect, self.api.calls) + + def test_data_upload(self): + image_data = 'CCC' + image_id = '606b0e88-7c5a-4d54-b5bb-046105d4de6f' + self.controller.upload(image_id, image_data) + expect = [('PUT', '/v2/images/%s/file' % image_id, + {'Content-Type': 'application/octet-stream'}, + image_data)] + self.assertEqual(expect, self.api.calls) + + def test_data_upload_w_size(self): + image_data = 'CCC' + image_id = '606b0e88-7c5a-4d54-b5bb-046105d4de6f' + self.controller.upload(image_id, image_data, image_size=3) + body = {'image_data': image_data, + 'image_size': 3} + expect = [('PUT', '/v2/images/%s/file' % image_id, + {'Content-Type': 'application/octet-stream'}, + sorted(body.items()))] + self.assertEqual(expect, self.api.calls) + + def test_data_without_checksum(self): + body = self.controller.data('5cc4bebc-db27-11e1-a1eb-080027cbe205', + do_checksum=False) + body = ''.join([b for b in body]) + self.assertEqual('A', body) + + body = self.controller.data('5cc4bebc-db27-11e1-a1eb-080027cbe205') + body = ''.join([b for b in body]) + self.assertEqual('A', body) + + def test_data_with_wrong_checksum(self): + body = self.controller.data('66fb18d6-db27-11e1-a1eb-080027cbe205', + do_checksum=False) + body = ''.join([b for b in body]) + self.assertEqual('BB', body) + + body = self.controller.data('66fb18d6-db27-11e1-a1eb-080027cbe205') + try: + body = ''.join([b for b in body]) + self.fail('data did not raise an error.') + except IOError as e: + self.assertEqual(errno.EPIPE, e.errno) + msg = 'was 9d3d9048db16a7eee539e93e3618cbe7 expected wrong' + self.assertTrue(msg in str(e)) + + def test_data_with_checksum(self): + body = self.controller.data('1b1c6366-dd57-11e1-af0f-02163e68b1d8', + do_checksum=False) + body = ''.join([b for b in body]) + self.assertEqual('CCC', body) + + body = self.controller.data('1b1c6366-dd57-11e1-af0f-02163e68b1d8') + body = ''.join([b for b in body]) + self.assertEqual('CCC', body) + + def test_update_replace_prop(self): + image_id = '3a4560a1-e585-443e-9b39-553b46ec92d1' + params 
= {'name': 'pong'} + image = self.controller.update(image_id, **params) + expect_hdrs = { + 'Content-Type': 'application/openstack-images-v2.1-json-patch', + } + expect_body = [[('op', 'replace'), ('path', '/name'), + ('value', 'pong')]] + expect = [ + ('GET', '/v2/images/%s' % image_id, {}, None), + ('PATCH', '/v2/images/%s' % image_id, expect_hdrs, expect_body), + ('GET', '/v2/images/%s' % image_id, {}, None), + ] + self.assertEqual(expect, self.api.calls) + self.assertEqual(image_id, image.id) + # NOTE(bcwaldon):due to limitations of our fake api framework, the name + # will not actually change - yet in real life it will... + self.assertEqual('image-1', image.name) + + def test_update_add_prop(self): + image_id = '3a4560a1-e585-443e-9b39-553b46ec92d1' + params = {'finn': 'human'} + image = self.controller.update(image_id, **params) + expect_hdrs = { + 'Content-Type': 'application/openstack-images-v2.1-json-patch', + } + expect_body = [[('op', 'add'), ('path', '/finn'), ('value', 'human')]] + expect = [ + ('GET', '/v2/images/%s' % image_id, {}, None), + ('PATCH', '/v2/images/%s' % image_id, expect_hdrs, expect_body), + ('GET', '/v2/images/%s' % image_id, {}, None), + ] + self.assertEqual(expect, self.api.calls) + self.assertEqual(image_id, image.id) + # NOTE(bcwaldon):due to limitations of our fake api framework, the name + # will not actually change - yet in real life it will... + self.assertEqual('image-1', image.name) + + def test_update_remove_prop(self): + image_id = 'e7e59ff6-fa2e-4075-87d3-1a1398a07dc3' + remove_props = ['barney'] + image = self.controller.update(image_id, remove_props) + expect_hdrs = { + 'Content-Type': 'application/openstack-images-v2.1-json-patch', + } + expect_body = [[('op', 'remove'), ('path', '/barney')]] + expect = [ + ('GET', '/v2/images/%s' % image_id, {}, None), + ('PATCH', '/v2/images/%s' % image_id, expect_hdrs, expect_body), + ('GET', '/v2/images/%s' % image_id, {}, None), + ] + self.assertEqual(expect, self.api.calls) + self.assertEqual(image_id, image.id) + # NOTE(bcwaldon):due to limitations of our fake api framework, the name + # will not actually change - yet in real life it will... + self.assertEqual('image-3', image.name) + + def test_update_replace_remove_same_prop(self): + image_id = 'e7e59ff6-fa2e-4075-87d3-1a1398a07dc3' + # Updating a property takes precedence over removing a property + params = {'barney': 'miller'} + remove_props = ['barney'] + image = self.controller.update(image_id, remove_props, **params) + expect_hdrs = { + 'Content-Type': 'application/openstack-images-v2.1-json-patch', + } + expect_body = ([[('op', 'replace'), ('path', '/barney'), + ('value', 'miller')]]) + expect = [ + ('GET', '/v2/images/%s' % image_id, {}, None), + ('PATCH', '/v2/images/%s' % image_id, expect_hdrs, expect_body), + ('GET', '/v2/images/%s' % image_id, {}, None), + ] + self.assertEqual(expect, self.api.calls) + self.assertEqual(image_id, image.id) + # NOTE(bcwaldon):due to limitations of our fake api framework, the name + # will not actually change - yet in real life it will... 
+ self.assertEqual('image-3', image.name) + + def test_update_add_remove_same_prop(self): + image_id = 'e7e59ff6-fa2e-4075-87d3-1a1398a07dc3' + # Adding a property takes precedence over removing a property + params = {'finn': 'human'} + remove_props = ['finn'] + image = self.controller.update(image_id, remove_props, **params) + expect_hdrs = { + 'Content-Type': 'application/openstack-images-v2.1-json-patch', + } + expect_body = [[('op', 'add'), ('path', '/finn'), ('value', 'human')]] + expect = [ + ('GET', '/v2/images/%s' % image_id, {}, None), + ('PATCH', '/v2/images/%s' % image_id, expect_hdrs, expect_body), + ('GET', '/v2/images/%s' % image_id, {}, None), + ] + self.assertEqual(expect, self.api.calls) + self.assertEqual(image_id, image.id) + # NOTE(bcwaldon):due to limitations of our fake api framework, the name + # will not actually change - yet in real life it will... + self.assertEqual('image-3', image.name) + + def test_update_bad_additionalProperty_type(self): + image_id = 'e7e59ff6-fa2e-4075-87d3-1a1398a07dc3' + params = {'name': 'pong', 'bad_prop': False} + with testtools.ExpectedException(TypeError): + self.controller.update(image_id, **params) + + def test_update_add_custom_property(self): + image_id = '3a4560a1-e585-443e-9b39-553b46ec92d1' + params = {'color': 'red'} + image = self.controller.update(image_id, **params) + expect_hdrs = { + 'Content-Type': 'application/openstack-images-v2.1-json-patch', + } + expect_body = [[('op', 'add'), ('path', '/color'), ('value', 'red')]] + expect = [ + ('GET', '/v2/images/%s' % image_id, {}, None), + ('PATCH', '/v2/images/%s' % image_id, expect_hdrs, expect_body), + ('GET', '/v2/images/%s' % image_id, {}, None), + ] + self.assertEqual(expect, self.api.calls) + self.assertEqual(image_id, image.id) + + def test_update_replace_custom_property(self): + image_id = 'e7e59ff6-fa2e-4075-87d3-1a1398a07dc3' + params = {'color': 'blue'} + image = self.controller.update(image_id, **params) + expect_hdrs = { + 'Content-Type': 'application/openstack-images-v2.1-json-patch', + } + expect_body = [[('op', 'replace'), ('path', '/color'), + ('value', 'blue')]] + expect = [ + ('GET', '/v2/images/%s' % image_id, {}, None), + ('PATCH', '/v2/images/%s' % image_id, expect_hdrs, expect_body), + ('GET', '/v2/images/%s' % image_id, {}, None), + ] + self.assertEqual(expect, self.api.calls) + self.assertEqual(image_id, image.id) + + def test_location_ops_when_server_disabled_location_ops(self): + # Location operations should not be allowed if server has not + # enabled location related operations + image_id = '3a4560a1-e585-443e-9b39-553b46ec92d1' + estr = 'The administrator has disabled API access to image locations' + url = 'http://bar.com/' + meta = {'bar': 'barmeta'} + + e = self.assertRaises(exc.HTTPBadRequest, + self.controller.add_location, + image_id, url, meta) + self.assertTrue(estr in str(e)) + + e = self.assertRaises(exc.HTTPBadRequest, + self.controller.delete_locations, + image_id, set([url])) + self.assertTrue(estr in str(e)) + + e = self.assertRaises(exc.HTTPBadRequest, + self.controller.update_location, + image_id, url, meta) + self.assertTrue(estr in str(e)) + + def _empty_get(self, image_id): + return ('GET', '/v2/images/%s' % image_id, {}, None) + + def _patch_req(self, image_id, patch_body): + c_type = 'application/openstack-images-v2.1-json-patch' + data = [sorted(d.items()) for d in patch_body] + return ('PATCH', + '/v2/images/%s' % image_id, + {'Content-Type': c_type}, + data) + + def test_add_location(self): + image_id = 
'a2b83adc-888e-11e3-8872-78acc0b951d8' + new_loc = {'url': 'http://spam.com/', 'metadata': {'spam': 'ham'}} + add_patch = {'path': '/locations/-', 'value': new_loc, 'op': 'add'} + self.controller.add_location(image_id, **new_loc) + self.assertEqual(self.api.calls, [ + self._empty_get(image_id), + self._patch_req(image_id, [add_patch]), + self._empty_get(image_id) + ]) + + def test_add_duplicate_location(self): + image_id = 'a2b83adc-888e-11e3-8872-78acc0b951d8' + new_loc = {'url': 'http://foo.com/', 'metadata': {'foo': 'newfoo'}} + err_str = 'A location entry at %s already exists' % new_loc['url'] + + err = self.assertRaises(exc.HTTPConflict, + self.controller.add_location, + image_id, **new_loc) + self.assertIn(err_str, str(err)) + + def test_remove_location(self): + image_id = 'a2b83adc-888e-11e3-8872-78acc0b951d8' + url_set = set(['http://foo.com/', 'http://bar.com/']) + del_patches = [{'path': '/locations/1', 'op': 'remove'}, + {'path': '/locations/0', 'op': 'remove'}] + self.controller.delete_locations(image_id, url_set) + self.assertEqual(self.api.calls, [ + self._empty_get(image_id), + self._patch_req(image_id, del_patches) + ]) + + def test_remove_missing_location(self): + image_id = 'a2b83adc-888e-11e3-8872-78acc0b951d8' + url_set = set(['http://spam.ham/']) + err_str = 'Unknown URL(s): %s' % list(url_set) + + err = self.assertRaises(exc.HTTPNotFound, + self.controller.delete_locations, + image_id, url_set) + self.assertTrue(err_str in str(err)) + + def test_update_location(self): + image_id = 'a2b83adc-888e-11e3-8872-78acc0b951d8' + new_loc = {'url': 'http://foo.com/', 'metadata': {'spam': 'ham'}} + fixture_idx = '/v2/images/%s' % (image_id) + orig_locations = data_fixtures[fixture_idx]['GET'][1]['locations'] + loc_map = dict([(l['url'], l) for l in orig_locations]) + loc_map[new_loc['url']] = new_loc + mod_patch = [{'path': '/locations', 'op': 'replace', + 'value': []}, + {'path': '/locations', 'op': 'replace', + 'value': list(loc_map.values())}] + self.controller.update_location(image_id, **new_loc) + self.assertEqual(self.api.calls, [ + self._empty_get(image_id), + self._patch_req(image_id, mod_patch), + self._empty_get(image_id) + ]) + + def test_update_tags(self): + image_id = 'a2b83adc-888e-11e3-8872-78acc0b951d8' + tag_map = {'tags': ['tag01', 'tag02', 'tag03']} + + image = self.controller.update(image_id, **tag_map) + + expected_body = [{'path': '/tags', 'op': 'replace', + 'value': tag_map['tags']}] + expected = [ + self._empty_get(image_id), + self._patch_req(image_id, expected_body), + self._empty_get(image_id) + ] + self.assertEqual(expected, self.api.calls) + self.assertEqual(image_id, image.id) + + def test_update_missing_location(self): + image_id = 'a2b83adc-888e-11e3-8872-78acc0b951d8' + new_loc = {'url': 'http://spam.com/', 'metadata': {'spam': 'ham'}} + err_str = 'Unknown URL: %s' % new_loc['url'] + err = self.assertRaises(exc.HTTPNotFound, + self.controller.update_location, + image_id, **new_loc) + self.assertTrue(err_str in str(err)) diff --git a/code/daisyclient/tests/v2/test_members.py b/code/daisyclient/tests/v2/test_members.py index 744f3a6f..7a3688fe 100755 --- a/code/daisyclient/tests/v2/test_members.py +++ b/code/daisyclient/tests/v2/test_members.py @@ -76,6 +76,7 @@ schema_fixtures = { class TestController(testtools.TestCase): + def setUp(self): super(TestController, self).setUp() self.api = utils.FakeAPI(data_fixtures) @@ -84,7 +85,7 @@ class TestController(testtools.TestCase): def test_list_image_members(self): image_id = IMAGE - #NOTE(iccha): cast 
to list since the controller returns a generator + # NOTE(iccha): cast to list since the controller returns a generator image_members = list(self.controller.list(image_id)) self.assertEqual(IMAGE, image_members[0].image_id) self.assertEqual(MEMBER, image_members[0].member_id) diff --git a/code/daisyclient/tests/v2/test_metadefs_namespaces.py b/code/daisyclient/tests/v2/test_metadefs_namespaces.py index b03dcd15..41dcbb47 100755 --- a/code/daisyclient/tests/v2/test_metadefs_namespaces.py +++ b/code/daisyclient/tests/v2/test_metadefs_namespaces.py @@ -1,674 +1,675 @@ -# Copyright 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import testtools - -from glanceclient.v2 import metadefs -from tests import utils - -NAMESPACE1 = 'Namespace1' -NAMESPACE2 = 'Namespace2' -NAMESPACE3 = 'Namespace3' -NAMESPACE4 = 'Namespace4' -NAMESPACE5 = 'Namespace5' -NAMESPACE6 = 'Namespace6' -NAMESPACE7 = 'Namespace7' -NAMESPACE8 = 'Namespace8' -NAMESPACENEW = 'NamespaceNew' -RESOURCE_TYPE1 = 'ResourceType1' -RESOURCE_TYPE2 = 'ResourceType2' -OBJECT1 = 'Object1' -PROPERTY1 = 'Property1' -PROPERTY2 = 'Property2' - - -def _get_namespace_fixture(ns_name, rt_name=RESOURCE_TYPE1, **kwargs): - ns = { - "display_name": "Flavor Quota", - "description": "DESCRIPTION1", - "self": "/v2/metadefs/namespaces/%s" % ns_name, - "namespace": ns_name, - "visibility": "public", - "protected": True, - "owner": "admin", - "resource_types": [ - { - "name": rt_name - } - ], - "schema": "/v2/schemas/metadefs/namespace", - "created_at": "2014-08-14T09:07:06Z", - "updated_at": "2014-08-14T09:07:06Z", - } - - ns.update(kwargs) - - return ns - -data_fixtures = { - "/v2/metadefs/namespaces?limit=20": { - "GET": ( - {}, - { - "first": "/v2/metadefs/namespaces?limit=20", - "namespaces": [ - _get_namespace_fixture(NAMESPACE1), - _get_namespace_fixture(NAMESPACE2), - ], - "schema": "/v2/schemas/metadefs/namespaces" - } - ) - }, - "/v2/metadefs/namespaces?limit=1": { - "GET": ( - {}, - { - "first": "/v2/metadefs/namespaces?limit=1", - "namespaces": [ - _get_namespace_fixture(NAMESPACE7), - ], - "schema": "/v2/schemas/metadefs/namespaces", - "next": "/v2/metadefs/namespaces?marker=%s&limit=1" - % NAMESPACE7, - } - ) - }, - "/v2/metadefs/namespaces?limit=1&marker=%s" % NAMESPACE7: { - "GET": ( - {}, - { - "first": "/v2/metadefs/namespaces?limit=2", - "namespaces": [ - _get_namespace_fixture(NAMESPACE8), - ], - "schema": "/v2/schemas/metadefs/namespaces" - } - ) - }, - "/v2/metadefs/namespaces?limit=2&marker=%s" % NAMESPACE6: { - "GET": ( - {}, - { - "first": "/v2/metadefs/namespaces?limit=2", - "namespaces": [ - _get_namespace_fixture(NAMESPACE7), - _get_namespace_fixture(NAMESPACE8), - ], - "schema": "/v2/schemas/metadefs/namespaces" - } - ) - }, - "/v2/metadefs/namespaces?limit=20&sort_dir=asc": { - "GET": ( - {}, - { - "first": "/v2/metadefs/namespaces?limit=1", - "namespaces": [ - _get_namespace_fixture(NAMESPACE1), - ], - "schema": "/v2/schemas/metadefs/namespaces" - } - ) - }, - 
"/v2/metadefs/namespaces?limit=20&sort_key=created_at": { - "GET": ( - {}, - { - "first": "/v2/metadefs/namespaces?limit=1", - "namespaces": [ - _get_namespace_fixture(NAMESPACE1), - ], - "schema": "/v2/schemas/metadefs/namespaces" - } - ) - }, - "/v2/metadefs/namespaces?limit=20&resource_types=%s" % RESOURCE_TYPE1: { - "GET": ( - {}, - { - "first": "/v2/metadefs/namespaces?limit=20", - "namespaces": [ - _get_namespace_fixture(NAMESPACE3), - ], - "schema": "/v2/schemas/metadefs/namespaces" - } - ) - }, - "/v2/metadefs/namespaces?limit=20&resource_types=" - "%s%%2C%s" % (RESOURCE_TYPE1, RESOURCE_TYPE2): { - "GET": ( - {}, - { - "first": "/v2/metadefs/namespaces?limit=20", - "namespaces": [ - _get_namespace_fixture(NAMESPACE4), - ], - "schema": "/v2/schemas/metadefs/namespaces" - } - ) - }, - "/v2/metadefs/namespaces?limit=20&visibility=private": { - "GET": ( - {}, - { - "first": "/v2/metadefs/namespaces?limit=20", - "namespaces": [ - _get_namespace_fixture(NAMESPACE5), - ], - "schema": "/v2/schemas/metadefs/namespaces" - } - ) - }, - "/v2/metadefs/namespaces": { - "POST": ( - {}, - { - "display_name": "Flavor Quota", - "description": "DESCRIPTION1", - "self": "/v2/metadefs/namespaces/%s" % 'NamespaceNew', - "namespace": 'NamespaceNew', - "visibility": "public", - "protected": True, - "owner": "admin", - "schema": "/v2/schemas/metadefs/namespace", - "created_at": "2014-08-14T09:07:06Z", - "updated_at": "2014-08-14T09:07:06Z", - } - ) - }, - "/v2/metadefs/namespaces/%s" % NAMESPACE1: { - "GET": ( - {}, - { - "display_name": "Flavor Quota", - "description": "DESCRIPTION1", - "objects": [ - { - "description": "DESCRIPTION2", - "name": "OBJECT1", - "self": "/v2/metadefs/namespaces/%s/objects/" % - OBJECT1, - "required": [], - "properties": { - PROPERTY1: { - "type": "integer", - "description": "DESCRIPTION3", - "title": "Quota: CPU Shares" - }, - PROPERTY2: { - "minimum": 1000, - "type": "integer", - "description": "DESCRIPTION4", - "maximum": 1000000, - "title": "Quota: CPU Period" - }, - }, - "schema": "/v2/schemas/metadefs/object" - } - ], - "self": "/v2/metadefs/namespaces/%s" % NAMESPACE1, - "namespace": NAMESPACE1, - "visibility": "public", - "protected": True, - "owner": "admin", - "resource_types": [ - { - "name": RESOURCE_TYPE1 - } - ], - "schema": "/v2/schemas/metadefs/namespace", - "created_at": "2014-08-14T09:07:06Z", - "updated_at": "2014-08-14T09:07:06Z", - } - ), - "PUT": ( - {}, - { - "display_name": "Flavor Quota", - "description": "DESCRIPTION1", - "objects": [ - { - "description": "DESCRIPTION2", - "name": "OBJECT1", - "self": "/v2/metadefs/namespaces/%s/objects/" % - OBJECT1, - "required": [], - "properties": { - PROPERTY1: { - "type": "integer", - "description": "DESCRIPTION3", - "title": "Quota: CPU Shares" - }, - PROPERTY2: { - "minimum": 1000, - "type": "integer", - "description": "DESCRIPTION4", - "maximum": 1000000, - "title": "Quota: CPU Period" - }, - }, - "schema": "/v2/schemas/metadefs/object" - } - ], - "self": "/v2/metadefs/namespaces/%s" % NAMESPACENEW, - "namespace": NAMESPACENEW, - "visibility": "public", - "protected": True, - "owner": "admin", - "resource_types": [ - { - "name": RESOURCE_TYPE1 - } - ], - "schema": "/v2/schemas/metadefs/namespace", - "created_at": "2014-08-14T09:07:06Z", - "updated_at": "2014-08-14T09:07:06Z", - } - ), - "DELETE": ( - {}, - {} - ) - }, - "/v2/metadefs/namespaces/%s?resource_type=%s" % (NAMESPACE6, - RESOURCE_TYPE1): - { - "GET": ( - {}, - { - "display_name": "Flavor Quota", - "description": "DESCRIPTION1", - "objects": [], - 
"self": "/v2/metadefs/namespaces/%s" % NAMESPACE1, - "namespace": NAMESPACE6, - "visibility": "public", - "protected": True, - "owner": "admin", - "resource_types": [ - { - "name": RESOURCE_TYPE1 - } - ], - "schema": "/v2/schemas/metadefs/namespace", - "created_at": "2014-08-14T09:07:06Z", - "updated_at": "2014-08-14T09:07:06Z", - } - ), - }, -} - -schema_fixtures = { - "metadefs/namespace": - { - "GET": ( - {}, - { - "additionalProperties": False, - "definitions": { - "property": { - "additionalProperties": { - "required": [ - "title", - "type" - ], - "type": "object", - "properties": { - "additionalItems": { - "type": "boolean" - }, - "enum": { - "type": "array" - }, - "description": { - "type": "string" - }, - "title": { - "type": "string" - }, - "default": {}, - "minLength": { - "$ref": "#/definitions/" - "positiveIntegerDefault0" - }, - "required": { - "$ref": "#/definitions/stringArray" - }, - "maximum": { - "type": "number" - }, - "minItems": { - "$ref": "#/definitions/" - "positiveIntegerDefault0" - }, - "readonly": { - "type": "boolean" - }, - "minimum": { - "type": "number" - }, - "maxItems": { - "$ref": "#/definitions/" - "positiveInteger" - }, - "maxLength": { - "$ref": "#/definitions/positiveInteger" - }, - "uniqueItems": { - "default": False, - "type": "boolean" - }, - "pattern": { - "type": "string", - "format": "regex" - }, - "items": { - "type": "object", - "properties": { - "enum": { - "type": "array" - }, - "type": { - "enum": [ - "array", - "boolean", - "integer", - "number", - "object", - "string", - "null" - ], - "type": "string" - } - } - }, - "type": { - "enum": [ - "array", - "boolean", - "integer", - "number", - "object", - "string", - "null" - ], - "type": "string" - } - } - }, - "type": "object" - }, - "positiveIntegerDefault0": { - "allOf": [ - { - "$ref": "#/definitions/positiveInteger" - }, - { - "default": 0 - } - ] - }, - "stringArray": { - "uniqueItems": True, - "items": { - "type": "string" - }, - "type": "array" - }, - "positiveInteger": { - "minimum": 0, - "type": "integer" - } - }, - "required": [ - "namespace" - ], - "name": "namespace", - "properties": { - "description": { - "type": "string", - "description": "Provides a user friendly description " - "of the namespace.", - "maxLength": 500 - }, - "updated_at": { - "type": "string", - "description": "Date and time of the last namespace " - "modification (READ-ONLY)", - "format": "date-time" - }, - "visibility": { - "enum": [ - "public", - "private" - ], - "type": "string", - "description": "Scope of namespace accessibility." - }, - "self": { - "type": "string" - }, - "objects": { - "items": { - "type": "object", - "properties": { - "properties": { - "$ref": "#/definitions/property" - }, - "required": { - "$ref": "#/definitions/stringArray" - }, - "name": { - "type": "string" - }, - "description": { - "type": "string" - } - } - }, - "type": "array" - }, - "owner": { - "type": "string", - "description": "Owner of the namespace.", - "maxLength": 255 - }, - "resource_types": { - "items": { - "type": "object", - "properties": { - "prefix": { - "type": "string" - }, - "name": { - "type": "string" - }, - "metadata_type": { - "type": "string" - } - } - }, - "type": "array" - }, - "properties": { - "$ref": "#/definitions/property" - }, - "display_name": { - "type": "string", - "description": "The user friendly name for the " - "namespace. 
Used by UI if available.", - "maxLength": 80 - }, - "created_at": { - "type": "string", - "description": "Date and time of namespace creation " - "(READ-ONLY)", - "format": "date-time" - }, - "namespace": { - "type": "string", - "description": "The unique namespace text.", - "maxLength": 80 - }, - "protected": { - "type": "boolean", - "description": "If true, namespace will not be " - "deletable." - }, - "schema": { - "type": "string" - } - } - } - ), - } -} - - -class TestNamespaceController(testtools.TestCase): - def setUp(self): - super(TestNamespaceController, self).setUp() - self.api = utils.FakeAPI(data_fixtures) - self.schema_api = utils.FakeSchemaAPI(schema_fixtures) - self.controller = metadefs.NamespaceController(self.api, - self.schema_api) - - def test_list_namespaces(self): - namespaces = list(self.controller.list()) - - self.assertEqual(2, len(namespaces)) - self.assertEqual(NAMESPACE1, namespaces[0]['namespace']) - self.assertEqual(NAMESPACE2, namespaces[1]['namespace']) - - def test_list_namespaces_paginate(self): - namespaces = list(self.controller.list(page_size=1)) - - self.assertEqual(2, len(namespaces)) - self.assertEqual(NAMESPACE7, namespaces[0]['namespace']) - self.assertEqual(NAMESPACE8, namespaces[1]['namespace']) - - def test_list_with_limit_greater_than_page_size(self): - namespaces = list(self.controller.list(page_size=1, limit=2)) - self.assertEqual(2, len(namespaces)) - self.assertEqual(NAMESPACE7, namespaces[0]['namespace']) - self.assertEqual(NAMESPACE8, namespaces[1]['namespace']) - - def test_list_with_marker(self): - namespaces = list(self.controller.list(marker=NAMESPACE6, page_size=2)) - self.assertEqual(2, len(namespaces)) - self.assertEqual(NAMESPACE7, namespaces[0]['namespace']) - self.assertEqual(NAMESPACE8, namespaces[1]['namespace']) - - def test_list_with_sort_dir(self): - namespaces = list(self.controller.list(sort_dir='asc', limit=1)) - self.assertEqual(1, len(namespaces)) - self.assertEqual(NAMESPACE1, namespaces[0]['namespace']) - - def test_list_with_sort_dir_invalid(self): - # NOTE(TravT): The clients work by returning an iterator. - # Invoking the iterator is what actually executes the logic. - ns_iterator = self.controller.list(sort_dir='foo') - self.assertRaises(ValueError, next, ns_iterator) - - def test_list_with_sort_key(self): - namespaces = list(self.controller.list(sort_key='created_at', limit=1)) - self.assertEqual(1, len(namespaces)) - self.assertEqual(NAMESPACE1, namespaces[0]['namespace']) - - def test_list_with_sort_key_invalid(self): - # NOTE(TravT): The clients work by returning an iterator. - # Invoking the iterator is what actually executes the logic. 
- ns_iterator = self.controller.list(sort_key='foo') - self.assertRaises(ValueError, next, ns_iterator) - - def test_list_namespaces_with_one_resource_type_filter(self): - namespaces = list(self.controller.list( - filters={ - 'resource_types': [RESOURCE_TYPE1] - } - )) - - self.assertEqual(1, len(namespaces)) - self.assertEqual(NAMESPACE3, namespaces[0]['namespace']) - - def test_list_namespaces_with_multiple_resource_types_filter(self): - namespaces = list(self.controller.list( - filters={ - 'resource_types': [RESOURCE_TYPE1, RESOURCE_TYPE2] - } - )) - - self.assertEqual(1, len(namespaces)) - self.assertEqual(NAMESPACE4, namespaces[0]['namespace']) - - def test_list_namespaces_with_visibility_filter(self): - namespaces = list(self.controller.list( - filters={ - 'visibility': 'private' - } - )) - - self.assertEqual(1, len(namespaces)) - self.assertEqual(NAMESPACE5, namespaces[0]['namespace']) - - def test_get_namespace(self): - namespace = self.controller.get(NAMESPACE1) - self.assertEqual(NAMESPACE1, namespace.namespace) - self.assertTrue(namespace.protected) - - def test_get_namespace_with_resource_type(self): - namespace = self.controller.get(NAMESPACE6, - resource_type=RESOURCE_TYPE1) - self.assertEqual(NAMESPACE6, namespace.namespace) - self.assertTrue(namespace.protected) - - def test_create_namespace(self): - properties = { - 'namespace': NAMESPACENEW - } - namespace = self.controller.create(**properties) - - self.assertEqual(NAMESPACENEW, namespace.namespace) - self.assertTrue(namespace.protected) - - def test_create_namespace_invalid_data(self): - properties = {} - - self.assertRaises(TypeError, self.controller.create, **properties) - - def test_create_namespace_invalid_property(self): - properties = {'namespace': 'NewNamespace', 'protected': '123'} - - self.assertRaises(TypeError, self.controller.create, **properties) - - def test_update_namespace(self): - properties = {'display_name': 'My Updated Name'} - namespace = self.controller.update(NAMESPACE1, **properties) - - self.assertEqual(NAMESPACE1, namespace.namespace) - - def test_update_namespace_invalid_property(self): - properties = {'protected': '123'} - - self.assertRaises(TypeError, self.controller.update, NAMESPACE1, - **properties) - - def test_delete_namespace(self): - self.controller.delete(NAMESPACE1) - expect = [ - ('DELETE', - '/v2/metadefs/namespaces/%s' % NAMESPACE1, - {}, - None)] - self.assertEqual(expect, self.api.calls) +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import testtools + +from glanceclient.v2 import metadefs +from tests import utils + +NAMESPACE1 = 'Namespace1' +NAMESPACE2 = 'Namespace2' +NAMESPACE3 = 'Namespace3' +NAMESPACE4 = 'Namespace4' +NAMESPACE5 = 'Namespace5' +NAMESPACE6 = 'Namespace6' +NAMESPACE7 = 'Namespace7' +NAMESPACE8 = 'Namespace8' +NAMESPACENEW = 'NamespaceNew' +RESOURCE_TYPE1 = 'ResourceType1' +RESOURCE_TYPE2 = 'ResourceType2' +OBJECT1 = 'Object1' +PROPERTY1 = 'Property1' +PROPERTY2 = 'Property2' + + +def _get_namespace_fixture(ns_name, rt_name=RESOURCE_TYPE1, **kwargs): + ns = { + "display_name": "Flavor Quota", + "description": "DESCRIPTION1", + "self": "/v2/metadefs/namespaces/%s" % ns_name, + "namespace": ns_name, + "visibility": "public", + "protected": True, + "owner": "admin", + "resource_types": [ + { + "name": rt_name + } + ], + "schema": "/v2/schemas/metadefs/namespace", + "created_at": "2014-08-14T09:07:06Z", + "updated_at": "2014-08-14T09:07:06Z", + } + + ns.update(kwargs) + + return ns + +data_fixtures = { + "/v2/metadefs/namespaces?limit=20": { + "GET": ( + {}, + { + "first": "/v2/metadefs/namespaces?limit=20", + "namespaces": [ + _get_namespace_fixture(NAMESPACE1), + _get_namespace_fixture(NAMESPACE2), + ], + "schema": "/v2/schemas/metadefs/namespaces" + } + ) + }, + "/v2/metadefs/namespaces?limit=1": { + "GET": ( + {}, + { + "first": "/v2/metadefs/namespaces?limit=1", + "namespaces": [ + _get_namespace_fixture(NAMESPACE7), + ], + "schema": "/v2/schemas/metadefs/namespaces", + "next": "/v2/metadefs/namespaces?marker=%s&limit=1" + % NAMESPACE7, + } + ) + }, + "/v2/metadefs/namespaces?limit=1&marker=%s" % NAMESPACE7: { + "GET": ( + {}, + { + "first": "/v2/metadefs/namespaces?limit=2", + "namespaces": [ + _get_namespace_fixture(NAMESPACE8), + ], + "schema": "/v2/schemas/metadefs/namespaces" + } + ) + }, + "/v2/metadefs/namespaces?limit=2&marker=%s" % NAMESPACE6: { + "GET": ( + {}, + { + "first": "/v2/metadefs/namespaces?limit=2", + "namespaces": [ + _get_namespace_fixture(NAMESPACE7), + _get_namespace_fixture(NAMESPACE8), + ], + "schema": "/v2/schemas/metadefs/namespaces" + } + ) + }, + "/v2/metadefs/namespaces?limit=20&sort_dir=asc": { + "GET": ( + {}, + { + "first": "/v2/metadefs/namespaces?limit=1", + "namespaces": [ + _get_namespace_fixture(NAMESPACE1), + ], + "schema": "/v2/schemas/metadefs/namespaces" + } + ) + }, + "/v2/metadefs/namespaces?limit=20&sort_key=created_at": { + "GET": ( + {}, + { + "first": "/v2/metadefs/namespaces?limit=1", + "namespaces": [ + _get_namespace_fixture(NAMESPACE1), + ], + "schema": "/v2/schemas/metadefs/namespaces" + } + ) + }, + "/v2/metadefs/namespaces?limit=20&resource_types=%s" % RESOURCE_TYPE1: { + "GET": ( + {}, + { + "first": "/v2/metadefs/namespaces?limit=20", + "namespaces": [ + _get_namespace_fixture(NAMESPACE3), + ], + "schema": "/v2/schemas/metadefs/namespaces" + } + ) + }, + "/v2/metadefs/namespaces?limit=20&resource_types=" + "%s%%2C%s" % (RESOURCE_TYPE1, RESOURCE_TYPE2): { + "GET": ( + {}, + { + "first": "/v2/metadefs/namespaces?limit=20", + "namespaces": [ + _get_namespace_fixture(NAMESPACE4), + ], + "schema": "/v2/schemas/metadefs/namespaces" + } + ) + }, + "/v2/metadefs/namespaces?limit=20&visibility=private": { + "GET": ( + {}, + { + "first": "/v2/metadefs/namespaces?limit=20", + "namespaces": [ + _get_namespace_fixture(NAMESPACE5), + ], + "schema": "/v2/schemas/metadefs/namespaces" + } + ) + }, + "/v2/metadefs/namespaces": { + "POST": ( + {}, + { + "display_name": "Flavor Quota", + "description": "DESCRIPTION1", + "self": "/v2/metadefs/namespaces/%s" % 
'NamespaceNew', + "namespace": 'NamespaceNew', + "visibility": "public", + "protected": True, + "owner": "admin", + "schema": "/v2/schemas/metadefs/namespace", + "created_at": "2014-08-14T09:07:06Z", + "updated_at": "2014-08-14T09:07:06Z", + } + ) + }, + "/v2/metadefs/namespaces/%s" % NAMESPACE1: { + "GET": ( + {}, + { + "display_name": "Flavor Quota", + "description": "DESCRIPTION1", + "objects": [ + { + "description": "DESCRIPTION2", + "name": "OBJECT1", + "self": "/v2/metadefs/namespaces/%s/objects/" % + OBJECT1, + "required": [], + "properties": { + PROPERTY1: { + "type": "integer", + "description": "DESCRIPTION3", + "title": "Quota: CPU Shares" + }, + PROPERTY2: { + "minimum": 1000, + "type": "integer", + "description": "DESCRIPTION4", + "maximum": 1000000, + "title": "Quota: CPU Period" + }, + }, + "schema": "/v2/schemas/metadefs/object" + } + ], + "self": "/v2/metadefs/namespaces/%s" % NAMESPACE1, + "namespace": NAMESPACE1, + "visibility": "public", + "protected": True, + "owner": "admin", + "resource_types": [ + { + "name": RESOURCE_TYPE1 + } + ], + "schema": "/v2/schemas/metadefs/namespace", + "created_at": "2014-08-14T09:07:06Z", + "updated_at": "2014-08-14T09:07:06Z", + } + ), + "PUT": ( + {}, + { + "display_name": "Flavor Quota", + "description": "DESCRIPTION1", + "objects": [ + { + "description": "DESCRIPTION2", + "name": "OBJECT1", + "self": "/v2/metadefs/namespaces/%s/objects/" % + OBJECT1, + "required": [], + "properties": { + PROPERTY1: { + "type": "integer", + "description": "DESCRIPTION3", + "title": "Quota: CPU Shares" + }, + PROPERTY2: { + "minimum": 1000, + "type": "integer", + "description": "DESCRIPTION4", + "maximum": 1000000, + "title": "Quota: CPU Period" + }, + }, + "schema": "/v2/schemas/metadefs/object" + } + ], + "self": "/v2/metadefs/namespaces/%s" % NAMESPACENEW, + "namespace": NAMESPACENEW, + "visibility": "public", + "protected": True, + "owner": "admin", + "resource_types": [ + { + "name": RESOURCE_TYPE1 + } + ], + "schema": "/v2/schemas/metadefs/namespace", + "created_at": "2014-08-14T09:07:06Z", + "updated_at": "2014-08-14T09:07:06Z", + } + ), + "DELETE": ( + {}, + {} + ) + }, + "/v2/metadefs/namespaces/%s?resource_type=%s" % (NAMESPACE6, + RESOURCE_TYPE1): + { + "GET": ( + {}, + { + "display_name": "Flavor Quota", + "description": "DESCRIPTION1", + "objects": [], + "self": "/v2/metadefs/namespaces/%s" % NAMESPACE1, + "namespace": NAMESPACE6, + "visibility": "public", + "protected": True, + "owner": "admin", + "resource_types": [ + { + "name": RESOURCE_TYPE1 + } + ], + "schema": "/v2/schemas/metadefs/namespace", + "created_at": "2014-08-14T09:07:06Z", + "updated_at": "2014-08-14T09:07:06Z", + } + ), + }, +} + +schema_fixtures = { + "metadefs/namespace": + { + "GET": ( + {}, + { + "additionalProperties": False, + "definitions": { + "property": { + "additionalProperties": { + "required": [ + "title", + "type" + ], + "type": "object", + "properties": { + "additionalItems": { + "type": "boolean" + }, + "enum": { + "type": "array" + }, + "description": { + "type": "string" + }, + "title": { + "type": "string" + }, + "default": {}, + "minLength": { + "$ref": "#/definitions/" + "positiveIntegerDefault0" + }, + "required": { + "$ref": "#/definitions/stringArray" + }, + "maximum": { + "type": "number" + }, + "minItems": { + "$ref": "#/definitions/" + "positiveIntegerDefault0" + }, + "readonly": { + "type": "boolean" + }, + "minimum": { + "type": "number" + }, + "maxItems": { + "$ref": "#/definitions/" + "positiveInteger" + }, + "maxLength": { + "$ref": 
"#/definitions/positiveInteger" + }, + "uniqueItems": { + "default": False, + "type": "boolean" + }, + "pattern": { + "type": "string", + "format": "regex" + }, + "items": { + "type": "object", + "properties": { + "enum": { + "type": "array" + }, + "type": { + "enum": [ + "array", + "boolean", + "integer", + "number", + "object", + "string", + "null" + ], + "type": "string" + } + } + }, + "type": { + "enum": [ + "array", + "boolean", + "integer", + "number", + "object", + "string", + "null" + ], + "type": "string" + } + } + }, + "type": "object" + }, + "positiveIntegerDefault0": { + "allOf": [ + { + "$ref": "#/definitions/positiveInteger" + }, + { + "default": 0 + } + ] + }, + "stringArray": { + "uniqueItems": True, + "items": { + "type": "string" + }, + "type": "array" + }, + "positiveInteger": { + "minimum": 0, + "type": "integer" + } + }, + "required": [ + "namespace" + ], + "name": "namespace", + "properties": { + "description": { + "type": "string", + "description": "Provides a user friendly description " + "of the namespace.", + "maxLength": 500 + }, + "updated_at": { + "type": "string", + "description": "Date and time of the last namespace " + "modification (READ-ONLY)", + "format": "date-time" + }, + "visibility": { + "enum": [ + "public", + "private" + ], + "type": "string", + "description": "Scope of namespace accessibility." + }, + "self": { + "type": "string" + }, + "objects": { + "items": { + "type": "object", + "properties": { + "properties": { + "$ref": "#/definitions/property" + }, + "required": { + "$ref": "#/definitions/stringArray" + }, + "name": { + "type": "string" + }, + "description": { + "type": "string" + } + } + }, + "type": "array" + }, + "owner": { + "type": "string", + "description": "Owner of the namespace.", + "maxLength": 255 + }, + "resource_types": { + "items": { + "type": "object", + "properties": { + "prefix": { + "type": "string" + }, + "name": { + "type": "string" + }, + "metadata_type": { + "type": "string" + } + } + }, + "type": "array" + }, + "properties": { + "$ref": "#/definitions/property" + }, + "display_name": { + "type": "string", + "description": "The user friendly name for the " + "namespace. Used by UI if available.", + "maxLength": 80 + }, + "created_at": { + "type": "string", + "description": "Date and time of namespace creation " + "(READ-ONLY)", + "format": "date-time" + }, + "namespace": { + "type": "string", + "description": "The unique namespace text.", + "maxLength": 80 + }, + "protected": { + "type": "boolean", + "description": "If true, namespace will not be " + "deletable." 
+ }, + "schema": { + "type": "string" + } + } + } + ), + } +} + + +class TestNamespaceController(testtools.TestCase): + + def setUp(self): + super(TestNamespaceController, self).setUp() + self.api = utils.FakeAPI(data_fixtures) + self.schema_api = utils.FakeSchemaAPI(schema_fixtures) + self.controller = metadefs.NamespaceController(self.api, + self.schema_api) + + def test_list_namespaces(self): + namespaces = list(self.controller.list()) + + self.assertEqual(2, len(namespaces)) + self.assertEqual(NAMESPACE1, namespaces[0]['namespace']) + self.assertEqual(NAMESPACE2, namespaces[1]['namespace']) + + def test_list_namespaces_paginate(self): + namespaces = list(self.controller.list(page_size=1)) + + self.assertEqual(2, len(namespaces)) + self.assertEqual(NAMESPACE7, namespaces[0]['namespace']) + self.assertEqual(NAMESPACE8, namespaces[1]['namespace']) + + def test_list_with_limit_greater_than_page_size(self): + namespaces = list(self.controller.list(page_size=1, limit=2)) + self.assertEqual(2, len(namespaces)) + self.assertEqual(NAMESPACE7, namespaces[0]['namespace']) + self.assertEqual(NAMESPACE8, namespaces[1]['namespace']) + + def test_list_with_marker(self): + namespaces = list(self.controller.list(marker=NAMESPACE6, page_size=2)) + self.assertEqual(2, len(namespaces)) + self.assertEqual(NAMESPACE7, namespaces[0]['namespace']) + self.assertEqual(NAMESPACE8, namespaces[1]['namespace']) + + def test_list_with_sort_dir(self): + namespaces = list(self.controller.list(sort_dir='asc', limit=1)) + self.assertEqual(1, len(namespaces)) + self.assertEqual(NAMESPACE1, namespaces[0]['namespace']) + + def test_list_with_sort_dir_invalid(self): + # NOTE(TravT): The clients work by returning an iterator. + # Invoking the iterator is what actually executes the logic. + ns_iterator = self.controller.list(sort_dir='foo') + self.assertRaises(ValueError, next, ns_iterator) + + def test_list_with_sort_key(self): + namespaces = list(self.controller.list(sort_key='created_at', limit=1)) + self.assertEqual(1, len(namespaces)) + self.assertEqual(NAMESPACE1, namespaces[0]['namespace']) + + def test_list_with_sort_key_invalid(self): + # NOTE(TravT): The clients work by returning an iterator. + # Invoking the iterator is what actually executes the logic. 
+ ns_iterator = self.controller.list(sort_key='foo') + self.assertRaises(ValueError, next, ns_iterator) + + def test_list_namespaces_with_one_resource_type_filter(self): + namespaces = list(self.controller.list( + filters={ + 'resource_types': [RESOURCE_TYPE1] + } + )) + + self.assertEqual(1, len(namespaces)) + self.assertEqual(NAMESPACE3, namespaces[0]['namespace']) + + def test_list_namespaces_with_multiple_resource_types_filter(self): + namespaces = list(self.controller.list( + filters={ + 'resource_types': [RESOURCE_TYPE1, RESOURCE_TYPE2] + } + )) + + self.assertEqual(1, len(namespaces)) + self.assertEqual(NAMESPACE4, namespaces[0]['namespace']) + + def test_list_namespaces_with_visibility_filter(self): + namespaces = list(self.controller.list( + filters={ + 'visibility': 'private' + } + )) + + self.assertEqual(1, len(namespaces)) + self.assertEqual(NAMESPACE5, namespaces[0]['namespace']) + + def test_get_namespace(self): + namespace = self.controller.get(NAMESPACE1) + self.assertEqual(NAMESPACE1, namespace.namespace) + self.assertTrue(namespace.protected) + + def test_get_namespace_with_resource_type(self): + namespace = self.controller.get(NAMESPACE6, + resource_type=RESOURCE_TYPE1) + self.assertEqual(NAMESPACE6, namespace.namespace) + self.assertTrue(namespace.protected) + + def test_create_namespace(self): + properties = { + 'namespace': NAMESPACENEW + } + namespace = self.controller.create(**properties) + + self.assertEqual(NAMESPACENEW, namespace.namespace) + self.assertTrue(namespace.protected) + + def test_create_namespace_invalid_data(self): + properties = {} + + self.assertRaises(TypeError, self.controller.create, **properties) + + def test_create_namespace_invalid_property(self): + properties = {'namespace': 'NewNamespace', 'protected': '123'} + + self.assertRaises(TypeError, self.controller.create, **properties) + + def test_update_namespace(self): + properties = {'display_name': 'My Updated Name'} + namespace = self.controller.update(NAMESPACE1, **properties) + + self.assertEqual(NAMESPACE1, namespace.namespace) + + def test_update_namespace_invalid_property(self): + properties = {'protected': '123'} + + self.assertRaises(TypeError, self.controller.update, NAMESPACE1, + **properties) + + def test_delete_namespace(self): + self.controller.delete(NAMESPACE1) + expect = [ + ('DELETE', + '/v2/metadefs/namespaces/%s' % NAMESPACE1, + {}, + None)] + self.assertEqual(expect, self.api.calls) diff --git a/code/daisyclient/tests/v2/test_metadefs_objects.py b/code/daisyclient/tests/v2/test_metadefs_objects.py index 701d5621..7efb9655 100755 --- a/code/daisyclient/tests/v2/test_metadefs_objects.py +++ b/code/daisyclient/tests/v2/test_metadefs_objects.py @@ -1,323 +1,324 @@ -# Copyright 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import six -import testtools - -from glanceclient.v2 import metadefs -from tests import utils - -NAMESPACE1 = 'Namespace1' -OBJECT1 = 'Object1' -OBJECT2 = 'Object2' -OBJECTNEW = 'ObjectNew' -PROPERTY1 = 'Property1' -PROPERTY2 = 'Property2' -PROPERTY3 = 'Property3' -PROPERTY4 = 'Property4' - - -def _get_object_fixture(ns_name, obj_name, **kwargs): - obj = { - "description": "DESCRIPTION", - "name": obj_name, - "self": "/v2/metadefs/namespaces/%s/objects/%s" % - (ns_name, obj_name), - "required": [], - "properties": { - PROPERTY1: { - "type": "integer", - "description": "DESCRIPTION", - "title": "Quota: CPU Shares" - }, - PROPERTY2: { - "minimum": 1000, - "type": "integer", - "description": "DESCRIPTION", - "maximum": 1000000, - "title": "Quota: CPU Period" - }}, - "schema": "/v2/schemas/metadefs/object", - "created_at": "2014-08-14T09:07:06Z", - "updated_at": "2014-08-14T09:07:06Z", - } - - obj.update(kwargs) - - return obj - -data_fixtures = { - "/v2/metadefs/namespaces/%s/objects" % NAMESPACE1: { - "GET": ( - {}, - { - "objects": [ - _get_object_fixture(NAMESPACE1, OBJECT1), - _get_object_fixture(NAMESPACE1, OBJECT2) - ], - "schema": "v2/schemas/metadefs/objects" - } - ), - "POST": ( - {}, - _get_object_fixture(NAMESPACE1, OBJECTNEW) - ), - "DELETE": ( - {}, - {} - ) - }, - "/v2/metadefs/namespaces/%s/objects/%s" % (NAMESPACE1, OBJECT1): { - "GET": ( - {}, - _get_object_fixture(NAMESPACE1, OBJECT1) - ), - "PUT": ( - {}, - _get_object_fixture(NAMESPACE1, OBJECT1) - ), - "DELETE": ( - {}, - {} - ) - } -} - -schema_fixtures = { - "metadefs/object": { - "GET": ( - {}, - { - "additionalProperties": False, - "definitions": { - "property": { - "additionalProperties": { - "required": [ - "title", - "type" - ], - "type": "object", - "properties": { - "additionalItems": { - "type": "boolean" - }, - "enum": { - "type": "array" - }, - "description": { - "type": "string" - }, - "title": { - "type": "string" - }, - "default": {}, - "minLength": { - "$ref": "#/definitions/positiveInteger" - "Default0" - }, - "required": { - "$ref": "#/definitions/stringArray" - }, - "maximum": { - "type": "number" - }, - "minItems": { - "$ref": "#/definitions/positiveInteger" - "Default0" - }, - "readonly": { - "type": "boolean" - }, - "minimum": { - "type": "number" - }, - "maxItems": { - "$ref": "#/definitions/positiveInteger" - }, - "maxLength": { - "$ref": "#/definitions/positiveInteger" - }, - "uniqueItems": { - "default": False, - "type": "boolean" - }, - "pattern": { - "type": "string", - "format": "regex" - }, - "items": { - "type": "object", - "properties": { - "enum": { - "type": "array" - }, - "type": { - "enum": [ - "array", - "boolean", - "integer", - "number", - "object", - "string", - "null" - ], - "type": "string" - } - } - }, - "type": { - "enum": [ - "array", - "boolean", - "integer", - "number", - "object", - "string", - "null" - ], - "type": "string" - } - } - }, - "type": "object" - }, - "positiveIntegerDefault0": { - "allOf": [ - { - "$ref": "#/definitions/positiveInteger" - }, - { - "default": 0 - } - ] - }, - "stringArray": { - "uniqueItems": True, - "items": { - "type": "string" - }, - "type": "array" - }, - "positiveInteger": { - "minimum": 0, - "type": "integer" - } - }, - "required": [ - "name" - ], - "name": "object", - "properties": { - "created_at": { - "type": "string", - "description": "Date and time of object creation " - "(READ-ONLY)", - "format": "date-time" - }, - "description": { - "type": "string" - }, - "name": { - "type": "string" - }, - "self": { - "type": "string" - }, - 
"required": { - "$ref": "#/definitions/stringArray" - }, - "properties": { - "$ref": "#/definitions/property" - }, - "schema": { - "type": "string" - }, - "updated_at": { - "type": "string", - "description": "Date and time of the last object " - "modification (READ-ONLY)", - "format": "date-time" - }, - } - } - ) - } -} - - -class TestObjectController(testtools.TestCase): - def setUp(self): - super(TestObjectController, self).setUp() - self.api = utils.FakeAPI(data_fixtures) - self.schema_api = utils.FakeSchemaAPI(schema_fixtures) - self.controller = metadefs.ObjectController(self.api, self.schema_api) - - def test_list_object(self): - objects = list(self.controller.list(NAMESPACE1)) - - actual = [obj.name for obj in objects] - self.assertEqual([OBJECT1, OBJECT2], actual) - - def test_get_object(self): - obj = self.controller.get(NAMESPACE1, OBJECT1) - self.assertEqual(OBJECT1, obj.name) - self.assertEqual(sorted([PROPERTY1, PROPERTY2]), - sorted(list(six.iterkeys(obj.properties)))) - - def test_create_object(self): - properties = { - 'name': OBJECTNEW, - 'description': 'DESCRIPTION' - } - obj = self.controller.create(NAMESPACE1, **properties) - self.assertEqual(OBJECTNEW, obj.name) - - def test_create_object_invalid_property(self): - properties = { - 'namespace': NAMESPACE1 - } - self.assertRaises(TypeError, self.controller.create, **properties) - - def test_update_object(self): - properties = { - 'description': 'UPDATED_DESCRIPTION' - } - obj = self.controller.update(NAMESPACE1, OBJECT1, **properties) - self.assertEqual(OBJECT1, obj.name) - - def test_update_object_invalid_property(self): - properties = { - 'required': 'INVALID' - } - self.assertRaises(TypeError, self.controller.update, NAMESPACE1, - OBJECT1, **properties) - - def test_delete_object(self): - self.controller.delete(NAMESPACE1, OBJECT1) - expect = [ - ('DELETE', - '/v2/metadefs/namespaces/%s/objects/%s' % (NAMESPACE1, OBJECT1), - {}, - None)] - self.assertEqual(expect, self.api.calls) - - def test_delete_all_objects(self): - self.controller.delete_all(NAMESPACE1) - expect = [ - ('DELETE', - '/v2/metadefs/namespaces/%s/objects' % NAMESPACE1, - {}, - None)] - self.assertEqual(expect, self.api.calls) +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import six +import testtools + +from glanceclient.v2 import metadefs +from tests import utils + +NAMESPACE1 = 'Namespace1' +OBJECT1 = 'Object1' +OBJECT2 = 'Object2' +OBJECTNEW = 'ObjectNew' +PROPERTY1 = 'Property1' +PROPERTY2 = 'Property2' +PROPERTY3 = 'Property3' +PROPERTY4 = 'Property4' + + +def _get_object_fixture(ns_name, obj_name, **kwargs): + obj = { + "description": "DESCRIPTION", + "name": obj_name, + "self": "/v2/metadefs/namespaces/%s/objects/%s" % + (ns_name, obj_name), + "required": [], + "properties": { + PROPERTY1: { + "type": "integer", + "description": "DESCRIPTION", + "title": "Quota: CPU Shares" + }, + PROPERTY2: { + "minimum": 1000, + "type": "integer", + "description": "DESCRIPTION", + "maximum": 1000000, + "title": "Quota: CPU Period" + }}, + "schema": "/v2/schemas/metadefs/object", + "created_at": "2014-08-14T09:07:06Z", + "updated_at": "2014-08-14T09:07:06Z", + } + + obj.update(kwargs) + + return obj + +data_fixtures = { + "/v2/metadefs/namespaces/%s/objects" % NAMESPACE1: { + "GET": ( + {}, + { + "objects": [ + _get_object_fixture(NAMESPACE1, OBJECT1), + _get_object_fixture(NAMESPACE1, OBJECT2) + ], + "schema": "v2/schemas/metadefs/objects" + } + ), + "POST": ( + {}, + _get_object_fixture(NAMESPACE1, OBJECTNEW) + ), + "DELETE": ( + {}, + {} + ) + }, + "/v2/metadefs/namespaces/%s/objects/%s" % (NAMESPACE1, OBJECT1): { + "GET": ( + {}, + _get_object_fixture(NAMESPACE1, OBJECT1) + ), + "PUT": ( + {}, + _get_object_fixture(NAMESPACE1, OBJECT1) + ), + "DELETE": ( + {}, + {} + ) + } +} + +schema_fixtures = { + "metadefs/object": { + "GET": ( + {}, + { + "additionalProperties": False, + "definitions": { + "property": { + "additionalProperties": { + "required": [ + "title", + "type" + ], + "type": "object", + "properties": { + "additionalItems": { + "type": "boolean" + }, + "enum": { + "type": "array" + }, + "description": { + "type": "string" + }, + "title": { + "type": "string" + }, + "default": {}, + "minLength": { + "$ref": "#/definitions/positiveInteger" + "Default0" + }, + "required": { + "$ref": "#/definitions/stringArray" + }, + "maximum": { + "type": "number" + }, + "minItems": { + "$ref": "#/definitions/positiveInteger" + "Default0" + }, + "readonly": { + "type": "boolean" + }, + "minimum": { + "type": "number" + }, + "maxItems": { + "$ref": "#/definitions/positiveInteger" + }, + "maxLength": { + "$ref": "#/definitions/positiveInteger" + }, + "uniqueItems": { + "default": False, + "type": "boolean" + }, + "pattern": { + "type": "string", + "format": "regex" + }, + "items": { + "type": "object", + "properties": { + "enum": { + "type": "array" + }, + "type": { + "enum": [ + "array", + "boolean", + "integer", + "number", + "object", + "string", + "null" + ], + "type": "string" + } + } + }, + "type": { + "enum": [ + "array", + "boolean", + "integer", + "number", + "object", + "string", + "null" + ], + "type": "string" + } + } + }, + "type": "object" + }, + "positiveIntegerDefault0": { + "allOf": [ + { + "$ref": "#/definitions/positiveInteger" + }, + { + "default": 0 + } + ] + }, + "stringArray": { + "uniqueItems": True, + "items": { + "type": "string" + }, + "type": "array" + }, + "positiveInteger": { + "minimum": 0, + "type": "integer" + } + }, + "required": [ + "name" + ], + "name": "object", + "properties": { + "created_at": { + "type": "string", + "description": "Date and time of object creation " + "(READ-ONLY)", + "format": "date-time" + }, + "description": { + "type": "string" + }, + "name": { + "type": "string" + }, + "self": { + "type": "string" + }, + 
"required": { + "$ref": "#/definitions/stringArray" + }, + "properties": { + "$ref": "#/definitions/property" + }, + "schema": { + "type": "string" + }, + "updated_at": { + "type": "string", + "description": "Date and time of the last object " + "modification (READ-ONLY)", + "format": "date-time" + }, + } + } + ) + } +} + + +class TestObjectController(testtools.TestCase): + + def setUp(self): + super(TestObjectController, self).setUp() + self.api = utils.FakeAPI(data_fixtures) + self.schema_api = utils.FakeSchemaAPI(schema_fixtures) + self.controller = metadefs.ObjectController(self.api, self.schema_api) + + def test_list_object(self): + objects = list(self.controller.list(NAMESPACE1)) + + actual = [obj.name for obj in objects] + self.assertEqual([OBJECT1, OBJECT2], actual) + + def test_get_object(self): + obj = self.controller.get(NAMESPACE1, OBJECT1) + self.assertEqual(OBJECT1, obj.name) + self.assertEqual(sorted([PROPERTY1, PROPERTY2]), + sorted(list(six.iterkeys(obj.properties)))) + + def test_create_object(self): + properties = { + 'name': OBJECTNEW, + 'description': 'DESCRIPTION' + } + obj = self.controller.create(NAMESPACE1, **properties) + self.assertEqual(OBJECTNEW, obj.name) + + def test_create_object_invalid_property(self): + properties = { + 'namespace': NAMESPACE1 + } + self.assertRaises(TypeError, self.controller.create, **properties) + + def test_update_object(self): + properties = { + 'description': 'UPDATED_DESCRIPTION' + } + obj = self.controller.update(NAMESPACE1, OBJECT1, **properties) + self.assertEqual(OBJECT1, obj.name) + + def test_update_object_invalid_property(self): + properties = { + 'required': 'INVALID' + } + self.assertRaises(TypeError, self.controller.update, NAMESPACE1, + OBJECT1, **properties) + + def test_delete_object(self): + self.controller.delete(NAMESPACE1, OBJECT1) + expect = [ + ('DELETE', + '/v2/metadefs/namespaces/%s/objects/%s' % (NAMESPACE1, OBJECT1), + {}, + None)] + self.assertEqual(expect, self.api.calls) + + def test_delete_all_objects(self): + self.controller.delete_all(NAMESPACE1) + expect = [ + ('DELETE', + '/v2/metadefs/namespaces/%s/objects' % NAMESPACE1, + {}, + None)] + self.assertEqual(expect, self.api.calls) diff --git a/code/daisyclient/tests/v2/test_metadefs_properties.py b/code/daisyclient/tests/v2/test_metadefs_properties.py index d2ac25dd..9a3ce650 100755 --- a/code/daisyclient/tests/v2/test_metadefs_properties.py +++ b/code/daisyclient/tests/v2/test_metadefs_properties.py @@ -1,300 +1,301 @@ -# Copyright 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import testtools - -from glanceclient.v2 import metadefs -from tests import utils - -NAMESPACE1 = 'Namespace1' -PROPERTY1 = 'Property1' -PROPERTY2 = 'Property2' -PROPERTYNEW = 'PropertyNew' - -data_fixtures = { - "/v2/metadefs/namespaces/%s/properties" % NAMESPACE1: { - "GET": ( - {}, - { - "properties": { - PROPERTY1: { - "default": "1", - "type": "integer", - "description": "Number of cores.", - "title": "cores" - }, - PROPERTY2: { - "items": { - "enum": [ - "Intel", - "AMD" - ], - "type": "string" - }, - "type": "array", - "description": "Specifies the CPU manufacturer.", - "title": "Vendor" - }, - } - } - ), - "POST": ( - {}, - { - "items": { - "enum": [ - "Intel", - "AMD" - ], - "type": "string" - }, - "type": "array", - "description": "UPDATED_DESCRIPTION", - "title": "Vendor", - "name": PROPERTYNEW - } - ), - "DELETE": ( - {}, - {} - ) - }, - "/v2/metadefs/namespaces/%s/properties/%s" % (NAMESPACE1, PROPERTY1): { - "GET": ( - {}, - { - "items": { - "enum": [ - "Intel", - "AMD" - ], - "type": "string" - }, - "type": "array", - "description": "Specifies the CPU manufacturer.", - "title": "Vendor" - } - ), - "PUT": ( - {}, - { - "items": { - "enum": [ - "Intel", - "AMD" - ], - "type": "string" - }, - "type": "array", - "description": "UPDATED_DESCRIPTION", - "title": "Vendor" - } - ), - "DELETE": ( - {}, - {} - ) - } -} - -schema_fixtures = { - "metadefs/property": { - "GET": ( - {}, - { - "additionalProperties": False, - "definitions": { - "positiveIntegerDefault0": { - "allOf": [ - { - "$ref": "#/definitions/positiveInteger" - }, - { - "default": 0 - } - ] - }, - "stringArray": { - "minItems": 1, - "items": { - "type": "string" - }, - "uniqueItems": True, - "type": "array" - }, - "positiveInteger": { - "minimum": 0, - "type": "integer" - } - }, - "required": [ - "name", - "title", - "type" - ], - "name": "property", - "properties": { - "description": { - "type": "string" - }, - "minLength": { - "$ref": "#/definitions/positiveIntegerDefault0" - }, - "enum": { - "type": "array" - }, - "minimum": { - "type": "number" - }, - "maxItems": { - "$ref": "#/definitions/positiveInteger" - }, - "maxLength": { - "$ref": "#/definitions/positiveInteger" - }, - "uniqueItems": { - "default": False, - "type": "boolean" - }, - "additionalItems": { - "type": "boolean" - }, - "name": { - "type": "string" - }, - "title": { - "type": "string" - }, - "default": {}, - "pattern": { - "type": "string", - "format": "regex" - }, - "required": { - "$ref": "#/definitions/stringArray" - }, - "maximum": { - "type": "number" - }, - "minItems": { - "$ref": "#/definitions/positiveIntegerDefault0" - }, - "readonly": { - "type": "boolean" - }, - "items": { - "type": "object", - "properties": { - "enum": { - "type": "array" - }, - "type": { - "enum": [ - "array", - "boolean", - "integer", - "number", - "object", - "string", - "null" - ], - "type": "string" - } - } - }, - "type": { - "enum": [ - "array", - "boolean", - "integer", - "number", - "object", - "string", - "null" - ], - "type": "string" - } - } - } - ) - } -} - - -class TestPropertyController(testtools.TestCase): - def setUp(self): - super(TestPropertyController, self).setUp() - self.api = utils.FakeAPI(data_fixtures) - self.schema_api = utils.FakeSchemaAPI(schema_fixtures) - self.controller = metadefs.PropertyController(self.api, - self.schema_api) - - def test_list_property(self): - properties = list(self.controller.list(NAMESPACE1)) - - actual = [prop.name for prop in properties] - self.assertEqual(sorted([PROPERTY1, PROPERTY2]), sorted(actual)) - - def 
test_get_property(self): - prop = self.controller.get(NAMESPACE1, PROPERTY1) - self.assertEqual(PROPERTY1, prop.name) - - def test_create_property(self): - properties = { - 'name': PROPERTYNEW, - 'title': 'TITLE', - 'type': 'string' - } - obj = self.controller.create(NAMESPACE1, **properties) - self.assertEqual(PROPERTYNEW, obj.name) - - def test_create_property_invalid_property(self): - properties = { - 'namespace': NAMESPACE1 - } - self.assertRaises(TypeError, self.controller.create, **properties) - - def test_update_property(self): - properties = { - 'description': 'UPDATED_DESCRIPTION' - } - prop = self.controller.update(NAMESPACE1, PROPERTY1, **properties) - self.assertEqual(PROPERTY1, prop.name) - - def test_update_property_invalid_property(self): - properties = { - 'type': 'INVALID' - } - self.assertRaises(TypeError, self.controller.update, NAMESPACE1, - PROPERTY1, **properties) - - def test_delete_property(self): - self.controller.delete(NAMESPACE1, PROPERTY1) - expect = [ - ('DELETE', - '/v2/metadefs/namespaces/%s/properties/%s' % (NAMESPACE1, - PROPERTY1), - {}, - None)] - self.assertEqual(expect, self.api.calls) - - def test_delete_all_properties(self): - self.controller.delete_all(NAMESPACE1) - expect = [ - ('DELETE', - '/v2/metadefs/namespaces/%s/properties' % NAMESPACE1, - {}, - None)] - self.assertEqual(expect, self.api.calls) +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import testtools + +from glanceclient.v2 import metadefs +from tests import utils + +NAMESPACE1 = 'Namespace1' +PROPERTY1 = 'Property1' +PROPERTY2 = 'Property2' +PROPERTYNEW = 'PropertyNew' + +data_fixtures = { + "/v2/metadefs/namespaces/%s/properties" % NAMESPACE1: { + "GET": ( + {}, + { + "properties": { + PROPERTY1: { + "default": "1", + "type": "integer", + "description": "Number of cores.", + "title": "cores" + }, + PROPERTY2: { + "items": { + "enum": [ + "Intel", + "AMD" + ], + "type": "string" + }, + "type": "array", + "description": "Specifies the CPU manufacturer.", + "title": "Vendor" + }, + } + } + ), + "POST": ( + {}, + { + "items": { + "enum": [ + "Intel", + "AMD" + ], + "type": "string" + }, + "type": "array", + "description": "UPDATED_DESCRIPTION", + "title": "Vendor", + "name": PROPERTYNEW + } + ), + "DELETE": ( + {}, + {} + ) + }, + "/v2/metadefs/namespaces/%s/properties/%s" % (NAMESPACE1, PROPERTY1): { + "GET": ( + {}, + { + "items": { + "enum": [ + "Intel", + "AMD" + ], + "type": "string" + }, + "type": "array", + "description": "Specifies the CPU manufacturer.", + "title": "Vendor" + } + ), + "PUT": ( + {}, + { + "items": { + "enum": [ + "Intel", + "AMD" + ], + "type": "string" + }, + "type": "array", + "description": "UPDATED_DESCRIPTION", + "title": "Vendor" + } + ), + "DELETE": ( + {}, + {} + ) + } +} + +schema_fixtures = { + "metadefs/property": { + "GET": ( + {}, + { + "additionalProperties": False, + "definitions": { + "positiveIntegerDefault0": { + "allOf": [ + { + "$ref": "#/definitions/positiveInteger" + }, + { + "default": 0 + } + ] + }, + "stringArray": { + "minItems": 1, + "items": { + "type": "string" + }, + "uniqueItems": True, + "type": "array" + }, + "positiveInteger": { + "minimum": 0, + "type": "integer" + } + }, + "required": [ + "name", + "title", + "type" + ], + "name": "property", + "properties": { + "description": { + "type": "string" + }, + "minLength": { + "$ref": "#/definitions/positiveIntegerDefault0" + }, + "enum": { + "type": "array" + }, + "minimum": { + "type": "number" + }, + "maxItems": { + "$ref": "#/definitions/positiveInteger" + }, + "maxLength": { + "$ref": "#/definitions/positiveInteger" + }, + "uniqueItems": { + "default": False, + "type": "boolean" + }, + "additionalItems": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "title": { + "type": "string" + }, + "default": {}, + "pattern": { + "type": "string", + "format": "regex" + }, + "required": { + "$ref": "#/definitions/stringArray" + }, + "maximum": { + "type": "number" + }, + "minItems": { + "$ref": "#/definitions/positiveIntegerDefault0" + }, + "readonly": { + "type": "boolean" + }, + "items": { + "type": "object", + "properties": { + "enum": { + "type": "array" + }, + "type": { + "enum": [ + "array", + "boolean", + "integer", + "number", + "object", + "string", + "null" + ], + "type": "string" + } + } + }, + "type": { + "enum": [ + "array", + "boolean", + "integer", + "number", + "object", + "string", + "null" + ], + "type": "string" + } + } + } + ) + } +} + + +class TestPropertyController(testtools.TestCase): + + def setUp(self): + super(TestPropertyController, self).setUp() + self.api = utils.FakeAPI(data_fixtures) + self.schema_api = utils.FakeSchemaAPI(schema_fixtures) + self.controller = metadefs.PropertyController(self.api, + self.schema_api) + + def test_list_property(self): + properties = list(self.controller.list(NAMESPACE1)) + + actual = [prop.name for prop in properties] + self.assertEqual(sorted([PROPERTY1, PROPERTY2]), sorted(actual)) + + def 
test_get_property(self): + prop = self.controller.get(NAMESPACE1, PROPERTY1) + self.assertEqual(PROPERTY1, prop.name) + + def test_create_property(self): + properties = { + 'name': PROPERTYNEW, + 'title': 'TITLE', + 'type': 'string' + } + obj = self.controller.create(NAMESPACE1, **properties) + self.assertEqual(PROPERTYNEW, obj.name) + + def test_create_property_invalid_property(self): + properties = { + 'namespace': NAMESPACE1 + } + self.assertRaises(TypeError, self.controller.create, **properties) + + def test_update_property(self): + properties = { + 'description': 'UPDATED_DESCRIPTION' + } + prop = self.controller.update(NAMESPACE1, PROPERTY1, **properties) + self.assertEqual(PROPERTY1, prop.name) + + def test_update_property_invalid_property(self): + properties = { + 'type': 'INVALID' + } + self.assertRaises(TypeError, self.controller.update, NAMESPACE1, + PROPERTY1, **properties) + + def test_delete_property(self): + self.controller.delete(NAMESPACE1, PROPERTY1) + expect = [ + ('DELETE', + '/v2/metadefs/namespaces/%s/properties/%s' % (NAMESPACE1, + PROPERTY1), + {}, + None)] + self.assertEqual(expect, self.api.calls) + + def test_delete_all_properties(self): + self.controller.delete_all(NAMESPACE1) + expect = [ + ('DELETE', + '/v2/metadefs/namespaces/%s/properties' % NAMESPACE1, + {}, + None)] + self.assertEqual(expect, self.api.calls) diff --git a/code/daisyclient/tests/v2/test_metadefs_resource_types.py b/code/daisyclient/tests/v2/test_metadefs_resource_types.py index bcb4993a..b10614d9 100755 --- a/code/daisyclient/tests/v2/test_metadefs_resource_types.py +++ b/code/daisyclient/tests/v2/test_metadefs_resource_types.py @@ -1,186 +1,187 @@ -# Copyright 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import testtools - -from glanceclient.v2 import metadefs -from tests import utils - -NAMESPACE1 = 'Namespace1' -RESOURCE_TYPE1 = 'ResourceType1' -RESOURCE_TYPE2 = 'ResourceType2' -RESOURCE_TYPE3 = 'ResourceType3' -RESOURCE_TYPE4 = 'ResourceType4' -RESOURCE_TYPENEW = 'ResourceTypeNew' - - -data_fixtures = { - "/v2/metadefs/namespaces/%s/resource_types" % NAMESPACE1: { - "GET": ( - {}, - { - "resource_type_associations": [ - { - "name": RESOURCE_TYPE3, - "created_at": "2014-08-14T09:07:06Z", - "updated_at": "2014-08-14T09:07:06Z", - }, - { - "name": RESOURCE_TYPE4, - "prefix": "PREFIX:", - "created_at": "2014-08-14T09:07:06Z", - "updated_at": "2014-08-14T09:07:06Z", - } - ] - } - ), - "POST": ( - {}, - { - "name": RESOURCE_TYPENEW, - "prefix": "PREFIX:", - "created_at": "2014-08-14T09:07:06Z", - "updated_at": "2014-08-14T09:07:06Z", - } - ), - }, - "/v2/metadefs/namespaces/%s/resource_types/%s" % (NAMESPACE1, - RESOURCE_TYPE1): - { - "DELETE": ( - {}, - {} - ), - }, - "/v2/metadefs/resource_types": { - "GET": ( - {}, - { - "resource_types": [ - { - "name": RESOURCE_TYPE1, - "created_at": "2014-08-14T09:07:06Z", - "updated_at": "2014-08-14T09:07:06Z", - }, - { - "name": RESOURCE_TYPE2, - "created_at": "2014-08-14T09:07:06Z", - "updated_at": "2014-08-14T09:07:06Z", - } - ] - } - ) - } -} - -schema_fixtures = { - "metadefs/resource_type": { - "GET": ( - {}, - { - "name": "resource_type", - "properties": { - "prefix": { - "type": "string", - "description": "Specifies the prefix to use for the " - "given resource type. Any properties " - "in the namespace should be prefixed " - "with this prefix when being applied " - "to the specified resource type. Must " - "include prefix separator (e.g. a " - "colon :).", - "maxLength": 80 - }, - "properties_target": { - "type": "string", - "description": "Some resource types allow more than " - "one key / value pair per instance. " - "For example, Cinder allows user and " - "image metadata on volumes. Only the " - "image properties metadata is " - "evaluated by Nova (scheduling or " - "drivers). This property allows a " - "namespace target to remove the " - "ambiguity.", - "maxLength": 80 - }, - "name": { - "type": "string", - "description": "Resource type names should be " - "aligned with Heat resource types " - "whenever possible: http://docs." 
- "openstack.org/developer/heat/" - "template_guide/openstack.html", - "maxLength": 80 - }, - "created_at": { - "type": "string", - "description": "Date and time of resource type " - "association (READ-ONLY)", - "format": "date-time" - }, - "updated_at": { - "type": "string", - "description": "Date and time of the last resource " - "type association modification " - "(READ-ONLY)", - "format": "date-time" - }, - } - } - ) - } -} - - -class TestResoureTypeController(testtools.TestCase): - def setUp(self): - super(TestResoureTypeController, self).setUp() - self.api = utils.FakeAPI(data_fixtures) - self.schema_api = utils.FakeSchemaAPI(schema_fixtures) - self.controller = metadefs.ResourceTypeController(self.api, - self.schema_api) - - def test_list_resource_types(self): - resource_types = list(self.controller.list()) - names = [rt.name for rt in resource_types] - self.assertEqual([RESOURCE_TYPE1, RESOURCE_TYPE2], names) - - def test_get_resource_types(self): - resource_types = list(self.controller.get(NAMESPACE1)) - names = [rt.name for rt in resource_types] - self.assertEqual([RESOURCE_TYPE3, RESOURCE_TYPE4], names) - - def test_associate_resource_types(self): - resource_types = self.controller.associate(NAMESPACE1, - name=RESOURCE_TYPENEW) - - self.assertEqual(RESOURCE_TYPENEW, resource_types['name']) - - def test_associate_resource_types_invalid_property(self): - longer = '1234' * 50 - properties = {'name': RESOURCE_TYPENEW, 'prefix': longer} - self.assertRaises(TypeError, self.controller.associate, NAMESPACE1, - **properties) - - def test_deassociate_resource_types(self): - self.controller.deassociate(NAMESPACE1, RESOURCE_TYPE1) - expect = [ - ('DELETE', - '/v2/metadefs/namespaces/%s/resource_types/%s' % (NAMESPACE1, - RESOURCE_TYPE1), - {}, - None)] - self.assertEqual(expect, self.api.calls) +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import testtools + +from glanceclient.v2 import metadefs +from tests import utils + +NAMESPACE1 = 'Namespace1' +RESOURCE_TYPE1 = 'ResourceType1' +RESOURCE_TYPE2 = 'ResourceType2' +RESOURCE_TYPE3 = 'ResourceType3' +RESOURCE_TYPE4 = 'ResourceType4' +RESOURCE_TYPENEW = 'ResourceTypeNew' + + +data_fixtures = { + "/v2/metadefs/namespaces/%s/resource_types" % NAMESPACE1: { + "GET": ( + {}, + { + "resource_type_associations": [ + { + "name": RESOURCE_TYPE3, + "created_at": "2014-08-14T09:07:06Z", + "updated_at": "2014-08-14T09:07:06Z", + }, + { + "name": RESOURCE_TYPE4, + "prefix": "PREFIX:", + "created_at": "2014-08-14T09:07:06Z", + "updated_at": "2014-08-14T09:07:06Z", + } + ] + } + ), + "POST": ( + {}, + { + "name": RESOURCE_TYPENEW, + "prefix": "PREFIX:", + "created_at": "2014-08-14T09:07:06Z", + "updated_at": "2014-08-14T09:07:06Z", + } + ), + }, + "/v2/metadefs/namespaces/%s/resource_types/%s" % (NAMESPACE1, + RESOURCE_TYPE1): + { + "DELETE": ( + {}, + {} + ), + }, + "/v2/metadefs/resource_types": { + "GET": ( + {}, + { + "resource_types": [ + { + "name": RESOURCE_TYPE1, + "created_at": "2014-08-14T09:07:06Z", + "updated_at": "2014-08-14T09:07:06Z", + }, + { + "name": RESOURCE_TYPE2, + "created_at": "2014-08-14T09:07:06Z", + "updated_at": "2014-08-14T09:07:06Z", + } + ] + } + ) + } +} + +schema_fixtures = { + "metadefs/resource_type": { + "GET": ( + {}, + { + "name": "resource_type", + "properties": { + "prefix": { + "type": "string", + "description": "Specifies the prefix to use for the " + "given resource type. Any properties " + "in the namespace should be prefixed " + "with this prefix when being applied " + "to the specified resource type. Must " + "include prefix separator (e.g. a " + "colon :).", + "maxLength": 80 + }, + "properties_target": { + "type": "string", + "description": "Some resource types allow more than " + "one key / value pair per instance. " + "For example, Cinder allows user and " + "image metadata on volumes. Only the " + "image properties metadata is " + "evaluated by Nova (scheduling or " + "drivers). This property allows a " + "namespace target to remove the " + "ambiguity.", + "maxLength": 80 + }, + "name": { + "type": "string", + "description": "Resource type names should be " + "aligned with Heat resource types " + "whenever possible: http://docs." 
+ "openstack.org/developer/heat/" + "template_guide/openstack.html", + "maxLength": 80 + }, + "created_at": { + "type": "string", + "description": "Date and time of resource type " + "association (READ-ONLY)", + "format": "date-time" + }, + "updated_at": { + "type": "string", + "description": "Date and time of the last resource " + "type association modification " + "(READ-ONLY)", + "format": "date-time" + }, + } + } + ) + } +} + + +class TestResoureTypeController(testtools.TestCase): + + def setUp(self): + super(TestResoureTypeController, self).setUp() + self.api = utils.FakeAPI(data_fixtures) + self.schema_api = utils.FakeSchemaAPI(schema_fixtures) + self.controller = metadefs.ResourceTypeController(self.api, + self.schema_api) + + def test_list_resource_types(self): + resource_types = list(self.controller.list()) + names = [rt.name for rt in resource_types] + self.assertEqual([RESOURCE_TYPE1, RESOURCE_TYPE2], names) + + def test_get_resource_types(self): + resource_types = list(self.controller.get(NAMESPACE1)) + names = [rt.name for rt in resource_types] + self.assertEqual([RESOURCE_TYPE3, RESOURCE_TYPE4], names) + + def test_associate_resource_types(self): + resource_types = self.controller.associate(NAMESPACE1, + name=RESOURCE_TYPENEW) + + self.assertEqual(RESOURCE_TYPENEW, resource_types['name']) + + def test_associate_resource_types_invalid_property(self): + longer = '1234' * 50 + properties = {'name': RESOURCE_TYPENEW, 'prefix': longer} + self.assertRaises(TypeError, self.controller.associate, NAMESPACE1, + **properties) + + def test_deassociate_resource_types(self): + self.controller.deassociate(NAMESPACE1, RESOURCE_TYPE1) + expect = [ + ('DELETE', + '/v2/metadefs/namespaces/%s/resource_types/%s' % (NAMESPACE1, + RESOURCE_TYPE1), + {}, + None)] + self.assertEqual(expect, self.api.calls) diff --git a/code/daisyclient/tests/v2/test_schemas.py b/code/daisyclient/tests/v2/test_schemas.py index 39625965..d274e93f 100755 --- a/code/daisyclient/tests/v2/test_schemas.py +++ b/code/daisyclient/tests/v2/test_schemas.py @@ -1,213 +1,217 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from jsonpatch import JsonPatch -import testtools -import warlock - -from glanceclient.v2 import schemas -from tests import utils - - -fixtures = { - '/v2/schemas': { - 'GET': ( - {}, - { - 'image': '/v2/schemas/image', - 'access': '/v2/schemas/image/access', - }, - ), - }, - '/v2/schemas/image': { - 'GET': ( - {}, - { - 'name': 'image', - 'properties': { - 'name': {'type': 'string', - 'description': 'Name of image'}, - 'tags': {'type': 'array'} - }, - - }, - ), - }, -} - - -_SCHEMA = schemas.Schema({ - 'name': 'image', - 'properties': { - 'name': {'type': 'string'}, - 'color': {'type': 'string'}, - 'shape': {'type': 'string', 'is_base': False}, - 'tags': {'type': 'array'} - }, -}) - - -def compare_json_patches(a, b): - """Return 0 if a and b describe the same JSON patch.""" - return JsonPatch.from_string(a) == JsonPatch.from_string(b) - - -class TestSchemaProperty(testtools.TestCase): - def test_property_minimum(self): - prop = schemas.SchemaProperty('size') - self.assertEqual('size', prop.name) - - def test_property_description(self): - prop = schemas.SchemaProperty('size', description='some quantity') - self.assertEqual('size', prop.name) - self.assertEqual('some quantity', prop.description) - - -class TestSchema(testtools.TestCase): - def test_schema_minimum(self): - raw_schema = {'name': 'Country', 'properties': {}} - schema = schemas.Schema(raw_schema) - self.assertEqual('Country', schema.name) - self.assertEqual([], schema.properties) - - def test_schema_with_property(self): - raw_schema = {'name': 'Country', 'properties': {'size': {}}} - schema = schemas.Schema(raw_schema) - self.assertEqual('Country', schema.name) - self.assertEqual(['size'], [p.name for p in schema.properties]) - - def test_raw(self): - raw_schema = {'name': 'Country', 'properties': {}} - schema = schemas.Schema(raw_schema) - self.assertEqual(raw_schema, schema.raw()) - - -class TestController(testtools.TestCase): - def setUp(self): - super(TestController, self).setUp() - self.api = utils.FakeAPI(fixtures) - self.controller = schemas.Controller(self.api) - - def test_get_schema(self): - schema = self.controller.get('image') - self.assertEqual('image', schema.name) - self.assertEqual(['name', 'tags'], - [p.name for p in schema.properties]) - - -class TestSchemaBasedModel(testtools.TestCase): - def setUp(self): - super(TestSchemaBasedModel, self).setUp() - self.model = warlock.model_factory(_SCHEMA.raw(), - schemas.SchemaBasedModel) - - def test_patch_should_replace_missing_core_properties(self): - obj = { - 'name': 'fred' - } - - original = self.model(obj) - original['color'] = 'red' - - patch = original.patch - expected = '[{"path": "/color", "value": "red", "op": "replace"}]' - self.assertTrue(compare_json_patches(patch, expected)) - - def test_patch_should_add_extra_properties(self): - obj = { - 'name': 'fred', - } - - original = self.model(obj) - original['weight'] = '10' - - patch = original.patch - expected = '[{"path": "/weight", "value": "10", "op": "add"}]' - self.assertTrue(compare_json_patches(patch, expected)) - - def test_patch_should_replace_extra_properties(self): - obj = { - 'name': 'fred', - 'weight': '10' - } - - original = self.model(obj) - original['weight'] = '22' - - patch = original.patch - expected = '[{"path": "/weight", "value": "22", "op": "replace"}]' - self.assertTrue(compare_json_patches(patch, expected)) - - def test_patch_should_remove_extra_properties(self): - obj = { - 'name': 'fred', - 'weight': '10' - } - - original = self.model(obj) - del original['weight'] - - patch = 
original.patch - expected = '[{"path": "/weight", "op": "remove"}]' - self.assertTrue(compare_json_patches(patch, expected)) - - def test_patch_should_remove_core_properties(self): - obj = { - 'name': 'fred', - 'color': 'red' - } - - original = self.model(obj) - del original['color'] - - patch = original.patch - expected = '[{"path": "/color", "op": "remove"}]' - self.assertTrue(compare_json_patches(patch, expected)) - - def test_patch_should_add_missing_custom_properties(self): - obj = { - 'name': 'fred' - } - - original = self.model(obj) - original['shape'] = 'circle' - - patch = original.patch - expected = '[{"path": "/shape", "value": "circle", "op": "add"}]' - self.assertTrue(compare_json_patches(patch, expected)) - - def test_patch_should_replace_custom_properties(self): - obj = { - 'name': 'fred', - 'shape': 'circle' - } - - original = self.model(obj) - original['shape'] = 'square' - - patch = original.patch - expected = '[{"path": "/shape", "value": "square", "op": "replace"}]' - self.assertTrue(compare_json_patches(patch, expected)) - - def test_patch_should_replace_tags(self): - obj = {'name': 'fred', } - - original = self.model(obj) - original['tags'] = ['tag1', 'tag2'] - - patch = original.patch - expected = '[{"path": "/tags", "value": ["tag1", "tag2"], ' \ - '"op": "replace"}]' - self.assertTrue(compare_json_patches(patch, expected)) +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from jsonpatch import JsonPatch +import testtools +import warlock + +from glanceclient.v2 import schemas +from tests import utils + + +fixtures = { + '/v2/schemas': { + 'GET': ( + {}, + { + 'image': '/v2/schemas/image', + 'access': '/v2/schemas/image/access', + }, + ), + }, + '/v2/schemas/image': { + 'GET': ( + {}, + { + 'name': 'image', + 'properties': { + 'name': {'type': 'string', + 'description': 'Name of image'}, + 'tags': {'type': 'array'} + }, + + }, + ), + }, +} + + +_SCHEMA = schemas.Schema({ + 'name': 'image', + 'properties': { + 'name': {'type': 'string'}, + 'color': {'type': 'string'}, + 'shape': {'type': 'string', 'is_base': False}, + 'tags': {'type': 'array'} + }, +}) + + +def compare_json_patches(a, b): + """Return 0 if a and b describe the same JSON patch.""" + return JsonPatch.from_string(a) == JsonPatch.from_string(b) + + +class TestSchemaProperty(testtools.TestCase): + + def test_property_minimum(self): + prop = schemas.SchemaProperty('size') + self.assertEqual('size', prop.name) + + def test_property_description(self): + prop = schemas.SchemaProperty('size', description='some quantity') + self.assertEqual('size', prop.name) + self.assertEqual('some quantity', prop.description) + + +class TestSchema(testtools.TestCase): + + def test_schema_minimum(self): + raw_schema = {'name': 'Country', 'properties': {}} + schema = schemas.Schema(raw_schema) + self.assertEqual('Country', schema.name) + self.assertEqual([], schema.properties) + + def test_schema_with_property(self): + raw_schema = {'name': 'Country', 'properties': {'size': {}}} + schema = schemas.Schema(raw_schema) + self.assertEqual('Country', schema.name) + self.assertEqual(['size'], [p.name for p in schema.properties]) + + def test_raw(self): + raw_schema = {'name': 'Country', 'properties': {}} + schema = schemas.Schema(raw_schema) + self.assertEqual(raw_schema, schema.raw()) + + +class TestController(testtools.TestCase): + + def setUp(self): + super(TestController, self).setUp() + self.api = utils.FakeAPI(fixtures) + self.controller = schemas.Controller(self.api) + + def test_get_schema(self): + schema = self.controller.get('image') + self.assertEqual('image', schema.name) + self.assertEqual(['name', 'tags'], + [p.name for p in schema.properties]) + + +class TestSchemaBasedModel(testtools.TestCase): + + def setUp(self): + super(TestSchemaBasedModel, self).setUp() + self.model = warlock.model_factory(_SCHEMA.raw(), + schemas.SchemaBasedModel) + + def test_patch_should_replace_missing_core_properties(self): + obj = { + 'name': 'fred' + } + + original = self.model(obj) + original['color'] = 'red' + + patch = original.patch + expected = '[{"path": "/color", "value": "red", "op": "replace"}]' + self.assertTrue(compare_json_patches(patch, expected)) + + def test_patch_should_add_extra_properties(self): + obj = { + 'name': 'fred', + } + + original = self.model(obj) + original['weight'] = '10' + + patch = original.patch + expected = '[{"path": "/weight", "value": "10", "op": "add"}]' + self.assertTrue(compare_json_patches(patch, expected)) + + def test_patch_should_replace_extra_properties(self): + obj = { + 'name': 'fred', + 'weight': '10' + } + + original = self.model(obj) + original['weight'] = '22' + + patch = original.patch + expected = '[{"path": "/weight", "value": "22", "op": "replace"}]' + self.assertTrue(compare_json_patches(patch, expected)) + + def test_patch_should_remove_extra_properties(self): + obj = { + 'name': 'fred', + 'weight': '10' + } + + original = self.model(obj) + del original['weight'] + + patch 
= original.patch + expected = '[{"path": "/weight", "op": "remove"}]' + self.assertTrue(compare_json_patches(patch, expected)) + + def test_patch_should_remove_core_properties(self): + obj = { + 'name': 'fred', + 'color': 'red' + } + + original = self.model(obj) + del original['color'] + + patch = original.patch + expected = '[{"path": "/color", "op": "remove"}]' + self.assertTrue(compare_json_patches(patch, expected)) + + def test_patch_should_add_missing_custom_properties(self): + obj = { + 'name': 'fred' + } + + original = self.model(obj) + original['shape'] = 'circle' + + patch = original.patch + expected = '[{"path": "/shape", "value": "circle", "op": "add"}]' + self.assertTrue(compare_json_patches(patch, expected)) + + def test_patch_should_replace_custom_properties(self): + obj = { + 'name': 'fred', + 'shape': 'circle' + } + + original = self.model(obj) + original['shape'] = 'square' + + patch = original.patch + expected = '[{"path": "/shape", "value": "square", "op": "replace"}]' + self.assertTrue(compare_json_patches(patch, expected)) + + def test_patch_should_replace_tags(self): + obj = {'name': 'fred', } + + original = self.model(obj) + original['tags'] = ['tag1', 'tag2'] + + patch = original.patch + expected = '[{"path": "/tags", "value": ["tag1", "tag2"], ' \ + '"op": "replace"}]' + self.assertTrue(compare_json_patches(patch, expected)) diff --git a/code/daisyclient/tests/v2/test_shell_v2.py b/code/daisyclient/tests/v2/test_shell_v2.py index fc8f2db5..798ac420 100755 --- a/code/daisyclient/tests/v2/test_shell_v2.py +++ b/code/daisyclient/tests/v2/test_shell_v2.py @@ -24,17 +24,19 @@ from glanceclient.v2 import shell as test_shell class ShellV2Test(testtools.TestCase): + def setUp(self): super(ShellV2Test, self).setUp() self._mock_utils() self.gc = self._mock_glance_client() def _make_args(self, args): - #NOTE(venkatesh): this conversion from a dict to an object + # NOTE(venkatesh): this conversion from a dict to an object # is required because the test_shell.do_xxx(gc, args) methods # expects the args to be attributes of an object. If passed as # dict directly, it throws an AttributeError. class Args(): + def __init__(self, entries): self.__dict__.update(entries) diff --git a/code/daisyclient/tests/v2/test_tags.py b/code/daisyclient/tests/v2/test_tags.py index 88ec2fec..69adf0d7 100755 --- a/code/daisyclient/tests/v2/test_tags.py +++ b/code/daisyclient/tests/v2/test_tags.py @@ -1,81 +1,82 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import testtools - -from glanceclient.v2 import image_tags -from tests import utils - - -IMAGE = '3a4560a1-e585-443e-9b39-553b46ec92d1' -TAG = 'tag01' - - -data_fixtures = { - '/v2/images/{image}/tags/{tag_value}'.format(image=IMAGE, tag_value=TAG): { - 'DELETE': ( - {}, - None, - ), - 'PUT': ( - {}, - { - 'image_id': IMAGE, - 'tag_value': TAG - } - ), - } -} - -schema_fixtures = { - 'tag': { - 'GET': ( - {}, - {'name': 'image', 'properties': {'image_id': {}, 'tags': {}}} - ) - } -} - - -class TestController(testtools.TestCase): - def setUp(self): - super(TestController, self).setUp() - self.api = utils.FakeAPI(data_fixtures) - self.schema_api = utils.FakeSchemaAPI(schema_fixtures) - self.controller = image_tags.Controller(self.api, self.schema_api) - - def test_update_image_tag(self): - image_id = IMAGE - tag_value = TAG - self.controller.update(image_id, tag_value) - expect = [ - ('PUT', - '/v2/images/{image}/tags/{tag_value}'.format(image=IMAGE, - tag_value=TAG), - {}, - None)] - self.assertEqual(expect, self.api.calls) - - def test_delete_image_tag(self): - image_id = IMAGE - tag_value = TAG - self.controller.delete(image_id, tag_value) - expect = [ - ('DELETE', - '/v2/images/{image}/tags/{tag_value}'.format(image=IMAGE, - tag_value=TAG), - {}, - None)] - self.assertEqual(expect, self.api.calls) +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import testtools + +from glanceclient.v2 import image_tags +from tests import utils + + +IMAGE = '3a4560a1-e585-443e-9b39-553b46ec92d1' +TAG = 'tag01' + + +data_fixtures = { + '/v2/images/{image}/tags/{tag_value}'.format(image=IMAGE, tag_value=TAG): { + 'DELETE': ( + {}, + None, + ), + 'PUT': ( + {}, + { + 'image_id': IMAGE, + 'tag_value': TAG + } + ), + } +} + +schema_fixtures = { + 'tag': { + 'GET': ( + {}, + {'name': 'image', 'properties': {'image_id': {}, 'tags': {}}} + ) + } +} + + +class TestController(testtools.TestCase): + + def setUp(self): + super(TestController, self).setUp() + self.api = utils.FakeAPI(data_fixtures) + self.schema_api = utils.FakeSchemaAPI(schema_fixtures) + self.controller = image_tags.Controller(self.api, self.schema_api) + + def test_update_image_tag(self): + image_id = IMAGE + tag_value = TAG + self.controller.update(image_id, tag_value) + expect = [ + ('PUT', + '/v2/images/{image}/tags/{tag_value}'.format(image=IMAGE, + tag_value=TAG), + {}, + None)] + self.assertEqual(expect, self.api.calls) + + def test_delete_image_tag(self): + image_id = IMAGE + tag_value = TAG + self.controller.delete(image_id, tag_value) + expect = [ + ('DELETE', + '/v2/images/{image}/tags/{tag_value}'.format(image=IMAGE, + tag_value=TAG), + {}, + None)] + self.assertEqual(expect, self.api.calls) diff --git a/code/daisyclient/tests/v2/test_tasks.py b/code/daisyclient/tests/v2/test_tasks.py index a5491a3b..5d48b2d1 100755 --- a/code/daisyclient/tests/v2/test_tasks.py +++ b/code/daisyclient/tests/v2/test_tasks.py @@ -194,6 +194,7 @@ schema_fixtures = { class TestController(testtools.TestCase): + def setUp(self): super(TestController, self).setUp() self.api = utils.FakeAPI(fixtures) @@ -201,7 +202,7 @@ class TestController(testtools.TestCase): self.controller = tasks.Controller(self.api, self.schema_api) def test_list_tasks(self): - #NOTE(flwang): cast to list since the controller returns a generator + # NOTE(flwang): cast to list since the controller returns a generator tasks = list(self.controller.list()) self.assertEqual(tasks[0].id, '3a4560a1-e585-443e-9b39-553b46ec92d1') self.assertEqual(tasks[0].type, 'import') @@ -211,7 +212,7 @@ class TestController(testtools.TestCase): self.assertEqual(tasks[1].status, 'processing') def test_list_tasks_paginated(self): - #NOTE(flwang): cast to list since the controller returns a generator + # NOTE(flwang): cast to list since the controller returns a generator tasks = list(self.controller.list(page_size=1)) self.assertEqual(tasks[0].id, '3a4560a1-e585-443e-9b39-553b46ec92d1') self.assertEqual(tasks[0].type, 'import') diff --git a/code/daisyclient/tox.ini b/code/daisyclient/tox.ini index f2877e3f..f8b80fc0 100755 --- a/code/daisyclient/tox.ini +++ b/code/daisyclient/tox.ini @@ -36,6 +36,6 @@ downloadcache = ~/cache/pip # H302 import only modules # H303 no wildcard import # H404 multi line docstring should start with a summary -ignore = F403,F812,F821,H233,H302,H303,H404 +ignore = F403,F812,F821,H233,H302,H303,H404,F841,F401,E731 show-source = True exclude = .venv,.tox,dist,doc,*egg,build diff --git a/code/horizon/doc/source/conf.py b/code/horizon/doc/source/conf.py old mode 100644 new mode 100755 index 3eb0e981..ecf7eccf --- a/code/horizon/doc/source/conf.py +++ b/code/horizon/doc/source/conf.py @@ -23,7 +23,7 @@ # serve to show the default. from __future__ import print_function - +import horizon.version import os import sys @@ -35,8 +35,6 @@ sys.path.insert(0, ROOT) # This is required for ReadTheDocs.org, but isn't a bad idea anyway. 
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'openstack_dashboard.settings') -import horizon.version - def write_autodoc_index(): diff --git a/code/horizon/horizon/exceptions.py b/code/horizon/horizon/exceptions.py old mode 100644 new mode 100755 index f6ec7650..92ea2fb5 --- a/code/horizon/horizon/exceptions.py +++ b/code/horizon/horizon/exceptions.py @@ -51,8 +51,9 @@ class HorizonReporterFilter(SafeExceptionReporterFilter): current_frame = tb_frame.f_back sensitive_variables = None while current_frame is not None: - if (current_frame.f_code.co_name == 'sensitive_variables_wrapper' - and 'sensitive_variables_wrapper' + if (current_frame.f_code.co_name == + 'sensitive_variables_wrapper' and + 'sensitive_variables_wrapper' in current_frame.f_locals): # The sensitive_variables decorator was used, so we take note # of the sensitive variables' names. diff --git a/code/horizon/horizon/locale/zh_CN/LC_MESSAGES/django.po b/code/horizon/horizon/locale/zh_CN/LC_MESSAGES/django.po index 7714528b..53100150 100644 --- a/code/horizon/horizon/locale/zh_CN/LC_MESSAGES/django.po +++ b/code/horizon/horizon/locale/zh_CN/LC_MESSAGES/django.po @@ -1,7 +1,7 @@ # SOME DESCRIPTIVE TITLE. # Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER # This file is distributed under the same license as the PACKAGE package. -# +# # Translators: # ChangBo Guo(gcb) , 2014 # LIU Yulong , 2014-2015 @@ -13,16 +13,14 @@ msgstr "" "Project-Id-Version: Horizon\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2015-04-06 21:07-0500\n" -"PO-Revision-Date: 2016-03-03 09:51+0800\n" +"PO-Revision-Date: 2015-04-01 22:52+0000\n" "Last-Translator: 棰滄捣宄 \n" -"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/horizon/" -"language/zh_CN/)\n" +"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/horizon/language/zh_CN/)\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Language: zh_CN\n" "Plural-Forms: nplurals=1; plural=0;\n" -"X-Generator: Poedit 1.6.7\n" #: base.py:475 msgid "Other" @@ -192,12 +190,9 @@ msgstr "閫鍑" #: templates/auth/_description.html:9 msgid "" "\n" -" If you are not sure which authentication method to use, contact your " -"administrator.\n" +" If you are not sure which authentication method to use, contact your administrator.\n" " " -msgstr "" -"\n" -"濡傛灉浣犱笉纭畾浣跨敤鍝璁よ瘉鏂瑰紡锛岃鑱旂郴绠$悊鍛樸" +msgstr "\n濡傛灉浣犱笉纭畾浣跨敤鍝璁よ瘉鏂瑰紡锛岃鑱旂郴绠$悊鍛樸" #: templates/auth/_login.html:5 msgid "Log In" @@ -210,7 +205,8 @@ msgstr "浣犳病鏈夎鎺堟潈璁块棶杩欎簺璧勬簮锛" #: templates/auth/_login.html:29 #, python-format msgid "" -"Login as different user or go back to home page" +"Login as different user or go back to home " +"page" msgstr "浣跨敤鍏朵粬鐢ㄦ埛鐧诲綍鎴栬繑鍥 棣栭〉" #: templates/auth/_login.html:45 @@ -354,18 +350,14 @@ msgstr "閫夋嫨涓娈垫椂闂存潵鏌ヨ鍏剁敤閲忥細" msgid "" "\n" " %(start)s" -msgstr "" -"\n" -" %(start)s" +msgstr "\n %(start)s" #: templates/horizon/common/_usage_summary.html:13 #, python-format msgid "" "\n" " %(end)s" -msgstr "" -"\n" -"%(end)s" +msgstr "\n%(end)s" #: templates/horizon/common/_usage_summary.html:17 msgid "The date should be in YYYY-mm-dd format." 
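The horizon/exceptions.py hunk above only re-wraps the long boolean condition in HorizonReporterFilter; the behaviour is unchanged. As a rough illustration of what that condition is doing (a minimal standalone sketch, not code from this patch; the helper name here is made up), the filter climbs the traceback's frame chain looking for the wrapper frame that Django's @sensitive_variables decorator leaves behind, so it knows which local variables must be scrubbed from error reports:

def _find_sensitive_wrapper(tb_frame):
    # Walk up the call stack from the failing frame, mirroring the check in
    # HorizonReporterFilter: Django's @sensitive_variables decorator runs the
    # view inside a function named 'sensitive_variables_wrapper', and that
    # name also shows up in the wrapper frame's local variables.
    current_frame = tb_frame.f_back
    while current_frame is not None:
        if (current_frame.f_code.co_name == 'sensitive_variables_wrapper' and
                'sensitive_variables_wrapper' in current_frame.f_locals):
            return current_frame
        current_frame = current_frame.f_back
    return None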
diff --git a/code/horizon/horizon/middleware.py b/code/horizon/horizon/middleware.py old mode 100644 new mode 100755 index 885489e5..538f0852 --- a/code/horizon/horizon/middleware.py +++ b/code/horizon/horizon/middleware.py @@ -63,9 +63,9 @@ class HorizonMiddleware(object): last_activity = request.session.get('last_activity', None) timestamp = int(time.time()) if ( - hasattr(request, "user") - and hasattr(request.user, "token") - and not auth_utils.is_token_valid(request.user.token) + hasattr(request, "user") and + hasattr(request.user, "token") and not + auth_utils.is_token_valid(request.user.token) ): # The user was logged in, but his keystone token expired. has_timed_out = True diff --git a/code/horizon/horizon/tables/base.py b/code/horizon/horizon/tables/base.py old mode 100644 new mode 100755 index 1e2a5739..2f130fe7 --- a/code/horizon/horizon/tables/base.py +++ b/code/horizon/horizon/tables/base.py @@ -966,8 +966,8 @@ class DataTableOptions(object): """ def __init__(self, options): self.name = getattr(options, 'name', self.__class__.__name__) - verbose_name = (getattr(options, 'verbose_name', None) - or self.name.title()) + verbose_name = (getattr(options, 'verbose_name', None) or + self.name.title()) self.verbose_name = verbose_name self.columns = getattr(options, 'columns', None) self.status_columns = getattr(options, 'status_columns', []) @@ -1203,12 +1203,12 @@ class DataTable(object): filter_string = self.get_filter_string() filter_field = self.get_filter_field() request_method = self.request.method - needs_preloading = (not filter_string - and request_method == 'GET' - and action.needs_preloading) + needs_preloading = (not filter_string and + request_method == 'GET' and + action.needs_preloading) valid_method = (request_method == action.method) - not_api_filter = (filter_string - and not action.is_api_filter(filter_field)) + not_api_filter = (filter_string and not + action.is_api_filter(filter_field)) if valid_method or needs_preloading or not_api_filter: if self._meta.mixed_data_type: diff --git a/code/horizon/horizon/tables/views.py b/code/horizon/horizon/tables/views.py old mode 100644 new mode 100755 index d8d800ce..a707f2c5 --- a/code/horizon/horizon/tables/views.py +++ b/code/horizon/horizon/tables/views.py @@ -229,8 +229,8 @@ class DataTableView(MultiTableView): param_name = filter_action.get_param_name() filter_string = request.POST.get(param_name) filter_string_session = request.session.get(param_name, "") - changed = (filter_string is not None - and filter_string != filter_string_session) + changed = (filter_string is not None and + filter_string != filter_string_session) if filter_string is None: filter_string = filter_string_session filter_field_param = param_name + '_field' diff --git a/code/horizon/horizon/test/helpers.py b/code/horizon/horizon/test/helpers.py old mode 100644 new mode 100755 index ebe0100c..3d54dd17 --- a/code/horizon/horizon/test/helpers.py +++ b/code/horizon/horizon/test/helpers.py @@ -21,7 +21,9 @@ import logging import os import socket import time +import mox +from horizon import middleware from django.contrib.auth.middleware import AuthenticationMiddleware # noqa from django.contrib.auth.models import Permission # noqa from django.contrib.auth.models import User # noqa @@ -58,11 +60,6 @@ except ImportError as e: os.environ['WITH_SELENIUM'] = '' -import mox - -from horizon import middleware - - # Makes output of failing mox tests much easier to read. 
wsgi.WSGIRequest.__repr__ = lambda self: "" diff --git a/code/horizon/horizon/test/patches.py b/code/horizon/horizon/test/patches.py old mode 100644 new mode 100755 index f94714e5..f437d731 --- a/code/horizon/horizon/test/patches.py +++ b/code/horizon/horizon/test/patches.py @@ -55,8 +55,8 @@ def parse_starttag_patched(self, i): lineno, offset = self.getpos() if "\n" in self.__starttag_text: lineno = lineno + self.__starttag_text.count("\n") - offset = (len(self.__starttag_text) - - self.__starttag_text.rfind("\n")) + offset = \ + (len(self.__starttag_text) - self.__starttag_text.rfind("\n")) else: offset = offset + len(self.__starttag_text) self.error("junk characters in start tag: %r" diff --git a/code/horizon/horizon/utils/csvbase.py b/code/horizon/horizon/utils/csvbase.py old mode 100644 new mode 100755 index 35ea454b..88000ba8 --- a/code/horizon/horizon/utils/csvbase.py +++ b/code/horizon/horizon/utils/csvbase.py @@ -23,6 +23,8 @@ from django import VERSION # noqa import logging LOG = logging.getLogger(__name__) + + class CsvDataMixin(object): """CSV data Mixin - provides handling for CSV data. @@ -74,8 +76,8 @@ class BaseCsvResponse(CsvDataMixin, HttpResponse): self['Content-Disposition'] = 'attachment; filename="%s"' % ( kwargs.get("filename", "export.csv"),) self['Content-Type'] = content_type - LOG.debug('##############BaseCsvResponse##########################') - LOG.debug(content_type) + LOG.debug('##############BaseCsvResponse##########################') + LOG.debug(content_type) self.context = context self.header = None @@ -86,7 +88,7 @@ class BaseCsvResponse(CsvDataMixin, HttpResponse): self.header = header_template.render(context) if self.header: - self.out.write(codecs.BOM_UTF8) + self.out.write(codecs.BOM_UTF8) self.out.write(self.encode(self.header)) self.write_csv_header() diff --git a/code/horizon/openstack_dashboard/api/__init__.py b/code/horizon/openstack_dashboard/api/__init__.py old mode 100644 new mode 100755 index 54b29960..8f2e8560 --- a/code/horizon/openstack_dashboard/api/__init__.py +++ b/code/horizon/openstack_dashboard/api/__init__.py @@ -31,7 +31,6 @@ In other words, Horizon developers not working on openstack_dashboard.api shouldn't need to understand the finer details of APIs for Keystone/Nova/Glance/Swift et. al. 
""" -from openstack_dashboard.api import base from openstack_dashboard.api import keystone from openstack_dashboard.api import daisy diff --git a/code/horizon/openstack_dashboard/api/daisy.py b/code/horizon/openstack_dashboard/api/daisy.py index 5486596a..87780c81 100755 --- a/code/horizon/openstack_dashboard/api/daisy.py +++ b/code/horizon/openstack_dashboard/api/daisy.py @@ -20,7 +20,7 @@ class Host(base.APIResourceWrapper): 'os_version_id', 'os_version_file', 'ipmi_user', 'ipmi_passwd', 'ipmi_addr', 'os_version', 'role', 'cluster', 'mac', 'interfaces', 'os_progress', 'messages', 'role_status', - 'os_status', 'role_progress', 'role_messages'] + 'os_status', 'role_progress', 'role_messages', 'discover_state'] def __init__(self, apiresource, request): super(Host, self).__init__(apiresource) @@ -114,6 +114,10 @@ def network_list(request, cluster_id): return [n for n in networks] +def netplane_delete(request, network_id): + return daisyclient(request).networks.delete(network_id) + + def install_cluster(request, cluster_id): return daisyclient(request).install.install(cluster_id=cluster_id) @@ -130,6 +134,15 @@ def config_add(request, **kwargs): return daisyclient(request).configs.add(**kwargs) +def config_delete(request, **kwargs): + return daisyclient(request).configs.delete(**kwargs) + + +def config_list(request, **kwargs): + configs = daisyclient(request).configs.list(**kwargs) + return [c for c in configs] + + def service_disk_add(request, **kwargs): return daisyclient(request).disk_array.service_disk_add(**kwargs) @@ -216,3 +229,58 @@ def template_detail(request, template_id): def template_delete(request, template_id): return daisyclient(request).template.delete(template_id) + + +def node_update(request, **kwargs): + return daisyclient(request).node.update(**kwargs) + + +def hwm_list(request): + return daisyclient(request).hwm.list() + + +def backup_system(request, **kwargs): + return daisyclient(request).backup_restore.backup(**kwargs) + + +def restore_system(request, **kwargs): + return daisyclient(request).backup_restore.restore(**kwargs) + + +def get_backup_file_version(request, **kwargs): + return daisyclient(request).backup_restore.backup_file_version(**kwargs) + + +def get_daisy_internal_version(request): + return daisyclient(request).backup_restore.version(**{'type': 'internal'}) + + +def pxe_host_discover(request, **kwargs): + return daisyclient(request).node.pxe_host_discover(**kwargs) + + +def hwmip_delete(request, hwm_id): + return daisyclient(request).hwm.delete(hwm_id) + + +def hwmip_add(request, **kwargs): + return daisyclient(request).hwm.add(**kwargs) + + +def hwmip_update(request, hwm_id, **kwargs): + return daisyclient(request).hwm.update(hwm_id, **kwargs) + + +def hwmip_get(request, hwm_id): + return daisyclient(request).hwm.get(hwm_id) + + +def get_pxeserver(request): + qp = {'type': "system"} + networks = daisyclient(request).networks.list(filters=qp) + return [n for n in networks] + + +def set_pxeserver(request, network_id, interface, **kwargs): + daisyclient(request).networks.update(network_id, **kwargs) + daisyclient(request).install.install(deployment_interface=interface) diff --git a/code/horizon/openstack_dashboard/api/keystone.py b/code/horizon/openstack_dashboard/api/keystone.py index d6d7f22a..2f94540b 100644 --- a/code/horizon/openstack_dashboard/api/keystone.py +++ b/code/horizon/openstack_dashboard/api/keystone.py @@ -744,7 +744,6 @@ def keystone_backend_name(): else: return 'unknown' + def log_list(request): - domain_context = 
request.session.get('domain_context', None) - kwargs = {"domain": domain_context} - return keystoneclient(request,admin=True).recordlogs.list() \ No newline at end of file + return keystoneclient(request, admin=True).recordlogs.list() diff --git a/code/horizon/openstack_dashboard/dashboards/director_theme/__init__.py b/code/horizon/openstack_dashboard/dashboards/director_theme/__init__.py new file mode 100644 index 00000000..94456aba --- /dev/null +++ b/code/horizon/openstack_dashboard/dashboards/director_theme/__init__.py @@ -0,0 +1 @@ +# intentionally left blank diff --git a/code/horizon/openstack_dashboard/dashboards/director_theme/dashboard.py b/code/horizon/openstack_dashboard/dashboards/director_theme/dashboard.py new file mode 100644 index 00000000..d5a5ad23 --- /dev/null +++ b/code/horizon/openstack_dashboard/dashboards/director_theme/dashboard.py @@ -0,0 +1,30 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation +# Copyright 2012 Nebula, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from django.utils.translation import ugettext_lazy as _ + +import horizon + + +class Theme(horizon.Dashboard): + name = _("director_theme") + slug = "director_theme" + panels = ('theme_index', ) + default_panel = 'theme_index' + nav = False + +horizon.register(Theme) diff --git a/code/horizon/openstack_dashboard/dashboards/director_theme/models.py b/code/horizon/openstack_dashboard/dashboards/director_theme/models.py new file mode 100644 index 00000000..94456aba --- /dev/null +++ b/code/horizon/openstack_dashboard/dashboards/director_theme/models.py @@ -0,0 +1 @@ +# intentionally left blank diff --git a/code/horizon/openstack_dashboard/dashboards/director_theme/templates/_stylesheets.html b/code/horizon/openstack_dashboard/dashboards/director_theme/templates/_stylesheets.html new file mode 100644 index 00000000..cc84268a --- /dev/null +++ b/code/horizon/openstack_dashboard/dashboards/director_theme/templates/_stylesheets.html @@ -0,0 +1,563 @@ +{% comment %} +We want to have separate compressed css files for horizon.scss and dashboard.scss. +The reason for it is based on the fact that IE9 has a limit on the number of css rules +that can be parsed in a single css file. The limit is 4095 = (4k - 1). This causes some +css rules getting cut off if one css file to get more than 4k rules inside. 
+{% endcomment %} + +{% load compress %} + +{% compress css %} + +{% endcompress %} + +{% compress css %} + +{% endcompress %} + + + + diff --git a/backend/tecs/storage_auto_config/common/__init__.py b/code/horizon/openstack_dashboard/dashboards/director_theme/theme_index/__init__.py old mode 100755 new mode 100644 similarity index 100% rename from backend/tecs/storage_auto_config/common/__init__.py rename to code/horizon/openstack_dashboard/dashboards/director_theme/theme_index/__init__.py diff --git a/code/horizon/openstack_dashboard/dashboards/environment/network/subnets/tabs.py b/code/horizon/openstack_dashboard/dashboards/director_theme/theme_index/panel.py similarity index 60% rename from code/horizon/openstack_dashboard/dashboards/environment/network/subnets/tabs.py rename to code/horizon/openstack_dashboard/dashboards/director_theme/theme_index/panel.py index 1052e8c6..d9c3182e 100644 --- a/code/horizon/openstack_dashboard/dashboards/environment/network/subnets/tabs.py +++ b/code/horizon/openstack_dashboard/dashboards/director_theme/theme_index/panel.py @@ -1,4 +1,6 @@ -# Copyright 2012 NEC Corporation +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Centrin Data Systems Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -14,19 +16,14 @@ from django.utils.translation import ugettext_lazy as _ -from horizon import tabs +import horizon + +from openstack_dashboard.dashboards.director_theme import dashboard -class OverviewTab(tabs.Tab): - name = _("Overview") - slug = "overview" - template_name = "project/networks/subnets/_detail_overview.html" +class ThemePanel(horizon.Panel): + name = _("Panel providing a theme") + slug = 'theme_index' + nav = True - def get_context_data(self, request): - subnet = self.tab_group.kwargs['subnet'] - return {'subnet': subnet} - - -class SubnetDetailTabs(tabs.TabGroup): - slug = "subnet_details" - tabs = (OverviewTab,) +dashboard.Theme.register(ThemePanel) diff --git a/code/horizon/openstack_dashboard/dashboards/director_theme/theme_index/urls.py b/code/horizon/openstack_dashboard/dashboards/director_theme/theme_index/urls.py new file mode 100644 index 00000000..41a2be05 --- /dev/null +++ b/code/horizon/openstack_dashboard/dashboards/director_theme/theme_index/urls.py @@ -0,0 +1,2 @@ + +urlpatterns = () diff --git a/code/horizon/openstack_dashboard/dashboards/environment/network/__init__.py b/code/horizon/openstack_dashboard/dashboards/director_theme/theme_index/views.py similarity index 100% rename from code/horizon/openstack_dashboard/dashboards/environment/network/__init__.py rename to code/horizon/openstack_dashboard/dashboards/director_theme/theme_index/views.py diff --git a/code/horizon/openstack_dashboard/dashboards/environment/cluster/create_cluster.py b/code/horizon/openstack_dashboard/dashboards/environment/cluster/create_cluster.py index 7086d38c..a5c92757 100755 --- a/code/horizon/openstack_dashboard/dashboards/environment/cluster/create_cluster.py +++ b/code/horizon/openstack_dashboard/dashboards/environment/cluster/create_cluster.py @@ -3,6 +3,8 @@ # Daisy Tools Dashboard # import json +from django.utils.translation import ugettext_lazy as _ +from horizon import exceptions from django.http import HttpResponse @@ -10,265 +12,112 @@ from horizon import messages from horizon import views from openstack_dashboard import api +from openstack_dashboard.dashboards.environment.cluster import net_plane \ + as cluster_net_plane 
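# --- Illustrative aside, not part of the patch -----------------------------
# The reworked create_submit() further down in this file follows a
# create -> validate -> configure -> roll back shape: the cluster is created
# first, deploy_rule_lib checks the role/net-plane payload, per-role settings
# and net planes are applied through the cluster_role / cluster_net_plane
# helpers, and the cluster is deleted again if any step raises.  A minimal,
# self-contained sketch of that shape; the stand-in client below is an
# assumption and is not the real api.daisy wrapper:
class _SketchClient(object):
    def cluster_add(self, **kwargs):
        return {"id": "sketch-cluster-id"}

    def cluster_delete(self, cluster_id):
        pass

    def validate(self, cluster_id, role_info, netplane_info):
        pass

    def configure_roles(self, cluster_id, role_info):
        pass

    def add_net_planes(self, cluster_id, netplane_info):
        pass


def _create_cluster_sketch(client, data):
    cluster = client.cluster_add(**data["cluster_info"])
    try:
        client.validate(cluster["id"], data["role_info"],
                        data["netplane_info"])
        client.configure_roles(cluster["id"], data["role_info"])
        client.add_net_planes(cluster["id"], data["netplane_info"])
    except Exception:
        # mirror the rollback in create_submit(): drop the half-built cluster
        client.cluster_delete(cluster["id"])
        raise
    return cluster
# ---------------------------------------------------------------------------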
+from openstack_dashboard.dashboards.environment.cluster import role \ + as cluster_role +from openstack_dashboard.dashboards.environment.deploy import deploy_rule_lib import logging LOG = logging.getLogger(__name__) -def is_zenic_role(roles): - for role in roles: - if "ZENIC_" in role: - return True - return False - - class CreateView(views.HorizonTemplateView): template_name = "environment/cluster/create_cluster.html" + def get_roles_data(self): + roles_data = [] + try: + role_list = api.daisy.role_list(self.request) + roles = [role for role in role_list if role.type == "template"] + for role in roles: + roles_data.append({ + "id": role.id, + "name": role.name + }) + roles_data = cluster_role.sort_roles(roles_data) + except Exception: + exceptions.handle(self.request, + _('Unable to retrieve host list.')) + return roles_data + def get_context_data(self, **kwargs): context = super(CreateView, self).get_context_data(**kwargs) - networks = [{"name": "DEPLOYMENT", "network_type": "DEPLOYMENT"}, - {"name": "EXTERNAL", "network_type": "EXTERNAL"}, - {"name": "MANAGEMENT", "network_type": "MANAGEMENT"}, - {"name": "PRIVATE", "network_type": "PRIVATE"}, - {"name": "PUBLIC", "network_type": "PUBLIC"}, - {"name": "STORAGE", "network_type": "STORAGE"}, - {"name": "VXLAN", "network_type": "VXLAN"}, ] - - networks.sort(key=lambda x: x['name']) - - for net in networks: - net.update({"cidr": "192.168.1.1/24", - "vlan_start": "1", - "vlan_end": "4094", - "vlan_id": "1"}) - - context["network"] = {"networks": networks} - + context["network"] = { + "networks": cluster_net_plane.get_default_net_plane()} clusters = api.daisy.cluster_list(self.request) cluster_lists = [c for c in clusters] context['clusters'] = cluster_lists - - role_list = api.daisy.role_list(self.request) - roles = [role.name for role in role_list if role.type == "template"] - context["has_zenic"] = is_zenic_role(roles) - + context["roles"] = self.get_roles_data() + hwms = api.daisy.hwm_list(self.request) + hwmip_list = [hwm.hwm_ip for hwm in hwms] + context["hwmip_list"] = hwmip_list return context def create_submit(request): data = json.loads(request.body) - msg = ('Create cluster request.body::::::: %s') % request.body + msg = ('Create cluster request.body::::::: %s') % data LOG.info(msg) cluster_new = [] status_code = 200 cluster = data["cluster_info"] + try: cluster_created = api.daisy.cluster_add( request, name=cluster["cluster_name"], description=cluster["description"], networking_parameters=cluster["networking_parameters"], - use_dns=cluster["use_dns"]) + use_dns=cluster["use_dns"], + hwm_ip=cluster["hwm_ip"]) cluster_new.append({ "id": cluster_created.id }) + # check param valid + deploy_rule_lib.net_plane_4_role_rule(request, + cluster_created.id, + data["role_info"], + data["netplane_info"]) + role_list = api.daisy.role_list(request) roles = [role for role in role_list if role.cluster_id == cluster_created.id] for role in roles: if role.name == "CONTROLLER_HA": - if not set_ha_role(request, role.id, data["role_info"]["ha"]): - status_code = 500 - + cluster_role.\ + set_ha_role_for_new_cluster(request, + role.id, + data["role_info"]["ha"]) if role.name == "CONTROLLER_LB": - if not set_lb_role(request, role.id, data["role_info"]["lb"]): - status_code = 500 - + cluster_role.set_role_info(request, + role.id, + data["role_info"]["lb"]) + if role.name == "COMPUTER": + cluster_role.\ + set_computer_role_info(request, + role.id, + data["role_info"]["computer"]) if role.name == "ZENIC_NFM": - if not set_zenic_nfm_role(request, role.id, 
data["role_info"]["zenic_nfm"]): - status_code = 500 - + cluster_role.set_role_info(request, + role.id, + data["role_info"]["zenic_nfm"]) if role.name == "ZENIC_CTL": - if not set_zenic_ctl_role(request, role.id, data["role_info"]["zenic_ctl"]): - status_code = 500 - - if not set_netplane(request, - cluster_created.id, - data["netplane_info"]): - status_code = 500 - messages.error(request, 'Set Netplane Information Failed!') - - if status_code == 500: - api.daisy.cluster_delete(request, cluster_created.id) - + cluster_role.set_role_info(request, + role.id, + data["role_info"]["zenic_ctl"]) + cluster_net_plane.add_net_plane_for_add_cluster(request, + cluster_created.id, + data["netplane_info"]) except Exception as e: + if len(cluster_new) > 0: + api.daisy.cluster_delete(request, cluster_created.id) status_code = 500 + LOG.error('Create Cluster Failed: %s' % e) messages.error(request, 'Create Cluster Failed: %s' % e) return HttpResponse(json.dumps(cluster_new), content_type="application/json", status=status_code) - - -def set_ha_role(request, role_id, data): - try: - role_param = {} - role_param["vip"] = data["vip"] - role_param["glance_vip"] = data["glance_vip"] - role_param["db_vip"] = data["db_vip"] - role_param["public_vip"] = data["public_vip"] - role_param["ntp_server"] = data["ntp_ip"] - role_param["glance_lv_size"] = data["glance_lv_size"] - role_param["db_lv_size"] = data["db_lv_size"] - api.daisy.role_update(request, role_id, **role_param) - - # glance - glance_param = { - 'service': 'glance', - 'role_id': role_id, - 'data_ips': data["glance_data_ips"], - 'lun': data["glance_lun"], - 'disk_location': data["glance_disk_location"] - } - api.daisy.service_disk_add(request, **glance_param) - - # db - db_param = { - 'service': 'db', - 'role_id': role_id, - 'data_ips': data["db_data_ips"], - 'lun': data["db_lun"], - 'disk_location': data["db_disk_location"] - } - api.daisy.service_disk_add(request, **db_param) - - # mongodb - LOG.info("WMH DBG: mongodb settings") - mongodb_param = { - 'service': 'mongodb', - 'role_id': role_id, - 'data_ips': data["mongodb_data_ips"], - 'lun': data["mongodb_lun"], - 'disk_location': data["mongodb_disk_location"], - 'size': data["mongodb_lv_size"] - } - api.daisy.service_disk_add(request, **mongodb_param) - - # Add new cinder volumes - if len(data["cinder_volume_array"]) > 0: - cinder_param = \ - {'role_id': role_id, - 'disk_array': data["cinder_volume_array"]} - api.daisy.cinder_volume_add(request, **cinder_param) - - except Exception as e: - LOG.info("update ha role info failed! %s" % e) - messages.error(request, 'Set HA Role Information Failed!') - return False - - return True - - -def set_lb_role(request, role_id, data): - try: - role_param = { - "vip": data["vip"] - } - - api.daisy.role_update(request, role_id, **role_param) - - except Exception as e: - LOG.info("update lb role info failed! %s" % e) - messages.error(request, 'Set LB Role Information Failed!') - return False - - return True - - -def set_zenic_nfm_role(request, role_id, data): - try: - role_param = { - "vip": data["vip"], - "mongodb_vip": data["mongodb_vip"] - } - api.daisy.role_update(request, role_id, **role_param) - - except Exception as e: - LOG.info("update zenic nfm role info failed! 
%s" % e) - messages.error(request, 'Set ZENIC NFM Role Information Failed!') - return False - - return True - - -def set_zenic_ctl_role(request, role_id, data): - try: - role_param = { - "vip": data["vip"] - } - - api.daisy.role_update(request, role_id, **role_param) - - except Exception as e: - LOG.info("update zenic role info failed! %s" % e) - messages.error(request, 'Set ZENIC Role Information Failed!') - return False - - return True - -def set_netplane(request, cluster_id, nets): - netplane_param = \ - {"PUBLIC": ["cidr", "gateway", "ip_ranges", "vlan_id", "description"], - "DEPLOYMENT": ["cidr", "gateway", "ip_ranges", - "vlan_id", "description"], - "MANAGEMENT": ["cidr", "gateway", "ip_ranges", - "vlan_id", "description"], - "PRIVATE": ["vlan_start", "vlan_end", "description"], - "STORAGE": ["cidr", "gateway", "ip_ranges", - "vlan_id", "description"], - "EXTERNAL": ["cidr", "gateway", "ip_ranges", "vlan_start", - "vlan_end", "description"], - "VXLAN": ["cidr", "gateway", "ip_ranges", "description"], } - - def get_id_by_name(nets, name): - for n in nets: - if n.name == name: - return n.id - return None - - try: - networks = api.daisy.network_list(request, cluster_id) - for net in nets: - net_id = get_id_by_name(networks, net["name"]) - if net_id: - net_params = { - "name": net["name"], - "cluster_id": cluster_id} - network_type = net["network_type"] - for param in netplane_param[network_type]: - net_params[param] = net[param] - - api.daisy.network_update(request, net_id, **net_params) - - else: - net_plane = { - "name": net["name"], - "network_type": net["network_type"], - "description": net["description"], - "cluster_id": cluster_id} - - for param in netplane_param[net["network_type"]]: - net_plane[param] = net[param] - - if net_plane[param] == "": - net_plane[param] = None - - api.daisy.net_plane_add(request, **net_plane) - except Exception as e: - LOG.error('wmh dbg: e=%s' % e) - return False - - return True diff --git a/code/horizon/openstack_dashboard/dashboards/environment/cluster/modify_cluster.py b/code/horizon/openstack_dashboard/dashboards/environment/cluster/modify_cluster.py index 28c9fbb8..132e757f 100755 --- a/code/horizon/openstack_dashboard/dashboards/environment/cluster/modify_cluster.py +++ b/code/horizon/openstack_dashboard/dashboards/environment/cluster/modify_cluster.py @@ -5,7 +5,6 @@ from django.http import HttpResponse from django.views.decorators.csrf import csrf_exempt -from django.utils.translation import ugettext_lazy as _ import json @@ -14,57 +13,36 @@ from horizon import views from horizon import exceptions from openstack_dashboard import api +from openstack_dashboard.dashboards.environment.cluster import net_plane \ + as cluster_net_plane +from openstack_dashboard.dashboards.environment.cluster import role \ + as cluster_role +from openstack_dashboard.dashboards.environment.deploy import deploy_rule_lib import logging LOG = logging.getLogger(__name__) -def is_zenic_role(roles): - for role in roles: - if "ZENIC_" in role["name"]: - return True - return False - - class ModifyView(views.HorizonTemplateView): template_name = "environment/cluster/modify_cluster.html" def get_clusters(self): clusters = api.daisy.cluster_list(self.request) cluster_lists = [c for c in clusters] - return cluster_lists - def get_roles_data(self): - roles_data = [] - try: - role_list = api.daisy.role_list(self.request) - roles = [role for role in role_list - if role.cluster_id == self.kwargs["cluster_id"]] - for role in roles: - roles_data.append({ - "id": role.id, - "name": 
role.name - }) - roles_data.sort(key=lambda x: x['name']) - except Exception: - exceptions.handle(self.request, - _('Unable to retrieve host list.')) - - return roles_data - def get_context_data(self, **kwargs): context = super(ModifyView, self).get_context_data(**kwargs) - - networks = \ - api.daisy.network_list(self.request, self.kwargs["cluster_id"]) - networks_list = [net.__dict__ for net in networks] - networks_list.sort(key=lambda x: x['name']) - context["network"] = {"networks": networks_list} + net_planes = cluster_net_plane.\ + get_net_plane_list(self.request, self.kwargs["cluster_id"]) + context["network"] = {"networks": net_planes} context['cluster_id'] = self.kwargs['cluster_id'] context['clusters'] = self.get_clusters() - context["roles"] = self.get_roles_data() - context["has_zenic"] = is_zenic_role(context["roles"]) + context["roles"] = cluster_role.\ + get_role_list(self.request, self.kwargs["cluster_id"]) + hwms = api.daisy.hwm_list(self.request) + hwmip_list = [hwm.hwm_ip for hwm in hwms] + context["hwmip_list"] = hwmip_list return context def get_success_url(self): @@ -77,21 +55,21 @@ def GetCluster(request): filter = data["cluster_info"] cluster_info = api.daisy.cluster_get(request, filter["cluster_id"]) + hwms = api.daisy.hwm_list(request) + hwmip_list = [hwm.hwm_ip for hwm in hwms] ret_cluster_list = [] ret_cluster_list.append({ "id": cluster_info.id, "name": cluster_info.name, "base_mac": cluster_info.networking_parameters["base_mac"], - "segmentation_type": - cluster_info.networking_parameters["segmentation_type"], "gre_id_start": cluster_info.networking_parameters["gre_id_range"][0], "gre_id_end": cluster_info.networking_parameters["gre_id_range"][1], - "vni_start": cluster_info.networking_parameters["vni_range"][0], - "vni_end": cluster_info.networking_parameters["vni_range"][1], "auto_scale": cluster_info.auto_scale, "use_dns": cluster_info.use_dns, - "description": cluster_info.description}) + "hwm_ip": cluster_info.hwm_ip, + "description": cluster_info.description, + "hwmip_list": hwmip_list}) return HttpResponse(json.dumps(ret_cluster_list), content_type="application/json") @@ -114,283 +92,81 @@ def GetClusters(request): @csrf_exempt -def get_ha_role_info(request): - request_data = json.loads(request.body) - role_id = request_data["role_id"] - try: - role = api.daisy.role_get(request, role_id) - role_info = { - "role_id": role_id, - "name": role.name, - "vip": role.vip, - "glance_vip": role.glance_vip, - "db_vip": role.db_vip, - "public_vip": role.public_vip, - "ntp_ip": role.ntp_server, - "glance_lv_size": role.glance_lv_size, - "db_lv_size": role.db_lv_size, - 'service_disk_array': [], - 'cinder_volume_array': []} - except Exception: - role_info = { - "role_id": role_id, - "vip": None, - "glance_vip": None, - "db_vip": None, - "public_vip": None, - "ntp_ip": None, - "glance_lv_size": None, - "db_lv_size": None, - 'service_disk_array': [], - 'cinder_volume_array': []} - try: - service_disks = \ - api.daisy.service_disk_list(request, **{'role_id': role_id}) - - columns = ['ID', 'SERVICE', 'ROLE_ID', 'DISK_LOCATION', 'DATA_IPS', - 'SIZE', 'LUN'] - service_disk_array = [] - for disk in service_disks: - row = [] - for item in columns: - field_name = item.lower().replace(' ', '_') - data = getattr(disk, field_name, None) - row.append(data) - service_disk_array.append(row) - dict_names = ['id', 'service', 'role_id', 'disk_location', - 'data_ips', 'size', 'lun'] - for item in service_disk_array: - if item[columns.index('ROLE_ID')] != role_id: - continue - 
role_info['service_disk_array'].append(dict(zip(dict_names, item))) - except Exception: - role_info['service_disk_array'] = [] - - try: - cinder_volumes = \ - api.daisy.cinder_volume_list(request, **{'role_id': role_id}) - - columns = ["ID", "MANAGEMENT_IPS", "DATA_IPS", "POOLS", "VOLUME_DRIVER", - "VOLUME_TYPE", "BACKEND_INDEX", "USER_NAME", - "USER_PWD", "ROLE_ID"] - cinder_volume_array = [] - for volume in cinder_volumes: - row = [] - for item in columns: - field_name = item.lower().replace(' ', '_') - data = getattr(volume, field_name, None) - row.append(data) - cinder_volume_array.append(row) - - dict_names = ['id', 'management_ips', 'data_ips', 'pools', 'volume_driver', - 'volume_type', 'backend_index', 'user_name', - 'user_pwd', 'role_id'] - for item in cinder_volume_array: - if item[columns.index('ROLE_ID')] != role_id: - continue - role_info['cinder_volume_array'].\ - append(dict(zip(dict_names, item))) - except Exception: - role_info['cinder_volume_array'] = [] - - return HttpResponse(json.dumps(role_info), - content_type="application/json") - - -@csrf_exempt -def get_role_info(request): - request_data = json.loads(request.body) - role_id = request_data["role_id"] - try: - role = api.daisy.role_get(request, role_id) - role_info = { - "role_id": role_id, - "name": role.name, - "vip": role.vip, - "mongodb_vip": role.mongodb_vip} - except Exception, e: - messages.error(request, e) - role_info = { - "role_id": role_id, - "vip": None, - "mongodb_vip": None} - return HttpResponse(json.dumps(role_info), - content_type="application/json") - - -@csrf_exempt -def set_ha_role_info(request): - response = HttpResponse() - data = json.loads(request.body) - role_param = {} - msg = ('HA role modify request.body::::::: %s') % request.body - LOG.info(msg) - - role_id = data["role_id"] - try: - role_param["vip"] = data["vip"] - role_param["glance_vip"] = data["glance_vip"] - role_param["db_vip"] = data["db_vip"] - role_param["public_vip"] = data["public_vip"] - role_param["ntp_server"] = data["ntp_ip"] - role_param["glance_lv_size"] = data["glance_lv_size"] - role_param["db_lv_size"] = data["db_lv_size"] - api.daisy.role_update(request, role_id, **role_param) - - # glance - if data["glance_service_id"] == "": - glance_param = { - 'service': 'glance', - 'role_id': role_id, - 'data_ips': data["glance_data_ips"], - 'lun': data["glance_lun"], - 'disk_location': data["glance_disk_location"] - } - api.daisy.service_disk_add(request, **glance_param) - else: - glance_param = { - 'service': 'glance', - 'role_id': role_id, - 'data_ips': data["glance_data_ips"], - 'lun': data["glance_lun"], - 'disk_location': data["glance_disk_location"] - } - api.daisy.service_disk_update(request, - data['glance_service_id'], - **glance_param) - - # db - if data["db_service_id"] == "": - db_param = { - 'service': 'db', - 'role_id': role_id, - 'data_ips': data["db_data_ips"], - 'lun': data["db_lun"], - 'disk_location': data["db_disk_location"] - } - api.daisy.service_disk_add(request, **db_param) - else: - db_param = { - 'service': 'db', - 'role_id': role_id, - 'data_ips': data["db_data_ips"], - 'lun': data["db_lun"], - 'disk_location': data["db_disk_location"] - } - api.daisy.service_disk_update(request, - data['db_service_id'], - **db_param) - - - # mongodb - if data["mongodb_service_id"] == "": - mongodb_param = { - 'service': 'mongodb', - 'role_id': role_id, - 'data_ips': data["mongodb_data_ips"], - 'lun': data["mongodb_lun"], - 'disk_location': data["mongodb_disk_location"], - 'size': data['mongodb_lv_size'] - } - 
api.daisy.service_disk_add(request, **mongodb_param) - else: - mongodb_param = { - 'service': 'mongodb', - 'role_id': role_id, - 'data_ips': data["mongodb_data_ips"], - 'lun': data["mongodb_lun"], - 'disk_location': data["mongodb_disk_location"], - 'size': data['mongodb_lv_size'] - } - api.daisy.service_disk_update(request, - data['mongodb_service_id'], - **mongodb_param) - - # cinder - # 1. get all cinder volumes and delete it - cinder_volumes = \ - api.daisy.cinder_volume_list(request, **{'role_id': role_id}) - columns = ["ID", "MANAGEMENT_IPS", "DATA_IPS", "POOLS", "VOLUME_DRIVER", - "VOLUME_TYPE", "BACKEND_INDEX", "USER_NAME", - "USER_PWD", "ROLE_ID"] - disk_array = [] - for volume in cinder_volumes: - row = [] - for item in columns: - field_name = item.lower().replace(' ', '_') - row.append(getattr(volume, field_name, None)) - disk_array.append(row) - for item in disk_array: - if item[columns.index('ROLE_ID')] != role_id: - continue - api.daisy.cinder_volume_delete(request, item[columns.index('ID')]) - # 2. Add new cinder volumes - if len(data["cinder_volume_array"]) > 0: - cinder_param = \ - {'role_id': role_id, - 'disk_array': data["cinder_volume_array"]} - api.daisy.cinder_volume_add(request, **cinder_param) - except Exception, e: - messages.error(request, e) - LOG.info("update ha role info failed!, role_id=%s" % role_id) - response.status_code = 500 - return response - - response.status_code = 200 - return response - - -@csrf_exempt -def set_role_info(request): - response = HttpResponse() - data = json.loads(request.body) - msg = ('Role modify request.body::::::: %s') % request.body - LOG.info(msg) - role_id = data["role_id"] - - try: - role_param = { - "vip": data["vip"] - } - if "mongodb_vip" in data.keys(): - role_param["mongodb_vip"] = data["mongodb_vip"] - api.daisy.role_update(request, role_id, **role_param) - except Exception, e: - messages.error(request, e) - response.status_code = 500 - LOG.info("update role info failed!") - return response - - response.status_code = 200 - return response - - -@csrf_exempt -def ModifyCluster(request): +def modify_submit(request, cluster_id): data = json.loads(request.body) msg = ('Cluster modify request.body::::::: %s') % request.body LOG.info(msg) - cluster_info = data["cluster_info"] response = HttpResponse() try: - if cluster_info["other"] == 1: - api.daisy.cluster_update(request, - cluster_info["id"], - name=cluster_info["name"], - auto_scale=cluster_info["auto_scale"]) - else: - api.daisy.cluster_update( - request, - cluster_info["id"], - name=cluster_info["name"], - networking_parameters=cluster_info["networking_parameters"], - auto_scale=cluster_info["auto_scale"], - use_dns=cluster_info["use_dns"], - description=cluster_info["description"]) + # check param valid + deploy_rule_lib.net_plane_4_role_rule(request, + cluster_id, + data["role_info"], + data["net_plane_info"]) + api.daisy.cluster_update( + request, + cluster_id, + name=cluster_info["name"], + networking_parameters=cluster_info["networking_parameters"], + auto_scale=cluster_info["auto_scale"], + use_dns=cluster_info["use_dns"], + description=cluster_info["description"]) + + role_list = api.daisy.role_list(request) + roles = [role for role in role_list + if role.cluster_id == cluster_id] + for role in roles: + if role.name == "CONTROLLER_HA": + ha_role_info = data["role_info"]["ha"] + cluster_role.\ + set_ha_role_info_for_modify_cluster(request, + ha_role_info) + if role.name == "CONTROLLER_LB": + cluster_role.set_role_info(request, + role.id, + data["role_info"]["lb"]) + 
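# --- Illustrative aside, not part of the patch -----------------------------
# This loop dispatches on role.name to the matching helper in cluster_role.
# The same dispatch can also be expressed as a name -> handler table; a small
# sketch with stub handlers (the real helpers live in
# environment/cluster/role.py, and their exact signatures differ per role):
def _dispatch_role_update(role_name, role_id, role_info, handlers):
    key_by_role = {
        "CONTROLLER_HA": "ha",
        "CONTROLLER_LB": "lb",
        "COMPUTER": "computer",
        "ZENIC_NFM": "zenic_nfm",
        "ZENIC_CTL": "zenic_ctl",
    }
    key = key_by_role.get(role_name)
    handler = handlers.get(role_name)
    if key is None or handler is None:
        return None
    return handler(role_id, role_info[key])


# usage with throw-away stubs:
_stub_handlers = dict.fromkeys(
    ["CONTROLLER_HA", "CONTROLLER_LB", "COMPUTER", "ZENIC_NFM", "ZENIC_CTL"],
    lambda role_id, data: (role_id, data))
_dispatch_role_update("COMPUTER", "role-1",
                      {"computer": {"nova_lv_size": 0}}, _stub_handlers)
# ---------------------------------------------------------------------------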
if role.name == "COMPUTER": + cluster_role.\ + set_computer_role_info(request, + role.id, + data["role_info"]["computer"]) + if role.name == "ZENIC_NFM": + cluster_role.set_role_info(request, + role.id, + data["role_info"]["zenic_nfm"]) + if role.name == "ZENIC_CTL": + cluster_role.set_role_info(request, + role.id, + data["role_info"]["zenic_ctl"]) + + cluster_net_plane.set_net_plane(request, + cluster_id, + data["net_plane_info"]) except Exception as e: messages.error(request, e) exceptions.handle(request, "Cluster modify failed!(%s)" % e) + LOG.info("modify_submit %s", e) + response.status_code = 500 + return response + + response.status_code = 200 + return response + + +@csrf_exempt +def set_cluster_auto_scale(request, cluster_id): + data = json.loads(request.body) + response = HttpResponse() + cluster_info = data["cluster_info"] + try: + api.daisy.cluster_update(request, + cluster_id, + name=cluster_info["name"], + auto_scale=cluster_info["auto_scale"]) + except Exception as e: + messages.error(request, e) + exceptions.handle(request, "set_cluster_auto_scale failed!(%s)" % e) response.status_code = 500 return response diff --git a/code/horizon/openstack_dashboard/dashboards/environment/cluster/net_plane.py b/code/horizon/openstack_dashboard/dashboards/environment/cluster/net_plane.py new file mode 100755 index 00000000..53b76f17 --- /dev/null +++ b/code/horizon/openstack_dashboard/dashboards/environment/cluster/net_plane.py @@ -0,0 +1,214 @@ +# +# Copyright ZTE +# Daisy Tools Dashboard +# +import logging + +from django.http import HttpResponse +from django.views.decorators.csrf import csrf_exempt +from django.utils.translation import ugettext_lazy as _ +import json +from horizon import messages +from horizon import exceptions +from openstack_dashboard import api + +LOG = logging.getLogger(__name__) + + +@csrf_exempt +def add_net_plane(request, cluster_id): + status_code = 200 + data = json.loads(request.body) + net_plane_params = { + "PUBLICAPI": ["cidr", "gateway", "ip_ranges", "vlan_id", + "capability", "description"], + "MANAGEMENT": ["cidr", "gateway", "ip_ranges", "vlan_id", + "capability", "description"], + "DATAPLANE": ["segmentation_type", "vlan_start", "vlan_end", + "capability", "description"], + "STORAGE": ["cidr", "gateway", "ip_ranges", "vlan_id", + "capability", "description"], + "HEARTBEAT": ["cidr", "vlan_id", "ip_ranges", "capability", + "description"]} + try: + net_plane = { + "name": data["name"], + "network_type": data["network_type"], + "description": data["description"], + "cluster_id": cluster_id, + "type": "custom"} + for param in net_plane_params[data["network_type"]]: + net_plane[param] = data[param] + if net_plane[param] == "": + net_plane[param] = None + new_net_plane = api.daisy.net_plane_add(request, **net_plane) + data["id"] = new_net_plane.id + except Exception as e: + LOG.info("add_net_plane failed:%s", e) + messages.error(request, e) + status_code = 500 + return HttpResponse(json.dumps(data), + content_type="application/json", + status=status_code) + + +@csrf_exempt +def set_net_plane(request, cluster_id, nets): + net_plane_params = { + "PUBLICAPI": ["cidr", "gateway", "ip_ranges", "vlan_id", + "capability", "description"], + "MANAGEMENT": ["cidr", "gateway", "ip_ranges", "vlan_id", + "capability", "description"], + "DATAPLANE": ["segmentation_type", "vni_start", "vni_end", + "cidr", "gateway", "ip_ranges", "vlan_start", + "capability", "vlan_end", "description"], + "STORAGE": ["cidr", "gateway", "ip_ranges", "vlan_id", + "capability", "description"], 
+ "HEARTBEAT": ["cidr", "vlan_id", "ip_ranges", "capability", + "description"]} + for net in nets: + net_params = { + "name": net["name"], + "cluster_id": cluster_id, } + network_type = net["network_type"] + for param in net_plane_params[network_type]: + if param in net: + net_params[param] = net[param] + LOG.info("set_net_plane %s", net_params) + api.daisy.network_update(request, net["id"], **net_params) + + +@csrf_exempt +def sort_net_planes(net_planes): + ret_net_planes = [] + sort_list = ["MANAGEMENT", "PUBLICAPI", "DATAPLANE", + "STORAGE", "VXLAN", "HEARTBEAT"] + for sort in sort_list: + for net_plane in net_planes: + if net_plane["network_type"] == sort: + ret_net_planes.append(net_plane) + break + return ret_net_planes + + +@csrf_exempt +def get_default_net_plane(): + networks = [ + {"network_type": "MANAGEMENT", + "net_planes": [{"name": "MANAGEMENT", }]}, + {"network_type": "DATAPLANE", + "net_planes": [{"name": "physnet1", }]}, + {"network_type": "PUBLICAPI", + "net_planes": [{"name": "PUBLICAPI", }]}, + {"network_type": "STORAGE", + "net_planes": [{"name": "STORAGE", }]}, ] + for net in networks: + for net_plane in net["net_planes"]: + net_plane.update({"cidr": "192.168.1.1/24", + "vlan_start": "1", + "vlan_end": "4094", + "vlan_id": "", + "capability": "high", + "segmentation_type": "vlan", + "vni_start": None, + "vni_end": None}) + return sort_net_planes(networks) + + +@csrf_exempt +def add_net_plane_for_add_cluster(request, cluster_id, in_net_planes): + net_plane_params = { + "PUBLICAPI": ["cidr", "gateway", "ip_ranges", "vlan_id", + "capability", "description"], + "MANAGEMENT": ["cidr", "gateway", "ip_ranges", + "capability", "vlan_id", "description"], + "DATAPLANE": ["segmentation_type", "vni_start", "vni_end", + "cidr", "gateway", "ip_ranges", "vlan_start", + "capability", "vlan_end", "description"], + "STORAGE": ["cidr", "gateway", "ip_ranges", + "capability", "vlan_id", "description"], + "HEARTBEAT": ["cidr", "vlan_id", "ip_ranges", "capability", + "description"]} + + def get_id_by_name(nets, name): + for n in nets: + if n.name == name: + return n.id + return None + + try: + network_list = api.daisy.network_list(request, cluster_id) + for in_net_plane in in_net_planes: + net_id = get_id_by_name(network_list, in_net_plane["name"]) + if net_id: + net_params = { + "name": in_net_plane["name"], + "cluster_id": cluster_id} + network_type = in_net_plane["network_type"] + for param in net_plane_params[network_type]: + if param in in_net_plane: + net_params[param] = in_net_plane[param] + api.daisy.network_update(request, net_id, **net_params) + else: + net_plane = { + "name": in_net_plane["name"], + "network_type": in_net_plane["network_type"], + "description": in_net_plane["description"], + "cluster_id": cluster_id, + "type": "custom"} + + for param in net_plane_params[in_net_plane["network_type"]]: + if param in in_net_plane: + net_plane[param] = in_net_plane[param] + if net_plane[param] == "": + net_plane[param] = None + api.daisy.net_plane_add(request, **net_plane) + except Exception as e: + LOG.error('add_net_plane_for_add_cluster failed: e=%s' % e) + raise + + +@csrf_exempt +def delete_net_plane(request, cluster_id): + data = json.loads(request.body) + net_plane_info = { + "name": data["name"] + } + response = HttpResponse(json.dumps(net_plane_info), + content_type="application/json") + try: + api.daisy.netplane_delete(request, data["id"]) + except Exception as e: + LOG.info("delete_net_plane:%s", e) + messages.error(request, e) + response.status_code = 500 + return 
response + response.status_code = 200 + return response + + +@csrf_exempt +def get_net_plane_list(request, cluster_id): + ret_net_planes = [] + filter_net_planes = ["DEPLOYMENT", "EXTERNAL"] + try: + network_list = api.daisy.network_list(request, cluster_id) + show_net_planes = [net.__dict__ for net in network_list + if net.name not in filter_net_planes] + for show_net_plane in show_net_planes: + network_types = [ret_net_plane["network_type"] + for ret_net_plane in ret_net_planes] + if show_net_plane["network_type"] not in network_types: + ret_net_planes.append( + {"network_type": show_net_plane["network_type"], + "net_planes": [show_net_plane, ]}) + else: + for ret_net_plane in ret_net_planes: + if ret_net_plane["network_type"] == \ + show_net_plane["network_type"]: + ret_net_plane["net_planes"].append(show_net_plane) + except Exception as e: + LOG.info("get_net_plane_list! %s" % e) + exceptions.handle(request, + _('Unable to retrieve net plane list.')) + return sort_net_planes(ret_net_planes) diff --git a/code/horizon/openstack_dashboard/dashboards/environment/cluster/role.py b/code/horizon/openstack_dashboard/dashboards/environment/cluster/role.py new file mode 100755 index 00000000..29856b63 --- /dev/null +++ b/code/horizon/openstack_dashboard/dashboards/environment/cluster/role.py @@ -0,0 +1,476 @@ +# +# Copyright ZTE +# Daisy Tools Dashboard +# +import logging +from django.http import HttpResponse +from django.views.decorators.csrf import csrf_exempt +from django.utils.translation import ugettext_lazy as _ +import json +from django import template +from horizon import messages +from horizon import exceptions +from openstack_dashboard import api + +LOG = logging.getLogger(__name__) + + +@csrf_exempt +def set_ha_role_for_new_cluster(request, role_id, data): + try: + role_param = { + "public_vip": data["public_vip"], + "ntp_server": data["ntp_ip"], + "glance_lv_size": data["glance_lv_size"], + "db_lv_size": data["db_lv_size"]} + if data.get("vip", None): + role_param["vip"] = data["vip"] + if data.get("glance_vip", None): + role_param["glance_vip"] = data["glance_vip"] + if data.get("db_vip", None): + role_param["db_vip"] = data["db_vip"] + api.daisy.role_update(request, role_id, **role_param) + + # glance + glance_param = { + 'service': 'glance', + 'role_id': role_id, + 'protocol_type': data["glance_protocol_type"], + 'data_ips': data["glance_data_ips"], + 'lun': data["glance_lun"], + 'disk_location': data["glance_disk_location"] + } + api.daisy.service_disk_add(request, **glance_param) + + # db + if data["db_disk_location"] == "local" or\ + data["db_disk_location"] == "share": + db_param = { + 'service': 'db', + 'role_id': role_id, + 'protocol_type': data["db_protocol_type"], + 'data_ips': data["db_data_ips"], + 'lun': data["db_lun"], + 'disk_location': data["db_disk_location"] + } + api.daisy.service_disk_add(request, **db_param) + else: + db_param = { + 'service': 'db', + 'role_id': role_id, + 'protocol_type': data["db1_protocol_type"], + 'data_ips': data["db1_data_ips"], + 'lun': data["db1_lun"], + 'disk_location': data["db_disk_location"] + } + api.daisy.service_disk_add(request, **db_param) + db_param = { + 'service': 'db', + 'role_id': role_id, + 'protocol_type': data["db2_protocol_type"], + 'data_ips': data["db2_data_ips"], + 'lun': data["db2_lun"], + 'disk_location': data["db_disk_location"] + } + api.daisy.service_disk_add(request, **db_param) + + # mongodb + LOG.info("WMH DBG: mongodb settings") + mongodb_param = { + 'service': 'mongodb', + 'role_id': role_id, + 
'protocol_type': data["mongodb_protocol_type"], + 'data_ips': data["mongodb_data_ips"], + 'lun': data["mongodb_lun"], + 'disk_location': data["mongodb_disk_location"], + 'size': data["mongodb_lv_size"] + } + api.daisy.service_disk_add(request, **mongodb_param) + + # dbbackup + LOG.info("WMH DBG: dbbackup settings") + dbbackup_param = { + 'service': 'db_backup', + 'role_id': role_id, + 'protocol_type': data["dbbackup_protocol_type"], + 'data_ips': data["dbbackup_data_ips"], + 'lun': data["dbbackup_lun"], + 'disk_location': data["dbbackup_disk_location"], + 'size': data["dbbackup_lv_size"] + } + api.daisy.service_disk_add(request, **dbbackup_param) + + # Add new cinder volumes + if len(data["cinder_volume_array"]) > 0: + cinder_param = \ + {'role_id': role_id, + 'disk_array': data["cinder_volume_array"]} + api.daisy.cinder_volume_add(request, **cinder_param) + + except Exception as e: + LOG.info("update ha role info failed! %s" % e) + raise + + +@csrf_exempt +def set_ha_role_info_for_modify_cluster(request, data): + role_param = {} + role_id = data["role_id"] + try: + role_param["public_vip"] = data["public_vip"] + role_param["ntp_server"] = data["ntp_ip"] + role_param["glance_lv_size"] = data["glance_lv_size"] + role_param["db_lv_size"] = data["db_lv_size"] + if data.get("vip", None): + role_param["vip"] = data["vip"] + if data.get("glance_vip", None): + role_param["glance_vip"] = data["glance_vip"] + if data.get("db_vip", None): + role_param["db_vip"] = data["db_vip"] + api.daisy.role_update(request, role_id, **role_param) + + # glance + if data["glance_service_id"] == "": + glance_param = { + 'service': 'glance', + 'role_id': role_id, + 'protocol_type': data["glance_protocol_type"], + 'data_ips': data["glance_data_ips"], + 'lun': data["glance_lun"], + 'disk_location': data["glance_disk_location"] + } + api.daisy.service_disk_add(request, **glance_param) + else: + glance_param = { + 'service': 'glance', + 'role_id': role_id, + 'protocol_type': data["glance_protocol_type"], + 'data_ips': data["glance_data_ips"], + 'lun': data["glance_lun"], + 'disk_location': data["glance_disk_location"] + } + api.daisy.service_disk_update(request, + data['glance_service_id'], + **glance_param) + + # db + if data["db_disk_location"] == "local" or\ + data["db_disk_location"] == "share": + db_param = { + 'service': 'db', + 'role_id': role_id, + 'protocol_type': data["db_protocol_type"], + 'data_ips': data["db_data_ips"], + 'lun': data["db_lun"], + 'disk_location': data["db_disk_location"] + } + if data["db_service_id"] == "": + api.daisy.service_disk_add(request, **db_param) + else: + api.daisy.service_disk_update(request, + data['db_service_id'], + **db_param) + else: + db1_param = { + 'service': 'db', + 'role_id': role_id, + 'protocol_type': data["db1_protocol_type"], + 'data_ips': data["db1_data_ips"], + 'lun': data["db1_lun"], + 'disk_location': "share_cluster" + } + db2_param = { + 'service': 'db', + 'role_id': role_id, + 'protocol_type': data["db2_protocol_type"], + 'data_ips': data["db2_data_ips"], + 'lun': data["db2_lun"], + 'disk_location': "share_cluster" + } + + if data["db1_service_id"] == "" and data["db2_service_id"] == "": + api.daisy.service_disk_add(request, **db1_param) + api.daisy.service_disk_add(request, **db2_param) + + else: + api.daisy.service_disk_update(request, + data['db1_service_id'], + **db1_param) + api.daisy.service_disk_update(request, + data['db2_service_id'], + **db2_param) + # mongodb + if data["mongodb_service_id"] == "": + mongodb_param = { + 'service': 'mongodb', + 
'role_id': role_id, + 'protocol_type': data["mongodb_protocol_type"], + 'data_ips': data["mongodb_data_ips"], + 'lun': data["mongodb_lun"], + 'disk_location': data["mongodb_disk_location"], + 'size': data['mongodb_lv_size'] + } + api.daisy.service_disk_add(request, **mongodb_param) + else: + mongodb_param = { + 'service': 'mongodb', + 'role_id': role_id, + 'protocol_type': data["mongodb_protocol_type"], + 'data_ips': data["mongodb_data_ips"], + 'lun': data["mongodb_lun"], + 'disk_location': data["mongodb_disk_location"], + 'size': data['mongodb_lv_size'] + } + api.daisy.service_disk_update(request, + data['mongodb_service_id'], + **mongodb_param) + # dbbackup + if data["dbbackup_service_id"] == "": + dbbackup_param = { + 'service': 'db_backup', + 'role_id': role_id, + 'protocol_type': data["dbbackup_protocol_type"], + 'data_ips': data["dbbackup_data_ips"], + 'lun': data["dbbackup_lun"], + 'disk_location': data["dbbackup_disk_location"], + 'size': data['dbbackup_lv_size'] + } + api.daisy.service_disk_add(request, **dbbackup_param) + else: + dbbackup_param = { + 'service': 'db_backup', + 'role_id': role_id, + 'protocol_type': data["dbbackup_protocol_type"], + 'data_ips': data["dbbackup_data_ips"], + 'lun': data["dbbackup_lun"], + 'disk_location': data["dbbackup_disk_location"], + 'size': data['dbbackup_lv_size'] + } + api.daisy.service_disk_update(request, + data['dbbackup_service_id'], + **dbbackup_param) + # cinder + # 1. get all cinder volumes and delete it + cinder_volumes = \ + api.daisy.cinder_volume_list(request, **{'role_id': role_id}) + columns = ["ID", "MANAGEMENT_IPS", "DATA_IPS", "POOLS", + "VOLUME_DRIVER", "VOLUME_TYPE", "BACKEND_INDEX", + "USER_NAME", "USER_PWD", "ROLE_ID"] + disk_array = [] + for volume in cinder_volumes: + row = [] + for item in columns: + field_name = item.lower().replace(' ', '_') + row.append(getattr(volume, field_name, None)) + disk_array.append(row) + for item in disk_array: + if item[columns.index('ROLE_ID')] != role_id: + continue + api.daisy.cinder_volume_delete(request, item[columns.index('ID')]) + # 2. Add new cinder volumes + if len(data["cinder_volume_array"]) > 0: + cinder_param = \ + {'role_id': role_id, + 'disk_array': data["cinder_volume_array"]} + api.daisy.cinder_volume_add(request, **cinder_param) + except Exception as e: + LOG.info("update ha role info failed!, %s" % e) + raise + + +@csrf_exempt +def set_computer_role_info(request, role_id, data): + try: + role_param = {"nova_lv_size": data.get("nova_lv_size")} + api.daisy.role_update(request, role_id, **role_param) + except Exception as e: + LOG.info("update computer role info failed! %s" % e) + raise + + +@csrf_exempt +def set_role_info(request, role_id, data): + try: + role_param = {} + if data.get("vip", None): + role_param["vip"] = data["vip"] + + if "mongodb_vip" in data.keys(): + role_param["mongodb_vip"] = data["mongodb_vip"] + api.daisy.role_update(request, role_id, **role_param) + except Exception as e: + LOG.info("update lb role info failed! 
%s" % e) + raise + + +def sort_roles(roles): + ret_roles = [] + sort_list = ["CONTROLLER_HA", "CONTROLLER_LB", "COMPUTER", + "ZENIC_NFM", "ZENIC_CTL"] + for sort in sort_list: + for role in roles: + if role.get("name", "") == sort: + ret_roles.append(role) + break + return ret_roles + + +@csrf_exempt +def get_role_list(request, cluster_id): + roles_data = [] + try: + role_list = api.daisy.role_list(request) + roles = [role for role in role_list + if role.cluster_id == cluster_id] + for role in roles: + roles_data.append({ + "id": role.id, + "name": role.name}) + roles_data = sort_roles(roles_data) + except Exception as e: + LOG.info("get_role_list! %s" % e) + exceptions.handle(request, + _('Unable to retrieve role list.')) + return roles_data + + +@csrf_exempt +def get_ha_role_info(request): + request_data = json.loads(request.body) + role_id = request_data["role_id"] + status_code = 200 + role_info = {} + try: + role = api.daisy.role_get(request, role_id) + role_info["role_id"] = role_id + role_info["name"] = role.name + role_info["vip"] = role.vip + role_info["glance_vip"] = role.glance_vip + role_info["db_vip"] = role.db_vip + role_info["public_vip"] = role.public_vip + role_info["ntp_ip"] = role.ntp_server + role_info["glance_lv_size"] = role.glance_lv_size + role_info["db_lv_size"] = role.db_lv_size + role_info["service_disk_array"] = [] + role_info["cinder_volume_array"] = [] + + service_disks = \ + api.daisy.service_disk_list(request, **{'role_id': role_id}) + + columns = ['ID', 'SERVICE', 'ROLE_ID', 'DISK_LOCATION', 'DATA_IPS', + 'SIZE', 'LUN', 'PROTOCOL_TYPE'] + service_disk_array = [] + for disk in service_disks: + row = [] + for item in columns: + field_name = item.lower().replace(' ', '_') + data = getattr(disk, field_name, None) + row.append(data) + service_disk_array.append(row) + dict_names = ['id', 'service', 'role_id', 'disk_location', + 'data_ips', 'size', 'lun', 'protocol_type'] + for item in service_disk_array: + if item[columns.index('ROLE_ID')] != role_id: + continue + role_info['service_disk_array'].append(dict(zip(dict_names, item))) + + cinder_volumes = \ + api.daisy.cinder_volume_list(request, **{'role_id': role_id}) + + columns = ["ID", "MANAGEMENT_IPS", "DATA_IPS", "POOLS", + "VOLUME_DRIVER", "VOLUME_TYPE", "BACKEND_INDEX", + "USER_NAME", "USER_PWD", "ROLE_ID"] + cinder_volume_array = [] + for volume in cinder_volumes: + row = [] + for item in columns: + field_name = item.lower().replace(' ', '_') + data = getattr(volume, field_name, None) + row.append(data) + cinder_volume_array.append(row) + + dict_names = ['id', 'management_ips', 'data_ips', 'pools', + 'volume_driver', 'volume_type', 'backend_index', + 'user_name', 'user_pwd', 'role_id'] + for item in cinder_volume_array: + if item[columns.index('ROLE_ID')] != role_id: + continue + role_info['cinder_volume_array'].\ + append(dict(zip(dict_names, item))) + except Exception as e: + status_code = 500 + messages.error(request, e) + LOG.error("get_ha_role_info failed!%s", e) + return HttpResponse(json.dumps(role_info), + content_type="application/json", + status=status_code) + + +@csrf_exempt +def get_computer_role_info(request): + request_data = json.loads(request.body) + role_id = request_data["role_id"] + status_code = 200 + role_info = {} + try: + role = api.daisy.role_get(request, role_id) + role_info["role_id"] = role_id + role_info["name"] = role.name + role_info["nova_lv_size"] = role.nova_lv_size + except Exception, e: + status_code = 500 + messages.error(request, e) + LOG.error("get_computer_role_info 
failed!%s", e) + return HttpResponse(json.dumps(role_info), + content_type="application/json", + status=status_code) + + +@csrf_exempt +def get_role_info(request): + request_data = json.loads(request.body) + role_id = request_data["role_id"] + status_code = 200 + role_info = {} + try: + role = api.daisy.role_get(request, role_id) + role_info["role_id"] = role_id + role_info["name"] = role.name + role_info["vip"] = role.vip + role_info["mongodb_vip"] = role.mongodb_vip + except Exception, e: + status_code = 500 + messages.error(request, e) + LOG.error("get_role_info failed!%s", e) + return HttpResponse(json.dumps(role_info), + content_type="application/json", + status=status_code) + + +@csrf_exempt +def get_roles_detail(request, in_role_list, cluster_id): + ret_role_list = [] + try: + if in_role_list is not None: + role_list = api.daisy.role_list(request) + roles = [role for role in role_list + if role.cluster_id == cluster_id and + role.name in in_role_list] + for role in roles: + roles_detail = api.daisy.role_get(request, role.id) + ret_role_list.append(roles_detail) + except Exception as e: + LOG.info("get_roles_detail! %s" % e) + messages.error(request, 'Get Role Information Failed!') + return ret_role_list + + +@csrf_exempt +def get_role_html_detail(host): + template_name = 'environment/cluster/role_detail.html' + context = { + "host_id": host["host_id"], + "roles": host["roles"], + "show_vip_role_list": ["CONTROLLER_HA", "CONTROLLER_LB"] + } + return template.loader.render_to_string(template_name, context) diff --git a/code/horizon/openstack_dashboard/dashboards/environment/cluster/tables.py b/code/horizon/openstack_dashboard/dashboards/environment/cluster/tables.py index ceaa84de..9f31727f 100755 --- a/code/horizon/openstack_dashboard/dashboards/environment/cluster/tables.py +++ b/code/horizon/openstack_dashboard/dashboards/environment/cluster/tables.py @@ -11,24 +11,20 @@ # under the License. 
import logging - from django import template -from django.template.defaultfilters import title -from django.utils.http import urlencode from django.utils.translation import ugettext_lazy as _ -from django.utils.translation import npgettext_lazy from django.utils.translation import ungettext_lazy -from django.utils.translation import pgettext_lazy from django.core import urlresolvers -from django.core.urlresolvers import reverse - -from horizon import exceptions -from horizon import messages from horizon import tables -from horizon.utils import filters +from horizon import messages from openstack_dashboard import api +from openstack_dashboard.dashboards.environment.cluster import role \ + as cluster_role from openstack_dashboard.dashboards.environment.deploy import wizard_cache - +from openstack_dashboard.dashboards.environment.template \ + import views as template_views +from openstack_dashboard.dashboards.environment.host \ + import views as host_views LOG = logging.getLogger(__name__) dot_count = 0 @@ -42,47 +38,37 @@ class AddHost(tables.LinkAction): def get_link_url(self): wizard_cache.clean_cache(self.table.kwargs["cluster_id"]) - return urlresolvers.reverse(self.url, args=(self.table.kwargs["cluster_id"],)) + return urlresolvers.reverse(self.url, + args=(self.table.kwargs["cluster_id"],)) class DeleteHost(tables.DeleteAction): @staticmethod def action_present(count): return ungettext_lazy( - u"Delete Host", - u"Delete Hosts", + u"Remove Host", + u"Remove Hosts", count ) @staticmethod def action_past(count): return ungettext_lazy( - u"Deleted Host", - u"Deleted Hosts", + u"Remove Host", + u"Remove Hosts", count ) name = "delete" verbose_name = "delete" + def _allowed(self, request, datum): + return True + def delete(self, request, host_id): - api.daisy.delete_host_from_cluster(request, self.table.kwargs["cluster_id"], host_id) - - -class GenerateTemplate(tables.LinkAction): - name = "generate" - verbose_name = _("Generate Host Template") - url = "horizon:environment:cluster:generate_host_template" - classes = ("ajax-modal", "btn-generate-template") - - def get_link_url(self, datum): - cluster_id = self.table.kwargs["cluster_id"] - host_id = self.table.get_object_id(datum) - base_url = reverse(self.url, args=(cluster_id, host_id)) - return base_url - - def allowed(self, request, host): - return host["host_os_status"] == 'active' and host["host_role_status"] == "active" + api.daisy.delete_host_from_cluster(request, + self.table.kwargs["cluster_id"], + host_id) def get_cluster_id(request, cluster_name): @@ -95,7 +81,7 @@ def get_cluster_id(request, cluster_name): messages.error(request, e) -class ReDeployHost(tables.DeleteAction): +class ReDeployHost(tables.BatchAction): @staticmethod def action_present(count): return ungettext_lazy( @@ -114,228 +100,80 @@ class ReDeployHost(tables.DeleteAction): name = "redeploy_host" verbose_name = "ReDeploy Host" + classes = ("btn-danger",) - def delete(self, request, host_id): + def _allowed(self, request, datum): + return True + + def action(self, request, host_id): try: - host = api.daisy.host_get(request, host_id) - cluster_name = host.cluster - cluster_id = get_cluster_id(request, cluster_name) - api.daisy.delete_host_from_cluster(request, cluster_id, host_id) - api.daisy.host_update(request, host_id, - os_status="init") + api.daisy.host_update(request, host_id, os_status="init") except Exception, e: messages.error(request, e) +def get_install_status(host_detail): + role_status = host_detail.get("role_status", None) + deploy_info = 
host_views.get_deploy_info(host_detail["host_os_status"], + role_status) + return _(deploy_info.get("i18n", 'unknown')) + + def get_progress(host): dot = "......" - global dot_count + os_indeterminate_states = ["init", "installing", "updating"] + role_indeterminate_states = ["init", "installing", + "uninstalling", "updating"] + os_status = host["host_os_status"] + role_status = host.get("host_role_status", None) + dot_count += 1 if dot_count == 7: dot_count = 1 - - if ((host["host_os_status"] == 'init' or \ - host["host_os_status"] == 'installing' or \ - host["host_os_status"] == 'updating') and \ - host["host_messages"] != ''): - host["host_messages"] += dot[0:dot_count] - if ((host["host_role_status"] == 'init' or \ - host["host_role_status"] == 'installing' or \ - host["host_role_status"] == 'uninstalling' or \ - host["host_role_status"] == 'updating') and \ - host["role_messages"] != '' and \ - host["role_messages"] != None): - host["role_messages"] += dot[0:dot_count] - if host["host_os_status"] == 'init' and host["host_role_status"] == 'init': - context = { - "progress": 0, - "message": host["host_messages"], - "bar_type": 'progress-bar-info' - } - elif host["host_os_status"] == 'installing': - context = { - "progress": host["host_os_progress"], - "message": host["host_messages"], - "bar_type": 'progress-bar-info' - } - elif host["host_os_status"] == 'install-failed': - context = { - "progress": host["host_os_progress"], - "message": host["host_messages"], - "bar_type": 'progress-bar-danger' - } - elif host["host_os_status"] == 'updating': - context = { - "progress": host["host_os_progress"], - "message": host["host_messages"], - "bar_type": 'progress-bar-update' - } - elif host["host_os_status"] == 'update-failed': - context = { - "progress": host["host_os_progress"], - "message": host["host_messages"], - "bar_type": 'progress-bar-danger' - } - elif host["host_os_status"] == 'active' and host["host_role_status"] == 'init': - context = { - "progress": host["host_role_progress"], - "message": host["role_messages"], - "bar_type": 'progress-bar-info' - } - elif host["host_os_status"] == 'active' and host["host_role_status"] is None: - context = { - "progress": host["host_os_status"], - "message": host["host_message"], - "bar_type": 'progress-bar-success' - } - elif host["host_os_status"] == 'active' and host["host_role_status"] == 'installing': - context = { - "progress": host["host_role_progress"], - "message": host["role_messages"], - "bar_type": 'progress-bar-info' - } - elif host["host_os_status"] == 'active' and host["host_role_status"] == 'updating': - context = { - "progress": host["host_role_progress"], - "message": host["role_messages"], - "bar_type": 'progress-bar-update' - } - elif host["host_os_status"] == 'active' and host["host_role_status"] == "uninstalling": - context = { - "progress": host["host_role_progress"], - "message": host["role_messages"], - "bar_type": 'progress-bar-uninstall' - } - elif host["host_role_status"] == 'install-failed': - context = { - "progress": host["host_role_progress"], - "message": host["role_messages"], - "bar_type": 'progress-bar-danger' - } - elif host["host_role_status"] == 'update-failed': - context = { - "progress": host["host_role_progress"], - "message": host["role_messages"], - "bar_type": 'progress-bar-danger' - } - elif host["host_role_status"] == "uninstall-failed": - context = { - "progress": host["host_role_progress"], - "message": host["role_messages"], - "bar_type": 'progress-bar-danger' - } - elif host["host_os_status"] == 
'active' and host["host_role_status"] == "active": - context = { - "progress": 100, - "message": host["role_messages"], - "bar_type": 'progress-bar-success' - } - else: - context = { - "progress": 0, - "message": " ", - "bar_type": 'progress-bar-info' - } + if os_status in os_indeterminate_states \ + and host["host_messages"]: + host["host_messages"] += dot[0:dot_count] + + if role_status in role_indeterminate_states \ + and host["role_messages"]: + host["role_messages"] += dot[0:dot_count] + + deploy_info = host_views.get_deploy_info(os_status, role_status, + host["host_os_progress"], + host["host_messages"], + host["host_role_progress"], + host["role_messages"]) + context = { + "progress": deploy_info.get("progress", 0), + "message": deploy_info.get("message", " "), + "bar_type": deploy_info.get("bar_type", "progress-bar-info")} template_name = 'environment/cluster/_host_progress.html' return template.loader.render_to_string(template_name, context) -class UpdateRow(tables.Row): - ajax = True - - def get_data(self, request, host_id): - try: - qp = {"cluster_id": self.table.kwargs["cluster_id"]} - host_list = api.daisy.host_list(request, filters=qp) - host_manage_ip = "" - for host in host_list: - if host.os_progress is None: - host.os_progress = 0 - if host.messages is None: - host.messages = "" - - if host.id == host_id: - host_detail = api.daisy.host_get(request, host.id) - if hasattr(host_detail, "interfaces"): - for nic in host_detail.interfaces: - nic_assigned_networks = nic['assigned_networks'] - for network in nic_assigned_networks: - if network["name"] == 'MANAGEMENT': - host_manage_ip = network["ip"] - else: - host_manage_ip = "" - - if not hasattr(host, 'role_progress'): - host.role_progress = 0 - - if not hasattr(host, 'role_status'): - host.role_status = "" - - if not hasattr(host, "role_messages"): - host.role_messages = "" - - return {"host_name": host.name, - "host_manager_ip": host_manage_ip, - "host_os_progress": host.os_progress, - "host_os_status": host.os_status, - "host_role_progress": host.role_progress, - "host_role_status": host.role_status, - "host_messages": host.messages, - "role_messages": host.role_messages, - "host_id": host.id} - - except Exception as e: - LOG.error("wmh dbg: e=%s" % e) - class HostsTable(tables.DataTable): - - STATUS_CHOICES = ( - ("", True), - ) - - OS_STATUS_CHOICES = ( - ("init",None), - ("installing",None), - ("install-failed",False), - ("updating",None), - ("update-failed",False), - ("active",True), - ) - - ROLE_STATUS_CHOICES = ( - ("",None), - ("init",None), - ("installing",None), - ("uninstalling",None), - ("install-failed",False), - ("uninstall-failed",False), - ("updating",None), - ("update-failed",False), - ("active",True), - ) name = tables.Column('host_name', verbose_name=_('Name')) + host_role = tables.Column(cluster_role.get_role_html_detail, + verbose_name=_("Roles")) manager_ip = tables.Column('host_manager_ip', verbose_name=_('Manager Ip')) + install_status = tables.Column(get_install_status, + verbose_name=_('Status')) progress = tables.Column(get_progress, verbose_name=_('progress')) host_os_status = tables.Column('host_os_status', - verbose_name=_('host_os_status'), - hidden=True, - status=True, - status_choices=OS_STATUS_CHOICES) + verbose_name=_('host_os_status'), + hidden=True) host_role_status = tables.Column('host_role_status', - verbose_name=_('host_role_status'), - hidden=True, - status=True, - status_choices=ROLE_STATUS_CHOICES) + verbose_name=_('host_role_status'), + hidden=True) + def get_object_id(self, 
datum): return datum["host_id"] class Meta(object): name = "host" verbose_name = _("Host") - status_columns = ["host_os_status","host_role_status"] - table_actions = (AddHost, DeleteHost, ReDeployHost) - row_actions = (DeleteHost, GenerateTemplate, ReDeployHost) - row_class = UpdateRow + table_actions = (AddHost, ReDeployHost, DeleteHost) + row_actions = (DeleteHost, template_views.GenerateTemplate) diff --git a/code/horizon/openstack_dashboard/dashboards/environment/cluster/templates/cluster/_base.html b/code/horizon/openstack_dashboard/dashboards/environment/cluster/templates/cluster/_base.html index 7dfbbb12..66027ee0 100755 --- a/code/horizon/openstack_dashboard/dashboards/environment/cluster/templates/cluster/_base.html +++ b/code/horizon/openstack_dashboard/dashboards/environment/cluster/templates/cluster/_base.html @@ -1,196 +1,106 @@ -{% load i18n %} -{% load context_selection %} - - - - -
-
- -
- -
-
-
- -
- -
-
-
- -
- - - -
-
-
- -
-
-
- -
-
- -
-
- -
-
-
-
-
- -
- -
-
-
- -
- -
-
-
- -
- -
-
-
+{% load i18n %} +{% load context_selection %} + + + + +
+
+ +
+ +
+
+
+ +
+ +
+
+
+ +
+ +
+
+
+ +
+ +
+
+{% if hwmip_list|length > 0 %} +
+ +
+ +
+
+{% endif %} +
+ +
+ +
+
+
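For readers tracing the tables.py refactor earlier in this patch: the new get_install_status and get_progress helpers both delegate to host_views.get_deploy_info, whose implementation lives in environment/host/views.py and is not part of this diff. Based only on its two call sites above and on the status/progress-bar chain that was deleted from get_progress, a simplified, assumption-laden sketch of such a mapping could look like the following (illustration only, not the shipped function; the real helper presumably also distinguishes the updating/uninstalling bar styles that the old code handled):

    # Sketch of a get_deploy_info-style helper; signature inferred from the
    # two call sites in tables.py, behaviour approximated from the removed
    # if/elif chain in the old get_progress. Not the actual implementation.
    def get_deploy_info(os_status, role_status, os_progress=0,
                        host_messages="", role_progress=0, role_messages=""):
        """Map an (OS status, role status) pair to table display data."""
        failed = ("install-failed", "update-failed", "uninstall-failed")

        # Any failure state wins and is rendered with a red (danger) bar.
        if role_status in failed or os_status in failed:
            role_failed = role_status in failed
            return {"i18n": "failed",
                    "progress": role_progress if role_failed else os_progress,
                    "message": role_messages if role_failed else host_messages,
                    "bar_type": "progress-bar-danger"}

        # Both phases finished: deployment is complete.
        if os_status == "active" and role_status == "active":
            return {"i18n": "active", "progress": 100,
                    "message": role_messages,
                    "bar_type": "progress-bar-success"}

        # OS already installed: report the role (backend) phase.
        if os_status == "active":
            return {"i18n": role_status or "unknown",
                    "progress": role_progress, "message": role_messages,
                    "bar_type": "progress-bar-info"}

        # Otherwise the host is still in the OS installation phase.
        return {"i18n": os_status, "progress": os_progress,
                "message": host_messages, "bar_type": "progress-bar-info"}

The returned dict carries everything the new table code consumes: get_install_status translates the "i18n" key for the Status column, while get_progress passes "progress", "message" and "bar_type" straight into the _host_progress.html context.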
diff --git a/code/horizon/openstack_dashboard/dashboards/environment/cluster/templates/cluster/_cinder.html b/code/horizon/openstack_dashboard/dashboards/environment/cluster/templates/cluster/_cinder.html old mode 100644 new mode 100755 index 70e08178..912580b6 --- a/code/horizon/openstack_dashboard/dashboards/environment/cluster/templates/cluster/_cinder.html +++ b/code/horizon/openstack_dashboard/dashboards/environment/cluster/templates/cluster/_cinder.html @@ -146,7 +146,7 @@ cinder_volume["user_pwd"] = $("#cinder_user_password").val(); cinder_volume["volume_driver"] = $("#cinder_volume_driver option:selected").text(); cinder_volume["data_ips"] = new Array(); - if (cinder_volume["volume_driver"] == "FUJISTU_ETERNUS"){ + if (cinder_volume["volume_driver"] == "FUJITSU_ETERNUS"){ cinder_volume["data_ips"].push($("#cinder_data_ip").val()); for(var i=1; i @@ -256,11 +265,12 @@ -
+
 
diff --git a/code/horizon/openstack_dashboard/dashboards/environment/cluster/templates/cluster/_host_progress.html b/code/horizon/openstack_dashboard/dashboards/environment/cluster/templates/cluster/_host_progress.html index c61134b7..7e738e26 100644 --- a/code/horizon/openstack_dashboard/dashboards/environment/cluster/templates/cluster/_host_progress.html +++ b/code/horizon/openstack_dashboard/dashboards/environment/cluster/templates/cluster/_host_progress.html @@ -1,12 +1,9 @@
-
-
-
-
-
- {{progress}}% -
+
+
+
+
+
{{progress}}% +
- -
{{message}}
- +
{{message}}
\ No newline at end of file diff --git a/code/horizon/openstack_dashboard/dashboards/environment/cluster/templates/cluster/_modify_cluster.html b/code/horizon/openstack_dashboard/dashboards/environment/cluster/templates/cluster/_modify_cluster.html index fa065541..af2007be 100644 --- a/code/horizon/openstack_dashboard/dashboards/environment/cluster/templates/cluster/_modify_cluster.html +++ b/code/horizon/openstack_dashboard/dashboards/environment/cluster/templates/cluster/_modify_cluster.html @@ -27,9 +27,9 @@ {% elif role.name == "CONTROLLER_LB" %} - {% elif role.name == "ZENIC_MDB" %} - - + {% elif role.name == "COMPUTER" %} + + {% elif role.name == "ZENIC_NFM" %} @@ -48,7 +48,7 @@ {% include "environment/cluster/_cinder.html" %} diff --git a/code/horizon/openstack_dashboard/dashboards/environment/cluster/templates/cluster/_network.html b/code/horizon/openstack_dashboard/dashboards/environment/cluster/templates/cluster/_network.html index ebbecf30..b439286a 100755 --- a/code/horizon/openstack_dashboard/dashboards/environment/cluster/templates/cluster/_network.html +++ b/code/horizon/openstack_dashboard/dashboards/environment/cluster/templates/cluster/_network.html @@ -4,17 +4,21 @@
@@ -741,153 +1093,237 @@ $(function(){
{% for anet in network.networks %} -
-