Delete all rpms and major upgrade from inside...

Change-Id: I12d6307464cc03664b6113807a35c396b652add9
Signed-off-by: Zhijiang Hu <hu.zhijiang@zte.com.cn>
Zhijiang Hu 2016-07-01 10:21:39 +08:00
parent 8698f3a15e
commit 2d2da98b35
574 changed files with 28824 additions and 22613 deletions

@@ -1,3 +0,0 @@
# Ignore everything in this directory
*
# Except this file
!.gitignore

@@ -1,166 +0,0 @@
## HA configuration (two-node cluster)
# Each HA system gets one configuration file. The naming convention is: a single HA set uses HA_1.conf; two HA sets use HA_2_1.conf and HA_2_2.conf, and so on.
# It is recommended to copy this template, rename it, and then edit it. If editing with vi, run 'export LC_ALL="zh_CN.GB2312"' first to avoid garbled characters, and 'unset LC_ALL' after editing.
[DEFAULT]
# OpenCOS components installed under HA. Any combination of loadbalance,database,amqp,keystone,neutron,glance,cinder,nova,horizon,heat,ceilometer,ironic
# (matching the keys of the component service lists below), separated by commas and in any order; 'all' is shorthand for everything. 'haproxy' means LB is configured.
# Note: HA is installed via the conf method, which does not support installing ironic. If ironic is configured here, install it manually via the custom method before the whole installation flow.
# This option is mandatory.
components=database,amqp,keystone,neutron,glance,cinder,nova,horizon,heat,ceilometer
# Component services managed by HA (can be trimmed); separate multiple services with commas.
# If no services are added or removed, the options below need not be modified. Unused components need not be commented out; whether a component is selected is decided by "components".
loadbalance = haproxy
database=mariadb
amqp=rabbitmq-server
keystone=openstack-keystone
# neutron-metadata-agent and neutron-lbaas-agent are not used by default
neutron=neutron-server,neutron-l3-agent,neutron-dhcp-agent
# openstack-glance-scrubber is not used by default
glance=openstack-glance-api,openstack-glance-registry
# openstack-cinder-backup is not used by default
cinder=openstack-cinder-api,openstack-cinder-scheduler,openstack-cinder-volume
nova=openstack-nova-api,openstack-nova-conductor,openstack-nova-scheduler,openstack-nova-cert,openstack-nova-consoleauth,openstack-nova-novncproxy
horizon=httpd,opencos-alarmmanager,opencos-alarmagent
heat=openstack-heat-api,openstack-heat-engine,openstack-heat-api-cfn,openstack-heat-api-cloudwatch
ceilometer=openstack-ceilometer-api,openstack-ceilometer-central,openstack-ceilometer-alarm-evaluator,openstack-ceilometer-alarm-notifier,openstack-ceilometer-notification,openstack-ceilometer-collector
ironic=openstack-ironic-api,openstack-ironic-conductor
# Clone service resources added as needed by the business (run on every node). Fill in service names without the .service suffix, separated by commas. Optional.
#clone_service=
# Name of the guard service
guard=tfg-guard
# HA cluster heartbeat links: at least one, three recommended; each is a pair of IPs separated by a comma.
# If LB and HA use the same servers, the heartbeat links need not be filled in here again.
# The first heartbeat link (an external IP in this example). Mandatory.
heartbeat_link1=10.43.179.221,10.43.179.222
# The second heartbeat link; must not share an IP with any other heartbeat link. Optional.
heartbeat_link2=
# The third heartbeat link; must not share an IP with any other heartbeat link. Optional.
heartbeat_link3=
# The node that runs the HA script is the local node; the others are remote nodes. This is the root password for ssh login to the remote nodes. Mandatory.
remote_node_password=ossdbg1
# haproxy floating IP address. Mandatory when configuring LB.
#loadbalance_fip=192.160.0.226
#loadbalance_nic=ens33
#loadbalance_netmask=23
#############DB################
# Database floating IP; may be the same as the LB floating IP. Mandatory.
# Floating IP address
#database_fip=192.160.0.225
# NIC carrying the floating IP
#database_nic=baseleft
# Netmask, in CIDR format
#database_netmask=23
# Full path of the database shared disk. Mandatory if the component is present.
# LV-style disk names are recommended; when using LV, configure the logical volume name.
#database_device=/dev/mapper/vg_mysql-lv_mysql
# Filesystem type
#database_fs_type=ext4
# Full path of the database backup shared disk; must differ from other shared disks (feature not yet supported). Optional.
#backup_database_device=/dev/mapper/vg_mysqlbackup-lv_mysqlbackup
#backup_database_fs_type=ext4
##############AMQP################
# AMQP floating IP; may be the same as the LB floating IP. Mandatory.
#amqp_fip=192.160.0.225
#amqp_nic=baseleft
#amqp_netmask=23
##############keystone################
# keystone floating IP. Not needed when LB is configured; otherwise mandatory if the component is present.
#keystone_fip=192.160.0.225
#keystone_nic=baseleft
#keystone_netmask=23
##############neutron################
# neutron floating IP. Not needed when LB is configured; otherwise mandatory if the component is present.
#neutron_fip=192.160.0.225
#neutron_nic=baseleft
#neutron_netmask=23
##############glance################
# glance floating IP. Not needed when LB is configured; otherwise mandatory if the component is present.
#glance_fip=192.160.0.225
#glance_nic=baseleft
#glance_netmask=23
# Image shared disk settings; must differ from other shared disks. Mandatory if the component is present.
# glance_device_type may be drbd or iscsi
#glance_device_type=drbd
#glance_device=/dev/mapper/vg_glance-lv_glance
#glance_fs_type=ext4
##############cinder################
# cinder floating IP. Not needed when LB is configured; otherwise mandatory if the component is present.
#cinder_fip=192.160.0.225
#cinder_nic=baseleft
#cinder_netmask=23
# Management IP(s) of the disk array used by VM block devices; separate multiple IPs with spaces. Optional.
#cinder_ping_ip=192.160.0.7
##############nova################
# nova floating IP. Not needed when LB is configured; otherwise mandatory if the component is present.
#nova_fip=192.160.0.225
#nova_nic=baseleft
#nova_netmask=23
##############horizon################
# Floating IP used to log in to the TECS dashboard. Not needed when LB is configured; otherwise mandatory if the component is present.
# Components with different floating IPs may run on different nodes; to pin horizon
# to the same node as some other component, configure location_constraint.
#horizon_fip=10.43.179.230
#horizon_nic=kmportv1
#horizon_netmask=23
##############ironic################
# ironic floating IP. Not needed when LB is configured; otherwise mandatory if the component is present.
#ironic_fip=192.160.0.225
#ironic_nic=baseleft
#ironic_netmask=23
##############heat################
# heat floating IP. Not needed when LB is configured; otherwise mandatory if the component is present.
#heat_fip=192.160.0.225
#heat_nic=baseleft
#heat_netmask=23
##############ceilometer################
# ceilometer floating IP. Not needed when LB is configured; otherwise mandatory if the component is present.
#ceilometer_fip=192.160.0.225
#ceilometer_nic=baseleft
#ceilometer_netmask=23
# Full path of the mongod database shared disk. Recommended.
#mongod_device=/dev/mapper/vg_mongodb-lv_mongodb
# Filesystem type
#mongod_fs_type=ext4
# Set to local if the mongod database uses a local disk; otherwise leave empty.
mongod_local=local
# The following options describe the disk array hosting the shared disks; for now all shared disks used in this configuration must be on one array. Optional.
# Format: (primary controller service IP,primary controller iqn),(secondary controller service IP,secondary controller iqn)
# If the two controller iqns are identical, this may be shortened to (primary controller service IP,primary controller iqn)
#iscsi_storage=(172.32.1.1,iqn.2099-01.cn.com.zte:usp.spr11-4c:09:b4:b0:56:8b),(172.32.1.2,iqn.2099-01.cn.com.zte:usp.spr11-4c:09:b4:b0:56:8c)
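As an illustration only, a minimal Python sketch of how a deployment tool could read this template; the section and option names are those above, but the parsing code itself is an assumption, not part of TECS:

# Hypothetical consumer of HA_1.conf: expand "components" into service lists.
import ConfigParser  # Python 2 stdlib; use configparser on Python 3

cfg = ConfigParser.ConfigParser()
cfg.read("HA_1.conf")
components = [c.strip() for c in cfg.get("DEFAULT", "components").split(",")]
for comp in components:
    # each component option (e.g. "glance") holds its comma-separated services
    services = cfg.get("DEFAULT", comp).split(",")
    print "%s -> %s" % (comp, ",".join(services))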

@@ -1,159 +0,0 @@
#!/bin/bash
dhcp_ip="127.0.0.1"
DISCOVERD_URL="http://$dhcp_ip:5050/v1/continue"
function update() {
jq "$1" data.json > temp.json || echo "Error: update $1 to json failed"
mv temp.json data.json
}
function get_system_info(){
PRODUCT=$(dmidecode -s system-product-name)
FAMILY=$(dmidecode -t system|grep "Family"|cut -d ":" -f2)
VERSION=$(dmidecode -s system-version)
SERIAL=$(dmidecode -s system-serial-number)
MANUFACTURER=$(dmidecode -s system-manufacturer)
UUID=$(dmidecode -s system-uuid)
FQDN=$(hostname -f)
echo '{"system":{}}' > data.json
update ".system[\"product\"] = \"$PRODUCT\""
update ".system[\"family\"] = \"$FAMILY\""
update ".system[\"fqdn\"] = \"$FQDN\""
update ".system[\"version\"] = \"$VERSION\""
update ".system[\"serial\"] = \"$SERIAL\""
update ".system[\"manufacturer\"] = \"$MANUFACTURER\""
update ".system[\"uuid\"] = \"$UUID\""
}
function get_cpu_info(){
REAL=$(cat /proc/cpuinfo |grep "physical id"|sort |uniq|wc -l)
TOTAL=$(cat /proc/cpuinfo |grep "processor"|wc -l)
update ".cpu[\"real\"] = $REAL"
update ".cpu[\"total\"] = $TOTAL"
for i in $(seq $TOTAL)
do
if [ ! -z "$i" ]; then
SPEC_MODEL=$(cat /proc/cpuinfo | grep name | cut -f2 -d:|sed -n $i"p")
SPEC_FRE=$(cat /proc/cpuinfo | grep MHz | cut -f2 -d:|sed -n $i"p")
update ".cpu[\"spec_$i\"] = {model:\"$SPEC_MODEL\", frequency:$SPEC_FRE}"
fi
done
}
function get_memory_info(){
PHY_NUM=$(dmidecode -t memory|grep "Physical Memory Array"|wc -l)
TOTAL_MEM=$(cat /proc/meminfo |grep MemTotal |cut -d ":" -f2)
update ".memory[\"total\"] = \"$TOTAL_MEM\""
for num in $(seq $PHY_NUM)
do
SLOTS=$(dmidecode -t memory |grep "Number Of Devices" |cut -d ":" -f2|sed -n $num"p")
MAX_CAP=$(dmidecode -t memory |grep "Maximum Capacity" |cut -d ":" -f2|sed -n $num"p")
update ".memory[\"phy_memory_$num\"] = {slots:\"$SLOTS\", maximum_capacity:\"$MAX_CAP\"}"
for i in $(seq $SLOTS)
do
if [ ! -z "$i" ]; then
DEVICE_FRE=$(dmidecode -t memory |grep "Speed" |cut -d ":" -f2|sed -n $i"p")
DEVICE_TYPE=$(dmidecode -t memory |grep 'Type:' |grep -v "Error Correction Type"|cut -d ":" -f2|sed -n $i"p")
DEVICE_SIZE=$(dmidecode -t memory |grep Size |cut -d ":" -f2|sed -n $i"p")
update ".memory[\"phy_memory_$num\"][\"devices_$i\"] = {frequency:\"$DEVICE_FRE\", type:\"$DEVICE_TYPE\", size:\"$DEVICE_SIZE\"}"
fi
done
done
}
function get_net_info(){
physical_networks=`ls -l /sys/class/net/ | grep -v lo |grep "pci"|awk -F 'net/' '{print $2}'`
if [ -f "/sys/class/net/bonding_masters" ]; then
bond_network=$(cat /sys/class/net/bonding_masters)
if [ ! -z "$bond_network" ];then
physical_networks+=" $bond_network"
fi
fi
for iface in $physical_networks
do
NAME=$iface
MAC=$(ip link show $iface | awk '/ether/ {print $2}')
IP=$(ip addr show $iface | awk '/inet / { sub(/\/.*/, "", $2); print $2 }')
NETMASK=$(ifconfig $iface | grep netmask | awk '{print $4}')
STATE=$(ip link show $iface | awk '/mtu/ {print $3}')
PCI=$(ethtool -i $iface|grep "bus-info"|cut -d " " -f2)
CURRENT_SPEED=$(ethtool $iface |grep Speed |awk -F " " '{print $2}')
LINE=$(ethtool $iface|grep -n "Supported pause frame use"|awk -F ":" '{print $1}')
LINE=$(( LINE - 1 ))
LINE_SPEED=$(ethtool $iface|grep -n "Supported link modes"|awk -F ":" '{print $1}')
BOND=$(ifconfig $iface | grep MASTER)
if [ $LINE -eq $LINE_SPEED ]; then
MAX_SPEED=$(ethtool $iface|grep "Supported link modes"|cut -d ":" -f2)
else
MAX_SPEED=$(ethtool $iface |sed -n $LINE"p"|awk -F " " '{print $1}')
fi
UP="UP"
if [[ "$STATE" =~ "$UP" ]]; then
STATE="up"
else
STATE="down"
fi
if [ -z "$BOND" ]; then
TYPE="ether"
else
TYPE="bond"
SLAVES=$(find /etc/sysconfig/network-scripts/ -name "ifcfg-*" |xargs grep "MASTER=$iface"|awk -F 'ifcfg-' '{print $2}'|awk -F ':' '{print $1}')
fi
if [ ! -z "$MAC" ]; then
update ".interfaces[\"$iface\"] = {mac: \"$MAC\", ip: \"$IP\", netmask: \"$NETMASK\", name: \"$iface\", max_speed: \"$MAX_SPEED\", state: \"$STATE\", pci: \"$PCI\", current_speed: \"$CURRENT_SPEED\", type: \"$TYPE\", slaves:\"$SLAVES\"}"
fi
done
}
function get_disk_info(){
for disk in $(fdisk -l|grep Disk|grep "/dev" |cut -d ":" -f1|awk -F "/" '{print $NF}')
do
DISK_NAME=$disk
DISK_SIZE=$(fdisk -l|grep Disk|grep "/dev" |grep -w $disk|cut -d "," -f2)
DISK_DISK=$(ls -l /dev/disk/by-path/|grep $disk"$"|awk '{print $9}')
DISK_EXTRA_1=$(ls -l /dev/disk/by-id/|grep $disk"$"|awk '{print $9}'|sed -n 1p)
DISK_EXTRA_2=$(ls -l /dev/disk/by-id/|grep $disk"$"|awk '{print $9}'|sed -n 2p)
MODEL=$(hdparm -I /dev/$disk |grep Model | cut -d ":" -f2)
REMOVABLE=$(hdparm -I /dev/$disk |grep removable|awk '{print $4}')
update ".disk[\"$disk\"] = {name: \"$DISK_NAME\", size: \"$DISK_SIZE\", disk: \"$DISK_DISK\", model: \"$MODEL\", removable: \"$REMOVABLE\",extra: [\"$DISK_EXTRA_1\", \"$DISK_EXTRA_2\"]}"
done
}
function main(){
get_system_info
get_cpu_info
get_memory_info
get_net_info
get_disk_info
}
main
BMC_ADDRESS=$(ipmitool lan print | grep -e "IP Address [^S]" | awk '{ print $4 }')
if [ -z "$BMC_ADDRESS" ]; then
BMC_ADDRESS=$(ipmitool lan print 3| grep -e "IP Address [^S]" | awk '{ print $4 }')
fi
update ".ipmi_address = \"$BMC_ADDRESS\""
update ".data_name = \"baremetal_source\""
update ".os_status = \"active\""
echo Collected:
cat data.json
RESULT=$(eval curl -i -X POST \
"-H 'Accept: application/json'" \
"-H 'Content-Type: application/json'" \
"-d @data.json" \
"$DISCOVERD_URL")
if echo $RESULT | grep "HTTP/1.0 4"; then
echo "Ironic API returned error: $RESULT"
fi
echo "Node is now discovered! Halting..."
sleep 5
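For reference, a rough Python equivalent of the final curl call above, assuming the same data.json produced by the collection functions (Python 2 stdlib only; this is a sketch, not part of the original tooling):

import json
import urllib2

DISCOVERD_URL = "http://127.0.0.1:5050/v1/continue"

with open("data.json") as fp:
    payload = json.dumps(json.load(fp))

req = urllib2.Request(DISCOVERD_URL, data=payload,
                      headers={"Content-Type": "application/json",
                               "Accept": "application/json"})
try:
    resp = urllib2.urlopen(req)
    print "Discovery POST returned %s" % resp.getcode()
except urllib2.HTTPError as e:
    # corresponds to the "HTTP/1.0 4" check in the shell script
    print "Ironic API returned error: %s %s" % (e.code, e.read())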

Binary file not shown.

@@ -1,39 +0,0 @@
[
{
"protocol_type": "ISCSI",
"service": "glance",
"lun": "0",
"data_ips": [
"10.43.177.159"
],
"lvm_config": {
"size": 100,
"vg_name": "VolGroupHAImage",
"lv_name": "lvHAImage",
"fs_type": "ext4"
}
},
{
"protocol_type": "ISCSI",
"service": "db",
"lun": "1",
"data_ips": [
"162.1.1.101"
],
"lvm_config": {
"size": 100,
"vg_name": "VolGroupHAMysql",
"lv_name": "lvHAMysql",
"fs_type": "ext4"
}
},
{
"protocol_type": "CEPH",
"rbd_config": {
"size": 100,
"rbd_pool": "mysql",
"rbd_volume": "mysql",
"fs_type": "ext4" # can be none
}
}
]
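A minimal sketch, assuming this file is the control.json consumed below, of dispatching each entry by protocol_type; the handler stubs are invented for illustration, and the real handlers live in share_disk.py further down:

import json

def deploy_iscsi(item):  # stub; real logic is ISCSIShareDisk.deploy_share_disk
    print "ISCSI: service %s, lun %s" % (item["service"], item["lun"])

def deploy_ceph(item):   # stub; real logic is CEPHShareDisk.deploy_share_disk
    print "CEPH: pool %s" % item["rbd_config"]["rbd_pool"]

HANDLERS = {"ISCSI": deploy_iscsi, "CEPH": deploy_ceph}

with open("control.json") as fp:
    for item in json.load(fp):
        HANDLERS[item["protocol_type"]](item)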

@@ -1,39 +0,0 @@
[
{
"protocol_type": "ISCSI",
"service": "glance",
"lun": "0",
"data_ips": [
"10.43.177.159"
],
"lvm_config": {
"size": 100,
"vg_name": "VolGroupHAImage",
"lv_name": "lvHAImage",
"fs_type": "ext4"
}
},
{
"protocol_type": "ISCSI",
"service": "db",
"lun": "1",
"data_ips": [
"162.1.1.101"
],
"lvm_config": {
"size": 100,
"vg_name": "VolGroupHAMysql",
"lv_name": "lvHAMysql",
"fs_type": "ext4"
}
},
{
"protocol_type": "CEPH",
"rbd_config": {
"size": 100,
"rbd_pool": "mysql",
"rbd_volume": "mysql",
"fs_type": "ext4" # can be none
}
}
]

@@ -1,144 +0,0 @@
# This is a basic configuration file with some examples, for device mapper
# multipath. Use user friendly names, instead of using WWIDs as names.
defaults {
user_friendly_names yes
queue_without_daemon no
# find_multipaths yes
}
##
## Here is an example of how to configure some standard options.
##
#
#defaults {
# udev_dir /dev
# polling_interval 10
# selector "round-robin 0"
# path_grouping_policy multibus
# getuid_callout "/lib/udev/scsi_id --whitelisted --device=/dev/%n"
# prio alua
# path_checker readsector0
# rr_min_io 100
# max_fds 8192
# rr_weight priorities
# failback immediate
# no_path_retry fail
# user_friendly_names yes
#}
##
## The wwid line in the following blacklist section is shown as an example
## of how to blacklist devices by wwid. The 2 devnode lines are the
## compiled in default blacklist. If you want to blacklist entire types
## of devices, such as all scsi devices, you should use a devnode line.
## However, if you want to blacklist specific devices, you should use
## a wwid line. Since there is no guarantee that a specific device will
## not change names on reboot (from /dev/sda to /dev/sdb for example)
## devnode lines are not recommended for blacklisting specific devices.
##
#blacklist {
# wwid 26353900f02796769
# devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*"
# devnode "^hd[a-z]"
#}
#multipaths {
# multipath {
# wwid 3600508b4000156d700012000000b0000
# alias yellow
# path_grouping_policy multibus
# path_checker readsector0
# path_selector "round-robin 0"
# failback manual
# rr_weight priorities
# no_path_retry 5
# }
# multipath {
# wwid 1DEC_____321816758474
# alias red
# }
#}
#devices {
# device {
# vendor "COMPAQ "
# product "HSV110 (C)COMPAQ"
# path_grouping_policy multibus
# getuid_callout "/lib/udev/scsi_id --whitelisted --device=/dev/%n"
# path_checker readsector0
# path_selector "round-robin 0"
# hardware_handler "0"
# failback 15
# rr_weight priorities
# no_path_retry queue
# }
# device {
# vendor "COMPAQ "
# product "MSA1000 "
# path_grouping_policy multibus
# }
#}
devices {
device {
vendor "FUJITSU"
product "ETERNUS_DXL"
prio alua
path_grouping_policy group_by_prio
path_selector "round-robin 0"
failback immediate
no_path_retry 0
path_checker tur
dev_loss_tmo 2097151
fast_io_fail_tmo 1
}
device {
vendor "FUJITSU"
product "ETERNUS_DXM"
prio alua
path_grouping_policy group_by_prio
path_selector "round-robin 0"
failback immediate
no_path_retry 0
path_checker tur
dev_loss_tmo 2097151
fast_io_fail_tmo 1
}
device {
vendor "FUJITSU"
product "ETERNUS_DX400"
prio alua
path_grouping_policy group_by_prio
path_selector "round-robin 0"
failback immediate
no_path_retry 0
path_checker tur
dev_loss_tmo 2097151
fast_io_fail_tmo 1
}
device {
vendor "FUJITSU"
product "ETERNUS_DX8000"
prio alua
path_grouping_policy group_by_prio
path_selector "round-robin 0"
failback immediate
no_path_retry 0
path_checker tur
dev_loss_tmo 2097151
fast_io_fail_tmo 1
}
device {
vendor "ZTE"
product "ZXUSP"
path_grouping_policy group_by_prio
path_checker tur
prio alua
path_selector "round-robin 0"
hardware_handler "1 alua"
failback immediate
rr_weight priorities
no_path_retry 0
rr_min_io_rq 1
flush_on_last_del yes
}
}
blacklist {
}

@@ -1,281 +0,0 @@
import uuid
from utils import *
from xml.etree.ElementTree import ElementTree, Element
class BaseConfig():
_CINDER_CONF_PATH = "/etc/cinder/cinder.conf"
SET_CONFIG = \
"openstack-config --set {config_file} {section} {key} {value}"
GET_CONFIG = \
"openstack-config --get {config_file} {section} {key}"
instance = None
def __init__(self):
self._BACKEND_MAPPING = {
'KS3200_IPSAN': ZTEBackendConfig,
'KS3200_FCSAN': ZTEBackendConfig,
'FUJISTU_ETERNUS': FUJISTUBackendConfig,
'LVM': None,
'CEPH': CEPHBackendConfig,
}
self.instance_mapping = {}
def __get_backend_instance(self, backend_type):
if not backend_type or \
backend_type not in self._BACKEND_MAPPING.keys():
print_or_raise("Volume driver type '%s' is not valid." %
backend_type,
ScriptInnerError)
backend_instance = self.instance_mapping.get(backend_type, BaseConfig)
if isinstance(backend_instance, self._BACKEND_MAPPING[backend_type]):
return backend_instance
else:
self.instance_mapping.update(
{backend_type: self._BACKEND_MAPPING[backend_type]()})
return self.instance_mapping[backend_type]
@classmethod
def single_instance(cls):
if not BaseConfig.instance:
BaseConfig.instance = BaseConfig()
return BaseConfig.instance
def _construct_particular_cinder_data(self, backend, backend_data):
print_or_raise("Backend _construct_particular_cinder_data method no "
"implement!", ScriptInnerError)
def _write_xml(self, fp_xml, **backend_device_args):
self.backend_instance._write_xml(fp_xml, **backend_device_args)
def _construct_commonality_cinder_data(self, backend, backend_data):
backend_pools, xml_path = \
self.backend_instance._construct_particular_cinder_data(
backend, backend_data)
backend_data['volume_backend_name'] = \
backend_data.pop('volume_type')
set_backend = lambda x, y: self.SET_CONFIG.format(
config_file=self._CINDER_CONF_PATH,
section=backend,
key=x, value=y)
backend_config_list = list()
backend_config_list += map(
set_backend, backend_data.keys(), backend_data.values())
get_backends = \
self.GET_CONFIG.format(config_file=self._CINDER_CONF_PATH,
section="DEFAULT",
key="enabled_backends")
out, err = execute(get_backends, check_exit_code=[0, 1])
exist_backends = out.split("\n")[0] if out else ""
enabled_backends = \
exist_backends if backend in exist_backends else \
"%s" % backend if not out else "%s,%s" % \
(exist_backends, backend)
set_backends = \
self.SET_CONFIG.format(config_file=self._CINDER_CONF_PATH,
section="DEFAULT",
key="enabled_backends",
value=enabled_backends)
# write to cinder.conf
config_set_all = set_backends + ";" + ";".join(backend_config_list)
execute(config_set_all)
return backend_pools, xml_path
def is_needed_generate_backend_xml(self, backend_driver):
if backend_driver in ['KS3200_IPSAN', 'KS3200_FCSAN',
'FUJISTU_ETERNUS']:
return True
else:
return False
def config_backend(self, backend_cinder_args, **backend_device_args):
"""
Outer config interface, for the public flow.
:param backend_device_args: device config
:param backend_cinder_args: backend config
:return:
"""
backend_data = backend_cinder_args[1]
backend_driver = backend_data.get('volume_driver', None)
self.backend_instance = self.__get_backend_instance(backend_driver)
# config cinder.conf
backend_pools, xml_path = \
self._construct_commonality_cinder_data(backend_cinder_args[0],
backend_data)
# config xml
if self.is_needed_generate_backend_xml(backend_driver):
backend_device_args.update({'pools': backend_pools})
with open(xml_path, "w+") as fp_xml:
self._write_xml(fp_xml, **backend_device_args)
execute("chown cinder:cinder %s" % xml_path)
def update_xml_node(self, element_obj, node_path, content):
node_list = element_obj.findall(node_path)
if node_list:
node_list[0].text = content
else:
new_element = Element(node_path.split('/')[-1])
new_element.text = content
parent_node = element_obj.findall(node_path.split('/')[0])
parent_node[0].append(new_element)
class ZTEBackendConfig(BaseConfig):
_DEFAULT_USERNAME = "admin"
_DEFAULT_USERPWD = "admin"
_DEFAULT_XML_FILE_PREFIX = "cinder_zte_conf_file"
_DEFAULT_XML_TEMPLATE_PATH = "/etc/cinder/cinder_zte_conf.xml"
_ISCSI_DRIVER = 'cinder.volume.drivers.zte.zte_ks.ZteISCSIDriver'
_FC_DRIVER = 'cinder.volume.drivers.zte.zte_ks.ZteFCDriver'
def _construct_particular_cinder_data(self, backend, backend_data):
# construct commonality data in cinder.conf
backend_data['volume_driver'] = \
self._ISCSI_DRIVER \
if "KS3200_IPSAN" == backend_data['volume_driver'] \
else self._FC_DRIVER
backend_data[self._DEFAULT_XML_FILE_PREFIX] = \
backend_data.pop('backend_config_file') \
if backend_data.get('backend_config_file', None) \
else "/etc/cinder/%s_%s.xml" % (self._DEFAULT_XML_FILE_PREFIX,
backend)
backend_data['use_multipath_for_image_xfer'] = \
backend_data.get('multipath_tool', True)
backend_pools = backend_data.pop('pools')
return backend_pools, backend_data[self._DEFAULT_XML_FILE_PREFIX]
def _write_xml(self, fp, **backend_device_args):
if not os.path.exists(self._DEFAULT_XML_TEMPLATE_PATH):
print_or_raise("XML file template %s not exists,can't load defult "
"params." % self._DEFAULT_XML_TEMPLATE_PATH,
ScriptInnerError)
mgnt_ips = backend_device_args['management_ips']
user_name = backend_device_args['user_name']
user_pwd = backend_device_args['user_pwd']
cinder_host_ip = backend_device_args['cinder_host_ip']
pools = backend_device_args['pools']
xml_fp = fp
tree = ElementTree()
elements = tree.parse(self._DEFAULT_XML_TEMPLATE_PATH)
for index in range(len(mgnt_ips)):
self.update_xml_node(
elements,
"Storage/ControllerIP" + str(index), mgnt_ips[index])
if cinder_host_ip:
self.update_xml_node(elements, "Storage/LocalIP", cinder_host_ip)
self.update_xml_node(elements, "Storage/UserName", user_name)
self.update_xml_node(elements, "Storage/UserPassword", user_pwd)
# del all StoragePool and StorageVd node
pool_parent_node = elements.findall("LUN")
pool_child_nodes = elements.findall("LUN/StoragePool")
vd_child_nodes = elements.findall("LUN/StorageVd")
map(pool_parent_node[0].remove, pool_child_nodes + vd_child_nodes)
# add StoragePool node base on pools
for pool in pools:
element = Element("StoragePool")
element.text = pool
element.tail = "\n\t"
pool_parent_node[0].insert(0, element)
tree.write(xml_fp, encoding="utf-8", xml_declaration=True)
class FUJISTUBackendConfig(BaseConfig):
_DEFAULT_USERNAME = "root"
_DEFAULT_USERPWD = "root"
_DEFAULT_XML_FILE_PREFIX = "cinder_eternus_config_file"
_DEFAULT_XML_TEMPLATE_PATH = \
"/etc/cinder/cinder_fujitsu_eternus_dx.xml"
FUJISTU_DRIVER = \
"cinder.volume.drivers.fujitsu.eternus_dx_iscsi.FJDXISCSIDriver"
def _construct_particular_cinder_data(self, backend, backend_data):
# construct commonality data in cinder.conf
backend_data['volume_driver'] = self.FUJISTU_DRIVER
backend_data[self._DEFAULT_XML_FILE_PREFIX] = \
backend_data.pop('backend_config_file') \
if backend_data.get('backend_config_file', None) \
else "/etc/cinder/%s_%s.xml" % (self._DEFAULT_XML_FILE_PREFIX,
backend)
backend_data['use_multipath_for_image_xfer'] = \
backend_data.get('multipath_tool', True)
backend_data['use_fujitsu_image_volume'] = \
backend_data.get('use_fujitsu_image_volume', True)
backend_data['fujitsu_min_image_volume_per_storage'] = \
backend_data.get('fujitsu_min_image_volume_per_storage', 1)
backend_data['fujitsu_image_management_dir'] = \
backend_data.get('fujitsu_image_management_dir',
'/var/lib/glance/conversion')
backend_pools = backend_data.pop('pools')
return backend_pools, backend_data[self._DEFAULT_XML_FILE_PREFIX]
def _write_xml(self, fp, **backend_device_args):
if not os.path.exists(self._DEFAULT_XML_TEMPLATE_PATH):
print_or_raise("XML file template %s not exists,can't load defult "
"params." % self._DEFAULT_XML_TEMPLATE_PATH,
ScriptInnerError)
mgnt_ip = backend_device_args['management_ips'][0]
data_ips = backend_device_args['data_ips']
user_name = backend_device_args['user_name']
user_pwd = backend_device_args['user_pwd']
pool = backend_device_args['pools'][0]
xml_fp = fp
tree = ElementTree()
elements = tree.parse(self._DEFAULT_XML_TEMPLATE_PATH)
self.update_xml_node(elements, "EternusIP", mgnt_ip)
self.update_xml_node(elements, "EternusUser", user_name)
self.update_xml_node(elements, "EternusPassword", user_pwd)
self.update_xml_node(elements, "EternusPool", pool)
self.update_xml_node(elements, "EternusSnapPool", pool)
root = tree.getroot()
map(root.remove, root.findall("EternusISCSIIP"))
for ip in data_ips:
element = Element("EternusISCSIIP")
element.text = ip
element.tail = "\n"
root.insert(4, element)
# root.append(element)
tree.write(xml_fp, encoding="utf-8", xml_declaration=True)
class CEPHBackendConfig(BaseConfig):
NOVA_CONF_FILE = "/etc/nova/nova.conf"
GLANCE_API_CONF_FILE = "/etc/glance/glance-api.conf"
_RBD_STORE_USER = "cinder"
_RBD_POOL = "volumes"
_RBD_MAX_CLONE_DEPTH = 5
_RBD_FLATTEN_VOLUME_FROM_SNAPSHOT = "False"
_RBD_CEPH_CONF = "/etc/ceph/ceph.conf"
_RBD_DRIVER = 'cinder.volume.drivers.rbd.RBDDriver'
def _construct_particular_cinder_data(self, backend, backend_data):
backend_data['volume_driver'] = self._RBD_DRIVER
backend_data['rbd_pool'] = self._RBD_POOL
backend_data['rbd_max_clone_depth'] = self._RBD_MAX_CLONE_DEPTH
backend_data['rbd_flatten_volume_from_snapshot'] = \
self._RBD_FLATTEN_VOLUME_FROM_SNAPSHOT
backend_data['rbd_ceph_conf'] = self._RBD_CEPH_CONF
uuid_instance = uuid.uuid3(uuid.NAMESPACE_DNS, "zte.com.cn")
backend_data['rbd_secret_uuid'] = uuid_instance.urn.split(":")[2]
return [], []
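A hedged usage sketch of the entry point above; the backend section name and option values are invented, and in the real flow they come from cinder.json via storage_auto_config.py:

# Hypothetical: configure one FUJITSU ETERNUS backend section in cinder.conf
# and emit its driver XML (keys mirror _construct_particular_cinder_data).
backend_name = "fujitsu_backend_1"       # cinder.conf section name, assumed
backend_data = {
    "volume_driver": "FUJISTU_ETERNUS",  # key as spelled in _BACKEND_MAPPING
    "volume_type": "FUJITSU",            # becomes volume_backend_name
    "pools": ["Pool0"],
}
BaseConfig.single_instance().config_backend(
    (backend_name, backend_data),
    management_ips=["192.168.1.10"],
    data_ips=["192.168.2.10"],
    user_name="root",
    user_pwd="root",
    cinder_host_ip="192.168.1.20")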

@@ -1,312 +0,0 @@
from utils import *
class BaseShareDisk():
instance = None
def __init__(self):
self._PROTOCOL_MAPPING = {
'ISCSI': ISCSIShareDisk,
'CEPH': CEPHShareDisk
}
self.instance_mapping = {}
def __get_protocol_instance(self, protocol_type):
if not protocol_type or \
protocol_type not in self._PROTOCOL_MAPPING.keys():
print_or_raise("Protocol type '%s' is not valid." % protocol_type,
ScriptInnerError)
protocol_instance = self.instance_mapping.get(protocol_type,
BaseShareDisk)
if isinstance(protocol_instance,
self._PROTOCOL_MAPPING[protocol_type]):
return protocol_instance
else:
self.instance_mapping.update(
{protocol_type: self._PROTOCOL_MAPPING[protocol_type]()})
return self.instance_mapping[protocol_type]
@classmethod
def single_instance(cls):
if not BaseShareDisk.instance:
BaseShareDisk.instance = BaseShareDisk()
return BaseShareDisk.instance
def deploy_share_disk(self, item, host_name):
protocol_instance = self.__get_protocol_instance(
item.get('protocol_type', 'ISCSI'))
protocol_instance.deploy_share_disk(item, host_name)
class ISCSIShareDisk(BaseShareDisk):
_LV_DEFAULT_NAME = {
'glance': ("VolGroupHAImage", "lvHAImage", 254),
'db': ("VolGroupHAMysql", "lvHAMysql", 253),
'db_backup': ("VolGroupHABakMysql", "lvHABakMysql", 252),
'mongodb': ("VolGroupHAMongodb", "lvHAMongodb", 251),
}
def _get_iscsi_configs(self, record_list):
raid_config = {}
for record in record_list:
discovery_media_ip = record.split(" ")[0].split(":")[0]
discovery_media_iqn = record.split(" ")[1]
try:
execute("ping -c 1 -W 2 %s" % discovery_media_ip)
except ProcessExecutionError:
execute("iscsiadm -m node -T %s -p %s -o delete" %
(discovery_media_iqn, discovery_media_ip),
check_exit_code=[0, 1])
continue
if discovery_media_ip in raid_config.get(discovery_media_iqn, []):
execute("iscsiadm -m node -T %s -p %s -R" %
(discovery_media_iqn, discovery_media_ip),
check_exit_code=[0, 1])
elif discovery_media_iqn in raid_config.keys():
raid_config[discovery_media_iqn] += [discovery_media_ip]
else:
raid_config[discovery_media_iqn] = [discovery_media_ip]
print_or_raise("Raid config is:\n%s" % str(raid_config))
return raid_config
def _lv_reentrant_check(
self, vg_name, lv_name, iscsi_session_setup, lun=None,
data_ips=[]):
"""
Check if share disk operation is reentrant.
:return:True,continue follow action; False, do nothing.
"""
lv_device_path = "/dev/%s/%s" % (vg_name, lv_name)
if not os.path.exists(lv_device_path):
return True
if not iscsi_session_setup:
exist_volumes = \
[sd for sd in self._ls_sd_path() if "-lun-" + lun in sd
for ip in data_ips if "ip-" + ip in sd]
if not exist_volumes:
print_or_raise("Lvm %s is exist, but no sd device match!" %
lv_device_path, ScriptInnerError)
return False
def _lv_rollback(self, lv, vg, block_device):
try:
execute("lvremove -y -ff /dev/%s/%s" % (lv, vg),
check_exit_code=[0, 1, 5])
execute("vgremove -y -ff %s" % vg, check_exit_code=[0, 1, 5])
execute("pvremove -y -ff %s" % block_device,
check_exit_code=[0, 1, 5])
except Exception as e:
print_or_raise("Rollback lvm resource failed!", e)
def _establish_iscsi_session(self, available_data_ips):
# discovery
discovery_ret = ""
for ip in available_data_ips:
out, err = execute(
"iscsiadm -m discovery -t st -p %s:3260" % ip)
discovery_ret += out
# if('0' != err) or ('0\n' != err ) or err:
# print_or_raise("Discovery ip:%s failed,continue.." % ip)
if not discovery_ret:
print_or_raise("No discovery record!", ScriptInnerError)
record_list = list(set(discovery_ret.split('\n')[:-1]))
print_or_raise(
"Discovery successful! Record:\n%s" % "\n".join(record_list))
# get iqn and ip like {iqn1: ip1, iqn2:ip2}
raid_config = self._get_iscsi_configs(record_list)
# auto config & login
login_cmd = \
lambda x, y: "iscsiadm -m node -T %s -p %s:3260 -l" % (x, y)
auto_cmd = \
lambda x, y: "iscsiadm -m node -T %s -p %s -o update -n " \
"node.startup -v automatic" % (x, y)
login = []
auto_config = []
for index in range(len(raid_config.keys())):
k = raid_config.keys()[index]
v = raid_config[k]
login += map(login_cmd, [k] * len(v), v)
auto_config += map(auto_cmd, [k] * len(v), v)
execute(";".join(login))
execute(";".join(auto_config))
print_or_raise("Login successful!")
return raid_config
def _modify_host_iqn(self, host_name):
# modify host IQN
host_iqn, err = execute("cat /etc/iscsi/initiatorname.iscsi")
md5_str, err = execute("echo -n %s | openssl md5" % host_name)
host_iqn = host_iqn.split("=")[1].strip()
wish_iqn = "iqn.opencos.rh:" + md5_str.split("=")[1].strip()
if wish_iqn != host_iqn:
print_or_raise(
"The host iqn is:%s, but wish iqn is %s, it will be modified."
% (host_iqn, wish_iqn))
with open("/etc/iscsi/initiatorname.iscsi", "w") as fp:
fp.write("InitiatorName=" + wish_iqn + "\n")
execute("systemctl restart iscsid.service")
def _ls_sd_path(self):
out, err = execute("ls /dev/disk/by-path")
return out.split("\n")[:-1]
def _find_multipath_by_sd(self, iqns, lun_id):
sd_path = []
attempts = 0
while not sd_path:
sd_path = \
[sd for sd in self._ls_sd_path()
if filter(lambda complex_sd_path: complex_sd_path in sd,
[iqn + "-lun-" + str(lun_id) for iqn in iqns])]
attempts += 1
if attempts == 5:
execute("iscsiadm -m node -R")
elif attempts > 10:
print_or_raise(
"After login successful,"
"there is no local sd device match with block device.",
ScriptInnerError)
time.sleep(2)
sd_path = "/dev/disk/by-path/" + sd_path[0]
sd_real_path = os.path.realpath(sd_path)
attempts = 0
multipath_path = ""
while not os.path.exists(multipath_path):
multipath_device, err = execute("multipath -l %s" % sd_real_path)
# if not multipath_device or ('0' != err) or ('0\n' != err) or err:
# continue
multipath_path = "/dev/mapper/" + \
multipath_device.split("\n")[0].split(" ")[0]
attempts += 1
if attempts > 5:
print_or_raise(
"No multipath match with local sd device:%s." %
sd_real_path,
ScriptInnerError)
time.sleep(2)
return multipath_path
def _create_lv_by_multipath_device(
self, multipath, vg_name, lv_name, size, fs_type):
try:
# create lvm base on block device
execute("pvcreate -y -ff %s" % multipath,
check_exit_code=[0, 1, 5])
execute("vgcreate -y -ff %s %s" % (vg_name, multipath),
check_exit_code=[0, 1, 5])
if size == -1:
lvcreate = "lvcreate -W y -l 100%%FREE -n %s %s" % \
(lv_name, vg_name)
else:
lvcreate = "lvcreate -W y -L %sG -n %s %s" % \
(round(size * 0.95, 2), lv_name, vg_name)
execute(lvcreate, check_exit_code=[0, 1, 5])
execute("pvscan --cache --activate ay")
# make filesystem
execute("mkfs.%s /dev/%s/%s" % (fs_type, vg_name, lv_name))
except Exception as e:
self._lv_rollback(lv_name, vg_name, multipath)
print_or_raise("LVM create failed, resource has been rollbacked.",
e)
def deploy_share_disk(self, item, host_name):
config_computer()
self._modify_host_iqn(host_name)
service = item['service']
if service not in ['glance', 'db', 'db_backup', 'mongodb']:
print_or_raise("Service name '%s' is not valid." % service)
# check ip
available_data_ips, invalid_ips = \
get_available_data_ip(item['data_ips'])
if not available_data_ips:
print_or_raise("No valid data ips,please check.", ScriptInnerError)
raid_config = self._establish_iscsi_session(available_data_ips)
lv_config = item.get('lvm_config', None)
vg_name = lv_config.get('vg_name', self._LV_DEFAULT_NAME[service][0])
lv_name = lv_config.get('lv_name', self._LV_DEFAULT_NAME[service][1])
if not self._lv_reentrant_check(vg_name, lv_name, True):
return
multipath = self._find_multipath_by_sd(
raid_config.keys(),
item.get('lun', self._LV_DEFAULT_NAME[service][2]))
self._create_lv_by_multipath_device(multipath,
vg_name,
lv_name,
lv_config.get('size', -1),
lv_config.get('fs_type', 'ext4'))
class CEPHShareDisk(BaseShareDisk):
def __init__(self):
self.monitor_ip = ''
self.monitor_passwd = ''
def deploy_share_disk(self, item, host_name):
self.monitor_ip = item.get('monitor_ip', '')
self.monitor_passwd = item.get('monitor_passwd', '')
rbd_pool = item['rbd_config']['rbd_pool']
rbd_img = item['rbd_config']['rbd_volume']
img_size = int(item['rbd_config']['size'])*1024
fs_type = item['rbd_config'].get('fs_type', 'ext4')
cmd_create = 'sshpass -p %s ssh %s rbd create -p %s --size %s %s ' % \
(self.monitor_passwd,
self.monitor_ip,
rbd_pool,
img_size,
rbd_img)
cmd_query = 'sshpass -p %s ssh %s rbd ls -l %s' % (
self.monitor_passwd, self.monitor_ip, rbd_pool)
image_in_monitor = []
print_or_raise("Create image %s in pool %s at monitor %s." %
(rbd_img, rbd_pool, self.monitor_ip))
try:
out, err = execute(cmd_query)
if out:
for line in out.splitlines():
image_in_monitor.append(line.split()[0])
if rbd_img not in image_in_monitor:
execute(cmd_create)
except Exception as e:
print_or_raise("Query pool %s in monitor error or create image %s "
"in pool %s." % (rbd_pool, rbd_img, rbd_pool), e)
execute("systemctl stop rbdmap")
rbd_map = '%s/%s id=admin,' \
'keyring=/etc/ceph/ceph.client.admin.keyring' % (rbd_pool,
rbd_img)
rbd_map_need_to_write = True
print_or_raise("Write rbdmap.")
with open("/etc/ceph/rbdmap", "a+") as fp:
for line in fp:
if line == rbd_map + "\n":
rbd_map_need_to_write = False
if rbd_map_need_to_write is True:
fp.write(rbd_map + "\n")
execute("chmod 777 /etc/ceph/rbdmap")
execute("systemctl enable rbdmap")
execute("systemctl start rbdmap")
execute("mkfs.%s /dev/rbd/%s/%s" % (fs_type, rbd_pool, rbd_img))

@@ -1,231 +0,0 @@
import subprocess
import random
import shlex
import signal
import time
import os
import logging
LOG = logging.getLogger()
formatter = "%(asctime)s %(name)s %(levelname)s %(message)s"
logging.basicConfig(format=formatter,
filename="storage_auto_config.log",
filemode="a",
level=logging.DEBUG)
stream_log = logging.StreamHandler()
stream_log.setLevel(logging.DEBUG)
stream_log.setFormatter(logging.Formatter(formatter))
LOG.addHandler(stream_log)
def print_or_raise(msg, exc=None):
if not exc:
LOG.debug(msg)
else:
if isinstance(exc, Exception):
LOG.error(msg)
raise exc
elif issubclass(exc, Exception):
raise exc(msg)
class ScriptInnerError(Exception):
def __init__(self, message=None):
super(ScriptInnerError, self).__init__(message)
class UnknownArgumentError(Exception):
def __init__(self, message=None):
super(UnknownArgumentError, self).__init__(message)
class NoRootWrapSpecified(Exception):
def __init__(self, message=None):
super(NoRootWrapSpecified, self).__init__(message)
class ProcessExecutionError(Exception):
def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
description=None):
self.exit_code = exit_code
self.stderr = stderr
self.stdout = stdout
self.cmd = cmd
self.description = description
if description is None:
description = "Unexpected error while running command."
if exit_code is None:
exit_code = '-'
message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r"
% (description, cmd, exit_code, stdout, stderr))
super(ProcessExecutionError, self).__init__(message)
def execute(cmd, **kwargs):
"""Helper method to shell out and execute a command through subprocess.
Allows optional retry.
:param cmd: Passed to subprocess.Popen.
:type cmd: string
TODO:param process_input: Send to opened process.
:type process_input: string
TODO:param check_exit_code: Single bool, int, or list of allowed exit
codes. Defaults to [0]. Raise
:class:`ProcessExecutionError` unless
program exits with one of these codes.
:type check_exit_code: boolean, int, or [int]
:param delay_on_retry: True | False. Defaults to True. If set to True,
wait a short amount of time before retrying.
:type delay_on_retry: boolean
:param attempts: How many times to retry cmd.
:type attempts: int
TODO:param run_as_root: True | False. Defaults to False. If set to True,
the command is prefixed by the command specified
in the root_helper kwarg.
:type run_as_root: boolean
:param root_helper: command to prefix to commands called with
run_as_root=True
:type root_helper: string
TODO:param shell: whether or not there should be a shell used to
execute this command. Defaults to false.
:type shell: boolean
:param loglevel: log level for execute commands.
:type loglevel: int. (Should be logging.DEBUG or logging.INFO)
:returns: (stdout, stderr) from process execution
:raises: :class:`UnknownArgumentError` on
receiving unknown arguments
:raises: :class:`ProcessExecutionError`
"""
def _subprocess_setup():
# Python installs a SIGPIPE handler by default.
# This is usually not what non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
# stdin
process_input = kwargs.pop('process_input', None)
check_exit_code = kwargs.pop('check_exit_code', [0])
ignore_exit_code = False
delay_on_retry = kwargs.pop('delay_on_retry', True)
attempts = kwargs.pop('attempts', 1)
run_as_root = kwargs.pop('run_as_root', False)
root_helper = kwargs.pop('root_helper', '')
shell = kwargs.pop('shell', True)
silent = kwargs.pop('silent', False)
# loglevel = kwargs.pop('loglevel', logging.DEBUG)
if isinstance(check_exit_code, bool):
ignore_exit_code = not check_exit_code
check_exit_code = [0]
elif isinstance(check_exit_code, int):
check_exit_code = [check_exit_code]
if kwargs:
raise UnknownArgumentError(
'Got unknown keyword args to utils.execute: %r' % kwargs)
if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
if not root_helper:
raise NoRootWrapSpecified(
message=('Command requested root, but did not specify a root '
'helper.'))
cmd = shlex.split(root_helper) + list(cmd)
while attempts > 0:
attempts -= 1
try:
if not silent:
print_or_raise('Running cmd (subprocess): %s' % cmd)
# windows
if os.name == 'nt':
preexec_fn = None
close_fds = False
else:
preexec_fn = _subprocess_setup
close_fds = True
obj = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=close_fds,
preexec_fn=preexec_fn,
shell=shell)
if process_input is not None:
result = obj.communicate(process_input)
else:
result = obj.communicate()
obj.stdin.close()
_returncode = obj.returncode
if not silent:
print_or_raise('Result was %s' % _returncode)
if not ignore_exit_code and _returncode not in check_exit_code:
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=_returncode,
stdout=stdout,
stderr=stderr,
cmd=cmd)
# cmd=sanitized_cmd)
return result
except ProcessExecutionError:
if not attempts:
raise
else:
if not silent:
print_or_raise('%r failed. Retrying.' % cmd)
if delay_on_retry:
time.sleep(random.randint(20, 200) / 100.0)
finally:
time.sleep(0)
def get_available_data_ip(media_ips):
unavailable_ip = []
for media_ip in media_ips:
try:
execute("ping -c 1 -W 2 %s" % media_ip)
except ProcessExecutionError:
unavailable_ip.append(media_ip)
continue
return list(set(media_ips) - set(unavailable_ip)), unavailable_ip
def clear_host_iscsi_resource():
out, err = execute("iscsiadm -m node", check_exit_code=[0, 21])
if not out:
return
sd_ips_list = map(lambda x: x.split(":3260")[0], out.split("\n")[:-1])
if not sd_ips_list:
return
valid_ips, invalid_ips = get_available_data_ip(sd_ips_list)
clear_resource = ""
for ip in invalid_ips:
logout_session = "iscsiadm -m node -p %s -u;" % ip
del_node = "iscsiadm -m node -p %s -o delete;" % ip
# manual_startup = "iscsiadm -m node -p %s -o update -n node.startup "
# "-v manual;" % ip
clear_resource += (logout_session + del_node)
execute(clear_resource, check_exit_code=[0, 21], silent=True)
# _execute("multipath -F")
def config_computer():
# remove exist iscsi resource
clear_host_iscsi_resource()
config_multipath()
def config_multipath():
if os.path.exists("/etc/multipath.conf"):
execute("echo y|mv /etc/multipath.conf /etc/multipath.conf.bak",
check_exit_code=[0, 1])
execute("cp -p base/multipath.conf /etc/")
execute("systemctl enable multipathd.service;"
"systemctl restart multipathd.service")

@@ -1,168 +0,0 @@
###############################################################################
# Author: CG
# Description:
# 1. The script should be copied to the host before running.
# 2. The script is not thread safe.
# 3. Examples of script calls:
# [config share disk]:
# python storage_auto_config share_disk <host_pxe_mac>,
# where host_pxe_mac is used to generate the host IQN by md5 and write
# it to '/etc/iscsi/initiatorname.iscsi'
# [config cinder]: python storage_auto_config cinder_conf 10.43.177.129,
# where the second parameter for cinder_conf is the cinder <host_ip>.
# If the backend is CEPH, you should call the following commands:
# python storage_auto_config glance_rbd_conf at the glance node &
# python storage_auto_config nova_rbd_conf at the nova node.
# [config multipath]: python storage_auto_config check_multipath.
# 4. Before running the script, the cinder.json and control.json files
# must be configured.
###############################################################################
import sys
import uuid
import traceback
from common.utils import *
from common.cinder_conf import BaseConfig, CEPHBackendConfig
from common.share_disk import BaseShareDisk
try:
import simplejson as json
except ImportError:
import json
def _set_config_file(file, section, key, value):
set_config = BaseConfig.SET_CONFIG.format(
config_file=file,
section=section,
key=key,
value=value)
execute(set_config)
def config_share_disk(config, host_name):
# deploy share_disk
for item in config:
BaseShareDisk.single_instance().deploy_share_disk(item, host_name)
def config_cinder(config, cinder_host_ip=""):
# config xml and cinder.conf
for config in config['disk_array']:
# load disk array global config
backends = config['backend']
for item in backends.items():
BaseConfig.single_instance().config_backend(
item,
management_ips=config.get('management_ips', []),
data_ips=config.get('data_ips', []),
user_name=config.get('user_name', []),
user_pwd=config.get('user_pwd', []),
cinder_host_ip=cinder_host_ip)
# config multipath
config_computer()
# enable config
execute("systemctl restart openstack-cinder-volume.service")
def config_nova_with_rbd(config):
# config xml and cinder.conf
for config in config['disk_array']:
# load disk array global config
backends = config['backend']
for key, value in backends.items():
if value.get('volume_driver') == 'CEPH':
uuid_instance = uuid.uuid3(uuid.NAMESPACE_DNS, "zte.com.cn")
uuid_str = uuid_instance.urn.split(":")[2]
_set_config_file(CEPHBackendConfig.NOVA_CONF_FILE,
'libvirt',
'images_type',
'rbd')
_set_config_file(CEPHBackendConfig.NOVA_CONF_FILE,
'libvirt',
'rbd_secret_uuid',
uuid_str)
# enable config
execute("systemctl restart openstack-nova-compute.service")
return
def config_glance_with_rbd(config):
# config xml and cinder.conf
for config in config['disk_array']:
# load disk array global config
backends = config['backend']
for key, value in backends.items():
if value.get('volume_driver') == 'CEPH':
_set_config_file(CEPHBackendConfig.GLANCE_API_CONF_FILE,
'DEFAULT',
'show_image_direct_url',
'True')
_set_config_file(CEPHBackendConfig.GLANCE_API_CONF_FILE,
'glance_store',
'default_store',
'rbd')
# enable config
execute("systemctl restart openstack-glance-api.service")
return
def _launch_script():
def subcommand_launcher(args, valid_args_len, json_path, oper_type):
if len(args) < valid_args_len:
print_or_raise("Too few parameter is given,please check.",
ScriptInnerError)
with open(json_path, "r") as fp_json:
params = json.load(fp_json)
print_or_raise("-----Begin config %s, params is %s.-----" %
(oper_type, params))
return params
oper_type = sys.argv[1] if len(sys.argv) > 1 else ""
try:
if oper_type == "share_disk":
share_disk_config = \
subcommand_launcher(sys.argv, 3, "base/control.json",
oper_type)
config_share_disk(share_disk_config, sys.argv[2])
elif oper_type == "cinder_conf":
cinder_backend_config = subcommand_launcher(sys.argv, 3,
"base/cinder.json",
oper_type)
config_cinder(cinder_backend_config, sys.argv[2])
elif oper_type == "nova_rbd_conf":
nova_rbd_config = subcommand_launcher(sys.argv, 1,
"base/cinder.json",
oper_type)
config_nova_with_rbd(nova_rbd_config)
elif oper_type == "glance_rbd_conf":
glance_rbd_config = subcommand_launcher(sys.argv, 1,
"base/cinder.json",
oper_type)
config_glance_with_rbd(glance_rbd_config)
elif oper_type == "check_multipath":
print_or_raise("-----Begin config %s.-----")
config_computer()
elif oper_type == "debug":
pass
else:
print_or_raise("Script operation is not given,such as:share_disk,"
"cinder_conf,nova_rbd_conf,glance_rbd_conf,"
"check_multipath.", ScriptInnerError)
except Exception as e:
print_or_raise("----------Operation %s is Failed.----------\n"
"Exception call chain as follow,%s" %
(oper_type, traceback.format_exc()))
raise e
else:
print_or_raise("----------Operation %s is done!----------" %
oper_type)
if __name__ == "__main__":
_launch_script()

File diff suppressed because it is too large.

@@ -1,9 +0,0 @@
#!/bin/bash
scriptsdir=$(cd $(dirname $0) && pwd)
ISODIR=`mktemp -d /mnt/TFG_ISOXXXXXX`
mount -o loop $scriptsdir/*CGSL_VPLAT*.iso ${ISODIR}
cp ${ISODIR}/*CGSL_VPLAT*.bin $scriptsdir
umount ${ISODIR}
[ -e ${ISODIR} ] && rm -rf ${ISODIR}
$scriptsdir/*CGSL_VPLAT*.bin upgrade reboot

@@ -1,93 +0,0 @@
#!/bin/sh
# Make a host fully trust me, so that later ssh logins to it need no password
# Check that the arguments are valid
logfile=/var/log/trustme.log
function print_log
{
local prompt="$1"
echo -e "$prompt"
echo -e "`date -d today +"%Y-%m-%d %H:%M:%S"` $prompt" >> $logfile
}
ip=$1
if [ -z $ip ]; then
print_log "Usage: `basename $0` ipaddr passwd"
exit 1
fi
passwd=$2
if [ -z $passwd ]; then
print_log "Usage: `basename $0` ipaddr passwd"
exit 1
fi
rpm -qi sshpass >/dev/null
if [ $? != 0 ]; then
print_log "Please install sshpass first"
exit 1
fi
# Check whether the peer is reachable by ping
unreachable=`ping $ip -c 1 -W 3 | grep -c "100% packet loss"`
if [ $unreachable -eq 1 ]; then
print_log "host $ip is unreachable"
exit 1
fi
# Generate an ssh public key for this host if it does not exist yet
if [ ! -e ~/.ssh/id_dsa.pub ]; then
print_log "generating ssh public key ..."
ssh-keygen -t dsa -f /root/.ssh/id_dsa -N ""
if [ $? != 0 ]; then
print_log "ssh-keygen failed"
exit 1
fi
fi
# First remove the previously trusted public key on the peer
user=`whoami`
host=`hostname`
keyend="$user@$host"
print_log "my keyend = $keyend"
cmd="sed '/$keyend$/d' -i ~/.ssh/authorized_keys"
#echo cmd:$cmd
print_log "clear my old pub key on $ip ..."
sshpass -p $passwd ssh -o StrictHostKeyChecking=no $ip "rm -rf /root/.ssh/known_hosts"
if [ $? != 0 ]; then
print_log "ssh $ip to delete known_hosts failed"
exit 1
fi
sshpass -p $passwd ssh -o StrictHostKeyChecking=no $ip "touch ~/.ssh/authorized_keys"
if [ $? != 0 ]; then
print_log "ssh $ip to create file authorized_keys failed"
exit 1
fi
sshpass -p $passwd ssh -o StrictHostKeyChecking=no $ip "$cmd"
if [ $? != 0 ]; then
print_log "ssh $ip to edit authorized_keys failed"
exit 1
fi
# Copy the newly generated key over
print_log "copy my public key to $ip ..."
tmpfile=/tmp/`hostname`.key.pub
sshpass -p $passwd scp -o StrictHostKeyChecking=no ~/.ssh/id_dsa.pub $ip:$tmpfile
if [ $? != 0 ]; then
print_log "scp file to $ip failed"
exit 1
fi
# Append it to authorized_keys on the peer
print_log "on $ip, append my public key to ~/.ssh/authorized_keys ..."
sshpass -p $passwd ssh -o StrictHostKeyChecking=no $ip "cat $tmpfile >> ~/.ssh/authorized_keys"
if [ $? != 0 ]; then
print_log "ssh $ip to add public key for authorized_keys failed"
exit 1
fi
print_log "rm tmp file $ip:$tmpfile"
sshpass -p $passwd ssh -o StrictHostKeyChecking=no $ip "rm $tmpfile"
if [ $? != 0 ]; then
print_log "ssh $ip to delete tmp file failed"
exit 1
fi
print_log "trustme ok!"

@@ -1,62 +0,0 @@
#!/bin/sh
# Make a host fully trust me, so that later ssh logins to it need no password
# Check that the arguments are valid
ip=$1
if [ -z $ip ]; then
echo "Usage: `basename $0` ipaddr passwd" >&2
exit 1
fi
passwd=$2
if [ -z $passwd ]; then
echo "Usage: `basename $0` ipaddr passwd" >&2
exit 1
fi
rpm -qi sshpass >/dev/null
if [ $? != 0 ]; then
echo "Please install sshpass first!" >&2
exit 1
fi
# Check whether the peer is reachable by ping
unreachable=`ping $ip -c 1 -W 3 | grep -c "100% packet loss"`
if [ $unreachable -eq 1 ]; then
echo "host $ip is unreachable!!!"
exit 1
fi
# Generate an ssh public key for this host if it does not exist yet
if [ ! -e ~/.ssh/id_dsa.pub ]; then
echo "generating ssh public key ..."
ssh-keygen -t dsa -f /root/.ssh/id_dsa -N ""
fi
# First remove the previously trusted public key on the peer
user=`whoami`
host=`hostname`
keyend="$user@$host"
echo "my keyend = $keyend"
cmd="sed '/$keyend$/d' -i ~/.ssh/authorized_keys"
#echo cmd:$cmd
echo "clear my old pub key on $ip ..."
sshpass -p $passwd ssh $ip "rm -rf /root/.ssh/known_hosts"
sshpass -p $passwd ssh $ip "touch ~/.ssh/authorized_keys"
sshpass -p $passwd ssh $ip "$cmd"
# Copy the newly generated key over
echo "copy my public key to $ip ..."
tmpfile=/tmp/`hostname`.key.pub
sshpass -p $passwd scp ~/.ssh/id_dsa.pub $ip:$tmpfile
# Append it to authorized_keys on the peer
echo "on $ip, append my public key to ~/.ssh/authorized_keys ..."
sshpass -p $passwd ssh $ip "cat $tmpfile >> ~/.ssh/authorized_keys"
echo "rm tmp file $ip:$tmpfile"
sshpass -p $passwd ssh $ip "rm $tmpfile"
echo "trustme ok!"

@@ -1,17 +0,0 @@
[general]
nodeip=192.168.3.1
nodeid=1
hostname=sdn59
needzamp=y
zbpips=192.168.3.1
zbp_node_num=1
zbpnodelist=1,256
zampips=192.168.3.1
zamp_node_num=1
mongodbips=192.168.3.1
mongodb_node_num=1
zamp_vip=
mongodb_vip=
MacName=eth1
netid=1234
memmode=tiny

@@ -1,899 +0,0 @@
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from daisy.common import exception
import daisy.domain.proxy
from daisy import i18n
_ = i18n._
def is_image_mutable(context, image):
"""Return True if the image is mutable in this context."""
if context.is_admin:
return True
if image.owner is None or context.owner is None:
return False
return image.owner == context.owner
def proxy_image(context, image):
if is_image_mutable(context, image):
return ImageProxy(image, context)
else:
return ImmutableImageProxy(image, context)
def is_member_mutable(context, member):
"""Return True if the image is mutable in this context."""
if context.is_admin:
return True
if context.owner is None:
return False
return member.member_id == context.owner
def proxy_member(context, member):
if is_member_mutable(context, member):
return member
else:
return ImmutableMemberProxy(member)
def is_task_mutable(context, task):
"""Return True if the task is mutable in this context."""
if context.is_admin:
return True
if context.owner is None:
return False
return task.owner == context.owner
def is_task_stub_mutable(context, task_stub):
"""Return True if the task stub is mutable in this context."""
if context.is_admin:
return True
if context.owner is None:
return False
return task_stub.owner == context.owner
def proxy_task(context, task):
if is_task_mutable(context, task):
return task
else:
return ImmutableTaskProxy(task)
def proxy_task_stub(context, task_stub):
if is_task_stub_mutable(context, task_stub):
return task_stub
else:
return ImmutableTaskStubProxy(task_stub)
class ImageRepoProxy(daisy.domain.proxy.Repo):
def __init__(self, image_repo, context):
self.context = context
self.image_repo = image_repo
proxy_kwargs = {'context': self.context}
super(ImageRepoProxy, self).__init__(image_repo,
item_proxy_class=ImageProxy,
item_proxy_kwargs=proxy_kwargs)
def get(self, image_id):
image = self.image_repo.get(image_id)
return proxy_image(self.context, image)
def list(self, *args, **kwargs):
images = self.image_repo.list(*args, **kwargs)
return [proxy_image(self.context, i) for i in images]
class ImageMemberRepoProxy(daisy.domain.proxy.Repo):
def __init__(self, member_repo, image, context):
self.member_repo = member_repo
self.image = image
self.context = context
super(ImageMemberRepoProxy, self).__init__(member_repo)
def get(self, member_id):
if (self.context.is_admin or
self.context.owner in (self.image.owner, member_id)):
member = self.member_repo.get(member_id)
return proxy_member(self.context, member)
else:
message = _("You cannot get image member for %s")
raise exception.Forbidden(message % member_id)
def list(self, *args, **kwargs):
members = self.member_repo.list(*args, **kwargs)
if (self.context.is_admin or
self.context.owner == self.image.owner):
return [proxy_member(self.context, m) for m in members]
for member in members:
if member.member_id == self.context.owner:
return [proxy_member(self.context, member)]
message = _("You cannot get image member for %s")
raise exception.Forbidden(message % self.image.image_id)
def remove(self, image_member):
if (self.image.owner == self.context.owner or
self.context.is_admin):
self.member_repo.remove(image_member)
else:
message = _("You cannot delete image member for %s")
raise exception.Forbidden(message
% self.image.image_id)
def add(self, image_member):
if (self.image.owner == self.context.owner or
self.context.is_admin):
self.member_repo.add(image_member)
else:
message = _("You cannot add image member for %s")
raise exception.Forbidden(message
% self.image.image_id)
def save(self, image_member, from_state=None):
if (self.context.is_admin or
self.context.owner == image_member.member_id):
self.member_repo.save(image_member, from_state=from_state)
else:
message = _("You cannot update image member %s")
raise exception.Forbidden(message % image_member.member_id)
class ImageFactoryProxy(daisy.domain.proxy.ImageFactory):
def __init__(self, image_factory, context):
self.image_factory = image_factory
self.context = context
kwargs = {'context': self.context}
super(ImageFactoryProxy, self).__init__(image_factory,
proxy_class=ImageProxy,
proxy_kwargs=kwargs)
def new_image(self, **kwargs):
owner = kwargs.pop('owner', self.context.owner)
if not self.context.is_admin:
if owner is None or owner != self.context.owner:
message = _("You are not permitted to create images "
"owned by '%s'.")
raise exception.Forbidden(message % owner)
return super(ImageFactoryProxy, self).new_image(owner=owner, **kwargs)
class ImageMemberFactoryProxy(object):
def __init__(self, image_member_factory, context):
self.image_member_factory = image_member_factory
self.context = context
def new_image_member(self, image, member_id):
owner = image.owner
if not self.context.is_admin:
if owner is None or owner != self.context.owner:
message = _("You are not permitted to create image members "
"for the image.")
raise exception.Forbidden(message)
if image.visibility == 'public':
message = _("Public images do not have members.")
raise exception.Forbidden(message)
return self.image_member_factory.new_image_member(image, member_id)
def _immutable_attr(target, attr, proxy=None):
def get_attr(self):
value = getattr(getattr(self, target), attr)
if proxy is not None:
value = proxy(value)
return value
def forbidden(self, *args, **kwargs):
resource = getattr(self, 'resource_name', 'resource')
message = _("You are not permitted to modify '%(attr)s' on this "
"%(resource)s.")
raise exception.Forbidden(message % {'attr': attr,
'resource': resource})
return property(get_attr, forbidden, forbidden)
class ImmutableLocations(list):
def forbidden(self, *args, **kwargs):
message = _("You are not permitted to modify locations "
"for this image.")
raise exception.Forbidden(message)
def __deepcopy__(self, memo):
return ImmutableLocations(copy.deepcopy(list(self), memo))
append = forbidden
extend = forbidden
insert = forbidden
pop = forbidden
remove = forbidden
reverse = forbidden
sort = forbidden
__delitem__ = forbidden
__delslice__ = forbidden
__iadd__ = forbidden
__imul__ = forbidden
__setitem__ = forbidden
__setslice__ = forbidden
class ImmutableProperties(dict):
def forbidden_key(self, key, *args, **kwargs):
message = _("You are not permitted to modify '%s' on this image.")
raise exception.Forbidden(message % key)
def forbidden(self, *args, **kwargs):
message = _("You are not permitted to modify this image.")
raise exception.Forbidden(message)
__delitem__ = forbidden_key
__setitem__ = forbidden_key
pop = forbidden
popitem = forbidden
setdefault = forbidden
update = forbidden
class ImmutableTags(set):
def forbidden(self, *args, **kwargs):
message = _("You are not permitted to modify tags on this image.")
raise exception.Forbidden(message)
add = forbidden
clear = forbidden
difference_update = forbidden
intersection_update = forbidden
pop = forbidden
remove = forbidden
symmetric_difference_update = forbidden
update = forbidden
class ImmutableImageProxy(object):
def __init__(self, base, context):
self.base = base
self.context = context
self.resource_name = 'image'
name = _immutable_attr('base', 'name')
image_id = _immutable_attr('base', 'image_id')
status = _immutable_attr('base', 'status')
created_at = _immutable_attr('base', 'created_at')
updated_at = _immutable_attr('base', 'updated_at')
visibility = _immutable_attr('base', 'visibility')
min_disk = _immutable_attr('base', 'min_disk')
min_ram = _immutable_attr('base', 'min_ram')
protected = _immutable_attr('base', 'protected')
locations = _immutable_attr('base', 'locations', proxy=ImmutableLocations)
checksum = _immutable_attr('base', 'checksum')
owner = _immutable_attr('base', 'owner')
disk_format = _immutable_attr('base', 'disk_format')
container_format = _immutable_attr('base', 'container_format')
size = _immutable_attr('base', 'size')
virtual_size = _immutable_attr('base', 'virtual_size')
extra_properties = _immutable_attr('base', 'extra_properties',
proxy=ImmutableProperties)
tags = _immutable_attr('base', 'tags', proxy=ImmutableTags)
def delete(self):
message = _("You are not permitted to delete this image.")
raise exception.Forbidden(message)
def get_member_repo(self):
member_repo = self.base.get_member_repo()
return ImageMemberRepoProxy(member_repo, self, self.context)
def get_data(self, *args, **kwargs):
return self.base.get_data(*args, **kwargs)
def set_data(self, *args, **kwargs):
message = _("You are not permitted to upload data for this image.")
raise exception.Forbidden(message)
class ImmutableMemberProxy(object):
def __init__(self, base):
self.base = base
self.resource_name = 'image member'
id = _immutable_attr('base', 'id')
image_id = _immutable_attr('base', 'image_id')
member_id = _immutable_attr('base', 'member_id')
status = _immutable_attr('base', 'status')
created_at = _immutable_attr('base', 'created_at')
updated_at = _immutable_attr('base', 'updated_at')
class ImmutableTaskProxy(object):
def __init__(self, base):
self.base = base
self.resource_name = 'task'
task_id = _immutable_attr('base', 'task_id')
type = _immutable_attr('base', 'type')
status = _immutable_attr('base', 'status')
owner = _immutable_attr('base', 'owner')
expires_at = _immutable_attr('base', 'expires_at')
created_at = _immutable_attr('base', 'created_at')
updated_at = _immutable_attr('base', 'updated_at')
input = _immutable_attr('base', 'input')
message = _immutable_attr('base', 'message')
result = _immutable_attr('base', 'result')
def run(self, executor):
self.base.run(executor)
def begin_processing(self):
message = _("You are not permitted to set status on this task.")
raise exception.Forbidden(message)
def succeed(self, result):
message = _("You are not permitted to set status on this task.")
raise exception.Forbidden(message)
def fail(self, message):
message = _("You are not permitted to set status on this task.")
raise exception.Forbidden(message)
class ImmutableTaskStubProxy(object):
def __init__(self, base):
self.base = base
self.resource_name = 'task stub'
task_id = _immutable_attr('base', 'task_id')
type = _immutable_attr('base', 'type')
status = _immutable_attr('base', 'status')
owner = _immutable_attr('base', 'owner')
expires_at = _immutable_attr('base', 'expires_at')
created_at = _immutable_attr('base', 'created_at')
updated_at = _immutable_attr('base', 'updated_at')
class ImageProxy(daisy.domain.proxy.Image):
def __init__(self, image, context):
self.image = image
self.context = context
super(ImageProxy, self).__init__(image)
def get_member_repo(self, **kwargs):
if self.image.visibility == 'public':
message = _("Public images do not have members.")
raise exception.Forbidden(message)
else:
member_repo = self.image.get_member_repo(**kwargs)
return ImageMemberRepoProxy(member_repo, self, self.context)
class TaskProxy(daisy.domain.proxy.Task):
def __init__(self, task):
self.task = task
super(TaskProxy, self).__init__(task)
class TaskFactoryProxy(daisy.domain.proxy.TaskFactory):
def __init__(self, task_factory, context):
self.task_factory = task_factory
self.context = context
super(TaskFactoryProxy, self).__init__(
task_factory,
task_proxy_class=TaskProxy)
def new_task(self, **kwargs):
owner = kwargs.get('owner', self.context.owner)
# NOTE(nikhil): Unlike Images, Tasks are expected to have owner.
# We currently do not allow even admins to set the owner to None.
if owner is not None and (owner == self.context.owner
or self.context.is_admin):
return super(TaskFactoryProxy, self).new_task(**kwargs)
else:
message = _("You are not permitted to create this task with "
"owner as: %s")
raise exception.Forbidden(message % owner)
class TaskRepoProxy(daisy.domain.proxy.TaskRepo):
def __init__(self, task_repo, context):
self.task_repo = task_repo
self.context = context
super(TaskRepoProxy, self).__init__(task_repo)
def get(self, task_id):
task = self.task_repo.get(task_id)
return proxy_task(self.context, task)
class TaskStubRepoProxy(daisy.domain.proxy.TaskStubRepo):
def __init__(self, task_stub_repo, context):
self.task_stub_repo = task_stub_repo
self.context = context
super(TaskStubRepoProxy, self).__init__(task_stub_repo)
def list(self, *args, **kwargs):
task_stubs = self.task_stub_repo.list(*args, **kwargs)
return [proxy_task_stub(self.context, t) for t in task_stubs]
# Metadef Namespace classes
def is_namespace_mutable(context, namespace):
"""Return True if the namespace is mutable in this context."""
if context.is_admin:
return True
if context.owner is None:
return False
return namespace.owner == context.owner
def proxy_namespace(context, namespace):
if is_namespace_mutable(context, namespace):
return namespace
else:
return ImmutableMetadefNamespaceProxy(namespace)
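# Illustrative sketch (not part of the original module): each is_*_mutable /
# proxy_* pair dispatches between the raw domain object and an immutable
# wrapper. 'namespace_repo' here is a hypothetical repo instance.
#
#     ns = namespace_repo.get('OS::Compute::Hypervisor')
#     ns = proxy_namespace(context, ns)
#     # Admins and the owner get the original object back; any other caller
#     # gets an ImmutableMetadefNamespaceProxy whose save() and delete()
#     # raise exception.Forbidden.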
class ImmutableMetadefNamespaceProxy(object):
def __init__(self, base):
self.base = base
self.resource_name = 'namespace'
namespace_id = _immutable_attr('base', 'namespace_id')
namespace = _immutable_attr('base', 'namespace')
display_name = _immutable_attr('base', 'display_name')
description = _immutable_attr('base', 'description')
owner = _immutable_attr('base', 'owner')
visibility = _immutable_attr('base', 'visibility')
protected = _immutable_attr('base', 'protected')
created_at = _immutable_attr('base', 'created_at')
updated_at = _immutable_attr('base', 'updated_at')
def delete(self):
message = _("You are not permitted to delete this namespace.")
raise exception.Forbidden(message)
def save(self):
message = _("You are not permitted to update this namespace.")
raise exception.Forbidden(message)
class MetadefNamespaceProxy(daisy.domain.proxy.MetadefNamespace):
def __init__(self, namespace):
self.namespace_input = namespace
super(MetadefNamespaceProxy, self).__init__(namespace)
class MetadefNamespaceFactoryProxy(
daisy.domain.proxy.MetadefNamespaceFactory):
def __init__(self, meta_namespace_factory, context):
self.meta_namespace_factory = meta_namespace_factory
self.context = context
super(MetadefNamespaceFactoryProxy, self).__init__(
meta_namespace_factory,
meta_namespace_proxy_class=MetadefNamespaceProxy)
def new_namespace(self, **kwargs):
owner = kwargs.pop('owner', self.context.owner)
if not self.context.is_admin:
if owner is None or owner != self.context.owner:
message = _("You are not permitted to create namespace "
"owned by '%s'")
raise exception.Forbidden(message % (owner))
return super(MetadefNamespaceFactoryProxy, self).new_namespace(
owner=owner, **kwargs)
class MetadefNamespaceRepoProxy(daisy.domain.proxy.MetadefNamespaceRepo):
def __init__(self, namespace_repo, context):
self.namespace_repo = namespace_repo
self.context = context
super(MetadefNamespaceRepoProxy, self).__init__(namespace_repo)
def get(self, namespace):
namespace_obj = self.namespace_repo.get(namespace)
return proxy_namespace(self.context, namespace_obj)
def list(self, *args, **kwargs):
namespaces = self.namespace_repo.list(*args, **kwargs)
return [proxy_namespace(self.context, namespace) for
namespace in namespaces]
# Metadef Object classes
def is_object_mutable(context, object):
"""Return True if the object is mutable in this context."""
if context.is_admin:
return True
if context.owner is None:
return False
return object.namespace.owner == context.owner
def proxy_object(context, object):
if is_object_mutable(context, object):
return object
else:
return ImmutableMetadefObjectProxy(object)
class ImmutableMetadefObjectProxy(object):
def __init__(self, base):
self.base = base
self.resource_name = 'object'
object_id = _immutable_attr('base', 'object_id')
name = _immutable_attr('base', 'name')
required = _immutable_attr('base', 'required')
description = _immutable_attr('base', 'description')
properties = _immutable_attr('base', 'properties')
created_at = _immutable_attr('base', 'created_at')
updated_at = _immutable_attr('base', 'updated_at')
def delete(self):
message = _("You are not permitted to delete this object.")
raise exception.Forbidden(message)
def save(self):
message = _("You are not permitted to update this object.")
raise exception.Forbidden(message)
class MetadefObjectProxy(daisy.domain.proxy.MetadefObject):
def __init__(self, meta_object):
self.meta_object = meta_object
super(MetadefObjectProxy, self).__init__(meta_object)
class MetadefObjectFactoryProxy(daisy.domain.proxy.MetadefObjectFactory):
def __init__(self, meta_object_factory, context):
self.meta_object_factory = meta_object_factory
self.context = context
super(MetadefObjectFactoryProxy, self).__init__(
meta_object_factory,
meta_object_proxy_class=MetadefObjectProxy)
def new_object(self, **kwargs):
owner = kwargs.pop('owner', self.context.owner)
if not self.context.is_admin:
if owner is None or owner != self.context.owner:
message = _("You are not permitted to create object "
"owned by '%s'")
raise exception.Forbidden(message % (owner))
return super(MetadefObjectFactoryProxy, self).new_object(**kwargs)
class MetadefObjectRepoProxy(daisy.domain.proxy.MetadefObjectRepo):
def __init__(self, object_repo, context):
self.object_repo = object_repo
self.context = context
super(MetadefObjectRepoProxy, self).__init__(object_repo)
def get(self, namespace, object_name):
meta_object = self.object_repo.get(namespace, object_name)
return proxy_object(self.context, meta_object)
def list(self, *args, **kwargs):
objects = self.object_repo.list(*args, **kwargs)
return [proxy_object(self.context, meta_object) for
meta_object in objects]
# Metadef ResourceType classes
def is_meta_resource_type_mutable(context, meta_resource_type):
"""Return True if the meta_resource_type is mutable in this context."""
if context.is_admin:
return True
if context.owner is None:
return False
    # (lakshmiS): a resource type can exist without an association with a
    # namespace, and resource types cannot be created/updated/deleted
    # directly (they have to be associated/disassociated from a namespace)
if meta_resource_type.namespace:
return meta_resource_type.namespace.owner == context.owner
else:
return False
def proxy_meta_resource_type(context, meta_resource_type):
if is_meta_resource_type_mutable(context, meta_resource_type):
return meta_resource_type
else:
return ImmutableMetadefResourceTypeProxy(meta_resource_type)
class ImmutableMetadefResourceTypeProxy(object):
def __init__(self, base):
self.base = base
self.resource_name = 'meta_resource_type'
namespace = _immutable_attr('base', 'namespace')
name = _immutable_attr('base', 'name')
prefix = _immutable_attr('base', 'prefix')
properties_target = _immutable_attr('base', 'properties_target')
created_at = _immutable_attr('base', 'created_at')
updated_at = _immutable_attr('base', 'updated_at')
def delete(self):
message = _("You are not permitted to delete this meta_resource_type.")
raise exception.Forbidden(message)
class MetadefResourceTypeProxy(daisy.domain.proxy.MetadefResourceType):
def __init__(self, meta_resource_type):
self.meta_resource_type = meta_resource_type
super(MetadefResourceTypeProxy, self).__init__(meta_resource_type)
class MetadefResourceTypeFactoryProxy(
daisy.domain.proxy.MetadefResourceTypeFactory):
def __init__(self, resource_type_factory, context):
self.meta_resource_type_factory = resource_type_factory
self.context = context
super(MetadefResourceTypeFactoryProxy, self).__init__(
resource_type_factory,
resource_type_proxy_class=MetadefResourceTypeProxy)
def new_resource_type(self, **kwargs):
owner = kwargs.pop('owner', self.context.owner)
if not self.context.is_admin:
if owner is None or owner != self.context.owner:
message = _("You are not permitted to create resource_type "
"owned by '%s'")
raise exception.Forbidden(message % (owner))
return super(MetadefResourceTypeFactoryProxy, self).new_resource_type(
**kwargs)
class MetadefResourceTypeRepoProxy(
daisy.domain.proxy.MetadefResourceTypeRepo):
def __init__(self, meta_resource_type_repo, context):
self.meta_resource_type_repo = meta_resource_type_repo
self.context = context
super(MetadefResourceTypeRepoProxy, self).__init__(
meta_resource_type_repo)
def list(self, *args, **kwargs):
meta_resource_types = self.meta_resource_type_repo.list(
*args, **kwargs)
return [proxy_meta_resource_type(self.context, meta_resource_type) for
meta_resource_type in meta_resource_types]
def get(self, *args, **kwargs):
meta_resource_type = self.meta_resource_type_repo.get(*args, **kwargs)
return proxy_meta_resource_type(self.context, meta_resource_type)
# Metadef namespace properties classes
def is_namespace_property_mutable(context, namespace_property):
"""Return True if the object is mutable in this context."""
if context.is_admin:
return True
if context.owner is None:
return False
return namespace_property.namespace.owner == context.owner
def proxy_namespace_property(context, namespace_property):
if is_namespace_property_mutable(context, namespace_property):
return namespace_property
else:
return ImmutableMetadefPropertyProxy(namespace_property)
class ImmutableMetadefPropertyProxy(object):
def __init__(self, base):
self.base = base
self.resource_name = 'namespace_property'
property_id = _immutable_attr('base', 'property_id')
name = _immutable_attr('base', 'name')
schema = _immutable_attr('base', 'schema')
def delete(self):
message = _("You are not permitted to delete this property.")
raise exception.Forbidden(message)
def save(self):
message = _("You are not permitted to update this property.")
raise exception.Forbidden(message)
class MetadefPropertyProxy(daisy.domain.proxy.MetadefProperty):
def __init__(self, namespace_property):
self.meta_object = namespace_property
super(MetadefPropertyProxy, self).__init__(namespace_property)
class MetadefPropertyFactoryProxy(daisy.domain.proxy.MetadefPropertyFactory):
def __init__(self, namespace_property_factory, context):
self.meta_object_factory = namespace_property_factory
self.context = context
super(MetadefPropertyFactoryProxy, self).__init__(
namespace_property_factory,
property_proxy_class=MetadefPropertyProxy)
def new_namespace_property(self, **kwargs):
owner = kwargs.pop('owner', self.context.owner)
if not self.context.is_admin:
if owner is None or owner != self.context.owner:
message = _("You are not permitted to create property "
"owned by '%s'")
raise exception.Forbidden(message % (owner))
return super(MetadefPropertyFactoryProxy, self).new_namespace_property(
**kwargs)
class MetadefPropertyRepoProxy(daisy.domain.proxy.MetadefPropertyRepo):
def __init__(self, namespace_property_repo, context):
self.namespace_property_repo = namespace_property_repo
self.context = context
super(MetadefPropertyRepoProxy, self).__init__(namespace_property_repo)
def get(self, namespace, object_name):
namespace_property = self.namespace_property_repo.get(namespace,
object_name)
return proxy_namespace_property(self.context, namespace_property)
def list(self, *args, **kwargs):
namespace_properties = self.namespace_property_repo.list(
*args, **kwargs)
return [proxy_namespace_property(self.context, namespace_property) for
namespace_property in namespace_properties]
# Metadef Tag classes
def is_tag_mutable(context, tag):
"""Return True if the tag is mutable in this context."""
if context.is_admin:
return True
if context.owner is None:
return False
return tag.namespace.owner == context.owner
def proxy_tag(context, tag):
if is_tag_mutable(context, tag):
return tag
else:
return ImmutableMetadefTagProxy(tag)
class ImmutableMetadefTagProxy(object):
def __init__(self, base):
self.base = base
self.resource_name = 'tag'
tag_id = _immutable_attr('base', 'tag_id')
name = _immutable_attr('base', 'name')
created_at = _immutable_attr('base', 'created_at')
updated_at = _immutable_attr('base', 'updated_at')
def delete(self):
message = _("You are not permitted to delete this tag.")
raise exception.Forbidden(message)
def save(self):
message = _("You are not permitted to update this tag.")
raise exception.Forbidden(message)
class MetadefTagProxy(daisy.domain.proxy.MetadefTag):
pass
class MetadefTagFactoryProxy(daisy.domain.proxy.MetadefTagFactory):
def __init__(self, meta_tag_factory, context):
self.meta_tag_factory = meta_tag_factory
self.context = context
super(MetadefTagFactoryProxy, self).__init__(
meta_tag_factory,
meta_tag_proxy_class=MetadefTagProxy)
def new_tag(self, **kwargs):
owner = kwargs.pop('owner', self.context.owner)
if not self.context.is_admin:
if owner is None:
message = _("Owner must be specified to create a tag.")
raise exception.Forbidden(message)
elif owner != self.context.owner:
message = _("You are not permitted to create a tag"
" in the namespace owned by '%s'")
raise exception.Forbidden(message % (owner))
return super(MetadefTagFactoryProxy, self).new_tag(**kwargs)
class MetadefTagRepoProxy(daisy.domain.proxy.MetadefTagRepo):
def __init__(self, tag_repo, context):
self.tag_repo = tag_repo
self.context = context
super(MetadefTagRepoProxy, self).__init__(tag_repo)
def get(self, namespace, tag_name):
meta_tag = self.tag_repo.get(namespace, tag_name)
return proxy_tag(self.context, meta_tag)
def list(self, *args, **kwargs):
tags = self.tag_repo.list(*args, **kwargs)
return [proxy_tag(self.context, meta_tag) for
meta_tag in tags]
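# Illustrative sketch (assumed usage, not in the original module): repo
# proxies re-wrap every object they hand out, so mutability is decided per
# caller. 'tag_repo' and 'context' are hypothetical here.
#
#     repo = MetadefTagRepoProxy(tag_repo, context)
#     for tag in repo.list():
#         # each tag is either the raw object (owner/admin) or an
#         # ImmutableMetadefTagProxy (everyone else)
#         print(tag.name)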

View File

@@ -16,34 +16,16 @@
 """
 /install endpoint for tecs API
 """
-import copy
 import subprocess
 import time
-import traceback
-import webob.exc
-from oslo_config import cfg
 from oslo_log import log as logging
 from webob.exc import HTTPBadRequest
-from webob.exc import HTTPForbidden
-from threading import Thread
 from daisy import i18n
-from daisy import notifier
-from daisy.api import policy
-import daisy.api.v1
 from daisy.common import exception
 import daisy.registry.client.v1.api as registry
-try:
-    import simplejson as json
-except ImportError:
-    import json
 LOG = logging.getLogger(__name__)
 _ = i18n._
 _LE = i18n._LE
@@ -56,6 +38,7 @@ zenic_backend_name = "zenic"
 proton_backend_name = "proton"
 os_install_start_time = 0.0
+
 def subprocess_call(command, file=None):
     if file:
         return_code = subprocess.call(command,
@@ -71,6 +54,7 @@ def subprocess_call(command, file=None):
         msg = "execute '%s' failed by subprocess call." % command
         raise exception.SubprocessCmdFailed(msg)
+
 def get_host_detail(req, host_id):
     try:
         host_detail = registry.get_host_metadata(req.context, host_id)
@@ -78,6 +62,7 @@ def get_host_detail(req, host_id):
         raise HTTPBadRequest(explanation=e.msg, request=req)
     return host_detail
+
 def get_roles_detail(req):
     try:
         roles = registry.get_roles_detail(req.context)
@@ -85,6 +70,7 @@ def get_roles_detail(req):
         raise HTTPBadRequest(explanation=e.msg, request=req)
     return roles
+
 def get_cluster_roles_detail(req, cluster_id):
     try:
         params = {'cluster_id': cluster_id}
@@ -93,6 +79,7 @@ def get_cluster_roles_detail(req, cluster_id):
         raise HTTPBadRequest(explanation=e.msg, request=req)
     return roles
+
 def get_hosts_of_role(req, role_id):
     try:
         hosts = registry.get_role_host_metadata(req.context, role_id)
@@ -100,6 +87,7 @@ def get_hosts_of_role(req, role_id):
         raise HTTPBadRequest(explanation=e.msg, request=req)
     return hosts
+
 def get_role_detail(req, role_id):
     try:
         role = registry.get_role_metadata(req.context, role_id)
@@ -107,24 +95,58 @@ def get_role_detail(req, role_id):
         raise HTTPBadRequest(explanation=e.msg, request=req)
     return role
 
+
+def get_cluster_configs_list(req, cluster_id):
+    roles = get_cluster_roles_detail(req, cluster_id)
+    config_set_list = [role['config_set_id'] for role in roles]
+    cluster_configs_list = []
+    for config_set_id in config_set_list:
+        config_set_metadata = registry.get_config_set_metadata(req.context,
+                                                               config_set_id)
+        if config_set_metadata.get('config', None):
+            cluster_configs_list.extend(config_set_metadata['config'])
+    return cluster_configs_list
+
+
 def update_role(req, role_id, role_meta):
     try:
         registry.update_role_metadata(req.context, role_id, role_meta)
     except exception.Invalid as e:
         raise HTTPBadRequest(explanation=e.msg, request=req)
 
+
 def update_role_host(req, role_id, role_host):
     try:
         registry.update_role_host_metadata(req.context, role_id, role_host)
     except exception.Invalid as e:
         raise HTTPBadRequest(explanation=e.msg, request=req)
 
+
+def set_role_status_and_progress(req, cluster_id, opera, status,
+                                 backend_name='tecs'):
+    """
+    set information in role of some backend.
+    :status:key in host_role tables, such as:
+        {'messages':'Waiting','progress': '0'}
+    """
+    roles = get_cluster_roles_detail(req, cluster_id)
+    for role in roles:
+        if role.get('deployment_backend') == backend_name:
+            role_hosts = get_hosts_of_role(req, role['id'])
+            for role_host in role_hosts:
+                if (opera == 'upgrade' and role_host['status'] in ['active']) \
+                        or (opera == 'install' and role_host['status'] not in
+                            ['active', 'updating', 'update-failed']):
+                    update_role_host(req, role_host['id'], status)
+
+
 def delete_role_hosts(req, role_id):
     try:
         registry.delete_role_host_metadata(req.context, role_id)
     except exception.Invalid as e:
         raise HTTPBadRequest(explanation=e.msg, request=req)
 
+
 def get_cluster_networks_detail(req, cluster_id):
     try:
         networks = registry.get_networks_detail(req.context, cluster_id)
@@ -132,23 +154,29 @@ def get_cluster_networks_detail(req, cluster_id):
         raise HTTPBadRequest(explanation=e.msg, request=req)
     return networks
 
+
 def get_assigned_network(req, host_interface_id, network_id):
     try:
-        assigned_network = registry.get_assigned_network(req.context, host_interface_id, network_id)
+        assigned_network = registry.get_assigned_network(
+            req.context, host_interface_id, network_id)
     except exception.Invalid as e:
         raise HTTPBadRequest(explanation=e.msg, request=req)
     return assigned_network
 
+
 def _ping_hosts_test(ips):
     ping_cmd = 'fping'
    for ip in set(ips):
         ping_cmd = ping_cmd + ' ' + ip
-    obj = subprocess.Popen(ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    obj = subprocess.Popen(
+        ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     (stdoutput, erroutput) = obj.communicate()
     _returncode = obj.returncode
     if _returncode == 0 or _returncode == 1:
         ping_result = stdoutput.split('\n')
-        unreachable_hosts = [result.split()[0] for result in ping_result if result and result.split()[2] != 'alive']
+        unreachable_hosts = [result.split(
+        )[0] for result in ping_result if result and
+            result.split()[2] != 'alive']
     else:
         msg = "ping failed beaceuse there is invlid ip in %s" % ips
         raise exception.InvalidIP(msg)
@@ -170,58 +198,73 @@ def check_ping_hosts(ping_ips, max_ping_times):
         ping_count += 1
         if ips:
-            LOG.debug(_("ping host %s for %s times" % (','.join(ips), ping_count)))
+            LOG.debug(
+                _("ping host %s for %s times" % (','.join(ips), ping_count)))
             if ping_count >= max_ping_times:
-                LOG.info(_("ping host %s timeout for %ss" % (','.join(ips), ping_count*time_step)))
+                LOG.info(_("ping host %s timeout for %ss" %
+                           (','.join(ips), ping_count * time_step)))
                 return ips
             time.sleep(time_step)
         else:
             LOG.info(_("ping %s successfully" % ','.join(ping_ips)))
             return ips
 
+
 def _ping_reachable_to_unreachable_host_test(ip, max_ping_times):
     ping_cmd = 'fping'
     ping_cmd = ping_cmd + ' ' + ip
     ping_count = 0
     time_step = 5
     while True:
-        obj = subprocess.Popen(ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        obj = subprocess.Popen(
+            ping_cmd, shell=True, stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE)
         (stdoutput, erroutput) = obj.communicate()
         _returncode = obj.returncode
         if _returncode != 0:
             return True
         ping_count += 1
         if ping_count >= max_ping_times:
-            LOG.info(_("ping host %s timeout for %ss" % (ip, ping_count*time_step)))
+            LOG.info(
+                _("ping host %s timeout for %ss"
+                  % (ip, ping_count * time_step)))
             return False
         time.sleep(time_step)
     return False
 
+
 def _ping_unreachable_to_reachable_host_test(ip, max_ping_times):
     ping_count = 0
     time_step = 5
     ping_cmd = 'fping'
     ping_cmd = ping_cmd + ' ' + ip
     while True:
-        obj = subprocess.Popen(ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        obj = subprocess.Popen(
+            ping_cmd, shell=True, stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE)
         (stdoutput, erroutput) = obj.communicate()
         _returncode = obj.returncode
         if _returncode == 0:
             return True
         ping_count += 1
         if ping_count >= max_ping_times:
-            LOG.info(_("ping host %s timeout for %ss" % (ip, ping_count*time_step)))
+            LOG.info(
+                _("ping host %s timeout for %ss"
+                  % (ip, ping_count * time_step)))
             return False
         time.sleep(time_step)
     return False
 
+
 def check_reboot_ping(ip):
-    stop_max_ping_times = 360  # ha host reboot may spend 20 min,so timeout time is 30min
+    # ha host reboot may spend 20 min,so timeout time is 30min
+    stop_max_ping_times = 360
     start_max_ping_times = 60
     _ping_reachable_to_unreachable_host_test(ip, stop_max_ping_times)
     _ping_unreachable_to_reachable_host_test(ip, start_max_ping_times)
     time.sleep(5)
 
+
 def cidr_to_netmask(cidr):
     ip_netmask = cidr.split('/')
     if len(ip_netmask) != 2 or not ip_netmask[1]:
@@ -229,7 +272,96 @@ def cidr_to_netmask(cidr):
     cidr_end = ip_netmask[1]
     mask = ~(2 ** (32 - int(cidr_end)) - 1)
-    inter_ip = lambda x: '.'.join([str(x/(256**i)%256) for i in range(3,-1,-1)])
+    inter_ip = lambda x: '.'.join(
+        [str(x / (256 ** i) % 256) for i in range(3, -1, -1)])
     netmask = inter_ip(mask)
     return netmask
 
+
+def get_rpm_package_by_name(path, rpm_name):
+    cmd = "ls %s | grep ^%s.*\.rpm" % (path, rpm_name)
+    try:
+        rpm_name = subprocess.check_output(
+            cmd, shell=True, stderr=subprocess.STDOUT).split('\n')[0]
+    except subprocess.CalledProcessError:
+        msg = _("Get rpm %s failed in %s!" % (rpm_name, path))
+        raise exception.SubprocessCmdFailed(message=msg)
+    return rpm_name
+
+
+def remote_remove_rpm(rpm_name, dest_ip):
+    remove_cmd = 'clush -S -w %s "rpm -q %s && rpm -e %s"' % (dest_ip,
+                                                              rpm_name,
+                                                              rpm_name)
+    subprocess.call(remove_cmd,
+                    shell=True,
+                    stdout=open('/dev/null', 'w'),
+                    stderr=subprocess.STDOUT)
+
+
+def remote_install_rpm(rpm_name, rpm_src_path, rpm_dest_path, dest_ips):
+    rpm_package = get_rpm_package_by_name(rpm_src_path, rpm_name)
+    for dest_ip in dest_ips:
+        scp_rpm = "scp -o ConnectTimeout=10 %s/%s root@%s:%s" \
+                  % (rpm_src_path, rpm_package, dest_ip, rpm_dest_path)
+        subprocess_call(scp_rpm)
+        remote_remove_rpm(rpm_name, dest_ip)
+        install_cmd = 'clush -S -w %s "rpm -i %s/%s"' % (dest_ip,
+                                                         rpm_dest_path,
+                                                         rpm_package)
+        subprocess_call(install_cmd)
+
+
+def remote_upgrade_rpm(rpm_name, rpm_src_path, rpm_dest_path, dest_ip):
+    rpm_package = get_rpm_package_by_name(rpm_src_path, rpm_name)
+    scp_rpm = "scp -o ConnectTimeout=10 %s/%s root@%s:%s" \
+              % (rpm_src_path, rpm_package, dest_ip, rpm_dest_path)
+    subprocess_call(scp_rpm)
+    upgrade_cmd = 'clush -S -w %s "rpm -U %s/%s"' % (dest_ip,
+                                                     rpm_dest_path,
+                                                     rpm_package)
+    subprocess.call(upgrade_cmd,
+                    shell=True,
+                    stdout=open('/dev/null', 'w'),
+                    stderr=subprocess.STDOUT)
+
+
+def trust_me(host_ips, root_passwd):
+    for host_ip in host_ips:
+        count = 0
+        try_times = 10
+        while count < try_times:
+            try:
+                trust_me_cmd = "/var/lib/daisy/tecs/trustme.sh\
+                    %s %s" % (host_ip, root_passwd)
+                subprocess_call(trust_me_cmd)
+            except:
+                count += 1
+                LOG.info("Trying to trust '%s' for %s times" %
+                         (host_ip, count))
+                time.sleep(2)
+                if count >= try_times:
+                    message = "Setup trust for '%s' failed,"\
+                        "see '/var/log/trustme.log' please" % (host_ip)
+                    raise exception.TrustMeFailed(message=message)
+            else:
+                message = "Setup trust to '%s' successfully" % (host_ip)
+                LOG.info(message)
+                break
+
+
+def calc_host_iqn(min_mac):
+    cmd = "echo -n %s |openssl md5" % min_mac
+    obj = subprocess.Popen(cmd,
+                           shell=True,
+                           stdout=subprocess.PIPE,
+                           stderr=subprocess.PIPE)
+    (stdoutput, erroutput) = obj.communicate()
+    iqn = ""
+    if stdoutput:
+        get_uuid = stdoutput.split('=')[1]
+        iqn = "iqn.opencos.rh:" + get_uuid.strip()
+    return iqn
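
A quick worked example of two helpers above (illustrative only; not part of the commit). cidr_to_netmask derives the dotted mask purely from the prefix length, and calc_host_iqn hashes the MAC string with openssl:

    cidr_to_netmask('192.168.1.10/24')   # -> '255.255.255.0'
    cidr_to_netmask('10.43.178.1/23')    # -> '255.255.254.0', i.e. ~(2**9 - 1)
    # calc_host_iqn('00:0c:29:aa:bb:cc') returns 'iqn.opencos.rh:' plus the
    # hex md5 of the MAC text, the same digest produced by:
    #   echo -n 00:0c:29:aa:bb:cc | openssl md5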

View File

@@ -17,18 +17,15 @@
 """
 Driver base-classes:
-(Beginning of) the contract that deployment backends drivers must follow, and shared
-types that support that contract
+(Beginning of) the contract that deployment backends drivers must follow,
+and shared types that support that contract
 """
-import sys
-from oslo_config import cfg
 from oslo_log import log as logging
 from oslo_utils import importutils
 from daisy import i18n
-from daisy.common import exception
 _ = i18n._
 _LE = i18n._LE
@@ -36,10 +33,13 @@ _LI = i18n._LI
 _LW = i18n._LW
 LOG = logging.getLogger(__name__)
+
+
 class DeploymentDriver(object):
     """base class for deployment interface.
     """
+
     def install(self, req, cluster_id):
         raise NotImplementedError()
@@ -65,12 +65,14 @@ class DeploymentDriver(object):
         LOG.info(_("driver no interface for 'update_disk_array'"))
         return {}
 
+
 def check_isinstance(obj, cls):
     """Checks that obj is of type cls, and lets PyLint infer types."""
     if isinstance(obj, cls):
         return obj
     raise Exception(_('Expected object of type: %s') % (str(cls)))
 
+
 def load_deployment_dirver(backend_name):
     """Load a cluster backend installation driver.
     """
@@ -78,8 +80,11 @@ def load_deployment_dirver(backend_name):
     LOG.info(_("Loading deployment backend '%s'") % backend_driver)
     try:
-        driver = importutils.import_object_ns('daisy.api.backends', backend_driver)
+        driver = importutils.import_object_ns(
+            'daisy.api.backends', backend_driver)
         return check_isinstance(driver, DeploymentDriver)
     except ImportError:
-        LOG.exception(_("Error, unable to load the deployment backends '%s'" % backend_driver))
+        LOG.exception(
+            _("Error, unable to load the deployment backends '%s'"
+              % backend_driver))
         return None
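
For context, a minimal sketch of how this loader is typically driven (an assumed call site; 'tecs' is one of the backend names used elsewhere in this commit):

    from daisy.api.backends import driver

    backend = driver.load_deployment_dirver('tecs')
    if backend:                      # None is returned on ImportError
        backend.install(req, cluster_id)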

View File

@@ -20,36 +20,28 @@ import copy
 import subprocess
 import time
-import traceback
-import webob.exc
 from oslo_config import cfg
 from oslo_log import log as logging
 from webob.exc import HTTPBadRequest
-from webob.exc import HTTPForbidden
-from webob.exc import HTTPServerError
 import threading
-from threading import Thread
 from daisy import i18n
-from daisy import notifier
-from daisy.api import policy
-import daisy.api.v1
 from daisy.common import exception
+from daisy.api import common
+from daisy.common import utils
 import daisy.registry.client.v1.api as registry
-from daisy.api.backends.tecs import config
-from daisy.api.backends import driver
-from daisy.api.network_api import network as neutron
 from ironicclient import client as ironic_client
+from daisyclient.v1 import client as daisy_client
 import daisy.api.backends.common as daisy_cmn
 import daisy.api.backends.tecs.common as tecs_cmn
-try:
-    import simplejson as json
-except ImportError:
-    import json
+import ConfigParser
+
+DISCOVER_DEFAULTS = {
+    'listen_port': '5050',
+    'ironic_url': 'http://127.0.0.1:6385/v1',
+}
 LOG = logging.getLogger(__name__)
 _ = i18n._
@@ -71,6 +63,7 @@ CONF.register_opts(upgrade_opts)
 host_os_status = {
     'INIT': 'init',
+    'PRE_INSTALL': 'pre-install',
     'INSTALLING': 'installing',
     'ACTIVE': 'active',
     'INSTALL_FAILED': 'install-failed',
@@ -78,48 +71,97 @@ host_os_status = {
     'UPDATE_FAILED': 'update-failed'
 }
-LINUX_BOND_MODE = {'balance-rr':'0', 'active-backup':'1', 'balance-xor':'2', 'broadcast':'3','802.3ad':'4', 'balance-tlb':'5', 'balance-alb':'6'}
+LINUX_BOND_MODE = {'balance-rr': '0', 'active-backup': '1',
+                   'balance-xor': '2', 'broadcast': '3',
+                   '802.3ad': '4', 'balance-tlb': '5',
+                   'balance-alb': '6'}
 daisy_tecs_path = tecs_cmn.daisy_tecs_path
 
+
 def get_ironicclient():  # pragma: no cover
     """Get Ironic client instance."""
+    config_discoverd = ConfigParser.ConfigParser(defaults=DISCOVER_DEFAULTS)
+    config_discoverd.read("/etc/ironic-discoverd/discoverd.conf")
+    ironic_url = config_discoverd.get("discoverd", "ironic_url")
     args = {'os_auth_token': 'fake',
-            'ironic_url': 'http://127.0.0.1:6385/v1'}
+            'ironic_url': ironic_url}
     return ironic_client.get_client(1, **args)
 
+
+def get_daisyclient():
+    """Get Daisy client instance."""
+    config_daisy = ConfigParser.ConfigParser()
+    config_daisy.read("/etc/daisy/daisy-api.conf")
+    daisy_port = config_daisy.get("DEFAULT", "bind_port")
+    args = {'version': 1.0, 'endpoint': 'http://127.0.0.1:' + daisy_port}
+    return daisy_client.Client(**args)
+
+
 def pxe_server_build(req, install_meta):
-    cluster_id = install_meta['cluster_id']
+    params = {'filters': {'type': 'system'}}
     try:
-        networks = registry.get_networks_detail(req.context, cluster_id)
+        networks = registry.get_all_networks(req.context, **params)
     except exception.Invalid as e:
         raise HTTPBadRequest(explanation=e.msg, request=req)
     try:
-        ip_inter = lambda x: sum([256**j*int(i) for j, i in enumerate(x.split('.')[::-1])])
-        inter_ip = lambda x: '.'.join([str(x/(256**i)%256) for i in range(3,-1,-1)])
-        network_cidr = [network['cidr'] for network in networks if network['name'] == 'DEPLOYMENT'][0]
+        ip_inter = lambda x: sum([256 ** j * int(i)
+                                  for j, i in enumerate(x.split('.')[::-1])])
+        inter_ip = lambda x: '.'.join(
+            [str(x / (256**i) % 256) for i in range(3, -1, -1)])
+        for network in networks:
+            if 'system' in network['type']:
+                network_cidr = network.get('cidr')
         if not network_cidr:
-            msg = "Error:The CIDR is blank of DEPLOYMENT!"
+            msg = "Error:The CIDR is blank of pxe server!"
+            LOG.error(msg)
             raise exception.Forbidden(msg)
         cidr_end = network_cidr.split('/')[1]
-        ip_addr = network_cidr.split('/')[0]
-        ip_addr_int = ip_inter(ip_addr)
         mask = ~(2**(32 - int(cidr_end)) - 1)
         net_mask = inter_ip(mask)
+        pxe_server_ip = network.get('ip')
+        ip_ranges = network.get('ip_ranges')
+        for ip_range in ip_ranges:
+            client_ip_begin = ip_range.get('start')
+            client_ip_end = ip_range.get('end')
+        ip_addr = network_cidr.split('/')[0]
+        ip_addr_int = ip_inter(ip_addr)
         ip_addr_min = inter_ip(ip_addr_int & (mask & 0xffffffff))
         ip_addr_max = inter_ip(ip_addr_int | (~mask & 0xffffffff))
-        pxe_server_ip = inter_ip((ip_inter(ip_addr_min)) + 1)
-        client_ip_begin = inter_ip((ip_inter(ip_addr_min)) + 2)
-        client_ip_end = ip_addr_max
-        args = {'build_pxe': 'yes', 'eth_name': install_meta['deployment_interface'], 'ip_address': pxe_server_ip, 'net_mask': net_mask,
-                'client_ip_begin': client_ip_begin, 'client_ip_end': client_ip_end}
+        if not client_ip_begin and not client_ip_end:
+            client_ip_begin = inter_ip((ip_inter(ip_addr_min)) + 2)
+            client_ip_end = ip_addr_max
+        if pxe_server_ip:
+            ip_in_cidr = utils.is_ip_in_cidr(pxe_server_ip,
+                                             network_cidr)
+            if not ip_in_cidr:
+                msg = "Error:The ip '%s' is not in cidr '%s'" \
+                      " range." % (pxe_server_ip, network_cidr)
+                LOG.error(msg)
+                raise HTTPBadRequest(explanation=msg)
+        else:
+            pxe_server_ip = inter_ip((ip_inter(ip_addr_min)) + 1)
+        eth_name = install_meta.get('deployment_interface')
+        if not eth_name:
+            msg = "Error:The nic name is blank of build pxe server!"
+            LOG.error(msg)
+            raise exception.Forbidden(msg)
+        args = {'build_pxe': 'yes',
+                'eth_name': eth_name,
+                'ip_address': pxe_server_ip,
+                'net_mask': net_mask,
+                'client_ip_begin': client_ip_begin,
+                'client_ip_end': client_ip_end}
         ironic = get_ironicclient()
         ironic.daisy.build_pxe(**args)
     except exception.Invalid as e:
         msg = "build pxe server failed"
+        LOG.error(msg)
         raise exception.InvalidNetworkConfig(msg)
 
+
 def _get_network_plat(req, host_config, cluster_networks, dhcp_mac):
     host_config['dhcp_mac'] = dhcp_mac
     if host_config['interfaces']:
@@ -127,44 +169,58 @@ def _get_network_plat(req,host_config, cluster_networks, dhcp_mac):
         host_config_orig = copy.deepcopy(host_config)
         for interface in host_config['interfaces']:
             count += 1
-            if (interface.has_key('assigned_networks') and
+            # if (interface.has_key('assigned_networks') and
+            if ('assigned_networks' in interface and
                     interface['assigned_networks']):
-                assigned_networks = copy.deepcopy(interface['assigned_networks'])
+                assigned_networks = copy.deepcopy(
+                    interface['assigned_networks'])
                 host_config['interfaces'][count - 1]['assigned_networks'] = []
                 alias = []
                 for assigned_network in assigned_networks:
                     network_name = assigned_network['name']
-                    cluster_network = [network for network in cluster_networks if network['name'] in network_name][0]
+                    cluster_network = [
+                        network for network in cluster_networks
+                        if network['name'] in network_name][0]
                     alias.append(cluster_network['alias'])
                     # convert cidr to netmask
                     cidr_to_ip = ""
-                    assigned_networks_ip = tecs_cmn.get_host_network_ip(req, host_config_orig, cluster_networks, network_name)
+                    assigned_networks_ip = tecs_cmn.get_host_network_ip(
+                        req, host_config_orig, cluster_networks, network_name)
                     if cluster_network.get('cidr', None):
-                        inter_ip = lambda x: '.'.join([str(x/(256**i)%256) for i in range(3,-1,-1)])
-                        cidr_to_ip = inter_ip(2**32-2**(32-int(cluster_network['cidr'].split('/')[1])))
-                    if cluster_network['alias'] == None or len(alias) == 1:
+                        inter_ip = lambda x: '.'.join(
+                            [str(x / (256**i) % 256) for i in
+                             range(3, -1, -1)])
+                        cidr_to_ip = inter_ip(
+                            2**32 - 2**(32 - int(
+                                cluster_network['cidr'].split('/')[1])))
+                    if cluster_network['alias'] is None or len(alias) == 1:
                         network_type = cluster_network['network_type']
                         network_plat = dict(network_type=network_type,
-                                            ml2_type=cluster_network['ml2_type'],
-                                            capability=cluster_network['capability'],
-                                            physnet_name=cluster_network['physnet_name'],
-                                            gateway=cluster_network.get('gateway', ""),
+                                            ml2_type=cluster_network[
+                                                'ml2_type'],
+                                            capability=cluster_network[
+                                                'capability'],
+                                            physnet_name=cluster_network[
+                                                'physnet_name'],
+                                            gateway=cluster_network.get(
+                                                'gateway', ""),
                                             ip=assigned_networks_ip,
                                             # ip=cluster_network.get('ip', ""),
                                             netmask=cidr_to_ip,
-                                            vlan_id=cluster_network.get('vlan_id', ""))
-                        if network_type == "MANAGEMENT" and cluster_network.get('gateway', "") == "":
-                            msg = "Error: The gateway of network 'MANAGEMENT' is not given!"
-                            raise exception.Forbidden(msg)
-                        host_config['interfaces'][count-1]['assigned_networks'].append(network_plat)
+                                            vlan_id=cluster_network.get(
+                                                'vlan_id', ""))
+                        host_config['interfaces'][
+                            count - 1][
+                            'assigned_networks'].append(network_plat)
             interface['ip'] = ""
             interface['netmask'] = ""
             interface['gateway'] = ""
     return host_config
 
+
 def get_cluster_hosts_config(req, cluster_id):
-    params = dict(limit=1000000)
+    # params = dict(limit=1000000)
     try:
         cluster_data = registry.get_cluster_metadata(req.context, cluster_id)
         networks = registry.get_networks_detail(req.context, cluster_id)
@@ -178,18 +234,25 @@ def get_cluster_hosts_config(req, cluster_id):
     for host_id in all_hosts_ids:
         host_detail = daisy_cmn.get_host_detail(req, host_id)
         role_host_db_lv_size_lists = list()
-        if host_detail.has_key('role') and host_detail['role']:
+        # if host_detail.has_key('role') and host_detail['role']:
+        if 'role' in host_detail and host_detail['role']:
             host_roles = host_detail['role']
             for role in roles:
-                if role['name'] in host_detail['role'] and role['glance_lv_size']:
+                if role['name'] in host_detail['role'] and\
+                        role['glance_lv_size']:
                     host_detail['glance_lv_size'] = role['glance_lv_size']
-                if role.get('db_lv_size', None) and host_roles and role['name'] in host_roles:
+                if role.get('db_lv_size', None) and host_roles and\
+                        role['name'] in host_roles:
                     role_host_db_lv_size_lists.append(role['db_lv_size'])
-                if role['name'] == 'COMPUTER' and role['name'] in host_detail['role'] and role['nova_lv_size']:
+                if role['name'] == 'COMPUTER' and\
+                        role['name'] in host_detail['role'] and\
+                        role['nova_lv_size']:
                     host_detail['nova_lv_size'] = role['nova_lv_size']
-                service_disks = tecs_cmn.get_service_disk_list(req, {'role_id': role['id']})
+                service_disks = tecs_cmn.get_service_disk_list(
+                    req, {'role_id': role['id']})
                 for service_disk in service_disks:
-                    if service_disk['disk_location'] == 'local' and service_disk['service'] == 'mongodb':
+                    if service_disk['disk_location'] == 'local' and\
+                            service_disk['service'] == 'mongodb':
                         host_detail['mongodb_lv_size'] = service_disk['size']
                         break
             if role_host_db_lv_size_lists:
@@ -198,27 +261,33 @@ def get_cluster_hosts_config(req, cluster_id):
             host_detail['db_lv_size'] = 0
         for interface in host_detail['interfaces']:
-            if interface['type'] == 'bond'and interface['mode'] in LINUX_BOND_MODE.keys():
+            if interface['type'] == 'bond'and\
+                    interface['mode'] in LINUX_BOND_MODE.keys():
                 interface['mode'] = LINUX_BOND_MODE[interface['mode']]
         if (host_detail['os_status'] == host_os_status['INIT'] or
+                host_detail['os_status'] == host_os_status['PRE_INSTALL'] or
                 host_detail['os_status'] == host_os_status['INSTALLING'] or
                 host_detail['os_status'] == host_os_status['INSTALL_FAILED']):
-            host_dhcp_interface = [hi for hi in host_detail['interfaces'] if hi['is_deployment']]
-            if not host_dhcp_interface:
-                msg = "cann't find dhcp interface on host %s" % host_detail['id']
+            pxe_macs = common.get_pxe_mac(host_detail)
+            if not pxe_macs:
+                msg = "cann't find dhcp interface on host %s" % host_detail[
+                    'id']
                 raise exception.InvalidNetworkConfig(msg)
-            if len(host_dhcp_interface) > 1:
-                msg = "dhcp interface should only has one on host %s" % host_detail['id']
+            if len(pxe_macs) > 1:
+                msg = "dhcp interface should only has one on host %s"\
+                    % host_detail['id']
                 raise exception.InvalidNetworkConfig(msg)
             host_config_detail = copy.deepcopy(host_detail)
             host_config = _get_network_plat(req, host_config_detail,
                                             networks,
-                                            host_dhcp_interface[0]['mac'])
-            hosts_config.append(tecs_cmn.sort_interfaces_by_pci(host_config))
+                                            pxe_macs[0])
+            hosts_config.append(tecs_cmn.sort_interfaces_by_pci(networks,
+                                                                host_config))
     return hosts_config
 
+
 def check_tfg_exist():
     get_tfg_patch = "ls %s|grep CGSL_VPLAT-.*\.iso$" % daisy_tecs_path
     obj = subprocess.Popen(get_tfg_patch,
@@ -239,6 +308,7 @@ def check_tfg_exist():
         return ""
     return tfg_patch_pkg_file
+
 def update_db_host_status(req, host_id, host_status):
     """
     Update host status and intallation progress to db.
@@ -255,11 +325,14 @@ def update_db_host_status(req, host_id, host_status):
     except exception.Invalid as e:
         raise HTTPBadRequest(explanation=e.msg, request=req)
 
+
 class OSInstall():
     """
     Class for install OS.
     """
     """ Definition for install states."""
+
     def __init__(self, req, cluster_id):
         self.req = req
         self.cluster_id = cluster_id
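# Worked example of the timeout arithmetic set up in __init__ below
# (illustrative only; assumes self.time_step = 10 seconds and
# max_parallel_os_number = 10, values not shown in this hunk):
#   single_host_install_timeout  = 30 * (12 * 10)                 = 3600 s
#   cluster_hosts_install_timeout = (10 / 4 + 2) * 60 * (12 * 10) = 28800 s
# i.e. one host is given an hour and a 10-way parallel batch eight hours.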
@@ -269,8 +342,10 @@ class OSInstall():
         self.single_host_install_timeout = 30 * (12 * self.time_step)
         self.max_parallel_os_num = int(CONF.max_parallel_os_number)
-        self.cluster_hosts_install_timeout = (self.max_parallel_os_num/4 + 2) * 60 * (12*self.time_step)
+        self.cluster_hosts_install_timeout = (
+            self.max_parallel_os_num / 4 + 2) * 60 * (12 * self.time_step)
         self.ironicclient = get_ironicclient()
+        self.daisyclient = get_daisyclient()
 
     def _set_boot_or_power_state(self, user, passwd, addr, action):
         count = 0
@@ -280,27 +355,41 @@ class OSInstall():
                 passwd,
                 addr,
                 action)
-            set_dict = dict([(f, getattr(set_obj, f, '')) for f in ['return_code', 'info']])
+            set_dict = dict([(f, getattr(set_obj, f, ''))
+                             for f in ['return_code', 'info']])
             rc = int(set_dict['return_code'])
             if rc == 0:
-                LOG.info(_("set %s to '%s' successfully for %s times by ironic" % (addr, action, count+1)))
+                LOG.info(
+                    _("Set %s to '%s' successfully for %s times by ironic" % (
+                        addr, action, count + 1)))
+                # One host set 'disk' return success, but it still 'pxe'
+                # mode in German site. If we have a method to confirm,
+                # this can be deleted.
+                if action == 'pxe' or action == 'disk':
+                    self.ironicclient.daisy.set_boot_or_power_state(user,
+                                                                    passwd,
+                                                                    addr,
+                                                                    action)
                 break
             else:
                 count += 1
-                LOG.info(_("try setting %s to '%s' failed for %s times by ironic" % (addr, action, count)))
+                LOG.info(
+                    _("Try setting %s to '%s' failed for %s times by ironic"
                      % (addr, action, count)))
                 time.sleep(count * 2)
         if count >= repeat_times:
-            message = "set %s to '%s' failed for 10 mins" % (addr, action)
+            message = "Set %s to '%s' failed for 10 mins" % (addr, action)
             raise exception.IMPIOprationFailed(message=message)
 
-    def _baremetal_install_os(self, host_detail):
+    def _install_os_for_baremetal(self, host_detail):
         # os_install_disk = 'sda'
         os_version_file = host_detail['os_version_file']
         if os_version_file:
             test_os_version_exist = 'test -f %s' % os_version_file
             daisy_cmn.subprocess_call(test_os_version_exist)
         else:
-            self.message = "no OS version file configed for host %s" % host_detail['id']
+            self.message = "no OS version file configed for host %s"\
+                % host_detail['id']
            raise exception.NotFound(message=self.message)
 
         if host_detail.get('root_disk', None):
@@ -310,7 +399,7 @@ class OSInstall():
         if host_detail.get('root_lv_size', None):
             root_lv_size_m = host_detail['root_lv_size']
         else:
-            root_lv_size_m = 51200
+            root_lv_size_m = 102400
         memory_size_b_str = str(host_detail['memory']['total'])
         memory_size_b_int = int(memory_size_b_str.strip().split()[0])
         memory_size_m = memory_size_b_int // 1024
@@ -320,22 +409,35 @@ class OSInstall():
         disk_list = []
         disk_storage_size_b = 0
         for key in host_detail['disks']:
+            if host_detail['disks'][key]['disk'].find("-fc-") != -1 \
+                    or host_detail['disks'][key]['disk'].\
+                    find("-iscsi-") != -1 \
+                    or host_detail['disks'][key]['name'].\
+                    find("mpath") != -1 \
+                    or host_detail['disks'][key]['name'].\
+                    find("spath") != -1:
+                continue
             disk_list.append(host_detail['disks'][key]['name'])
             stroage_size_str = host_detail['disks'][key]['size']
             stroage_size_b_int = int(stroage_size_str.strip().split()[0])
             disk_storage_size_b = disk_storage_size_b + stroage_size_b_int
         disk_list = ','.join(disk_list)
         disk_storage_size_m = disk_storage_size_b // (1024 * 1024)
-        if host_detail.has_key('root_pwd') and host_detail['root_pwd']:
+
+        if 'root_pwd' in host_detail and host_detail['root_pwd']:
             root_pwd = host_detail['root_pwd']
         else:
             root_pwd = 'ossdbg1'
-        if host_detail.has_key('isolcpus') and host_detail['isolcpus']:
-            isolcpus = host_detail['isolcpus']
-        else:
-            isolcpus = None
+
+        isolcpus = None
+        if 'os_cpus' in host_detail and host_detail['os_cpus']:
+            os_cpus = utils.cpu_str_to_list(host_detail['os_cpus'])
+            host_cpu = host_detail.get('cpu', {})
+            if 'total' in host_cpu:
+                total_cpus = range(0, host_cpu['total'])
+                isolcpus_list = list(set(total_cpus) - set(os_cpus))
+                isolcpus_list.sort()
+                isolcpus = utils.cpu_list_to_str(isolcpus_list)
 
         if host_detail.get('hugepages', None):
             hugepages = host_detail['hugepages']
@@ -346,19 +448,23 @@ class OSInstall():
             hugepagesize = host_detail['hugepagesize']
         else:
             hugepagesize = '1G'
         # tfg_patch_pkg_file = check_tfg_exist()
-        if (not host_detail['ipmi_user'] or
-                not host_detail['ipmi_passwd'] or
-                not host_detail['ipmi_addr']):
-            self.message = "Invalid ipmi information configed for host %s" % host_detail['id']
-            raise exception.NotFound(message=self.message)
-        self._set_boot_or_power_state(host_detail['ipmi_user'],
-                                      host_detail['ipmi_passwd'],
-                                      host_detail['ipmi_addr'],
+        if host_detail.get('hwm_id'):
+            host_hwm_meta = {
+                "hwm_ip": host_detail.get('hwm_ip'),
+                "hwm_id": host_detail.get('hwm_id'),
+                "boot_type": "pxe"
+            }
+            self.daisyclient.node.set_boot(**host_hwm_meta)
+        else:
+            if (not host_detail['ipmi_user'] or
+                    not host_detail['ipmi_passwd'] or
+                    not host_detail['ipmi_addr']):
+                self.message = "Invalid ipmi information configed for host %s"\
+                    % host_detail['id']
+                raise exception.NotFound(message=self.message)
+            self._set_boot_or_power_state(host_detail['ipmi_user'],
+                                          host_detail['ipmi_passwd'],
+                                          host_detail['ipmi_addr'],
@ -382,59 +488,85 @@ class OSInstall():
                  'hugepages': hugepages,
                  'reboot': 'no'}

        # if host_detail.has_key('glance_lv_size'):
        if 'glance_lv_size' in host_detail:
            kwargs['glance_lv_size'] = host_detail['glance_lv_size']
        else:
            kwargs['glance_lv_size'] = 0
        # if host_detail.has_key('db_lv_size') and host_detail['db_lv_size']:
        if 'db_lv_size' in host_detail and host_detail['db_lv_size']:
            kwargs['db_lv_size'] = host_detail['db_lv_size']
        else:
            kwargs['db_lv_size'] = 0
        # if host_detail.has_key('mongodb_lv_size') and
        # host_detail['mongodb_lv_size']:
        if 'mongodb_lv_size' in host_detail and host_detail['mongodb_lv_size']:
            kwargs['mongodb_lv_size'] = host_detail['mongodb_lv_size']
        else:
            kwargs['mongodb_lv_size'] = 0
        # if host_detail.has_key('nova_lv_size') and
        # host_detail['nova_lv_size']:
        if 'nova_lv_size' in host_detail and host_detail['nova_lv_size']:
            kwargs['nova_lv_size'] = host_detail['nova_lv_size']
        else:
            kwargs['nova_lv_size'] = 0
        install_os_obj = self.ironicclient.daisy.install_os(**kwargs)
        install_os_dict = dict(
            [(f, getattr(install_os_obj, f, '')) for f in
             ['return_code', 'info']])
        rc = int(install_os_dict['return_code'])
        if rc != 0:
            install_os_description = install_os_dict['info']
            LOG.info(
                _("install os config failed because of '%s'"
                  % (install_os_description)))
            host_status = {'os_status': host_os_status['INSTALL_FAILED'],
                           'os_progress': 0,
                           'messages': install_os_description}
            update_db_host_status(self.req, host_detail['id'], host_status)
            msg = "ironic install os return failed for host %s" % host_detail[
                'id']
            raise exception.OSInstallFailed(message=msg)

        if host_detail.get('hwm_id'):
            host_hwm_meta = {
                "hwm_ip": host_detail.get('hwm_ip'),
                "hwm_id": host_detail.get('hwm_id')
            }
            self.daisyclient.node.restart(**host_hwm_meta)
        else:
            self._set_boot_or_power_state(host_detail['ipmi_user'],
                                          host_detail['ipmi_passwd'],
                                          host_detail['ipmi_addr'],
                                          'reset')
    def _begin_install_os(self, hosts_detail):
        # all hosts status is set to 'pre-install' before os installing
        for host_detail in hosts_detail:
            host_status = {'os_status': host_os_status['PRE_INSTALL'],
                           'os_progress': 0,
                           'messages': 'Preparing for OS installation'}
            update_db_host_status(self.req, host_detail['id'], host_status)

        for host_detail in hosts_detail:
            self._install_os_for_baremetal(host_detail)
    def _set_disk_start_mode(self, host_detail):
        LOG.info(_("Set boot from disk for host %s" % (host_detail['id'])))
        if host_detail.get('hwm_id'):
            host_hwm_meta = {
                "hwm_ip": host_detail.get('hwm_ip'),
                "hwm_id": host_detail.get('hwm_id'),
                "boot_type": "disk"
            }
            self.daisyclient.node.set_boot(**host_hwm_meta)
            LOG.info(_("reboot host %s" % (host_detail['id'])))
            host_hwm_meta.pop('boot_type')
            self.daisyclient.node.restart(**host_hwm_meta)
        else:
            self._set_boot_or_power_state(host_detail['ipmi_user'],
                                          host_detail['ipmi_passwd'],
                                          host_detail['ipmi_addr'],
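Reviewer note: both branches implement the same sequence, "set next boot device, then power-cycle"; only the transport differs (hardware-manager REST call vs. raw IPMI). The body of _set_boot_or_power_state is not shown in this diff; a hedged sketch of what the IPMI leg amounts to, using standard ipmitool subcommands:

import subprocess

def set_boot_then_reset(user, passwd, addr, device='disk'):
    # hypothetical helper: set one-shot boot device, then hard reset
    base = ['ipmitool', '-I', 'lanplus', '-H', addr, '-U', user, '-P', passwd]
    subprocess.check_call(base + ['chassis', 'bootdev', device])
    subprocess.check_call(base + ['power', 'reset'])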
@@ -462,15 +594,20 @@ class OSInstall():
    def _query_host_progress(self, host_detail, host_status, host_last_status):
        host_id = host_detail['id']
        install_result_obj = \
            self.ironicclient.daisy.get_install_progress(
                host_detail['dhcp_mac'])
        install_result = dict([(f, getattr(install_result_obj, f, ''))
                               for f in ['return_code', 'info', 'progress']])
        rc = int(install_result['return_code'])
        host_status['os_progress'] = int(install_result['progress'])
        if rc == 0:
            if host_status['os_progress'] == 100:
                time_cost = str(
                    round((time.time() -
                           daisy_cmn.os_install_start_time) / 60, 2))
                LOG.info(
                    _("It takes %s min for host %s to install os"
                      % (time_cost, host_id)))
                LOG.info(_("host %s install os completely." % host_id))
                host_status['os_status'] = host_os_status['ACTIVE']
                host_status['messages'] = "OS installed successfully"
@@ -478,10 +615,13 @@ class OSInstall():
                time.sleep(10)
                self._set_disk_start_mode(host_detail)
            else:
                if host_status['os_progress'] ==\
                        host_last_status['os_progress']:
                    host_status['count'] = host_status['count'] + 1
                    LOG.debug(_("host %s has kept %ss when progress is %s."
                                % (host_id,
                                   host_status['count'] * self.time_step,
                                   host_status['os_progress'])))
        else:
            LOG.info(_("host %s install failed." % host_id))
            host_status['os_status'] = host_os_status['INSTALL_FAILED']
@@ -491,25 +631,31 @@ class OSInstall():
        hosts_status = copy.deepcopy(hosts_last_status)
        for host_detail in hosts_detail:
            host_id = host_detail['id']
            # if not hosts_status.has_key(host_id):
            if host_id not in hosts_status:
                self._init_progress(host_detail, hosts_status)
                continue

            host_status = hosts_status[host_id]
            host_last_status = hosts_last_status[host_id]
            # only process installing hosts after init, other hosts info will
            # be kept in hosts_status
            if host_status['os_status'] != host_os_status['INSTALLING']:
                continue

            self._query_host_progress(
                host_detail, host_status, host_last_status)

            if host_status['count'] * self.time_step >=\
                    self.single_host_install_timeout:
                host_status['os_status'] = host_os_status['INSTALL_FAILED']
                if host_detail['resource_type'] == 'docker':
                    host_status[
                        'messages'] = "docker container created timeout"
                else:
                    host_status['messages'] = "os installed timeout"
            if (host_status['os_progress'] !=
                    host_last_status['os_progress'] or
                    host_status['os_status'] != host_last_status['os_status']):
                host_status['count'] = 0
                update_db_host_status(self.req, host_id, host_status)
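The stall detector above counts polling intervals during which the reported progress has not moved; once count * time_step crosses single_host_install_timeout the host is declared failed, and any change in progress or status resets the counter. The same idea in isolation:

time_step = 10                     # seconds between polls
single_host_install_timeout = 30   # shortened for the example

count = 0
last_progress = 40
for progress in [40, 40, 40, 40]:  # simulated identical polls
    if progress == last_progress:
        count += 1
    else:
        count = 0
        last_progress = progress
    if count * time_step >= single_host_install_timeout:
        print("host marked INSTALL_FAILED after %ss stall" % (count * time_step))
        break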
@@ -519,20 +665,28 @@ class OSInstall():
        query_count = 0
        hosts_last_status = {}
        while True:
            hosts_install_status = self._query_progress(
                hosts_last_status, hosts_detail)
            # if all hosts install over, break
            installing_hosts = [id for id in hosts_install_status.keys()
                                if hosts_install_status[id]['os_status'] ==
                                host_os_status['INSTALLING']]
            if not installing_hosts:
                break
            # after 3h, if some hosts are not 'active', label them to 'failed'.
            elif query_count * self.time_step >=\
                    self.cluster_hosts_install_timeout:
                for host_id, host_status in hosts_install_status.iteritems():
                    if (host_status['os_status'] !=
                            host_os_status['ACTIVE'] and
                            host_status['os_status'] !=
                            host_os_status['INSTALL_FAILED']):
                        # label the host install failed because of time out for
                        # 3h
                        host_status['os_status'] = host_os_status[
                            'INSTALL_FAILED']
                        host_status[
                            'messages'] = "cluster os installed timeout"
                        update_db_host_status(self.req, host_id, host_status)
                break
            else:
@@ -550,21 +704,29 @@ class OSInstall():
        hosts_detail = []
        install_hosts_id = [host_detail['id'] for host_detail in install_hosts]
        LOG.info(
            _("Begin install os for hosts %s." % ','.join(install_hosts_id)))
        daisy_cmn.os_install_start_time = time.time()
        self._begin_install_os(install_hosts)
        LOG.info(_("Begin to query install progress..."))
        # wait to install completely
        cluster_install_status = self._get_install_status(install_hosts)
        total_time_cost = str(
            round((time.time() - daisy_cmn.os_install_start_time) / 60, 2))
        LOG.info(
            _("It totally takes %s min for all host to install os"
              % total_time_cost))
        LOG.info(_("OS install in cluster %s result is:" % self.cluster_id))
        LOG.info(_("%s %s %s" %
                   ('host-id', 'os-status', 'description')))
        for host_id, host_status in cluster_install_status.iteritems():
            LOG.info(
                _("%s %s %s" % (host_id, host_status['os_status'],
                                host_status['messages'])))
            if host_id in role_hosts_ids:
                if host_status['os_status'] ==\
                        host_os_status['INSTALL_FAILED']:
                    break
                else:
                    role_hosts_ids.remove(host_id)
@@ -582,33 +744,40 @@ def _os_thread_bin(req, host_ip, host_id):
    with open(var_log_path, "w+") as fp:
        cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password)
        daisy_cmn.subprocess_call(cmd, fp)
        cmd = 'clush -S -w %s "mkdir -p /home/daisy_update/"' % (host_ip,)
        daisy_cmn.subprocess_call(cmd, fp)
        cmd = 'clush -S -b -w %s "rm -rf /home/daisy_update/*"' % (host_ip,)
        daisy_cmn.subprocess_call(cmd, fp)
        cmd = 'clush -S -w %s -c /var/lib/daisy/tecs/*CGSL_VPLAT*.iso\
 /var/lib/daisy/tecs/tfg_upgrade.sh \
 --dest=/home/daisy_update' % (
            host_ip,)
        daisy_cmn.subprocess_call(cmd, fp)
        cmd = 'clush -S -w %s "chmod 777 /home/daisy_update/*"' % (host_ip,)
        daisy_cmn.subprocess_call(cmd, fp)

        host_meta['os_progress'] = 30
        host_meta['os_status'] = host_os_status['UPDATING']
        host_meta['messages'] = "os updating,copy iso successfully"
        update_db_host_status(req, host_id, host_meta)
        try:
            exc_result = subprocess.check_output(
                'clush -S -w %s "/home/daisy_update/tfg_upgrade.sh"' % (
                    host_ip,),
                shell=True, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            if e.returncode == 255 and "reboot" in e.output.strip():
                host_meta['os_progress'] = 100
                host_meta['os_status'] = host_os_status['ACTIVE']
                host_meta['messages'] = "upgrade tfg successfully,os reboot"
                LOG.info(
                    _("Update tfg for %s successfully,os reboot!" % host_ip))
                daisy_cmn.check_reboot_ping(host_ip)
            else:
                host_meta['os_progress'] = 0
                host_meta['os_status'] = host_os_status['UPDATE_FAILED']
                host_meta[
                    'messages'] =\
                    e.output.strip()[-400:-200].replace('\n', ' ')
                LOG.error(_("Update tfg for %s failed!" % host_ip))
            update_db_host_status(req, host_id, host_meta)
            fp.write(e.output.strip())
@@ -621,6 +790,8 @@ def _os_thread_bin(req, host_ip, host_id):
            fp.write(exc_result)
            if "reboot" in exc_result:
                daisy_cmn.check_reboot_ping(host_ip)
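Reviewer note: clush -S propagates the remote exit status, so a node that reboots mid-script surfaces as CalledProcessError with returncode 255 (the ssh connection is torn down). The handler above treats 255-plus-"reboot"-in-output as success. A stripped-down sketch of that pattern:

import subprocess

def run_upgrade(host_ip):
    try:
        out = subprocess.check_output(
            'clush -S -w %s "/home/daisy_update/tfg_upgrade.sh"' % host_ip,
            shell=True, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        # ssh exits 255 when the connection drops because the host rebooted
        if e.returncode == 255 and "reboot" in e.output:
            return "active"        # upgrade ok, host is rebooting
        return "update_failed: " + e.output.strip()[-200:].replace('\n', ' ')
    return "active" if "reboot" in out else "done"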
# this will raise all the exceptions of the thread to the log file
def os_thread_bin(req, host_ip, host_id):
    try:
@@ -642,7 +813,8 @@ def _get_host_os_version(host_ip, host_pwd='ossdbg1'):
        LOG.info(_("Host %s os version is TFG" % host_ip))
        return version
    try:
        process =\
            subprocess.Popen(["sshpass", "-p", "%s" % host_pwd, "ssh",
                              "-o StrictHostKeyChecking=no", "%s" % host_ip,
                              'tfg_showversion'], shell=False,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -660,14 +832,16 @@ def _get_host_os_version(host_ip, host_pwd='ossdbg1'):
        raise exception.Invalid(message=msg)


def _cmp_os_version(new_os_file, old_os_version,
                    target_host_ip, password='ossdbg1'):
    shell_file = '/usr/sbin/tfg_showversion'
    if old_os_version:
        try:
            subprocess.check_output("test -f %s" % shell_file, shell=True,
                                    stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError:
            scripts = ["sshpass -p %s scp -r -o\
 StrictHostKeyChecking=no %s:%s "
                       "/usr/sbin/" % (password, target_host_ip, shell_file)]
            tecs_cmn.run_scrip(scripts)
@@ -705,9 +879,10 @@ def upgrade_os(req, hosts_list):
        host_id = host_info.keys()[0]
        host_ip = host_info.values()[0]
        host_detail = daisy_cmn.get_host_detail(req, host_id)
        target_host_os = _get_host_os_version(
            host_ip, host_detail['root_pwd'])

        if _cmp_os_version(new_os_file, target_host_os, host_ip) != -1:
            host_meta['os_progress'] = 10
            host_meta['os_status'] = host_os_status['UPDATING']
            host_meta['messages'] = "os updating,begin copy iso"
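Reviewer note on the changed guard: the old code upgraded only when _cmp_os_version returned exactly 0, the new code whenever it is not -1. Assuming cmp-style semantics (-1 when the packaged OS file is older than the installed version, 0 when equal, 1 when newer; the function body is not fully shown in this hunk), the effect is "upgrade unless the package is older":

def should_upgrade(cmp_result):
    # -1: packaged OS older than installed -> skip
    #  0: same version                     -> proceed
    #  1: packaged OS newer                -> proceed
    return cmp_result != -1

print([should_upgrade(r) for r in (-1, 0, 1)])   # [False, True, True]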
@@ -731,12 +906,15 @@ def upgrade_os(req, hosts_list):
            host_id = host_info.keys()[0]
            host_ip = host_info.values()[0]
            host = registry.get_host_metadata(req.context, host_id)
            if host['os_status'] == host_os_status['UPDATE_FAILED'] or\
                    host['os_status'] == host_os_status['INIT']:
                update_failed_flag = True
                raise exception.ThreadBinException(
                    "%s update tfg failed! %s" % (
                        host_ip, host['messages']))
        if not update_failed_flag:
            host_meta = {}
            host_meta['os_progress'] = 100
            host_meta['os_status'] = host_os_status['ACTIVE']
            host_meta['messages'] = "upgrade tfg successfully"
            update_db_host_status(req, host_id, host_meta)
View File
@@ -54,7 +54,6 @@ def get_proton_ip(req, role_hosts):
    return proton_ip_list


def get_proton_hosts(req, cluster_id):
    all_roles = proton_cmn.get_roles_detail(req)
    for role in all_roles:
View File
@@ -16,35 +16,20 @@
"""
/install endpoint for tecs API
"""
import subprocess
import commands

from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
import threading

from daisy import i18n

from daisy.common import exception
from daisy.api.backends.tecs import config
from daisy.api.backends import driver
import daisy.api.backends.os as os_handle
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.tecs.common as tecs_cmn
@@ -52,11 +37,9 @@ import daisy.api.backends.tecs.install as instl
import daisy.api.backends.tecs.uninstall as unstl
import daisy.api.backends.tecs.upgrade as upgrd
import daisy.api.backends.tecs.disk_array as disk_array
import daisy.registry.client.v1.api as registry

LOG = logging.getLogger(__name__)
_ = i18n._
@@ -71,8 +54,11 @@ upgrade_opts = [
CONF.register_opts(upgrade_opts)

tecs_state = tecs_cmn.TECS_STATE
daisy_tecs_path = tecs_cmn.daisy_tecs_path


class API(driver.DeploymentDriver):
    """
    The hosts API is a RESTful web service for host data. The API
    is as follows::
@@ -100,19 +86,21 @@ class API(driver.DeploymentDriver):
        param req: The WSGI/Webob Request object
        cluster_id:cluster id
        """
        tecs_install_task = instl.TECSInstallTask(req, cluster_id)
        tecs_install_task.start()

    def _get_roles_and_hosts_ip_list(self, req, cluster_id):
        role_host_ips = {'ha': set(), 'lb': set(), 'all': set()}
        role_id_list = set()
        hosts_id_list = []
        hosts_list = []
        tecs_install_failed_list = set()
        roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id)
        cluster_networks = daisy_cmn.get_cluster_networks_detail(
            req, cluster_id)
        for role in roles:
            if role['deployment_backend'] != daisy_cmn.tecs_backend_name:
                continue
@@ -120,21 +108,28 @@ class API(driver.DeploymentDriver):
            if role_hosts:
                for role_host in role_hosts:
                    host = daisy_cmn.get_host_detail(req, role_host['host_id'])
                    host_ip = tecs_cmn.get_host_network_ip(
                        req, host, cluster_networks, 'MANAGEMENT')
                    if role['name'] == "CONTROLLER_HA":
                        role_host_ips['ha'].add(host_ip)
                    if role['name'] == "CONTROLLER_LB":
                        role_host_ips['lb'].add(host_ip)
                    role_host_ips['all'].add(host_ip)
                    hosts_id_list.append({host['id']: host_ip})
                    if role_host['status'] == tecs_state['INSTALL_FAILED']:
                        tecs_install_failed_list.add(host_ip)
                role_id_list.add(role['id'])
        for host in hosts_id_list:
            if host not in hosts_list:
                hosts_list.append(host)

        return (role_id_list, role_host_ips,
                hosts_list, tecs_install_failed_list)
    def _query_progress(self, req, cluster_id, action=""):
        nodes_list = []
        roles = daisy_cmn.get_roles_detail(req)
        (role_id_list, role_host_ips, hosts_list, tecs_install_failed_list) =\
            self._get_roles_and_hosts_ip_list(req, cluster_id)
        for host in hosts_list:
            node = {}
            host_id = host.keys()[0]
@@ -148,7 +143,8 @@ class API(driver.DeploymentDriver):
            node['os-messages'] = host['messages']
            if host['status'] == "with-role":
                host_roles = [role for role in roles if role['name'] in host[
                    'role'] and role['cluster_id'] == cluster_id]
                if host_roles:
                    node['role-status'] = host_roles[0]['status']
                    node['role-progress'] = str(host_roles[0]['progress'])
@@ -157,7 +153,19 @@ class API(driver.DeploymentDriver):
        if nodes_list:
            return {'tecs_nodes': nodes_list}
        else:
            return {'tecs_nodes': "TECS uninstall successfully,\
 the host has been removed from the host_roles table"}

    def _modify_running_version_of_configs(self, req,
                                           running_version, cluster_id):
        cluster_configs_list = daisy_cmn.get_cluster_configs_list(req,
                                                                  cluster_id)
        if cluster_configs_list:
            for cluster_config in cluster_configs_list:
                registry.update_config_metadata(req.context,
                                                cluster_config['id'],
                                                {'running_version':
                                                 running_version})
    def uninstall(self, req, cluster_id):
        """
@@ -167,17 +175,21 @@ class API(driver.DeploymentDriver):
        :raises HTTPBadRequest if x-install-cluster is missing
        """
        (role_id_list, role_host_ips, hosts_list, tecs_install_failed_list) =\
            self._get_roles_and_hosts_ip_list(req, cluster_id)
        if role_id_list:
            if not role_host_ips['all']:
                msg = _("there is no host in cluster %s") % cluster_id
                raise exception.ThreadBinException(msg)

            unstl.update_progress_to_db(
                req, role_id_list, tecs_state['UNINSTALLING'], hosts_list)
            threads = []
            for host_ip in role_host_ips['all']:
                t = threading.Thread(
                    target=unstl.thread_bin, args=(req, host_ip, role_id_list,
                                                   hosts_list))
                t.setDaemon(True)
                t.start()
                threads.append(t)
@@ -193,16 +205,29 @@ class API(driver.DeploymentDriver):
            for role_id in role_id_list:
                role_hosts = daisy_cmn.get_hosts_of_role(req, role_id)
                for role_host in role_hosts:
                    if role_host['status'] ==\
                            tecs_state['UNINSTALL_FAILED']:
                        unstl.update_progress_to_db(
                            req, role_id_list, tecs_state[
                                'UNINSTALL_FAILED'], hosts_list)
                        uninstall_failed_flag = True
                        break
            if not uninstall_failed_flag:
                LOG.info(
                    _("All uninstall threads have done,\
 set all roles status to 'init'!"))
                unstl.update_progress_to_db(
                    req, role_id_list, tecs_state['INIT'], hosts_list)
                LOG.info(_("modify the running_version of configs to 0"))
                running_version = 0
                self._modify_running_version_of_configs(
                    req, running_version, cluster_id)
            tecs_cmn.inform_provider_cloud_state(req.context, cluster_id,
                                                 operation='delete')
        try:
            (status, output) = commands.getstatusoutput('rpm -e --nodeps openstack-packstack\
                openstack-packstack-puppet \
                openstack-puppet-modules puppet')
        except exception.Invalid as e:
            raise HTTPBadRequest(explanation=e.msg, request=req)
@@ -217,54 +242,73 @@ class API(driver.DeploymentDriver):
        :raises HTTPBadRequest if x-install-cluster is missing
        """
        # daisy_update_path = '/home/daisy_update/'
        (role_id_list, role_host_ips, hosts_list, tecs_install_failed_list) =\
            self._get_roles_and_hosts_ip_list(req, cluster_id)
        if role_id_list:
            if not role_host_ips['all']:
                msg = _("there is no host in cluster %s") % cluster_id
                raise exception.ThreadBinException(msg)

            unreached_hosts = daisy_cmn.check_ping_hosts(
                role_host_ips['all'], 1)
            if unreached_hosts:
                self.message = "hosts %s ping failed" % unreached_hosts
                raise exception.NotFound(message=self.message)

            daisy_cmn.subprocess_call('rm -rf /root/.ssh/known_hosts')
            if os_handle.check_tfg_exist():
                os_handle.upgrade_os(req, hosts_list)
                unreached_hosts = daisy_cmn.check_ping_hosts(
                    role_host_ips['all'], 30)
                if unreached_hosts:
                    self.message = "hosts %s ping failed after tfg upgrade" \
                        % unreached_hosts
                    raise exception.NotFound(message=self.message)
            # check and get TECS version
            tecs_version_pkg_file = tecs_cmn.check_and_get_tecs_version(
                tecs_cmn.daisy_tecs_path)
            if not tecs_version_pkg_file:
                self.state = tecs_state['UPDATE_FAILED']
                self.message = "TECS version file not found in %s"\
                    % tecs_cmn.daisy_tecs_path
                raise exception.NotFound(message=self.message)
            threads = []
            LOG.info(
                _("Begin to update TECS controller nodes, please waiting...."))
            upgrd.update_progress_to_db(
                req, role_id_list, tecs_state['UPDATING'], hosts_list)
            for host_ip in role_host_ips['ha']:
                if host_ip in tecs_install_failed_list:
                    continue
                LOG.info(_("Update TECS controller node %s..." % host_ip))
                rc = upgrd.thread_bin(req, role_id_list, host_ip, hosts_list)
                if rc == 0:
                    LOG.info(_("Update TECS for %s successfully" % host_ip))
                else:
                    LOG.info(
                        _("Update TECS failed for %s, return %s"
                          % (host_ip, rc)))
                    return

            LOG.info(_("Begin to update TECS other nodes, please waiting...."))
            max_parallel_upgrade_number = int(
                CONF.max_parallel_os_upgrade_number)
            compute_ip_list = role_host_ips[
                'all'] - role_host_ips['ha'] - tecs_install_failed_list
            while compute_ip_list:
                threads = []
                if len(compute_ip_list) > max_parallel_upgrade_number:
                    upgrade_hosts = compute_ip_list[
                        :max_parallel_upgrade_number]
                    compute_ip_list = compute_ip_list[
                        max_parallel_upgrade_number:]
                else:
                    upgrade_hosts = compute_ip_list
                    compute_ip_list = []
                for host_ip in upgrade_hosts:
                    t = threading.Thread(
                        target=upgrd.thread_bin,
                        args=(req, role_id_list, host_ip, hosts_list))
                    t.setDaemon(True)
                    t.start()
                    threads.append(t)
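One caveat in the hunk above: compute_ip_list is built from set differences, and Python sets do not support slicing, so the [:max_parallel_upgrade_number] windowing only works once the value is materialized as a list. A hedged sketch of the intended batching, with that conversion made explicit:

import threading

def upgrade_in_batches(ips, batch_size, worker):
    pending = sorted(ips)   # sets must become a list before slicing
    while pending:
        batch, pending = pending[:batch_size], pending[batch_size:]
        threads = [threading.Thread(target=worker, args=(ip,))
                   for ip in batch]
        for t in threads:
            t.setDaemon(True)
            t.start()
        for t in threads:   # wait for the batch before starting the next
            t.join()

# upgrade_in_batches({'10.0.0.4', '10.0.0.5', '10.0.0.6'}, 2, lambda ip: None)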
@@ -282,7 +326,8 @@ class API(driver.DeploymentDriver):
                    role_id = [role_host['role_id']]
                    upgrd.update_progress_to_db(req,
                                                role_id,
                                                tecs_state[
                                                    'UPDATE_FAILED'],
                                                hosts_list)
                    break
                elif role_host['status'] == tecs_state['ACTIVE']:
@@ -295,7 +340,6 @@ class API(driver.DeploymentDriver):
    def upgrade_progress(self, req, cluster_id):
        return self._query_progress(req, cluster_id, "upgrade")

    def export_db(self, req, cluster_id):
        """
        Export daisy db data to tecs.conf and HA.conf.
@@ -305,12 +349,11 @@ class API(driver.DeploymentDriver):
        :raises HTTPBadRequest if x-install-cluster is missing
        """
        tecs_config =\
            instl.get_cluster_tecs_config(req, cluster_id)

        config_files = {'tecs_conf': '', 'ha_conf': ''}
        tecs_install_path = "/home/tecs_install"
        if tecs_config:
            cluster_conf_path = tecs_install_path + "/" + cluster_id
            create_cluster_conf_path =\
@@ -349,9 +392,11 @@ class API(driver.DeploymentDriver):
    def update_disk_array(self, req, cluster_id):
        (share_disk_info, volume_disk_info) =\
            disk_array.get_disk_array_info(req, cluster_id)
        array_nodes_addr =\
            tecs_cmn.get_disk_array_nodes_addr(req, cluster_id)

        ha_nodes_ip = array_nodes_addr['ha'].keys()
        all_nodes_ip = list(array_nodes_addr['computer']) + ha_nodes_ip

        if all_nodes_ip:
            compute_error_msg =\
@@ -364,7 +409,7 @@ class API(driver.DeploymentDriver):
            if share_disk_info:
                ha_error_msg =\
                    disk_array.config_ha_share_disk(share_disk_info,
                                                    array_nodes_addr['ha'])
                if ha_error_msg:
                    return ha_error_msg
                else:
@@ -373,7 +418,7 @@ class API(driver.DeploymentDriver):
            if volume_disk_info:
                cinder_error_msg =\
                    disk_array.config_ha_cinder_volume(volume_disk_info,
                                                       ha_nodes_ip)
                if cinder_error_msg:
                    return cinder_error_msg
                else:
View File
@@ -19,33 +19,21 @@
import os
import copy
import subprocess
import re

from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden

from daisy import i18n
from daisy.common import utils
from daisy.common import exception
import daisy.registry.client.v1.api as registry
import daisy.api.backends.common as daisy_cmn
from daisyclient.v1 import client as daisy_client
import ConfigParser

STR_MASK = '*' * 8

LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
@@ -53,6 +41,7 @@ _LI = i18n._LI
_LW = i18n._LW

daisy_tecs_path = '/var/lib/daisy/tecs/'
tecs_install_path = '/home/tecs_install'

TECS_STATE = {
    'INIT': 'init',
@@ -66,30 +55,52 @@ TECS_STATE = {
}
def get_daisyclient():
    """Get Daisy client instance."""
    config_daisy = ConfigParser.ConfigParser()
    config_daisy.read("/etc/daisy/daisy-api.conf")
    daisy_port = config_daisy.get("DEFAULT", "bind_port")
    args = {'version': 1.0, 'endpoint': 'http://127.0.0.1:' + daisy_port}
    return daisy_client.Client(**args)


def mkdir_tecs_install(host_ips=None):
    if not host_ips:
        cmd = "mkdir -p %s" % tecs_install_path
        daisy_cmn.subprocess_call(cmd)
        return
    for host_ip in host_ips:
        cmd = 'clush -S -w %s "mkdir -p %s"' % (host_ip, tecs_install_path)
        daisy_cmn.subprocess_call(cmd)
def _get_cluster_network(cluster_networks, network_name):
    network = [cn for cn in cluster_networks if cn['name'] == network_name]
    if not network or not network[0]:
        msg = "network %s does not exist" % (network_name)
        raise exception.InvalidNetworkConfig(msg)
    else:
        return network[0]


def get_host_interface_by_network(host_detail, network_name):
    host_detail_info = copy.deepcopy(host_detail)
    interface_list = [hi for hi in host_detail_info['interfaces']
                      for assigned_network in hi['assigned_networks']
                      if assigned_network and
                      network_name == assigned_network['name']]
    interface = {}
    if interface_list:
        interface = interface_list[0]

    if not interface and 'MANAGEMENT' == network_name:
        msg = "network %s of host %s does not exist" % (
            network_name, host_detail_info['id'])
        raise exception.InvalidNetworkConfig(msg)

    return interface
def get_host_network_ip(req, host_detail, cluster_networks, network_name):
    interface_network_ip = ''
    host_interface = get_host_interface_by_network(host_detail, network_name)
@@ -101,7 +112,8 @@ def get_host_network_ip(req, host_detail, cluster_networks, network_name):
            interface_network_ip = assigned_network['ip']

    if not interface_network_ip and 'MANAGEMENT' == network_name:
        msg = "%s network ip of host %s can't be empty" % (
            network_name, host_detail['id'])
        raise exception.InvalidNetworkConfig(msg)
    return interface_network_ip
@@ -147,21 +159,31 @@ def get_network_netmask(cluster_networks, network_name):
        raise exception.InvalidNetworkConfig(msg)
    return netmask


# every host has only one gateway
def get_network_gateway(cluster_networks, network_name):
    network = _get_cluster_network(cluster_networks, network_name)
    gateway = network['gateway']
    return gateway


def get_network_cidr(cluster_networks, network_name):
    network = _get_cluster_network(cluster_networks, network_name)
    cidr = network['cidr']
    if not cidr:
        msg = "cidr of network %s does not exist" % (network_name)
        raise exception.InvalidNetworkConfig(msg)
    return cidr
def get_mngt_network_vlan_id(cluster_networks):
    mgnt_vlan_id = ""
    management_network = [network for network in cluster_networks if network[
        'network_type'] == 'MANAGEMENT']
    if (not management_network or
            not management_network[0] or
            # not management_network[0].has_key('vlan_id')):
            'vlan_id' not in management_network[0]):
        msg = "can't get management network vlan id"
        raise exception.InvalidNetworkConfig(msg)
    else:
@@ -174,7 +196,8 @@ def get_network_vlan_id(cluster_networks, network_type):
    general_network = [network for network in cluster_networks
                       if network['network_type'] == network_type]
    if (not general_network or not general_network[0] or
            # not general_network[0].has_key('vlan_id')):
            'vlan_id' not in general_network[0]):
        msg = "can't get %s network vlan id" % network_type
        raise exception.InvalidNetworkConfig(msg)
    else:
@@ -182,7 +205,7 @@ def get_network_vlan_id(cluster_networks, network_type):
    return vlan_id
def sort_interfaces_by_pci(networks, host_detail):
    """
    Sort interfaces by pci segment; if the interface type is bond,
    use the pci of the first member nic. This function fixes a bug for
@@ -192,53 +215,37 @@ def sort_interfaces_by_pci(host_detail):
    :return:
    """
    interfaces = eval(host_detail.get('interfaces', None)) \
        if isinstance(host_detail, unicode) else \
        host_detail.get('interfaces', None)
    if not interfaces:
        LOG.info("This host has no interfaces info.")
        return host_detail

    tmp_interfaces = copy.deepcopy(interfaces)

    slaves_name_list = []
    for interface in tmp_interfaces:
        if interface.get('type', None) == "bond" and\
                interface.get('slave1', None) and\
                interface.get('slave2', None):
            slaves_name_list.append(interface['slave1'])
            slaves_name_list.append(interface['slave2'])

    for interface in interfaces:
        if interface.get('name') not in slaves_name_list:
            vlan_id_len_list = [len(network['vlan_id'])
                                for assigned_network in interface.get(
                                    'assigned_networks', [])
                                for network in networks
                                if assigned_network.get('name') ==
                                network.get('name') and network.get('vlan_id')]
            max_vlan_id_len = max(vlan_id_len_list) if vlan_id_len_list else 0
            interface_name_len = len(interface['name'])
            redundant_bit = interface_name_len + max_vlan_id_len - 14
            interface['name'] = interface['name'][
                redundant_bit:] if redundant_bit > 0 else interface['name']
    return host_detail
def check_and_get_tecs_version(daisy_tecs_pkg_path):
    tecs_version_pkg_file = ""
@@ -255,33 +262,60 @@ def check_and_get_tecs_version(daisy_tecs_pkg_path):
    daisy_cmn.subprocess_call(chmod_for_tecs_version)
    return tecs_version_pkg_file


def get_service_disk_list(req, params):
    try:
        service_disks = registry.list_service_disk_metadata(
            req.context, **params)
    except exception.Invalid as e:
        raise HTTPBadRequest(explanation=e.msg, request=req)
    return service_disks


def get_cinder_volume_list(req, params):
    try:
        cinder_volumes = registry.list_cinder_volume_metadata(
            req.context, **params)
    except exception.Invalid as e:
        raise HTTPBadRequest(explanation=e.msg, request=req)
    return cinder_volumes
def mask_string(unmasked, mask_list=None, replace_list=None):
    """
    Replaces words from mask_list with MASK in the unmasked string.
    If words need to be transformed before masking, the transformation
    can be described in the replace list. For example [("'","'\\''")]
    replaces all ' characters with '\\''.
    """
    mask_list = mask_list or []
    replace_list = replace_list or []

    masked = unmasked
    for word in sorted(mask_list, lambda x, y: len(y) - len(x)):
        if not word:
            continue
        for before, after in replace_list:
            word = word.replace(before, after)
        masked = masked.replace(word, STR_MASK)
    return masked
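A usage example for the new helper: secrets are replaced with the eight-char mask before anything is logged, and the replace list pre-transforms words that were shell-quoted on the way out (e.g. ' becoming '\''). Longest words are masked first so substrings don't leak:

out = "sshpass -p ossdbg1 ssh 10.0.0.4 ls"
print(mask_string(out, mask_list=['ossdbg1']))
# -> "sshpass -p ******** ssh 10.0.0.4 ls"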
def run_scrip(script, ip=None, password=None, msg=None):
    try:
        _run_scrip(script, ip, password)
    except:
        msg1 = 'Error occurred during running scripts.'
        message = msg1 + msg if msg else msg1
        LOG.error(message)
        raise HTTPForbidden(explanation=message)
    else:
        LOG.info('Running scripts successfully!')


def _run_scrip(script, ip=None, password=None):
    mask_list = []
    repl_list = [("'", "'\\''")]
    script = "\n".join(script)
    _PIPE = subprocess.PIPE
    if ip:
@@ -297,19 +331,104 @@ def run_scrip(script, ip=None, password=None):
    script = "function t(){ exit $? ; } \n trap t ERR \n" + script
    out, err = obj.communicate(script)

    masked_out = mask_string(out, mask_list, repl_list)
    masked_err = mask_string(err, mask_list, repl_list)
    if obj.returncode:
        pattern = (r'^ssh\:')
        if re.search(pattern, err):
            LOG.error(_("Network error occurred when running script."))
            raise exception.NetworkError(masked_err, stdout=out, stderr=err)
        else:
            msg = ('Failed to run remote script, stdout: %s\nstderr: %s' %
                   (masked_out, masked_err))
            LOG.error(msg)
            raise exception.ScriptRuntimeError(msg, stdout=out, stderr=err)
    return obj.returncode, out
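The split gives callers two levels: _run_scrip returns (returncode, out) and raises typed exceptions (NetworkError when stderr starts with "ssh:", ScriptRuntimeError otherwise, both with masked output), while run_scrip converts any failure into an HTTPForbidden for the API layer. A typical call, with the optional context message that is appended on failure (host and path here are illustrative):

scripts = ['mkdir -p /home/tecs_install']
run_scrip(scripts, ip='192.168.1.10', password='ossdbg1',
          msg=' Failed to prepare /home/tecs_install on 192.168.1.10.')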
def inform_provider_cloud_state(context, cluster_id, **kwargs):
    params = dict()
    daisyclient = get_daisyclient()
    cluster = registry.get_cluster_metadata(context, cluster_id)
    params['operation'] = kwargs.get('operation')
    params['name'] = cluster.get('name')
    params['url'] = "http://" + cluster.get('public_vip')
    params['provider_ip'] = cluster.get('hwm_ip')
    daisyclient.node.cloud_state(**params)


def get_disk_array_nodes_addr(req, cluster_id):
    controller_ha_nodes = {}
    computer_ips = set()

    roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id)
    cluster_networks =\
        daisy_cmn.get_cluster_networks_detail(req, cluster_id)
    for role in roles:
        if role['deployment_backend'] != daisy_cmn.tecs_backend_name:
            continue
        role_hosts = daisy_cmn.get_hosts_of_role(req, role['id'])
        for role_host in role_hosts:
            # hosts that already have tecs installed are excluded
            if (role_host['status'] == TECS_STATE['ACTIVE'] or
                    role_host['status'] == TECS_STATE['UPDATING'] or
                    role_host['status'] == TECS_STATE['UPDATE_FAILED']):
                continue
            host_detail = daisy_cmn.get_host_detail(req,
                                                    role_host['host_id'])
            host_ip = get_host_network_ip(req,
                                          host_detail,
                                          cluster_networks,
                                          'MANAGEMENT')
            if role['name'] == "CONTROLLER_HA":
                min_mac = utils.get_host_min_mac(host_detail['interfaces'])
                controller_ha_nodes[host_ip] = min_mac
            if role['name'] == "COMPUTER":
                computer_ips.add(host_ip)
    return {'ha': controller_ha_nodes, 'computer': computer_ips}


def get_ctl_ha_nodes_min_mac(req, cluster_id):
    '''
    ctl_ha_nodes_min_mac = {'host_name1':'min_mac1', ...}
    '''
    ctl_ha_nodes_min_mac = {}
    roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id)
    cluster_networks =\
        daisy_cmn.get_cluster_networks_detail(req, cluster_id)
    for role in roles:
        if role['deployment_backend'] != daisy_cmn.tecs_backend_name:
            continue
        role_hosts = daisy_cmn.get_hosts_of_role(req, role['id'])
        for role_host in role_hosts:
            # hosts that already have tecs installed are excluded
            if (role_host['status'] == TECS_STATE['ACTIVE'] or
                    role_host['status'] == TECS_STATE['UPDATING'] or
                    role_host['status'] == TECS_STATE['UPDATE_FAILED']):
                continue
            host_detail = daisy_cmn.get_host_detail(req,
                                                    role_host['host_id'])
            host_name = host_detail['name']
            if role['name'] == "CONTROLLER_HA":
                min_mac = utils.get_host_min_mac(host_detail['interfaces'])
                ctl_ha_nodes_min_mac[host_name] = min_mac
    return ctl_ha_nodes_min_mac
class TecsShellExector(object):
    """
    Configure tasks on a node before the TECS bin is installed.
    """
    def __init__(self, mgnt_ip, task_type, params={}):
        self.task_type = task_type
        self.mgnt_ip = mgnt_ip
        self.params = params
        self.clush_cmd = ""
        self.rpm_name = \
            daisy_cmn.get_rpm_package_by_name(daisy_tecs_path,
                                              'network-configuration')
        self.NETCFG_RPM_PATH = daisy_tecs_path + self.rpm_name
        self.oper_type = {
            'install_rpm': self._install_netcfg_rpm,
@ -321,7 +440,8 @@ class TecsShellExector(object):
            'CMD_RPM_UNINSTALL': "rpm -e network-configuration",
            'CMD_RPM_INSTALL': "rpm -i /home/%(rpm)s" % {'rpm': self.rpm_name},
            'CMD_RPM_UPDATE': "rpm -U /home/%(rpm)s" % {'rpm': self.rpm_name},
            'CMD_RPM_SCP': "scp -o StrictHostKeyChecking=no "
                           "%(path)s root@%(ssh_ip)s:/home" %
                           {'path': self.NETCFG_RPM_PATH, 'ssh_ip': mgnt_ip}
        }
        LOG.info(_("<<<Network configuration rpm is %s>>>" % self.rpm_name))
@ -329,13 +449,17 @@ class TecsShellExector(object):
    def _uninstall_netcfg_rpm(self):
        self.clush_cmd = self.oper_shell['CMD_SSHPASS_PRE'] % \
            {"ssh_ip": "ssh -o StrictHostKeyChecking=no " + self.mgnt_ip,
             "cmd": self.oper_shell['CMD_RPM_UNINSTALL']}
        subprocess.check_output(
            self.clush_cmd, shell=True, stderr=subprocess.STDOUT)

    def _update_netcfg_rpm(self):
        self.clush_cmd = self.oper_shell['CMD_SSHPASS_PRE'] % \
            {"ssh_ip": "ssh -o StrictHostKeyChecking=no " + self.mgnt_ip,
             "cmd": self.oper_shell['CMD_RPM_UPDATE']}
        subprocess.check_output(
            self.clush_cmd, shell=True, stderr=subprocess.STDOUT)

    def _install_netcfg_rpm(self):
        if not os.path.exists(self.NETCFG_RPM_PATH):
@ -344,21 +468,29 @@ class TecsShellExector(object):
        self.clush_cmd = "%s;%s" % \
            (self.oper_shell['CMD_SSHPASS_PRE'] %
             {"ssh_ip": "", "cmd": self.oper_shell['CMD_RPM_SCP']},
             self.oper_shell['CMD_SSHPASS_PRE'] %
             {"ssh_ip": "ssh -o StrictHostKeyChecking=no " +
              self.mgnt_ip, "cmd": self.oper_shell['CMD_RPM_INSTALL']})
        subprocess.check_output(
            self.clush_cmd, shell=True, stderr=subprocess.STDOUT)

    def _execute(self):
        try:
            if not self.task_type or not self.mgnt_ip:
                LOG.error(
                    _("<<<TecsShellExector::execute, input params invalid "
                      "on %s!>>>" % self.mgnt_ip, ))
                return
            self.oper_type[self.task_type]()
        except subprocess.CalledProcessError as e:
            LOG.warn(_("<<<TecsShellExector::execute: command failed "
                       "on %s! Reason: %s>>>"
                       % (self.mgnt_ip, e.output.strip())))
        except Exception as e:
            LOG.exception(_(e.message))
        else:
            LOG.info(_("<<<TecsShellExector::execute: command %s "
                       "succeeded on %s>>>" % (self.clush_cmd, self.mgnt_ip)))
@ -4,6 +4,8 @@ import re
import commands
import types
import subprocess
import socket
import netaddr
from oslo_log import log as logging
from ConfigParser import ConfigParser
from daisy.common import exception
@ -22,15 +24,17 @@ service_map = {
    'mariadb': 'mariadb',
    'amqp': 'rabbitmq-server',
    'ceilometer-api': 'openstack-ceilometer-api',
    'ceilometer-collector': 'openstack-ceilometer-collector,'
                            'openstack-ceilometer-mend',
    'ceilometer-central': 'openstack-ceilometer-central',
    'ceilometer-notification': 'openstack-ceilometer-notification',
    'ceilometer-alarm': 'openstack-ceilometer-alarm-evaluator,'
                        'openstack-ceilometer-alarm-notifier',
    'heat-api': 'openstack-heat-api',
    'heat-api-cfn': 'openstack-heat-api-cfn',
    'heat-engine': 'openstack-heat-engine',
    'ironic': 'openstack-ironic-api,openstack-ironic-conductor',
    'horizon': 'httpd,opencos-alarmmanager',
    'keystone': 'openstack-keystone',
    'glance': 'openstack-glance-api,openstack-glance-registry',
    'cinder-volume': 'openstack-cinder-volume',
@ -47,7 +51,8 @@ service_map = {
    'nova-vncproxy': 'openstack-nova-novncproxy,openstack-nova-consoleauth',
    'nova-conductor': 'openstack-nova-conductor',
    'nova-api': 'openstack-nova-api',
    'nova-cells': 'openstack-nova-cells',
    'camellia-api': 'camellia-api'
}
@ -63,36 +68,33 @@ def add_service_with_hosts(services, name, hosts):
    for h in hosts:
        services[name].append(h['management']['ip'])


def test_ping(ping_src_nic, ping_desc_ips):
    ping_cmd = 'fping'
    for ip in set(ping_desc_ips):
        ping_cmd = ping_cmd + ' -I ' + ping_src_nic + ' ' + ip
    obj = subprocess.Popen(
        ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdoutput, erroutput) = obj.communicate()
    _returncode = obj.returncode
    if _returncode == 0 or _returncode == 1:
        if "No such device" in erroutput:
            return []
        ping_result = stdoutput.split('\n')
        reachable_hosts = [result.split()[0] for result in ping_result
                           if result and result.split()[2] == 'alive']
    else:
        msg = "ping failed because there is an invalid ip in %s" \
              % ping_desc_ips
        raise exception.InvalidIP(msg)
    return reachable_hosts


def get_local_deployment_ip(tecs_deployment_ips):
    (status, output) = commands.getstatusoutput('ifconfig')
    netcard_pattern = re.compile('\S*: ')
    ip_str = '([0-9]{1,3}\.){3}[0-9]{1,3}'
    pattern = re.compile(ip_str)
    nic_ip = {}
    for netcard in re.finditer(netcard_pattern, str(output)):
@ -108,20 +110,20 @@ def get_local_deployment_ip(tecs_deployment_ip):
        nic_ip[nic_name] = ip.group()

    deployment_ip = ''
    for nic in nic_ip.keys():
        if nic_ip[nic] in tecs_deployment_ips:
            deployment_ip = nic_ip[nic]
            break
    if not deployment_ip:
        for nic, ip in nic_ip.items():
            if test_ping(nic, tecs_deployment_ips):
                deployment_ip = nic_ip[nic]
                break
    return deployment_ip
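The parsing in test_ping relies on fping's one-line-per-target output; a minimal sketch of the assumed format and the 'alive' filter used above:

    # Assumed fping output: '<ip> is alive' or '<ip> is unreachable',
    # so tokens[0] is the IP and tokens[2] the state.
    sample_stdout = '192.0.2.1 is alive\n192.0.2.2 is unreachable\n'
    reachable = [line.split()[0] for line in sample_stdout.split('\n')
                 if line and line.split()[2] == 'alive']
    assert reachable == ['192.0.2.1']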
class AnalsyConfig(object):
    def __init__(self, all_configs):
        self.all_configs = all_configs
@ -139,17 +141,32 @@ class AnalsyConfig(object):
        self.glance_vip = ''
        self.public_vip = ''

        self.share_disk_services = []
        self.share_cluster_disk_services = []

        self.ha_conf = {}
        self.child_cell_dict = {}
        self.ha_master_host = {}

    def get_heartbeats(self, host_interfaces):
        for network in host_interfaces:
            self.heartbeats[0].append(network["management"]["ip"])
            if "heartbeat1" in network and network["heartbeat1"]["ip"]:
                self.heartbeats[1].append(network["heartbeat1"]["ip"])
            if "heartbeat2" in network and network["heartbeat2"]["ip"]:
                self.heartbeats[2].append(network["heartbeat2"]["ip"])
            if "storage" in network and network["storage"]["ip"]:
                if "heartbeat1" not in network:
                    self.heartbeats[1].append(network["storage"]["ip"])
                if "heartbeat1" in network and \
                        "heartbeat2" not in network:
                    self.heartbeats[2].append(network["storage"]["ip"])

        # delete empty heartbeat lines
        if not self.heartbeats[0]:
@ -164,7 +181,8 @@ class AnalsyConfig(object):
        if set(self.heartbeats[2]) != set(self.heartbeats[0]):
            self.heartbeats[1] = self.heartbeats[2]
            self.heartbeats[2] = []
        if set(self.heartbeats[2]) == set(self.heartbeats[0]) or \
                set(self.heartbeats[2]) == set(self.heartbeats[1]):
            self.heartbeats[2] = []
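A simplified sketch of the pruning effect (assuming, per the elided lines, that heartbeat1 is cleared first when it duplicates the management list): when every candidate link carries the same IPs, only heartbeat_link1 survives:

    heartbeats = [['192.0.2.1', '192.0.2.2'],   # management
                  ['192.0.2.1', '192.0.2.2'],   # heartbeat1 or storage
                  ['192.0.2.1', '192.0.2.2']]   # heartbeat2 or storage
    if set(heartbeats[1]) == set(heartbeats[0]):
        heartbeats[1] = []
    if set(heartbeats[2]) == set(heartbeats[0]) or \
            set(heartbeats[2]) == set(heartbeats[1]):
        heartbeats[2] = []
    assert heartbeats == [['192.0.2.1', '192.0.2.2'], [], []]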
    def prepare_child_cell(self, child_cell_name, configs):
@ -181,24 +199,50 @@ class AnalsyConfig(object):
        child_cell_host = configs['host_interfaces'][0]['management']['ip']
        self.child_cell_dict[repr(child_cell_host).strip("u'")] \
            = repr(cell_compute_hosts).strip("u'")

    def prepare_ha_lb(self, role_configs, is_ha, is_lb):
        if is_lb:
            self.ha_master_host['ip'] = role_configs[
                'host_interfaces'][0]['management']['ip']
            self.ha_master_host['hostname'] = role_configs[
                'host_interfaces'][0]['name']
            self.components.append('CONFIG_LB_INSTALL')
            add_service_with_hosts(self.services,
                                   'CONFIG_LB_BACKEND_HOSTS',
                                   role_configs['host_interfaces'])
            self.lb_vip = role_configs['vip']
        if is_ha:
            # convert dns names to ips
            manage_ips = []
            for host_interface in role_configs['host_interfaces']:
                manage_ip = ''
                management_addr = host_interface['management']['ip']
                try:
                    ip_lists = socket.gethostbyname_ex(management_addr)
                    manage_ip = ip_lists[2][0]
                except Exception:
                    if netaddr.IPAddress(management_addr).version == 6:
                        manage_ip = management_addr
                    else:
                        raise exception.InvalidNetworkConfig(
                            "manage ip is not valid %s" % management_addr)
                finally:
                    manage_ips.append(manage_ip)
            self.ha_vip = role_configs['vip']
            self.share_disk_services += role_configs['share_disk_services']
            self.share_cluster_disk_services += \
                role_configs['share_cluster_disk_services']
            local_deployment_ip = get_local_deployment_ip(manage_ips)
            filename = r'/etc/zte-docker'
            if local_deployment_ip:
                if os.path.exists(filename):
                    add_service_with_host(
                        self.services, 'CONFIG_REPO',
                        'http://' + local_deployment_ip +
                        ':18080' + '/tecs_install/')
                else:
                    add_service_with_host(
                        self.services, 'CONFIG_REPO',
                        'http://' + local_deployment_ip + '/tecs_install/')
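The "convert dns names to ips" block accepts either a resolvable hostname or an IPv6 literal; a minimal sketch of that decision (requires the netaddr package this module already imports; 'localhost' is a hypothetical management address):

    import socket
    import netaddr

    addr = 'localhost'   # hypothetical management address (name or IP)
    try:
        # gethostbyname_ex returns (hostname, aliaslist, ipaddrlist).
        manage_ip = socket.gethostbyname_ex(addr)[2][0]
    except Exception:
        # Resolution fails for IPv6 literals; accept them as-is.
        if netaddr.IPAddress(addr).version == 6:
            manage_ip = addr
        else:
            raise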
@ -218,32 +262,42 @@ class AnalsyConfig(object):
        if role_configs['db_vip']:
            self.db_vip = role_configs['db_vip']
            add_service_with_host(
                self.services, 'CONFIG_MARIADB_HOST',
                role_configs['db_vip'])
        else:
            self.db_vip = role_configs['vip']
            add_service_with_host(
                self.services, 'CONFIG_MARIADB_HOST', role_configs['vip'])

        if role_configs['glance_vip']:
            self.glance_vip = role_configs['glance_vip']
            add_service_with_host(
                self.services, 'CONFIG_GLANCE_HOST',
                role_configs['glance_vip'])
        else:
            self.glance_vip = role_configs['vip']
            add_service_with_host(
                self.services, 'CONFIG_GLANCE_HOST', role_configs['vip'])

        if role_configs['public_vip']:
            self.public_vip = role_configs['public_vip']
        else:
            self.public_vip = role_configs['vip']
        add_service_with_host(self.services,
                              'CONFIG_NOVA_VNCPROXY_HOST',
                              self.public_vip)
        add_service_with_host(self.services, 'CONFIG_PUBLIC_IP',
                              self.public_vip)
        add_service_with_host(self.services, 'CONFIG_HORIZON_HOST',
                              self.public_vip)
        '''
        add_service_with_host(self.services, 'CONFIG_ADMIN_IP',
                              role_configs['vip'])
        add_service_with_host(self.services, 'CONFIG_INTERNAL_IP',
                              role_configs['vip'])
        '''
    def prepare_role_service(self, is_ha, service, role_configs):
        host_key_name = "CONFIG_%s_HOST" % service
@ -251,7 +305,8 @@ class AnalsyConfig(object):
        add_service_with_hosts(self.services, hosts_key_name,
                               role_configs['host_interfaces'])
        if service != 'LB' and service not in ['NOVA_VNCPROXY', 'MARIADB',
                                               'GLANCE', 'HORIZON']:
            add_service_with_host(self.services, host_key_name,
                                  role_configs['vip'])
@ -276,7 +331,8 @@ class AnalsyConfig(object):
        # self.modes.update({'CONFIG_HEAT_API_INSTALL_MODE': 'LB'})
        # self.modes.update({'CONFIG_HEAT_API_CFN_INSTALL_MODE': 'LB'})
        # if s == 'CEILOMETER':
        #     self.modes.update({
        #         'CONFIG_CEILOMETER_API_INSTALL_MODE': 'LB'})
        if service == 'IRONIC':
            self.modes.update(
                {'CONFIG_IRONIC_API_INSTALL_MODE': 'LB'})
@ -287,8 +343,8 @@ class AnalsyConfig(object):
        if component not in self.services_in_component.keys():
            self.services_in_component[component] = {}
            self.services_in_component[component]["service"] = []
        self.services_in_component[component][
            "service"].append(service_map[service])

        if component == "horizon":
            self.services_in_component[component]["fip"] = self.public_vip
@ -299,10 +355,10 @@ class AnalsyConfig(object):
        else:
            self.services_in_component[component]["fip"] = role_configs["vip"]

        network_name = ''
        if component in ['horizon'] and \
                'publicapi' in role_configs["host_interfaces"][0]:
            network_name = 'publicapi'
        else:
            network_name = 'management'
@ -311,7 +367,7 @@ class AnalsyConfig(object):
        self.services_in_component[component]["nic_name"] = \
            role_configs["host_interfaces"][0][network_name]["name"]
        if component == 'loadbalance' and \
                'CONTROLLER_LB' in self.all_configs and \
                self.all_configs['CONTROLLER_LB']['vip']:
            self.services_in_component[component]["fip"] = \
                self.all_configs['CONTROLLER_LB']['vip']
@ -331,15 +387,20 @@ class AnalsyConfig(object):
            else:
                amqp_vip = self.ha_vip
            amqp_dict = "{'%s':'%s,%s,%s,%s'}" % (amqp_vip, self.ha_vip,
                                                  self.lb_vip, self.glance_vip,
                                                  self.public_vip)
            mariadb_dict = "{'%s':'%s,%s,%s,%s'}" % (self.db_vip, self.ha_vip,
                                                     self.lb_vip,
                                                     self.glance_vip,
                                                     self.public_vip)
            add_service_with_host(self.services, 'CONFIG_LB_HOST', self.lb_vip)
        elif self.ha_vip:
            amqp_dict = "{'%s':'%s,%s,%s'}" % (self.ha_vip, self.ha_vip,
                                               self.glance_vip,
                                               self.public_vip)
            mariadb_dict = "{'%s':'%s,%s,%s'}" % (self.db_vip, self.ha_vip,
                                                  self.glance_vip,
                                                  self.public_vip)
        else:
            amqp_dict = "{}"
            mariadb_dict = "{}"
@ -382,32 +443,37 @@ class AnalsyConfig(object):
        self.prepare_amqp_mariadb()

        if self.child_cell_dict:
            add_service_with_host(self.services, 'CONFIG_CHILD_CELL_DICT',
                                  str(self.child_cell_dict))

    def update_conf_with_services(self, tecs):
        for s in self.services:
            if tecs.has_option("general", s):
                if isinstance(self.services[s], types.ListType):
                    if self.services[s] and not self.services[s][0]:
                        return
                    tecs.set("general", s, ','.join(self.services[s]))
            else:
                msg = "service %s does not exist in the conf file" % s
                LOG.info(msg)

    def update_conf_with_components(self, tecs):
        for s in self.components:
            if tecs.has_option("general", s):
                tecs.set("general", s, 'y')
            else:
                msg = "component %s does not exist in the conf file" % s
                LOG.info(msg)

    def update_conf_with_modes(self, tecs):
        for k, v in self.modes.items():
            if tecs.has_option("general", k):
                tecs.set("general", k, v)
            else:
                msg = "mode %s does not exist in the conf file" % k
                LOG.info(msg)
    def update_tecs_conf(self, tecs):
        self.update_conf_with_services(tecs)
@ -415,17 +481,13 @@ class AnalsyConfig(object):
        self.update_conf_with_modes(tecs)

    def update_ha_conf(self, ha, ha_nic_name, tecs=None):
        if self.all_configs['OTHER'].get('dns_config'):
            for heartbeat in self.heartbeats:
                for name_ip in self.all_configs['OTHER']['dns_config']:
                    for tmp in heartbeat:
                        if tmp == name_ip.keys()[0]:
                            heartbeat.remove(tmp)
                            heartbeat.append(name_ip.values()[0])

            for k, v in self.services_in_component.items():
                for name_ip in self.all_configs['OTHER']['dns_config']:
@ -435,38 +497,80 @@ class AnalsyConfig(object):
        ha.set('DEFAULT', 'heartbeat_link2', ','.join(self.heartbeats[1]))
        ha.set('DEFAULT', 'heartbeat_link3', ','.join(self.heartbeats[2]))

        ha.set('DEFAULT', 'components', ','.join(
            self.services_in_component.keys()))

        for k, v in self.services_in_component.items():
            ha.set('DEFAULT', k, ','.join(v['service']))
            if k == 'glance':
                if 'glance' in self.share_disk_services:
                    ha.set('DEFAULT', 'glance_device_type', 'iscsi')
                    ha.set(
                        'DEFAULT', 'glance_device',
                        '/dev/mapper/vg_glance-lv_glance')
                    ha.set('DEFAULT', 'glance_fs_type', 'ext4')
                else:
                    ha.set('DEFAULT', 'glance_device_type', 'drbd')
                    ha.set(
                        'DEFAULT', 'glance_device', '/dev/vg_data/lv_glance')
                    ha.set('DEFAULT', 'glance_fs_type', 'ext4')
            # mariadb does not support a db cluster yet, so no share disk
            # support there.
            if k == "database":
                if 'db' in self.share_disk_services:
                    ha.set(
                        'DEFAULT', 'database_device',
                        '/dev/mapper/vg_db-lv_db')
                    ha.set('DEFAULT', 'database_fs_type', 'ext4')
                    ha.set('DEFAULT', 'database_device_type', 'share')
                    if tecs:
                        tecs.set(
                            "general",
                            'CONFIG_HA_INSTALL_MARIADB_LOCAL',
                            'n')
                elif 'db' in self.share_cluster_disk_services:
                    ha.set(
                        'DEFAULT', 'database_device',
                        '/dev/mapper/vg_db-lv_db')
                    ha.set('DEFAULT', 'database_fs_type', 'ext4')
                    ha.set('DEFAULT', 'database_device_type', 'share_cluster')
                    if tecs:
                        tecs.set(
                            "general",
                            'CONFIG_HA_INSTALL_MARIADB_LOCAL',
                            'y')
                else:
                    ha.set('DEFAULT', 'database_device_type', 'local_cluster')
                    if tecs:
                        tecs.set(
                            "general",
                            'CONFIG_HA_INSTALL_MARIADB_LOCAL',
                            'y')
                if 'db_backup' in self.share_disk_services:
                    ha.set(
                        'DEFAULT',
                        'backup_database_device',
                        '/dev/mapper/vg_db_backup-lv_db_backup')
                    ha.set('DEFAULT', 'backup_database_fs_type', 'ext4')
            if "mongod" in v['service']:
                if 'mongodb' in self.share_disk_services:
                    ha.set(
                        'DEFAULT', 'mongod_device',
                        '/dev/mapper/vg_mongodb-lv_mongodb')
                    ha.set('DEFAULT', 'mongod_fs_type', 'ext4')
                    ha.set('DEFAULT', 'mongod_local', '')
                    if tecs:
                        tecs.set(
                            "general",
                            'CONFIG_HA_INSTALL_MONGODB_LOCAL', 'n')
                else:
                    ha.set('DEFAULT', 'mongod_fs_type', 'ext4')
                    ha.set('DEFAULT', 'mongod_local', 'yes')
                    if tecs:
                        tecs.set(
                            "general",
                            'CONFIG_HA_INSTALL_MONGODB_LOCAL', 'y')

            if k not in self.lb_components:
                # if "bond" in v['nic_name']:
@ -478,12 +582,15 @@ class AnalsyConfig(object):
                nic_name = v['nic_name']
                ha.set('DEFAULT', k + '_nic', nic_name)
                cidr_netmask = reduce(lambda x, y: x + y,
                                      [bin(int(i)).count('1')
                                       for i in v['netmask'].split('.')])
                ha.set('DEFAULT', k + '_netmask', cidr_netmask)
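The reduce above is a dotted-quad-to-CIDR conversion: it counts the set bits of each netmask octet and sums them into the prefix length. For example:

    netmask = '255.255.254.0'
    prefix = reduce(lambda x, y: x + y,
                    [bin(int(i)).count('1') for i in netmask.split('.')])
    assert prefix == 23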
def update_conf(tecs, key, value):
    tecs.set("general", key, value)


def get_conf(tecs_conf_file, **kwargs):
    result = {}
    if not kwargs:
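Note the ha.optionxform = str assignments used later in this module: ConfigParser lowercases option names by default, which would mangle upper-case keys. A small sketch of why that matters, reusing update_conf from above:

    from ConfigParser import ConfigParser   # Python 2, as in this module

    cp = ConfigParser()
    cp.optionxform = str        # preserve the case of keys like CONFIG_REPO
    cp.add_section('general')
    update_conf(cp, 'CONFIG_REPO', 'http://192.0.2.1/tecs_install/')
    assert cp.get('general', 'CONFIG_REPO') == \
        'http://192.0.2.1/tecs_install/'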
@ -563,6 +670,7 @@ class DvsDaisyConfig(object):
        # common
        self.dvs_network_type = []
        self.dvs_vswitch_type = {}
        self.dvs_cpu_sets = []
        self.dvs_physnics = []
        self.enable_sdn = False
@ -586,6 +694,9 @@ class DvsDaisyConfig(object):
            return
        self.dvs_vswitch_type.update(vswitch_type)

        dvs_cpu_sets = network.get('dvs_cpu_sets')
        self.dvs_cpu_sets.extend(dvs_cpu_sets)

        network_type = network['network_config'].get('network_type')
        if network_type in ['vlan']:
@ -602,12 +713,15 @@ class DvsDaisyConfig(object):
                len(self.dvs_vswitch_type.get('ovs_agent_patch')) > 0):
            return

        if not self.dvs_vswitch_type.get('ovs_agent_patch') and not \
                self.dvs_vswitch_type.get('ovdk'):
            return

        update_conf(self.tecs, 'CONFIG_DVS_TYPE', self.dvs_vswitch_type)
        update_conf(self.tecs, 'CONFIG_DVS_PHYSICAL_NICS',
                    ",".join(set(self.dvs_physnics)))
        # cpu sets for dvs; CONFIG_DVS_CPU_SETS must first be added
        # to tecs.conf
        update_conf(self.tecs, 'CONFIG_DVS_CPU_SETS', self.dvs_cpu_sets)

        if 'vlan' in self.dvs_network_type:
            update_conf(self.tecs, 'CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS',
@ -693,6 +807,7 @@ class DvsDaisyConfig(object):
default_tecs_conf_template_path = "/var/lib/daisy/tecs/"
tecs_conf_template_path = default_tecs_conf_template_path


def private_network_conf(tecs, private_networks_config):
    if private_networks_config:
        mode_str = {
@ -712,7 +827,8 @@ def private_network_conf(tecs, private_networks_config):
            slave1 = private_network.get('slave1', None)
            slave2 = private_network.get('slave2', None)
            mode = private_network.get('mode', None)
            if not type or not name or not assign_networks or not \
                    slave1 or not slave2 or not mode:
                break

            for assign_network in assign_networks:
@ -724,23 +840,33 @@ def private_network_conf(tecs, private_networks_config):
                    break

            # ether
            if 0 == cmp(type, 'ether') and \
                    0 == cmp(network_type, 'DATAPLANE'):
                if 0 == cmp(ml2_type, 'sriov'):
                    config_neutron_sriov_bridge_mappings.append(
                        "%s:%s" % (physnet_name, "br-" + name))
                    config_neutron_sriov_physnet_ifaces.append(
                        "%s:%s" % (physnet_name, name))
                elif 0 == cmp(ml2_type, 'ovs'):
                    config_neutron_ovs_bridge_mappings.append(
                        "%s:%s" % (physnet_name, "br-" + name))
                    config_neutron_ovs_physnet_ifaces.append(
                        "%s:%s" % (physnet_name, name))
            # bond
            elif 0 == cmp(type, 'bond') and \
                    0 == cmp(network_type, 'DATAPLANE'):
                if 0 == cmp(ml2_type, 'sriov'):
                    config_neutron_sriov_bridge_mappings.append(
                        "%s:%s" % (physnet_name, "br-" + name))
                    config_neutron_sriov_physnet_ifaces.append(
                        "%s:%s" % (physnet_name, name + mode_str[mode]
                                   % (slave1, slave2)))
                elif 0 == cmp(ml2_type, 'ovs'):
                    config_neutron_ovs_bridge_mappings.append(
                        "%s:%s" % (physnet_name, "br-" + name))
                    config_neutron_ovs_physnet_ifaces.append(
                        "%s:%s" % (physnet_name, name + mode_str[mode]
                                   % (slave1, slave2)))

        if config_neutron_sriov_bridge_mappings:
            update_conf(tecs,
@ -751,14 +877,14 @@ def private_network_conf(tecs, private_networks_config):
                        'CONFIG_NEUTRON_SRIOV_PHYSNET_IFACES',
                        ",".join(config_neutron_sriov_physnet_ifaces))
        if config_neutron_ovs_bridge_mappings:
            update_conf(tecs, 'CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS',
                        ",".join(config_neutron_ovs_bridge_mappings))
        if config_neutron_ovs_physnet_ifaces:
            update_conf(tecs, 'CONFIG_NEUTRON_OVS_PHYSNET_IFACES',
                        ",".join(config_neutron_ovs_physnet_ifaces))
def update_tecs_config(config_data, cluster_conf_path):
    msg = "tecs config data is: %s" % config_data
    LOG.info(msg)
@ -776,35 +902,52 @@ def update_tecs_config(config_data, cluster_conf_path):
    cluster_data = config_data['OTHER']['cluster_data']
    update_conf(tecs, 'CLUSTER_ID', cluster_data['id'])
    if 'networking_parameters' in cluster_data:
        networking_parameters = cluster_data['networking_parameters']
        if 'base_mac' in networking_parameters and \
                networking_parameters['base_mac']:
            update_conf(
                tecs, 'CONFIG_NEUTRON_BASE_MAC',
                networking_parameters['base_mac'])
        if 'gre_id_range' in networking_parameters and \
                len(networking_parameters['gre_id_range']) > 1 and \
                networking_parameters['gre_id_range'][0] and \
                networking_parameters['gre_id_range'][1]:
            update_conf(tecs, 'CONFIG_NEUTRON_ML2_TUNNEL_ID_RANGES',
                        ("%s:%s" % (networking_parameters['gre_id_range'][0],
                                    networking_parameters['gre_id_range'][1])))
    if 'vxlan' in config_data['OTHER'].get('segmentation_type', {}):
        update_conf(
            tecs, 'CONFIG_NEUTRON_ML2_VNI_RANGES',
            config_data['OTHER']['segmentation_type']['vxlan']['vni_range'])
        update_conf(tecs, 'CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES', 'vxlan')
        update_conf(tecs, 'CONFIG_NEUTRON_ML2_TYPE_DRIVERS', 'vxlan')
    else:
        update_conf(tecs, 'CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES', 'vlan')
        update_conf(tecs, 'CONFIG_NEUTRON_ML2_TYPE_DRIVERS', 'vlan')

    physic_network_cfg = config_data['OTHER']['physic_network_config']
    if physic_network_cfg.get('json_path', None):
        update_conf(
            tecs, 'CONFIG_NEUTRON_ML2_JSON_PATH',
            physic_network_cfg['json_path'])
    if physic_network_cfg.get('vlan_ranges', None):
        update_conf(tecs, 'CONFIG_NEUTRON_ML2_VLAN_RANGES',
                    physic_network_cfg['vlan_ranges'])
    if config_data['OTHER']['tecs_installed_hosts']:
        update_conf(tecs, 'EXCLUDE_SERVERS', ",".join(
            config_data['OTHER']['tecs_installed_hosts']))

    ha = ConfigParser()
    ha.optionxform = str
    ha.read(ha_conf_template_file)
    config = AnalsyConfig(config_data)
    if 'ha_nic_name' in config_data['OTHER']:
        ha_nic_name = config_data['OTHER']['ha_nic_name']
    else:
        ha_nic_name = ""
@ -815,7 +958,8 @@ def update_tecs_config(config_data, cluster_conf_path):
    config.update_ha_conf(ha, ha_nic_name, tecs)

    update_conf_with_zenic(tecs, config_data['OTHER']['zenic_config'])
    if 'network_config' in config_data['OTHER']['dvs_config']:
        config_data['OTHER']['dvs_config']['network_config']['enable_sdn'] = \
            config_data['OTHER']['zenic_config'].get('vip', False)
    dvs_config = DvsDaisyConfig(tecs, config_data['OTHER']['dvs_config'])
@ -16,25 +16,12 @@
"""
/install endpoint for tecs API
"""
import subprocess

from oslo_log import log as logging

from daisy import i18n

from daisy.common import exception
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.tecs.common as tecs_cmn
@ -48,43 +35,96 @@ _ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
tecs_state = tecs_cmn.TECS_STATE
def _get_service_disk_for_disk_array(req, role_id):
    disk_info = []
    service_disks = tecs_cmn.get_service_disk_list(
        req, {'filters': {'role_id': role_id}})
    for service_disk in service_disks:
        share_disk = {}
        if service_disk['disk_location'] == 'share':
            share_disk['service'] = service_disk['service']
            share_disk['protocol_type'] = service_disk['protocol_type']
            share_disk['lun'] = service_disk['lun']
            if service_disk['protocol_type'] == 'FIBER':
                share_disk['fc_hba_wwpn'] = \
                    service_disk['data_ips'].split(',')
            else:
                share_disk['data_ips'] = service_disk['data_ips'].split(',')
            share_disk['lvm_config'] = {}
            share_disk['lvm_config']['size'] = service_disk['size']
            share_disk['lvm_config']['vg_name'] = \
                'vg_%s' % service_disk['service']
            share_disk['lvm_config']['lv_name'] = \
                'lv_%s' % service_disk['service']
            share_disk['lvm_config']['fs_type'] = 'ext4'
            disk_info.append(share_disk)
    return disk_info


def _get_share_cluster_disk_for_disk_array(req, role_id):
    '''
    disk_info = [{'service': 'db', 'lun': 'lun1', 'data_ips': 'data_ip1'},
                 {'service': 'db', 'lun': 'lun2', 'data_ips': 'data_ip2'},
                 {'service': 'glance', 'lun': 'lun3', 'data_ips': 'data_ip3'},
                 {'service': 'glance', 'lun': 'lun4', 'data_ips': 'data_ip4'}]
    '''
    disk_info = []
    service_disks = \
        tecs_cmn.get_service_disk_list(req, {'filters': {'role_id': role_id}})
    for service_disk in service_disks:
        share_cluster_disk = {}
        if service_disk['disk_location'] == 'share_cluster':
            share_cluster_disk['service'] = service_disk['service']
            share_cluster_disk['protocol_type'] = \
                service_disk['protocol_type']
            share_cluster_disk['lun'] = service_disk['lun']
            if service_disk['protocol_type'] == 'FIBER':
                share_cluster_disk['fc_hba_wwpn'] = \
                    service_disk['data_ips'].split(',')
            else:
                share_cluster_disk['data_ips'] = \
                    service_disk['data_ips'].split(',')
            share_cluster_disk['lvm_config'] = {}
            share_cluster_disk['lvm_config']['size'] = service_disk['size']
            share_cluster_disk['lvm_config']['vg_name'] = \
                'vg_%s' % service_disk['service']
            share_cluster_disk['lvm_config']['lv_name'] = \
                'lv_%s' % service_disk['service']
            share_cluster_disk['lvm_config']['fs_type'] = 'ext4'
            disk_info.append(share_cluster_disk)
    return disk_info
def _get_cinder_volume_for_disk_array(req, role_id):
    cinder_volume_info = []
    cinder_volumes = tecs_cmn.get_cinder_volume_list(
        req, {'filters': {'role_id': role_id}})
    for cinder_volume in cinder_volumes:
        cv_info = {}
        cv_info['management_ips'] = \
            cinder_volume['management_ips'].split(',')
        cv_info['data_ips'] = cinder_volume['data_ips'].split(',')
        cv_info['user_name'] = cinder_volume['user_name']
        cv_info['user_pwd'] = cinder_volume['user_pwd']
        index = cinder_volume['backend_index']
        cv_info['backend'] = {index: {}}
        cv_info['backend'][index]['volume_driver'] = \
            cinder_volume['volume_driver']
        cv_info['backend'][index]['volume_type'] = \
            cinder_volume['volume_type']
        cv_info['backend'][index]['pools'] = \
            cinder_volume['pools'].split(',')
        cinder_volume_info.append(cv_info)
    return cinder_volume_info


def get_disk_array_info(req, cluster_id):
    share_disk_info = []
    share_cluster_disk_info = []
    volume_disk_info = {}
    cinder_volume_disk_list = []
    roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id)
@ -93,138 +133,183 @@ def get_disk_array_info(req, cluster_id):
            continue
        if role['name'] == 'CONTROLLER_HA':
            share_disks = _get_service_disk_for_disk_array(req, role['id'])
            share_cluster_disks = \
                _get_share_cluster_disk_for_disk_array(req, role['id'])
            share_disk_info += share_disks
            share_cluster_disk_info += share_cluster_disks
            cinder_volumes = \
                _get_cinder_volume_for_disk_array(req, role['id'])
            cinder_volume_disk_list += cinder_volumes
    if cinder_volume_disk_list:
        volume_disk_info['disk_array'] = cinder_volume_disk_list
    return (share_disk_info, volume_disk_info, share_cluster_disk_info)
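For orientation, a sketch of the shapes this trio of helpers produces, assembled exactly as in the loops above (all field values are hypothetical):

    share_disk_info = [
        {'service': 'glance', 'protocol_type': 'ISCSI', 'lun': '3',
         'data_ips': ['192.0.2.21'],
         'lvm_config': {'size': 100, 'vg_name': 'vg_glance',
                        'lv_name': 'lv_glance', 'fs_type': 'ext4'}}]
    share_cluster_disk_info = [
        {'service': 'db', 'protocol_type': 'FIBER', 'lun': '1',
         'fc_hba_wwpn': ['21000024ff123456'],
         'lvm_config': {'size': 200, 'vg_name': 'vg_db',
                        'lv_name': 'lv_db', 'fs_type': 'ext4'}}]
    volume_disk_info = {'disk_array': [
        {'management_ips': ['192.0.2.31'], 'data_ips': ['192.0.2.32'],
         'user_name': 'admin', 'user_pwd': 'secret',
         'backend': {'1': {'volume_driver': 'iscsi_driver',
                           'volume_type': 'default',
                           'pools': ['pool1']}}}]}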
def config_ha_share_disk(share_disk_info,
                         controller_ha_nodes,
                         share_cluster_disk_info=None):
    '''
    share_disk_info = \
        [{'service': 'db', 'lun': 'lun1', 'data_ips': 'data_ip1'},
         {'service': 'glance', 'lun': 'lun3', 'data_ips': 'data_ip3'}]
    share_cluster_disk_info = \
        [{'service': 'db', 'lun': 'lun1', 'data_ips': 'data_ip1', ...},
         {'service': 'db', 'lun': 'lun2', 'data_ips': 'data_ip2', ...},
         {'service': 'glance', 'lun': 'lun3', 'data_ips': 'data_ip3'},
         {'service': 'glance', 'lun': 'lun4', 'data_ips': 'data_ip4'}]
    controller_ha_nodes[host_ip] = min_mac
    '''
    sorted_db_share_cluster = []
    if share_cluster_disk_info:
        db_share_cluster_disk = \
            [disk for disk in share_cluster_disk_info
             if disk['service'] == 'db']
        if len(db_share_cluster_disk) != 2:
            error_msg = 'share cluster disks: %s must exist as a pair.' % \
                db_share_cluster_disk
            LOG.error(error_msg)
            raise exception.InstallException(error_msg)
        sorted_db_share_cluster = \
            sorted(db_share_cluster_disk, key=lambda s: s['lun'])

    sorted_ha_nodes = \
        sorted(controller_ha_nodes.iteritems(), key=lambda d: d[1])
    sorted_ha_nodes_ip = [node[0] for node in sorted_ha_nodes]

    all_share_disk_info = []
    if sorted_db_share_cluster:
        all_share_disk_info = \
            [[disk] + share_disk_info for disk in sorted_db_share_cluster]
        # all_share_disk_info = \
        #     [[{'lun': 'lun1', 'service': 'db', 'data_ips': 'data_ip1'},
        #       {'lun': 'lun3', 'service': 'glance', 'data_ips': 'data_ip3'}],
        #      [{'lun': 'lun2', 'service': 'db', 'data_ips': 'data_ip2'},
        #       {'lun': 'lun3', 'service': 'glance', 'data_ips': 'data_ip3'}]]
    else:
        for index in range(len(sorted_ha_nodes)):
            all_share_disk_info.append(share_disk_info)
        # all_share_disk_info = \
        #     [{'lun': 'lun3', 'service': 'glance', 'data_ips': 'data_ip3'},
        #      {'lun': 'lun3', 'service': 'glance', 'data_ips': 'data_ip3'}]

    cmd = 'rm -rf /var/lib/daisy/tecs/storage_auto_config/base/*.json'
    daisy_cmn.subprocess_call(cmd)
    for (host_ip, share_disk) in zip(sorted_ha_nodes_ip, all_share_disk_info):
        with open("/var/lib/daisy/tecs/storage_auto_config/base/control.json",
                  "w") as fp:
            json.dump(share_disk, fp, indent=2)
        try:
            subprocess.check_output(
                'scp -o StrictHostKeyChecking=no -r '
                '/var/lib/daisy/tecs/storage_auto_config '
                '%s:/home/tecs_install' % (host_ip,),
                shell=True, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError:
            error_msg = "scp /var/lib/daisy/tecs/storage_auto_config " \
                        "to %s failed!" % host_ip
            raise exception.InstallException(error_msg)
        try:
            LOG.info(_("Config share disk for host %s" % host_ip))
            cmd = "cd /home/tecs_install/storage_auto_config/; " \
                  "python storage_auto_config.py share_disk %s" \
                  % controller_ha_nodes[host_ip]
            subprocess.check_output(
                'clush -S -w %s "%s"' % (host_ip, cmd),
                shell=True, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            LOG.info(_("Storage script error message: %s" % e.output))
            error_msg = "config Disk Array share disks " \
                        "on %s failed!" % host_ip
            raise exception.InstallException(error_msg)
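A sketch of the per-node pairing performed above (hypothetical LUNs and IPs): each HA node, ordered by MAC, receives its own db LUN plus the common share disks via the zip:

    share = [{'service': 'glance', 'lun': '3'}]
    db_pair = [{'service': 'db', 'lun': '2'}, {'service': 'db', 'lun': '1'}]
    sorted_db = sorted(db_pair, key=lambda s: s['lun'])
    all_share_disk_info = [[disk] + share for disk in sorted_db]
    for ip, disks in zip(['192.0.2.11', '192.0.2.12'], all_share_disk_info):
        print(ip, [d['lun'] for d in disks])
    # 192.0.2.11 ['1', '3']
    # 192.0.2.12 ['2', '3']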
def config_ha_cinder_volume(volume_disk_info, controller_ha_ips):
    cmd = 'rm -rf /var/lib/daisy/tecs/storage_auto_config/base/*.json'
    daisy_cmn.subprocess_call(cmd)
    with open("/var/lib/daisy/tecs/storage_auto_config/base/cinder.json",
              "w") as fp:
        json.dump(volume_disk_info, fp, indent=2)
    for host_ip in controller_ha_ips:
        try:
            subprocess.check_output(
                'scp -o StrictHostKeyChecking=no -r '
                '/var/lib/daisy/tecs/storage_auto_config '
                '%s:/home/tecs_install' % (host_ip,),
                shell=True, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError:
            error_msg = "scp /var/lib/daisy/tecs/storage_auto_config " \
                        "to %s failed!" % host_ip
            raise exception.InstallException(error_msg)
        try:
            LOG.info(_("Config cinder volume for host %s" % host_ip))
            cmd = 'cd /home/tecs_install/storage_auto_config/; ' \
                  'python storage_auto_config.py cinder_conf %s' % host_ip
            subprocess.check_output(
                'clush -S -w %s "%s"' % (host_ip, cmd),
                shell=True, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            LOG.info(_("Storage script error message: %s" % e.output))
            error_msg = "config Disk Array cinder volumes " \
                        "on %s failed!" % host_ip
            raise exception.InstallException(error_msg)
def config_compute_multipath(hosts_ip):
    for host_ip in hosts_ip:
        try:
            subprocess.check_output(
                'scp -o StrictHostKeyChecking=no -r '
                '/var/lib/daisy/tecs/storage_auto_config '
                '%s:/home/tecs_install' % (host_ip,),
                shell=True, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError:
            error_msg = "scp /var/lib/daisy/tecs/storage_auto_config " \
                        "to %s failed!" % host_ip
            raise exception.InstallException(error_msg)
        try:
            LOG.info(_("Config multipath for host %s" % host_ip))
            cmd = 'cd /home/tecs_install/storage_auto_config/; ' \
                  'python storage_auto_config.py check_multipath'
            subprocess.check_output(
                'clush -S -w %s "%s"' % (host_ip, cmd),
                shell=True, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            LOG.info(_("Storage script error message: %s" % e.output))
            error_msg = "config Disk Array multipath " \
                        "on %s failed!" % host_ip
            raise exception.InstallException(error_msg)
File diff suppressed because it is too large
@ -17,31 +17,12 @@
"""
/hosts endpoint for Daisy v1 API
"""
import subprocess

from oslo_log import log as logging

from daisy import i18n

import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.tecs.common as tecs_cmn

LOG = logging.getLogger(__name__)
_ = i18n._
@ -51,9 +32,11 @@ _LW = i18n._LW
tecs_state = tecs_cmn.TECS_STATE tecs_state = tecs_cmn.TECS_STATE
def update_progress_to_db(req, role_id_list, status, hosts_list, host_ip=None): def update_progress_to_db(req, role_id_list, status, hosts_list, host_ip=None):
""" """
Write uninstall progress and status to db, we use global lock object 'uninstall_mutex' Write uninstall progress and status to db,
we use global lock object 'uninstall_mutex'
to make sure this function is thread safety. to make sure this function is thread safety.
:param req: http req. :param req: http req.
:param role_id_list: Column neeb be update in role table. :param role_id_list: Column neeb be update in role table.
@ -74,14 +57,15 @@ def update_progress_to_db(req, role_id_list, status, hosts_list,host_ip=None):
role_host_meta['progress'] = 10 role_host_meta['progress'] = 10
role_host_meta['messages'] = 'TECS uninstalling' role_host_meta['messages'] = 'TECS uninstalling'
if 0 == cmp(status, tecs_state['UNINSTALL_FAILED']): if 0 == cmp(status, tecs_state['UNINSTALL_FAILED']):
role_host_meta['messages'] = 'TECS uninstalled failed' role_host_meta[
elif 0 == cmp(status, tecs_state['ACTIVE']): 'messages'] = 'TECS uninstalled failed'
elif 0 == cmp(status, tecs_state['INIT']):
role_host_meta['progress'] = 100 role_host_meta['progress'] = 100
role_host_meta['messages'] = 'TECS uninstalled successfully' role_host_meta[
'messages'] = 'TECS uninstalled successfully'
if role_host_meta: if role_host_meta:
role_host_meta['status'] = status role_host_meta['status'] = status
daisy_cmn.update_role_host(req, daisy_cmn.update_role_host(req, role_host['id'],
role_host['id'],
role_host_meta) role_host_meta)
else: else:
role = {} role = {}
@ -103,18 +87,19 @@ def update_progress_to_db(req, role_id_list, status, hosts_list,host_ip=None):
if role: if role:
role['status'] = status role['status'] = status
daisy_cmn.update_role(req, role_id, role) daisy_cmn.update_role(req, role_id, role)
if 0 == cmp(status, tecs_state['INIT']):
daisy_cmn.delete_role_hosts(req, role_id)
def _thread_bin(req, host_ip, role_id_list, hosts_list): def _thread_bin(req, host_ip, role_id_list, hosts_list):
# uninstall network-configuration-1.1.1-15.x86_64.rpm # uninstall network-configuration-1.1.1-15.x86_64.rpm
update_progress_to_db(req,role_id_list,tecs_state['UNINSTALLING'],hosts_list,host_ip) update_progress_to_db(
req, role_id_list, tecs_state['UNINSTALLING'], hosts_list, host_ip)
tecs_cmn.TecsShellExector(host_ip, 'uninstall_rpm') tecs_cmn.TecsShellExector(host_ip, 'uninstall_rpm')
cmd = 'mkdir -p /var/log/daisy/daisy_uninstall/' cmd = 'mkdir -p /var/log/daisy/daisy_uninstall/'
daisy_cmn.subprocess_call(cmd) daisy_cmn.subprocess_call(cmd)
password = "ossdbg1" password = "ossdbg1"
var_log_path = "/var/log/daisy/daisy_uninstall/%s_uninstall_tecs.log" % host_ip var_log_path = "/var/log/daisy/daisy_uninstall/\
%s_uninstall_tecs.log" % host_ip
with open(var_log_path, "w+") as fp: with open(var_log_path, "w+") as fp:
cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password) cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password)
daisy_cmn.subprocess_call(cmd, fp) daisy_cmn.subprocess_call(cmd, fp)
@ -124,11 +109,15 @@ def _thread_bin(req, host_ip, role_id_list,hosts_list):
daisy_cmn.subprocess_call(cmd, fp) daisy_cmn.subprocess_call(cmd, fp)
try: try:
scp_bin_result = subprocess.check_output( subprocess.check_output(
'clush -S -w %s -c /var/lib/daisy/tecs/ZXTECS*.bin --dest=/home/daisy_uninstall' % (host_ip,), 'clush -S -w %s -c /var/lib/daisy/tecs/ZXTECS*.bin \
--dest=/home/daisy_uninstall' % (
host_ip,),
shell=True, stderr=subprocess.STDOUT) shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
update_progress_to_db(req, role_id_list, tecs_state['UNINSTALL_FAILED'],hosts_list,host_ip) update_progress_to_db(
req, role_id_list, tecs_state[
'UNINSTALL_FAILED'], hosts_list, host_ip)
LOG.error(_("scp TECS bin for %s failed!" % host_ip)) LOG.error(_("scp TECS bin for %s failed!" % host_ip))
fp.write(e.output.strip()) fp.write(e.output.strip())
@ -137,17 +126,23 @@ def _thread_bin(req, host_ip, role_id_list,hosts_list):
try: try:
exc_result = subprocess.check_output( exc_result = subprocess.check_output(
'clush -S -w %s /home/daisy_uninstall/ZXTECS*.bin clean' % (host_ip,), 'clush -S -w %s /home/daisy_uninstall/ZXTECS*.bin clean' % (
host_ip,),
shell=True, stderr=subprocess.STDOUT) shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
update_progress_to_db(req, role_id_list, tecs_state['UNINSTALL_FAILED'],hosts_list,host_ip) update_progress_to_db(
req, role_id_list, tecs_state[
'UNINSTALL_FAILED'], hosts_list, host_ip)
LOG.error(_("Uninstall TECS for %s failed!" % host_ip)) LOG.error(_("Uninstall TECS for %s failed!" % host_ip))
fp.write(e.output.strip()) fp.write(e.output.strip())
else: else:
update_progress_to_db(req, role_id_list, tecs_state['ACTIVE'], hosts_list,host_ip) update_progress_to_db(req, role_id_list, tecs_state['INIT'],
hosts_list, host_ip)
LOG.info(_("Uninstall TECS for %s successfully!" % host_ip)) LOG.info(_("Uninstall TECS for %s successfully!" % host_ip))
fp.write(exc_result) fp.write(exc_result)
# this will be raise raise all the exceptions of the thread to log file # this will be raise raise all the exceptions of the thread to log file
def thread_bin(req, host_ip, role_id_list, hosts_list): def thread_bin(req, host_ip, role_id_list, hosts_list):
try: try:
_thread_bin(req, host_ip, role_id_list, hosts_list) _thread_bin(req, host_ip, role_id_list, hosts_list)
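The thread_bin/_thread_bin split at the end of this file exists so that exceptions raised inside a daemonized worker thread are caught and logged instead of vanishing with the thread. A self-contained sketch of the same pattern, with invented names:

import logging
import threading

LOG = logging.getLogger(__name__)


def _worker(host_ip):
    # Real uninstall work would go here; any exception propagates up.
    raise RuntimeError("uninstall failed on %s" % host_ip)


def worker(host_ip):
    # Daemon threads die silently, so the wrapper logs every exception,
    # mirroring the thread_bin/_thread_bin split above.
    try:
        _worker(host_ip)
    except Exception:
        LOG.exception("worker for %s raised", host_ip)


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    t = threading.Thread(target=worker, args=('192.0.2.10',))
    t.setDaemon(True)
    t.start()
    t.join()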
View File
@ -17,30 +17,10 @@
/update endpoint for Daisy v1 API
"""
import subprocess

from oslo_log import log as logging

from daisy import i18n

import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.tecs.common as tecs_cmn
@ -53,6 +33,7 @@ _LW = i18n._LW
tecs_state = tecs_cmn.TECS_STATE


def update_progress_to_db(req, role_id_list, status, hosts_list, host_ip=None):
    """
    Write update progress and status to db,
@ -79,7 +60,8 @@ def update_progress_to_db(req,role_id_list,status,hosts_list,host_ip=None):
                    role_host_meta['messages'] = 'TECS upgraded failed'
                elif 0 == cmp(status, tecs_state['ACTIVE']):
                    role_host_meta['progress'] = 100
                    role_host_meta[
                        'messages'] = 'TECS upgraded successfully'
                if role_host_meta:
                    role_host_meta['status'] = status
                    daisy_cmn.update_role_host(req,
@ -89,6 +71,8 @@ def update_progress_to_db(req,role_id_list,status,hosts_list,host_ip=None):
            role = {}
            if 0 == cmp(status, tecs_state['UPDATING']):
                for role_host in role_hosts:
                    if role_host['status'] == tecs_state['INSTALL_FAILED']:
                        continue
                    role_host_meta = {}
                    role_host_meta['status'] = status
                    role_host_meta['progress'] = 0
@ -107,9 +91,11 @@ def update_progress_to_db(req,role_id_list,status,hosts_list,host_ip=None):
                role['status'] = status
                daisy_cmn.update_role(req, role_id, role)


def thread_bin(req, role_id_list, host_ip, hosts_list):
    # update network-configuration-1.1.1-15.x86_64.rpm
    update_progress_to_db(
        req, role_id_list, tecs_state['UPDATING'], hosts_list, host_ip)
    cmd = 'mkdir -p /var/log/daisy/daisy_update/'
    daisy_cmn.subprocess_call(cmd)
    password = "ossdbg1"
@ -117,35 +103,43 @@ def thread_bin(req,role_id_list, host_ip,hosts_list):
    with open(var_log_path, "w+") as fp:
        cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password)
        daisy_cmn.subprocess_call(cmd, fp)
        cmd = 'clush -S -w %s "mkdir -p /home/tecs_update/"' % (host_ip,)
        daisy_cmn.subprocess_call(cmd, fp)
        cmd = 'clush -S -b -w %s "rm -rf /home/tecs_update/ZXTECS*.bin"' % (
            host_ip,)
        daisy_cmn.subprocess_call(cmd, fp)
        tecs_cmn.TecsShellExector(host_ip, 'update_rpm')
        try:
            subprocess.check_output(
                'clush -S -w %s -c /var/lib/daisy/tecs/ZXTECS*.bin \
--dest=/home/tecs_update' % (
                    host_ip,),
                shell=True, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            update_progress_to_db(
                req, role_id_list, tecs_state[
                    'UPDATE_FAILED'], hosts_list, host_ip)
            LOG.error(_("scp TECS bin for %s failed!" % host_ip))
            fp.write(e.output.strip())
            return 1
        cmd = 'clush -S -w %s "chmod 777 /home/tecs_update/*"' % (host_ip,)
        daisy_cmn.subprocess_call(cmd, fp)
        try:
            exc_result = subprocess.check_output(
                'clush -S -w %s "/home/tecs_update/ZXTECS*.bin upgrade"' % (
                    host_ip,),
                shell=True, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            update_progress_to_db(
                req, role_id_list, tecs_state[
                    'UPDATE_FAILED'], hosts_list, host_ip)
            LOG.error(_("Update TECS for %s failed!" % host_ip))
            fp.write(e.output.strip())
            return 2
        else:
            update_progress_to_db(
                req, role_id_list, tecs_state['ACTIVE'], hosts_list, host_ip)
            fp.write(exc_result)
            return 0
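thread_bin above reports its outcome through return codes: 1 when copying the TECS bin fails, 2 when the upgrade itself fails, 0 on success. A hypothetical caller-side mapping of that contract (the function and host names here are invented, not from the commit):

def interpret(code):
    # 0/1/2 contract used by thread_bin above.
    return {0: 'upgraded', 1: 'scp of TECS bin failed',
            2: 'upgrade script failed'}[code]


def fake_thread_bin(host_ip):
    # Stand-in for thread_bin(req, role_id_list, host_ip, hosts_list);
    # pretend the host upgraded cleanly.
    return 0


if __name__ == '__main__':
    for ip in ['192.0.2.10', '192.0.2.11']:
        print(ip, interpret(fake_thread_bin(ip)))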
View File
@ -0,0 +1,142 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
/install endpoint for tecs API
"""
import daisy.registry.client.v1.api as registry
import daisy.api.backends.common as daisy_cmn
from daisy.common import utils


def _write_role_configs_to_db(req, cluster_id, role_name, configs):
    config_meta = {'cluster': cluster_id,
                   'role': role_name,
                   'config': configs}
    registry.config_interface_metadata(req.context,
                                       config_meta)


def _write_host_configs_to_db(req, host_id, configs):
    config_meta = {'host_id': host_id,
                   'config': configs}
    registry.config_interface_metadata(req.context,
                                       config_meta)


def _get_config_item(file, section, key, value, description):
    return {'file-name': file,
            'section': section,
            'key': key,
            'value': value,
            'description': description}


def _add_configs_for_nova(req, host_detail):
    config_file = '/etc/nova/nova.conf'
    default_section = 'DEFAULT'
    key_name = 'vcpu_pin_set'
    key_value = host_detail.get(key_name)
    config_items = []
    if not key_value:
        key_value = host_detail.get('isolcpus')
    nova_key_name = key_name
    description = 'vcpu pin set for all vm'
    item = _get_config_item(config_file,
                            default_section,
                            nova_key_name,
                            key_value,
                            description)
    config_items.append(item)

    key_name = 'dvs_high_cpuset'
    key_value = host_detail.get(key_name)
    nova_key_name = 'dvs_high_cpu_set'
    description = 'vcpu pin set for high-performance dvs vm'
    item = _get_config_item(config_file,
                            default_section,
                            nova_key_name,
                            key_value,
                            description)
    config_items.append(item)

    numa_cpus = utils.get_numa_node_cpus(host_detail.get('cpu', {}))
    numa_nodes = utils.get_numa_node_from_cpus(numa_cpus, key_value)
    if numa_nodes:
        libvirt_section = 'libvirt'
        nova_key_name = 'reserved_huge_pages'
        # only support one NUMA node for DVS now
        key_value = 'node:%s,size:1048576,count:4' % numa_nodes[0]
        description = 'reserved huges for DVS service '\
                      'on high NUMA node'
        config_items.append({'file-name': config_file,
                             'key': nova_key_name,
                             'section': libvirt_section,
                             'value': key_value,
                             'description': description})

    key_name = 'pci_high_cpuset'
    pci_key_value = host_detail.get(key_name)
    nova_key_name = 'vsg_card_cpu_set'
    description = 'vcpu pin set for high-performance CLC card vm'
    item = _get_config_item(config_file,
                            default_section,
                            nova_key_name,
                            pci_key_value,
                            description)
    config_items.append(item)

    if pci_key_value:
        nova_key_name = 'default_ephemeral_format'
        description = 'config for CLC card'
        key_value = 'ext3'
        item = _get_config_item(config_file,
                                default_section,
                                nova_key_name,
                                key_value,
                                description)
        config_items.append(item)

        nova_key_name = 'pci_passthrough_whitelist'
        description = 'config for CLC card'
        key_value = '[{"vendor_id": "8086","product_id": "0435"}]'
        item = _get_config_item(config_file,
                                default_section,
                                nova_key_name,
                                key_value,
                                description)
        config_items.append(item)

    _write_host_configs_to_db(req,
                              host_detail['id'],
                              config_items)


def update_configset(req, cluster_id):
    roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id)
    for role in roles:
        # now only computer has configs
        if role['name'] != 'COMPUTER':
            continue
        role_meta = {'config_set_update_progress': 0}
        daisy_cmn.update_role(req, role['id'], role_meta)
        role_hosts = daisy_cmn.get_hosts_of_role(req, role['id'])
        for host in role_hosts:
            host_detail = daisy_cmn.get_host_detail(req, host['host_id'])
            _add_configs_for_nova(req, host_detail)
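Each entry produced by _get_config_item is a flat dict keyed by file, section, key, value and description, which is what config_interface_metadata receives. A standalone illustration with a fabricated host_detail (values are examples only):

def get_config_item(file_name, section, key, value, description):
    # Same shape as _get_config_item above.
    return {'file-name': file_name,
            'section': section,
            'key': key,
            'value': value,
            'description': description}


host_detail = {'vcpu_pin_set': '4-15'}  # fabricated host
item = get_config_item('/etc/nova/nova.conf',
                       'DEFAULT',
                       'vcpu_pin_set',
                       host_detail.get('vcpu_pin_set'),
                       'vcpu pin set for all vm')
print(item)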
View File
@ -16,46 +16,21 @@
"""
/install endpoint for zenic API
"""
import time

from oslo_log import log as logging

import threading

from daisy import i18n

from daisy.common import exception
from daisy.api.backends import driver
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.zenic.common as zenic_cmn
import daisy.api.backends.zenic.install as instl
import daisy.api.backends.zenic.uninstall as unstl
import daisy.api.backends.zenic.upgrade as upgrd

LOG = logging.getLogger(__name__)
_ = i18n._
@ -65,6 +40,7 @@ _LW = i18n._LW
zenic_state = zenic_cmn.ZENIC_STATE


class API(driver.DeploymentDriver):

    def __init__(self):
@ -82,13 +58,16 @@ class API(driver.DeploymentDriver):
        # instl.pxe_server_build(req, install_meta)
        # get hosts config which need to install OS
        # hosts_need_os = instl.get_cluster_hosts_config(req, cluster_id)
        # if have hosts need to install os, ZENIC installation executed
        # in OSInstallTask
        # if hosts_need_os:
        # os_install_obj = instl.OSInstallTask(req, cluster_id, hosts_need_os)
        # os_install_thread = Thread(target=os_install_obj.run)
        # os_install_thread.start()
        # else:
        LOG.info(
            _("No host need to install os, begin install ZENIC for cluster %s."
              % cluster_id))
        zenic_install_task = instl.ZENICInstallTask(req, cluster_id)
        zenic_install_task.start()
@ -105,18 +84,22 @@ class API(driver.DeploymentDriver):
        :raises HTTPBadRequest if x-install-cluster is missing
        """
        (role_id_list, hosts_list) = zenic_cmn.get_roles_and_hosts_list(
            req, cluster_id)
        if role_id_list:
            if not hosts_list:
                msg = _("there is no host in cluster %s") % cluster_id
                raise exception.ThreadBinException(msg)

            unstl.update_progress_to_db(
                req, role_id_list, zenic_state['UNINSTALLING'], 0.0)
            uninstall_progress_percentage =\
                round(1 * 1.0 / len(hosts_list), 2) * 100
            threads = []
            for host in hosts_list:
                t = threading.Thread(target=unstl.thread_bin, args=(
                    req, host, role_id_list, uninstall_progress_percentage))
                t.setDaemon(True)
                t.start()
                threads.append(t)
@ -132,15 +115,19 @@ class API(driver.DeploymentDriver):
            for role_id in role_id_list:
                role = daisy_cmn.get_role_detail(req, role_id)
                if role['progress'] == 100:
                    unstl.update_progress_to_db(
                        req, role_id_list, zenic_state['UNINSTALL_FAILED'])
                    uninstall_failed_flag = True
                    break
                if role['status'] == zenic_state['UNINSTALL_FAILED']:
                    uninstall_failed_flag = True
                    break
            if not uninstall_failed_flag:
                LOG.info(
                    _("all uninstall threads have done,\
                       set all roles status to 'init'!"))
                unstl.update_progress_to_db(
                    req, role_id_list, zenic_state['INIT'])
        LOG.info((_("begin uninstall zenic, please waiting....")))
        time.sleep(5)
@ -155,17 +142,20 @@ class API(driver.DeploymentDriver):
        :raises HTTPBadRequest if x-install-cluster is missing
        """
        (role_id_list, hosts_list) = zenic_cmn.get_roles_and_hosts_list(
            req, cluster_id)
        if not hosts_list:
            msg = _("there is no host in cluster %s") % cluster_id
            raise exception.ThreadBinException(msg)
        upgrd.update_progress_to_db(
            req, role_id_list, zenic_state['UPDATING'], 0.0)
        update_progress_percentage = round(1 * 1.0 / len(hosts_list), 2) * 100
        threads = []
        for host in hosts_list:
            t = threading.Thread(target=upgrd.thread_bin, args=(
                req, host, role_id_list, update_progress_percentage))
            t.setDaemon(True)
            t.start()
            threads.append(t)
@ -181,14 +171,16 @@ class API(driver.DeploymentDriver):
        for role_id in role_id_list:
            role = daisy_cmn.get_role_detail(req, role_id)
            if role['progress'] == 0:
                upgrd.update_progress_to_db(
                    req, role_id_list, zenic_state['UPDATE_FAILED'])
                update_failed_flag = True
                break
            if role['status'] == zenic_state['UPDATE_FAILED']:
                update_failed_flag = True
                break
        if not update_failed_flag:
            LOG.info(
                _("all update threads have done, \
                   set all roles status to 'active'!"))
            upgrd.update_progress_to_db(
                req, role_id_list, zenic_state['ACTIVE'])
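Both the uninstall and update paths here fan out one daemon thread per host, join them all, and size each host's progress contribution as round(1 * 1.0 / len(hosts_list), 2) * 100. A stripped-down sketch of that fan-out/join shape (worker and hosts are placeholders, not from the commit):

import threading


def deploy(hosts, worker):
    # Each host contributes an equal share of overall progress, computed
    # exactly as in the methods above.
    step = round(1 * 1.0 / len(hosts), 2) * 100
    threads = []
    for host in hosts:
        t = threading.Thread(target=worker, args=(host, step))
        t.setDaemon(True)
        t.start()
        threads.append(t)
    for t in threads:
        t.join()  # only after every worker finishes is role status judged


if __name__ == '__main__':
    deploy(['192.0.2.10', '192.0.2.11'],
           lambda host, step: None)  # no-op worker, for illustration only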
View File
@ -19,33 +19,16 @@
import os
import copy
import subprocess

from oslo_log import log as logging
from webob.exc import HTTPBadRequest

from daisy import i18n

from daisy.common import exception
import daisy.registry.client.v1.api as registry
import daisy.api.backends.common as daisy_cmn

LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
@ -64,6 +47,7 @@ ZENIC_STATE = {
    'UPDATE_FAILED': 'update-failed',
}


def get_cluster_hosts(req, cluster_id):
    try:
        cluster_hosts = registry.get_cluster_hosts(req.context, cluster_id)
@ -71,6 +55,7 @@ def get_cluster_hosts(req, cluster_id):
        raise HTTPBadRequest(explanation=e.msg, request=req)
    return cluster_hosts


def get_host_detail(req, host_id):
    try:
        host_detail = registry.get_host_metadata(req.context, host_id)
@ -78,6 +63,7 @@ def get_host_detail(req, host_id):
        raise HTTPBadRequest(explanation=e.msg, request=req)
    return host_detail


def get_roles_detail(req):
    try:
        roles = registry.get_roles_detail(req.context)
@ -85,6 +71,7 @@ def get_roles_detail(req):
        raise HTTPBadRequest(explanation=e.msg, request=req)
    return roles


def get_hosts_of_role(req, role_id):
    try:
        hosts = registry.get_role_host_metadata(req.context, role_id)
@ -92,6 +79,7 @@ def get_hosts_of_role(req, role_id):
        raise HTTPBadRequest(explanation=e.msg, request=req)
    return hosts


def get_role_detail(req, role_id):
    try:
        role = registry.get_role_metadata(req.context, role_id)
@ -99,24 +87,28 @@ def get_role_detail(req, role_id):
        raise HTTPBadRequest(explanation=e.msg, request=req)
    return role


def update_role(req, role_id, role_meta):
    try:
        registry.update_role_metadata(req.context, role_id, role_meta)
    except exception.Invalid as e:
        raise HTTPBadRequest(explanation=e.msg, request=req)


def update_role_host(req, role_id, role_host):
    try:
        registry.update_role_host_metadata(req.context, role_id, role_host)
    except exception.Invalid as e:
        raise HTTPBadRequest(explanation=e.msg, request=req)


def delete_role_hosts(req, role_id):
    try:
        registry.delete_role_host_metadata(req.context, role_id)
    except exception.Invalid as e:
        raise HTTPBadRequest(explanation=e.msg, request=req)


def _get_cluster_network(cluster_networks, network_type):
    network = [cn for cn in cluster_networks
               if cn['name'] in network_type]
@ -126,21 +118,25 @@ def _get_cluster_network(cluster_networks, network_type):
    else:
        return network[0]


def get_host_interface_by_network(host_detail, network_type):
    host_detail_info = copy.deepcopy(host_detail)
    interface_list = [hi for hi in host_detail_info['interfaces']
                      for assigned_network in hi['assigned_networks']
                      if assigned_network and
                      network_type == assigned_network['name']]
    interface = {}
    if interface_list:
        interface = interface_list[0]

    if not interface:
        msg = "network %s of host %s is not exist" % (
            network_type, host_detail_info['id'])
        raise exception.InvalidNetworkConfig(msg)

    return interface


def get_host_network_ip(req, host_detail, cluster_networks, network_type):
    interface_network_ip = ''
    host_interface = get_host_interface_by_network(host_detail, network_type)
@ -152,22 +148,28 @@ def get_host_network_ip(req, host_detail, cluster_networks, network_type):
                interface_network_ip = assigned_network['ip']

    if not interface_network_ip:
        msg = "%s network ip of host %s can't be empty" % (
            network_type, host_detail['id'])
        raise exception.InvalidNetworkConfig(msg)
    return interface_network_ip


def get_deploy_node_cfg(req, host_detail, cluster_networks):
    host_deploy_network = get_host_interface_by_network(
        host_detail, 'DEPLOYMENT')
    host_deploy_ip = get_host_network_ip(
        req, host_detail, cluster_networks, 'DEPLOYMENT')
    if not host_deploy_ip:
        msg = "deployment ip of host %s can't be empty" % host_detail['id']
        raise exception.InvalidNetworkConfig(msg)
    host_deploy_macname = host_deploy_network['name']
    if not host_deploy_macname:
        msg = "deployment macname of host %s can't be empty" % host_detail[
            'id']
        raise exception.InvalidNetworkConfig(msg)

    host_mgt_ip = get_host_network_ip(
        req, host_detail, cluster_networks, 'MANAGEMENT')
    if not host_mgt_ip:
        msg = "management ip of host %s can't be empty" % host_detail['id']
        raise exception.InvalidNetworkConfig(msg)
@ -175,8 +177,11 @@ def get_deploy_node_cfg(req, host_detail, cluster_networks):
    memmode = 'tiny'
    host_memory = 0

    # if host_detail.has_key('memory'):
    if 'memory' in host_detail:
        host_memory = (
            int(host_detail['memory'][
                'total'].strip().split()[0])) / (1024 * 1024)

    if host_memory < 8:
        memmode = 'tiny'
@ -187,7 +192,6 @@ def get_deploy_node_cfg(req, host_detail, cluster_networks):
    else:
        memmode = 'large'
    deploy_node_cfg = {}
    deploy_node_cfg.update({'hostid': host_detail['id']})
    deploy_node_cfg.update({'hostname': host_detail['name']})
@ -197,6 +201,7 @@ def get_deploy_node_cfg(req, host_detail, cluster_networks):
    deploy_node_cfg.update({'mgtip': host_mgt_ip})
    return deploy_node_cfg


def get_roles_and_hosts_list(req, cluster_id):
    roles_id_list = set()
    hosts_id_list = set()
@ -212,7 +217,8 @@ def get_roles_and_hosts_list(req, cluster_id):
        for role_host in role_hosts:
            if role_host['host_id'] not in hosts_id_list:
                host = daisy_cmn.get_host_detail(req, role_host['host_id'])
                host_ip = get_host_network_ip(
                    req, host, cluster_networks, 'MANAGEMENT')
                hosts_id_list.add(host['id'])

                host_cfg = {}
@ -224,6 +230,7 @@ def get_roles_and_hosts_list(req, cluster_id):
    return (roles_id_list, hosts_list)


def check_and_get_zenic_version(daisy_zenic_pkg_path):
    zenic_version_pkg_file = ""
    zenic_version_pkg_name = ""
@ -240,10 +247,13 @@ def check_and_get_zenic_version(daisy_zenic_pkg_path):
        daisy_cmn.subprocess_call(chmod_for_zenic_version)
    return (zenic_version_pkg_file, zenic_version_pkg_name)


class ZenicShellExector():
    """
    Class config task before install zenic bin.
    """

    def __init__(self, mgt_ip, task_type, params={}):
        self.task_type = task_type
        self.mgt_ip = mgt_ip
@ -259,7 +269,8 @@ class ZenicShellExector():
            'CMD_SSHPASS_PRE': "sshpass -p ossdbg1 %(ssh_ip)s %(cmd)s",
            'CMD_CFG_SCP': "scp %(path)s root@%(ssh_ip)s:/etc/zenic/config" %
            {'path': self.CFG_PATH, 'ssh_ip': mgt_ip},
            'CMD_PKG_UNZIP': "unzip /home/workspace/%(pkg_name)s \
                -d /home/workspace/PKG" % {'pkg_name': self.PKG_NAME},
            'CMD_PKG_SCP': "scp %(path)s root@%(ssh_ip)s:/home/workspace/" %
            {'path': self.PKG_PATH, 'ssh_ip': mgt_ip}
        }
@ -277,24 +288,32 @@ class ZenicShellExector():
        self.clush_cmd = "%s;%s;%s" % \
            (self.oper_shell['CMD_SSHPASS_PRE'] %
             {"ssh_ip": "", "cmd": self.oper_shell['CMD_PKG_SCP']},
             self.oper_shell['CMD_SSHPASS_PRE'] %
             {"ssh_ip": "", "cmd": self.oper_shell['CMD_CFG_SCP']},
             self.oper_shell['CMD_SSHPASS_PRE'] %
             {"ssh_ip": "ssh " + self.mgt_ip, "cmd": self.oper_shell[
                 'CMD_PKG_UNZIP']})

        subprocess.check_output(
            self.clush_cmd, shell=True, stderr=subprocess.STDOUT)

    def _execute(self):
        try:
            if not self.task_type or not self.mgt_ip:
                LOG.error(
                    _("<<<ZenicShellExector::execute, \
                       input params invalid!>>>"))
                return

            self.oper_type[self.task_type]()
        except subprocess.CalledProcessError as e:
            LOG.warn(
                _("<<<ZenicShellExector::execute:Execute command failed! Reason\
                   :%s>>>" % e.output.strip()))
        except Exception as e:
            LOG.exception(_(e.message))
        else:
            LOG.info(
                _("<<<ZenicShellExector::execute:Execute command:\
                   %s,successful!>>>" % self.clush_cmd))
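get_host_interface_by_network walks every interface's assigned_networks and returns the first interface carrying the requested network. The comprehension is easier to read against a small fabricated host_detail (addresses and names below are made up):

host_detail = {
    'interfaces': [
        {'name': 'eth0',
         'assigned_networks': [{'name': 'MANAGEMENT', 'ip': '192.0.2.10'}]},
        {'name': 'eth1',
         'assigned_networks': [{'name': 'DEPLOYMENT', 'ip': '198.51.100.10'}]},
    ]
}

network_type = 'DEPLOYMENT'
interface_list = [hi for hi in host_detail['interfaces']
                  for assigned_network in hi['assigned_networks']
                  if assigned_network and
                  network_type == assigned_network['name']]
print(interface_list[0]['name'])  # -> eth1, first match wins as above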
View File
@ -1,20 +1,16 @@
# -*- coding: utf-8 -*-
import os
from ConfigParser import ConfigParser

default_zenic_conf_template_path = "/var/lib/daisy/zenic/"
zenic_conf_template_path = default_zenic_conf_template_path


def update_conf(zenic, key, value):
    zenic.set("general", key, value)


def get_conf(zenic_conf_file, **kwargs):
    result = {}
    if not kwargs:
@ -29,6 +25,7 @@ def get_conf(zenic_conf_file, **kwargs):
              if zenic.has_option("general", kwargs.get(key, None))}
    return result


def get_nodeid(deploy_ip, zbp_ips):
    nodeid = 0
    i = 0
@ -85,7 +82,6 @@ def update_zenic_conf(config_data, cluster_conf_path):
    update_conf(zenic, 'zampips', zampips)
    update_conf(zenic, 'zamp_node_num', config_data['zamp_node_num'])
    mongodbips = ''
    for ip in config_data['mongodb_ips']:
        if not mongodbips:
@ -98,7 +94,6 @@ def update_zenic_conf(config_data, cluster_conf_path):
    update_conf(zenic, 'zamp_vip', config_data['zamp_vip'])
    update_conf(zenic, 'mongodb_vip', config_data['mongodb_vip'])
    deploy_hosts = config_data['deploy_hosts']
    for deploy_host in deploy_hosts:
        nodeip = deploy_host['nodeip']
@ -124,12 +119,12 @@ def update_zenic_conf(config_data, cluster_conf_path):
    zenic_conf_out = os.path.join(daisy_zenic_path, zenic_conf)
    zenic.write(open(zenic_conf_cluster_out, "w+"))
    with open(zenic_conf_cluster_out, 'r') as fr,\
            open(zenic_conf_out, 'w') as fw:
        for line in fr.readlines():
            fw.write(line.replace(' ', ''))
    return


def test():
    print("Hello, world!")
View File
@ -16,43 +16,23 @@
""" """
/install endpoint for zenic API /install endpoint for zenic API
""" """
import os
import copy
import subprocess import subprocess
import time import time
import traceback
import webob.exc
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden
from webob.exc import HTTPServerError
from threading import Thread, Lock
import threading import threading
from daisy import i18n from daisy import i18n
from daisy import notifier
from daisy.api import policy
import daisy.api.v1 import daisy.api.v1
from daisy.common import exception from daisy.common import exception
import daisy.registry.client.v1.api as registry
from daisy.api.backends.zenic import config from daisy.api.backends.zenic import config
from daisy.api.backends import driver
from daisy.api.network_api import network as neutron
from ironicclient import client as ironic_client
import daisy.api.backends.common as daisy_cmn import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.zenic.common as zenic_cmn import daisy.api.backends.zenic.common as zenic_cmn
try:
import simplejson as json
except ImportError:
import json
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
_ = i18n._ _ = i18n._
_LE = i18n._LE _LE = i18n._LE
@ -88,9 +68,12 @@ daisy_zenic_path = zenic_cmn.daisy_zenic_path
install_zenic_progress = 0.0 install_zenic_progress = 0.0
install_mutex = threading.Lock() install_mutex = threading.Lock()
def update_progress_to_db(req, role_id_list, status, progress_percentage_step=0.0):
def update_progress_to_db(req, role_id_list,
status, progress_percentage_step=0.0):
""" """
Write install progress and status to db, we use global lock object 'install_mutex' Write install progress and status to db,
we use global lock object 'install_mutex'
to make sure this function is thread safety. to make sure this function is thread safety.
:param req: http req. :param req: http req.
:param role_id_list: Column neeb be update in role table. :param role_id_list: Column neeb be update in role table.
@ -115,21 +98,26 @@ def update_progress_to_db(req, role_id_list, status, progress_percentage_step=0.
daisy_cmn.update_role(req, role_id, role) daisy_cmn.update_role(req, role_id, role)
install_mutex.release() install_mutex.release()
def _ping_hosts_test(ips): def _ping_hosts_test(ips):
ping_cmd = 'fping' ping_cmd = 'fping'
for ip in set(ips): for ip in set(ips):
ping_cmd = ping_cmd + ' ' + ip ping_cmd = ping_cmd + ' ' + ip
obj = subprocess.Popen(ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) obj = subprocess.Popen(
ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdoutput, erroutput) = obj.communicate() (stdoutput, erroutput) = obj.communicate()
_returncode = obj.returncode _returncode = obj.returncode
if _returncode == 0 or _returncode == 1: if _returncode == 0 or _returncode == 1:
ping_result = stdoutput.split('\n') ping_result = stdoutput.split('\n')
unreachable_hosts = [result.split()[0] for result in ping_result if result and result.split()[2] != 'alive'] unreachable_hosts = [result.split(
)[0] for result in ping_result if result and
result.split()[2] != 'alive']
else: else:
msg = "ping failed beaceuse there is invlid ip in %s" % ips msg = "ping failed beaceuse there is invlid ip in %s" % ips
raise exception.InvalidIP(msg) raise exception.InvalidIP(msg)
return unreachable_hosts return unreachable_hosts
def _check_ping_hosts(ping_ips, max_ping_times): def _check_ping_hosts(ping_ips, max_ping_times):
if not ping_ips: if not ping_ips:
LOG.info(_("no ip got for ping test")) LOG.info(_("no ip got for ping test"))
@ -145,9 +133,11 @@ def _check_ping_hosts(ping_ips, max_ping_times):
ping_count += 1 ping_count += 1
if ips: if ips:
LOG.debug(_("ping host %s for %s times" % (','.join(ips), ping_count))) LOG.debug(
_("ping host %s for %s times" % (','.join(ips), ping_count)))
if ping_count >= max_ping_times: if ping_count >= max_ping_times:
LOG.info(_("ping host %s timeout for %ss" % (','.join(ips), ping_count*time_step))) LOG.info(_("ping host %s timeout for %ss" %
(','.join(ips), ping_count * time_step)))
return ips return ips
time.sleep(time_step) time.sleep(time_step)
else: else:
@ -159,9 +149,11 @@ def _check_ping_hosts(ping_ips, max_ping_times):
def _get_host_private_networks(host_detail, cluster_private_networks_name): def _get_host_private_networks(host_detail, cluster_private_networks_name):
host_private_networks = [hi for pn in cluster_private_networks_name host_private_networks = [hi for pn in cluster_private_networks_name
for hi in host_detail['interfaces'] if pn in hi['assigned_networks']] for hi in
host_detail['interfaces'] if pn in
# If port type is bond,use pci segment of member port replace pci1 & pci2 segments of bond port hi['assigned_networks']]
# If port type is bond,use pci segment of member port replace pci1 & pci2
# segments of bond port
for interface_outer in host_private_networks: for interface_outer in host_private_networks:
if 0 != cmp(interface_outer.get('type', None), "bond"): if 0 != cmp(interface_outer.get('type', None), "bond"):
continue continue
@ -180,7 +172,7 @@ def _get_host_private_networks(host_detail, cluster_private_networks_name):
def get_cluster_zenic_config(req, cluster_id): def get_cluster_zenic_config(req, cluster_id):
LOG.info(_("get zenic config from database...")) LOG.info(_("get zenic config from database..."))
params = dict(limit=1000000) # params = dict(limit=1000000)
zenic_config = {} zenic_config = {}
@ -201,7 +193,10 @@ def get_cluster_zenic_config(req, cluster_id):
all_roles = zenic_cmn.get_roles_detail(req) all_roles = zenic_cmn.get_roles_detail(req)
roles = [role for role in all_roles if (role['cluster_id'] == cluster_id and role['deployment_backend'] == daisy_cmn.zenic_backend_name)] roles = [role for role in all_roles if (role['cluster_id'] ==
cluster_id and role[
'deployment_backend'] ==
daisy_cmn.zenic_backend_name)]
for role in roles: for role in roles:
if not (role['name'] == 'ZENIC_CTL' or role['name'] == 'ZENIC_NFM'): if not (role['name'] == 'ZENIC_CTL' or role['name'] == 'ZENIC_NFM'):
continue continue
@ -220,13 +215,14 @@ def get_cluster_zenic_config(req, cluster_id):
deploy_ip = deploy_host['nodeip'] deploy_ip = deploy_host['nodeip']
break break
if not mgt_ip: if not mgt_ip:
host_detail = zenic_cmn.get_host_detail(req, role_host['host_id']) host_detail = zenic_cmn.get_host_detail(
deploy_host_cfg = zenic_cmn.get_deploy_node_cfg(req, host_detail, cluster_networks) req, role_host['host_id'])
deploy_host_cfg = zenic_cmn.get_deploy_node_cfg(
req, host_detail, cluster_networks)
deploy_hosts.append(deploy_host_cfg) deploy_hosts.append(deploy_host_cfg)
mgt_ip = deploy_host_cfg['mgtip'] mgt_ip = deploy_host_cfg['mgtip']
deploy_ip = deploy_host_cfg['nodeip'] deploy_ip = deploy_host_cfg['nodeip']
mgt_ip_list.add(mgt_ip) mgt_ip_list.add(mgt_ip)
if role['name'] == 'ZENIC_CTL': if role['name'] == 'ZENIC_CTL':
zbp_ip_list.add(deploy_ip) zbp_ip_list.add(deploy_ip)
@ -234,7 +230,9 @@ def get_cluster_zenic_config(req, cluster_id):
zamp_ip_list.add(deploy_ip) zamp_ip_list.add(deploy_ip)
mongodb_ip_list.add(deploy_ip) mongodb_ip_list.add(deploy_ip)
else: else:
LOG.warn(_("<<<Zenic Install role %s is invalid >>>" % role['name'])) LOG.warn(
_("<<<Zenic Install role %s is invalid >>>"
% role['name']))
zenic_config.update({'deploy_hosts': deploy_hosts}) zenic_config.update({'deploy_hosts': deploy_hosts})
zenic_config.update({'zbp_ips': zbp_ip_list}) zenic_config.update({'zbp_ips': zbp_ip_list})
@ -247,12 +245,14 @@ def get_cluster_zenic_config(req, cluster_id):
zenic_config.update({'mongodb_vip': mongodb_vip}) zenic_config.update({'mongodb_vip': mongodb_vip})
return (zenic_config, mgt_ip_list) return (zenic_config, mgt_ip_list)
def generate_zenic_config_file(cluster_id, zenic_config): def generate_zenic_config_file(cluster_id, zenic_config):
LOG.info(_("generate zenic config...")) LOG.info(_("generate zenic config..."))
if zenic_config: if zenic_config:
cluster_conf_path = daisy_zenic_path + cluster_id cluster_conf_path = daisy_zenic_path + cluster_id
config.update_zenic_conf(zenic_config, cluster_conf_path) config.update_zenic_conf(zenic_config, cluster_conf_path)
def thread_bin(req, host, role_id_list, pkg_name, install_progress_percentage): def thread_bin(req, host, role_id_list, pkg_name, install_progress_percentage):
host_ip = host['mgtip'] host_ip = host['mgtip']
password = host['rootpwd'] password = host['rootpwd']
@ -260,7 +260,8 @@ def thread_bin(req,host, role_id_list, pkg_name, install_progress_percentage):
cmd = 'mkdir -p /var/log/daisy/daisy_install/' cmd = 'mkdir -p /var/log/daisy/daisy_install/'
daisy_cmn.subprocess_call(cmd) daisy_cmn.subprocess_call(cmd)
var_log_path = "/var/log/daisy/daisy_install/%s_install_zenic.log" % host_ip var_log_path =\
"/var/log/daisy/daisy_install/%s_install_zenic.log" % host_ip
with open(var_log_path, "w+") as fp: with open(var_log_path, "w+") as fp:
cmd = '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password) cmd = '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password)
@ -282,17 +283,19 @@ def thread_bin(req,host, role_id_list, pkg_name, install_progress_percentage):
daisy_cmn.subprocess_call(cmd, fp) daisy_cmn.subprocess_call(cmd, fp)
pkg_file = daisy_zenic_path + pkg_name pkg_file = daisy_zenic_path + pkg_name
cmd = 'clush -S -b -w %s rm -rf /home/workspace/%s' % (host_ip,pkg_name) cmd = 'clush -S -b -w %s rm -rf /home/workspace/%s' % (
host_ip, pkg_name)
daisy_cmn.subprocess_call(cmd, fp) daisy_cmn.subprocess_call(cmd, fp)
cfg_file = daisy_zenic_path + host_ip + "_zenic.conf" cfg_file = daisy_zenic_path + host_ip + "_zenic.conf"
try: try:
exc_result = subprocess.check_output( exc_result = subprocess.check_output(
'sshpass -p ossdbg1 scp %s root@%s:/etc/zenic/config' % (cfg_file,host_ip,), 'sshpass -p ossdbg1 scp %s root@%s:/etc/zenic/config' % (
cfg_file, host_ip,),
shell=True, stderr=fp) shell=True, stderr=fp)
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
update_progress_to_db(req, role_id_list, zenic_state['INSTALL_FAILED']) update_progress_to_db(
req, role_id_list, zenic_state['INSTALL_FAILED'])
LOG.info(_("scp zenic pkg for %s failed!" % host_ip)) LOG.info(_("scp zenic pkg for %s failed!" % host_ip))
fp.write(e.output.strip()) fp.write(e.output.strip())
exit() exit()
@ -300,13 +303,14 @@ def thread_bin(req,host, role_id_list, pkg_name, install_progress_percentage):
LOG.info(_("scp zenic config for %s successfully!" % host_ip)) LOG.info(_("scp zenic config for %s successfully!" % host_ip))
fp.write(exc_result) fp.write(exc_result)
try: try:
exc_result = subprocess.check_output( exc_result = subprocess.check_output(
'sshpass -p ossdbg1 scp %s root@%s:/home/workspace/' % (pkg_file,host_ip,), 'sshpass -p ossdbg1 scp %s root@%s:/home/workspace/' % (
pkg_file, host_ip,),
shell=True, stderr=fp) shell=True, stderr=fp)
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
update_progress_to_db(req, role_id_list, zenic_state['INSTALL_FAILED']) update_progress_to_db(
req, role_id_list, zenic_state['INSTALL_FAILED'])
LOG.info(_("scp zenic pkg for %s failed!" % host_ip)) LOG.info(_("scp zenic pkg for %s failed!" % host_ip))
fp.write(e.output.strip()) fp.write(e.output.strip())
exit() exit()
@ -314,15 +318,19 @@ def thread_bin(req,host, role_id_list, pkg_name, install_progress_percentage):
LOG.info(_("scp zenic pkg for %s successfully!" % host_ip)) LOG.info(_("scp zenic pkg for %s successfully!" % host_ip))
fp.write(exc_result) fp.write(exc_result)
cmd = 'clush -S -b -w %s unzip /home/workspace/%s -d /home/workspace/unipack' % (host_ip,pkg_name,) cmd = 'clush -S -b -w %s unzip /home/workspace/%s \
-d /home/workspace/unipack' % (
host_ip, pkg_name,)
daisy_cmn.subprocess_call(cmd) daisy_cmn.subprocess_call(cmd)
try: try:
exc_result = subprocess.check_output( exc_result = subprocess.check_output(
'clush -S -b -w %s /home/workspace/unipack/node_install.sh' % (host_ip,), 'clush -S -b -w %s /home/workspace/unipack/node_install.sh'
% (host_ip,),
shell=True, stderr=subprocess.STDOUT) shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
update_progress_to_db(req, role_id_list, zenic_state['INSTALL_FAILED']) update_progress_to_db(
req, role_id_list, zenic_state['INSTALL_FAILED'])
LOG.info(_("install zenic for %s failed!" % host_ip)) LOG.info(_("install zenic for %s failed!" % host_ip))
fp.write(e.output.strip()) fp.write(e.output.strip())
exit() exit()
@ -335,16 +343,21 @@ def thread_bin(req,host, role_id_list, pkg_name, install_progress_percentage):
'clush -S -b -w %s /home/zenic/node_start.sh' % (host_ip,), 'clush -S -b -w %s /home/zenic/node_start.sh' % (host_ip,),
shell=True, stderr=subprocess.STDOUT) shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
update_progress_to_db(req, role_id_list, zenic_state['INSTALL_FAILED']) update_progress_to_db(
req, role_id_list, zenic_state['INSTALL_FAILED'])
LOG.info(_("start zenic for %s failed!" % host_ip)) LOG.info(_("start zenic for %s failed!" % host_ip))
fp.write(e.output.strip()) fp.write(e.output.strip())
exit() exit()
else: else:
update_progress_to_db(req, role_id_list, zenic_state['INSTALLING'], install_progress_percentage) update_progress_to_db(
req, role_id_list, zenic_state['INSTALLING'],
install_progress_percentage)
LOG.info(_("start zenic for %s successfully!" % host_ip)) LOG.info(_("start zenic for %s successfully!" % host_ip))
fp.write(exc_result) fp.write(exc_result)
class ZENICInstallTask(Thread): class ZENICInstallTask(Thread):
""" """
Class for install tecs bin. Class for install tecs bin.
""" """
@ -371,9 +384,6 @@ class ZENICInstallTask(Thread):
self.ping_times = 36 self.ping_times = 36
self.log_file = "/var/log/daisy/zenic_%s_install.log" % self.cluster_id self.log_file = "/var/log/daisy/zenic_%s_install.log" % self.cluster_id
def run(self): def run(self):
try: try:
self._run() self._run()
@ -392,7 +402,8 @@ class ZENICInstallTask(Thread):
def _run(self): def _run(self):
(zenic_config, self.mgt_ip_list) = get_cluster_zenic_config(self.req, self.cluster_id) (zenic_config, self.mgt_ip_list) = get_cluster_zenic_config(
self.req, self.cluster_id)
if not self.mgt_ip_list: if not self.mgt_ip_list:
msg = _("there is no host in cluster %s") % self.cluster_id msg = _("there is no host in cluster %s") % self.cluster_id
@ -406,22 +417,28 @@ class ZENICInstallTask(Thread):
generate_zenic_config_file(self.cluster_id, zenic_config) generate_zenic_config_file(self.cluster_id, zenic_config)
# check and get ZENIC version # check and get ZENIC version
(zenic_version_pkg_file,zenic_version_pkg_name) = zenic_cmn.check_and_get_zenic_version(daisy_zenic_path) (zenic_version_pkg_file, zenic_version_pkg_name) =\
zenic_cmn.check_and_get_zenic_version(
daisy_zenic_path)
if not zenic_version_pkg_file: if not zenic_version_pkg_file:
self.state = zenic_state['INSTALL_FAILED'] self.state = zenic_state['INSTALL_FAILED']
self.message = "ZENIC version file not found in %s" % daisy_zenic_path self.message = \
"ZENIC version file not found in %s" % daisy_zenic_path
raise exception.NotFound(message=self.message) raise exception.NotFound(message=self.message)
(role_id_list, hosts_list) = zenic_cmn.get_roles_and_hosts_list(self.req, self.cluster_id) (role_id_list, hosts_list) = zenic_cmn.get_roles_and_hosts_list(
self.req, self.cluster_id)
update_progress_to_db(self.req, role_id_list, zenic_state['INSTALLING'], 0.0) update_progress_to_db(
self.req, role_id_list, zenic_state['INSTALLING'], 0.0)
install_progress_percentage = round(1 * 1.0 / len(hosts_list), 2) * 100 install_progress_percentage = round(1 * 1.0 / len(hosts_list), 2) * 100
threads = [] threads = []
for host in hosts_list: for host in hosts_list:
t = threading.Thread(target=thread_bin,args=(self.req,host,role_id_list,zenic_version_pkg_name,install_progress_percentage)) t = threading.Thread(target=thread_bin, args=(
self.req, host, role_id_list,
zenic_version_pkg_name, install_progress_percentage))
t.setDaemon(True) t.setDaemon(True)
t.start() t.start()
threads.append(t) threads.append(t)
@@ -437,14 +454,16 @@ class ZENICInstallTask(Thread):
for role_id in role_id_list: for role_id in role_id_list:
role = daisy_cmn.get_role_detail(self.req, role_id) role = daisy_cmn.get_role_detail(self.req, role_id)
if role['progress'] == 0: if role['progress'] == 0:
update_progress_to_db(self.req, role_id_list, zenic_state['INSTALL_FAILED']) update_progress_to_db(
self.req, role_id_list, zenic_state['INSTALL_FAILED'])
install_failed_flag = True install_failed_flag = True
break break
if role['status'] == zenic_state['INSTALL_FAILED']: if role['status'] == zenic_state['INSTALL_FAILED']:
install_failed_flag = True install_failed_flag = True
break break
if not install_failed_flag: if not install_failed_flag:
LOG.info(_("all install threads have done, set all roles status to 'active'!")) LOG.info(
update_progress_to_db(self.req, role_id_list, zenic_state['ACTIVE']) _("all install threads have done, \
set all roles status to 'active'!"))
update_progress_to_db(
self.req, role_id_list, zenic_state['ACTIVE'])
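The hunk above fans installation out to one worker thread per host, with each finished host adding an equal share of progress before the task polls role status. A minimal standalone sketch of that pattern, with fake_install() standing in for thread_bin() (the names here are illustrative, not Daisy APIs):

import threading

def fake_install(host, results):
    # Stand-in for thread_bin(): pretend the host installed cleanly.
    results[host] = 'ok'

def install_all(hosts):
    # Same per-host share as above: round(1/len(hosts), 2) * 100.
    per_host_step = round(1 * 1.0 / len(hosts), 2) * 100
    results = {}
    threads = []
    for host in hosts:
        t = threading.Thread(target=fake_install, args=(host, results))
        t.setDaemon(True)  # a hung host must not block process exit
        t.start()
        threads.append(t)
    for t in threads:
        t.join()  # wait for every per-host worker, as the task does
    return per_host_step, results

print(install_all(['10.0.0.1', '10.0.0.2']))  # (50.0, {...})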
@@ -17,30 +17,12 @@
/hosts endpoint for Daisy v1 API /hosts endpoint for Daisy v1 API
""" """
import os
import webob.exc
import subprocess import subprocess
from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden
from threading import Thread, Lock
import threading import threading
from daisy import i18n from daisy import i18n
from daisy import notifier
from daisy.api import policy
import daisy.api.v1
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
from daisy.api.v1 import controller
from daisy.api.v1 import filters
from daisy.api.backends.zenic.common import ZenicShellExector
import daisy.api.backends.common as daisy_cmn import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.zenic.common as zenic_cmn import daisy.api.backends.zenic.common as zenic_cmn
@@ -55,9 +37,12 @@ zenic_state = zenic_cmn.ZENIC_STATE
uninstall_zenic_progress = 100.0 uninstall_zenic_progress = 100.0
uninstall_mutex = threading.Lock() uninstall_mutex = threading.Lock()
def update_progress_to_db(req, role_id_list, status, progress_percentage_step=0.0):
def update_progress_to_db(req, role_id_list, status,
progress_percentage_step=0.0):
""" """
Write uninstall progress and status to db, we use global lock object 'uninstall_mutex' Write uninstall progress and status to db,
we use global lock object 'uninstall_mutex'
to make sure this function is thread-safe. to make sure this function is thread-safe.
:param req: http req. :param req: http req.
:param role_id_list: Columns that need to be updated in the role table. :param role_id_list: Columns that need to be updated in the role table.
@@ -82,12 +67,14 @@ def update_progress_to_db(req, role_id_list, status, progress_percentage_step=0.
daisy_cmn.update_role(req, role_id, role) daisy_cmn.update_role(req, role_id, role)
uninstall_mutex.release() uninstall_mutex.release()
def thread_bin(req, host, role_id_list, uninstall_progress_percentage): def thread_bin(req, host, role_id_list, uninstall_progress_percentage):
host_ip = host['mgtip'] host_ip = host['mgtip']
password = host['rootpwd'] password = host['rootpwd']
cmd = 'mkdir -p /var/log/daisy/daisy_uninstall/' cmd = 'mkdir -p /var/log/daisy/daisy_uninstall/'
daisy_cmn.subprocess_call(cmd) daisy_cmn.subprocess_call(cmd)
var_log_path = "/var/log/daisy/daisy_uninstall/%s_uninstall_zenic.log" % host_ip var_log_path =\
"/var/log/daisy/daisy_uninstall/%s_uninstall_zenic.log" % host_ip
with open(var_log_path, "w+") as fp: with open(var_log_path, "w+") as fp:
cmd = '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password) cmd = '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password)
daisy_cmn.subprocess_call(cmd, fp) daisy_cmn.subprocess_call(cmd, fp)
@@ -97,10 +84,11 @@ def thread_bin(req, host, role_id_list,uninstall_progress_percentage):
'clush -S -b -w %s /home/zenic/node_stop.sh' % (host_ip,), 'clush -S -b -w %s /home/zenic/node_stop.sh' % (host_ip,),
shell=True, stderr=subprocess.STDOUT) shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
update_progress_to_db(req, role_id_list, zenic_state['UNINSTALL_FAILED']) update_progress_to_db(
req, role_id_list, zenic_state['UNINSTALL_FAILED'])
fp.write(e.output.strip()) fp.write(e.output.strip())
else: else:
update_progress_to_db(req, role_id_list, zenic_state['UNINSTALLING'], uninstall_progress_percentage) update_progress_to_db(
req, role_id_list, zenic_state['UNINSTALLING'],
uninstall_progress_percentage)
fp.write(exc_result) fp.write(exc_result)
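The docstring above describes serializing progress updates behind a global lock. A runnable sketch of that guard, with a plain dict standing in for the role table (all names are illustrative):

import threading

uninstall_mutex = threading.Lock()
roles = {'role-1': {'status': 'UNINSTALLING', 'progress': 0.0}}

def update_progress(role_id, status, step=0.0):
    uninstall_mutex.acquire()
    try:
        # Only one thread mutates the shared record at a time.
        roles[role_id]['status'] = status
        roles[role_id]['progress'] += step
    finally:
        uninstall_mutex.release()  # release even if the update raises

update_progress('role-1', 'UNINSTALLING', 50.0)
print(roles)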
@@ -17,30 +17,13 @@
/update endpoint for Daisy v1 API /update endpoint for Daisy v1 API
""" """
import os
import webob.exc
import subprocess import subprocess
from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden
from threading import Thread, Lock
import threading import threading
from daisy import i18n from daisy import i18n
from daisy import notifier
from daisy.api import policy
import daisy.api.v1
from daisy.common import exception from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
from daisy.api.v1 import controller
from daisy.api.v1 import filters
from daisy.api.backends.zenic.common import ZenicShellExector
import daisy.api.backends.common as daisy_cmn import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.zenic.common as zenic_cmn import daisy.api.backends.zenic.common as zenic_cmn
@@ -57,9 +40,12 @@ daisy_zenic_path = zenic_cmn.daisy_zenic_path
update_zenic_progress = 0.0 update_zenic_progress = 0.0
update_mutex = threading.Lock() update_mutex = threading.Lock()
def update_progress_to_db(req, role_id_list, status, progress_percentage_step=0.0):
def update_progress_to_db(req, role_id_list, status,
progress_percentage_step=0.0):
""" """
Write update progress and status to db, we use global lock object 'update_mutex' Write update progress and status to db,
we use global lock object 'update_mutex'
to make sure this function is thread-safe. to make sure this function is thread-safe.
:param req: http req. :param req: http req.
:param role_id_list: Columns that need to be updated in the role table. :param role_id_list: Columns that need to be updated in the role table.
@@ -87,11 +73,13 @@ def update_progress_to_db(req, role_id_list, status, progress_percentage_step=0.
def thread_bin(req, host, role_id_list, update_progress_percentage): def thread_bin(req, host, role_id_list, update_progress_percentage):
(zenic_version_pkg_file,zenic_version_pkg_name) = zenic_cmn.check_and_get_zenic_version(daisy_zenic_path) (zenic_version_pkg_file, zenic_version_pkg_name) = \
zenic_cmn.check_and_get_zenic_version(
daisy_zenic_path)
if not zenic_version_pkg_file: if not zenic_version_pkg_file:
self.state = zenic_state['INSTALL_FAILED'] # selfstate = zenic_state['INSTALL_FAILED']
self.message = "ZENIC version file not found in %s" % daisy_zenic_path selfmessage = "ZENIC version file not found in %s" % daisy_zenic_path
raise exception.NotFound(message=self.message) raise exception.NotFound(message=selfmessage)
host_ip = host['mgtip'] host_ip = host['mgtip']
password = host['rootpwd'] password = host['rootpwd']
@@ -99,15 +87,16 @@ def thread_bin(req, host,role_id_list,update_progress_percentage):
cmd = 'mkdir -p /var/log/daisy/daisy_upgrade/' cmd = 'mkdir -p /var/log/daisy/daisy_upgrade/'
daisy_cmn.subprocess_call(cmd) daisy_cmn.subprocess_call(cmd)
var_log_path = "/var/log/daisy/daisy_upgrade/%s_upgrade_zenic.log" % host_ip var_log_path = \
"/var/log/daisy/daisy_upgrade/%s_upgrade_zenic.log" % host_ip
with open(var_log_path, "w+") as fp: with open(var_log_path, "w+") as fp:
cmd = '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password) cmd = '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password)
daisy_cmn.subprocess_call(cmd, fp) daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -b -w %s /home/zenic/node_stop.sh' % (host_ip,) cmd = 'clush -S -b -w %s /home/zenic/node_stop.sh' % (host_ip,)
daisy_cmn.subprocess_call(cmd, fp) daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -b -w %s rm -rf /home/workspace/%s' % (
cmd = 'clush -S -b -w %s rm -rf /home/workspace/%s' % (host_ip,zenic_version_pkg_name) host_ip, zenic_version_pkg_name)
daisy_cmn.subprocess_call(cmd, fp) daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -b -w %s rm -rf /home/workspace/unipack' % (host_ip,) cmd = 'clush -S -b -w %s rm -rf /home/workspace/unipack' % (host_ip,)
@@ -115,10 +104,12 @@ def thread_bin(req, host,role_id_list,update_progress_percentage):
try: try:
exc_result = subprocess.check_output( exc_result = subprocess.check_output(
'sshpass -p ossdbg1 scp %s root@%s:/home/workspace/' % (zenic_version_pkg_file,host_ip,), 'sshpass -p ossdbg1 scp %s root@%s:/home/workspace/' % (
zenic_version_pkg_file, host_ip,),
shell=True, stderr=fp) shell=True, stderr=fp)
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
update_progress_to_db(req, role_id_list, zenic_state['INSTALL_FAILED']) update_progress_to_db(
req, role_id_list, zenic_state['INSTALL_FAILED'])
LOG.info(_("scp zenic pkg for %s failed!" % host_ip)) LOG.info(_("scp zenic pkg for %s failed!" % host_ip))
fp.write(e.output.strip()) fp.write(e.output.strip())
exit() exit()
@@ -126,19 +117,24 @@ def thread_bin(req, host,role_id_list,update_progress_percentage):
LOG.info(_("scp zenic pkg for %s successfully!" % host_ip)) LOG.info(_("scp zenic pkg for %s successfully!" % host_ip))
fp.write(exc_result) fp.write(exc_result)
cmd = 'clush -S -b -w %s unzip /home/workspace/%s -d /home/workspace/unipack' % (host_ip,zenic_version_pkg_name,) cmd = 'clush -S -b -w %s unzip /home/workspace/%s \
-d /home/workspace/unipack' % (host_ip, zenic_version_pkg_name,)
daisy_cmn.subprocess_call(cmd) daisy_cmn.subprocess_call(cmd)
try: try:
exc_result = subprocess.check_output( exc_result = subprocess.check_output(
'clush -S -b -w %s /home/workspace/unipack/node_upgrade.sh' % (host_ip,), 'clush -S -b -w %s /home/workspace/unipack/node_upgrade.sh'
% (host_ip,),
shell=True, stderr=subprocess.STDOUT) shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
update_progress_to_db(req, role_id_list, zenic_state['UPDATE_FAILED']) update_progress_to_db(
req, role_id_list, zenic_state['UPDATE_FAILED'])
LOG.info(_("Upgrade zenic for %s failed!" % host_ip)) LOG.info(_("Upgrade zenic for %s failed!" % host_ip))
fp.write(e.output.strip()) fp.write(e.output.strip())
else: else:
update_progress_to_db(req, role_id_list, zenic_state['UPDATING'], update_progress_percentage) update_progress_to_db(
req, role_id_list, zenic_state['UPDATING'],
update_progress_percentage)
LOG.info(_("Upgrade zenic for %s successfully!" % host_ip)) LOG.info(_("Upgrade zenic for %s successfully!" % host_ip))
fp.write(exc_result) fp.write(exc_result)
@@ -147,12 +143,13 @@ def thread_bin(req, host,role_id_list,update_progress_percentage):
'clush -S -b -w %s /home/zenic/node_start.sh' % (host_ip,), 'clush -S -b -w %s /home/zenic/node_start.sh' % (host_ip,),
shell=True, stderr=subprocess.STDOUT) shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
update_progress_to_db(req, role_id_list, zenic_state['UPDATE_FAILED']) update_progress_to_db(
req, role_id_list, zenic_state['UPDATE_FAILED'])
LOG.info(_("Start zenic for %s failed!" % host_ip)) LOG.info(_("Start zenic for %s failed!" % host_ip))
fp.write(e.output.strip()) fp.write(e.output.strip())
else: else:
update_progress_to_db(req, role_id_list, zenic_state['UPDATING'], update_progress_percentage) update_progress_to_db(
req, role_id_list, zenic_state['UPDATING'],
update_progress_percentage)
LOG.info(_("Start zenic for %s successfully!" % host_ip)) LOG.info(_("Start zenic for %s successfully!" % host_ip))
fp.write(exc_result) fp.write(exc_result)
@@ -218,3 +218,9 @@ def get_thread_pool(lock_name, size=1024):
return wsgi.get_asynchronous_eventlet_pool(size=size) return wsgi.get_asynchronous_eventlet_pool(size=size)
return _get_thread_pool return _get_thread_pool
def get_pxe_mac(host_detail):
pxe_macs = [interface['mac'] for interface in host_detail['interfaces']
if interface['is_deployment']]
return pxe_macs
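A usage sketch for get_pxe_mac() above, with a hand-built host_detail dict shaped like the registry metadata it expects (MAC values invented):

host_detail = {
    'interfaces': [
        {'mac': 'a0:b1:c2:d3:e4:f5', 'is_deployment': True},   # PXE NIC
        {'mac': 'a0:b1:c2:d3:e4:f6', 'is_deployment': False},  # data NIC
    ],
}

pxe_macs = [interface['mac'] for interface in host_detail['interfaces']
            if interface['is_deployment']]
print(pxe_macs)  # ['a0:b1:c2:d3:e4:f5']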
@@ -1,30 +1,29 @@
import subprocess import subprocess
import daisy.registry.client.v1.api as registry import daisy.registry.client.v1.api as registry
from daisy.api.backends.tecs import config from daisy.api.backends.tecs import config as role_service
from oslo_log import log as logging from oslo_log import log as logging
import webob.exc import webob.exc
from webob.exc import HTTPBadRequest
from daisy.common import exception
from daisy.common import utils
import daisy.api.backends.common as daisy_cmn
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
CONFIG_MAP = {
'cinder_config': '/etc/cinder/cinder.conf',
'cinder_api_paste_ini': '/etc/cinder/api-paste.ini',
'glance_api_config': '/etc/glance/glance-api.conf',
'glance_api_paste_ini': '/etc/glance/glance-api-paste.ini',
}
class config_clushshell(): class config_clushshell():
""" Class for clush backend.""" """ Class for clush backend."""
def __init__(self, req, role_id):
if not req and not role_id:
LOG.error("<<<config_clushshell:push_config input params is invalid.>>>")
return
def __init__(self, req):
self.context = req.context self.context = req.context
self.role_id = role_id
self.CLUSH_CMD = "clush -S -w %(management_ip)s \"%(sub_command)s\"" self.CLUSH_CMD = 'clush -S -w %(management_ip)s "%(sub_command)s"'
self.SUB_COMMAND = "openstack-config --set %(config_file)s %(section)s %(key)s %(value)s" self.SUB_COMMAND_SET = "openstack-config --set %(config_file)s"\
" %(section)s %(key)s '%(value)s'"
self.SUB_COMMAND_DEL = "openstack-config --del %(config_file)s"\
" %(section)s %(key)s"
def _openstack_set_config(self, host_ip, config_set): def _openstack_set_config(self, host_ip, config_set):
""" """
@@ -37,107 +36,259 @@ class config_clushshell():
LOG.debug('<<<FUN:_openstack_set_config input params invalid.>>>') LOG.debug('<<<FUN:_openstack_set_config input params invalid.>>>')
return return
sub_command_by_one_host = [] config_cmd = []
for config in config_set['config']: for config in config_set['config']:
if config['config_version'] == config['running_version']: if config['config_version'] == config['running_version']:
continue continue
config_file = registry.get_config_file_metadata(self.context, config['config_file_id']) config_file = registry.get_config_file_metadata(
sub_command_by_one_host.append( self.context, config['config_file_id'])
self.SUB_COMMAND % \ if config['value']:
{'config_file':config_file['name'] ,'section':config['section'], value = utils.translate_quotation_marks_for_shell(
'key':config['key'], 'value':config['value']}) config['value'])
config_cmd.append(self.SUB_COMMAND_SET %
{'config_file': config_file['name'],
'section': config['section'],
'key': config['key'],
'value': value})
else:
# if value is empty, delete or comment it.
config_cmd.append(self.SUB_COMMAND_DEL %
{'config_file': config_file['name'],
'section': config['section'],
'key': config['key']})
try: try:
sub_command_by_one_host = ";".join(sub_command_by_one_host) for cmd in config_cmd:
clush_cmd = self.CLUSH_CMD % {'management_ip':host_ip, 'sub_command':sub_command_by_one_host} clush_cmd = self.CLUSH_CMD % {
subprocess.check_output(clush_cmd, shell=True, stderr=subprocess.STDOUT) 'management_ip': host_ip, 'sub_command': cmd}
subprocess.check_output(
clush_cmd, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
msg = ("<<<Host %s excute clush failed:%s!>>>" % (host_ip, e.output.strip())) msg = ("<<<Host %s excute clush failed:%s.>>>" %
(host_ip, e.output.strip()))
LOG.exception(msg) LOG.exception(msg)
raise webob.exc.HTTPServerError(explanation=msg) raise webob.exc.HTTPServerError(explanation=msg)
else: else:
msg = ("<<<Host %s excute clush successful!>>>" % host_ip) msg = ("<<<Complete to push configs for host %s.>>>" % host_ip)
LOG.info(msg) LOG.info(msg)
config['running_version'] = config['config_version']
def push_config(self): # if push_status = None, we will push configs
# to all hosts in the role
def push_role_configs(self, role_id, push_status):
""" """
Push config to remote host. Push config to remote host.
:param req: http req :param req: http req
:param role_id: host role id :param role_id: host role id
:return: :return:
""" """
self.role_info = registry.get_role_metadata(self.context, self.role_id) role_info = registry.get_role_metadata(self.context, role_id)
if not self.role_info or not self.role_info.get('config_set_id'): if not role_info.get('config_set_id'):
LOG.error("<<<config_clushshell:push_config,get_role_metadata failed.>>>") LOG.info("<<<No config_set configed for role '%s'>>>"
% role_info['name'])
return return
config_set = registry.get_config_set_metadata(self.context, self.role_info['config_set_id']) config_set = registry.get_config_set_metadata(
if not config_set or not config_set.has_key('config'): self.context, role_info['config_set_id'])
LOG.info("<<<config_clushshell:push_config,get_config_set_metadata failed.>>>") if not config_set:
LOG.info("<<<Get config_set failed for role '%s'.>>>"
% role_info['name'])
return
else:
if 'config' not in config_set:
LOG.info("<<<No configs get for role '%s'.>>>"
% role_info['name'])
return return
config_set['config'] = \ config_set['config'] = [config for config in config_set['config']
[config for config in config_set['config'] if config.get('config_version', 0) !=
if config.has_key('config_version') and config.has_key('running_version') config.get('running_version', 0)]
and config['config_version'] != config['running_version']]
if not config_set['config']: if not config_set['config']:
LOG.info('<<<No config need to be modified, within the scope of the hosts in role_id:%s.>>>' % LOG.info("<<<No config need to push for role '%s'.>>>"
self.role_id) % role_info['name'])
return return
self.role_hosts = registry.get_role_host_metadata(self.context, self.role_id) self.role_hosts = registry.get_role_host_metadata(
self.context, role_id)
total_host_count = 0
if push_status:
for r_host in self.role_hosts:
if r_host['status'] == push_status:
total_host_count += 1
else:
total_host_count = len(self.role_hosts)
if total_host_count > 0:
LOG.info("Begin to push config for role '%s'"
% role_info['name'])
else:
return
current_count = 0 current_count = 0
all_host_config_sets = [] # all_host_config_sets = []
for role_host in self.role_hosts: for role_host in self.role_hosts:
host = registry.get_host_metadata(self.context, role_host['host_id']) host = registry.get_host_metadata(
#change by 10166727--------start------------- self.context, role_host['host_id'])
host_ip=[] if push_status and role_host['status'] != push_status:
LOG.debug("<<<Status of host '%s' is not '%s',"
" don't push configs.>>>"
% (role_host['host_id'], push_status))
continue
host_management_ip = ''
for interface in host['interfaces']: for interface in host['interfaces']:
find_flag=interface['ip'].find(':') if ('assigned_networks' in interface and
if find_flag<0: interface['assigned_networks']):
host_ip=[interface['ip']] for assigned_network in interface['assigned_networks']:
else: if (assigned_network['name'] == 'MANAGEMENT' and
ip_list_tmp=interface['ip'].split(",") 'ip' in assigned_network):
for ip_list in ip_list_tmp: host_management_ip = assigned_network['ip']
if ip_list.split(':')[0] == "MANAGEMENT":
host_ip=[str(ip_list.split(':')[1])]
#change by 10166727--------end---------------
if not host_ip:
continue
host_ip = host_ip[0]
if 0 != subprocess.call('/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, 'ossdbg1'), if not host_management_ip:
shell=True, msg = "Can't find management ip for host %s"\
stderr=subprocess.STDOUT): % role_host['host_id']
raise Exception("trustme.sh error!") raise HTTPBadRequest(explanation=msg)
if not config_set.has_key("config"):
continue
self._openstack_set_config(host_ip, config_set) root_passwd = 'ossdbg1'
all_host_config_sets.append(config_set) daisy_cmn.trust_me([host_management_ip], root_passwd)
registry.update_configs_metadata_by_role_hosts(self.context, all_host_config_sets)
LOG.debug("Update config for host:%s successfully!" % host_ip) self._openstack_set_config(host_management_ip, config_set)
self._role_service_restart(role_info, host_management_ip)
self._host_service_restart(host_ip)
current_count += 1 current_count += 1
self.role_info['config_set_update_progress'] = round(current_count*1.0/len(self.role_hosts), 2)*100 role_info['config_set_update_progress'] =\
registry.update_role_metadata(self.context, self.role_id, self.role_info) round(current_count * 1.0 / total_host_count, 2) * 100
registry.update_role_metadata(
self.context, role_id, role_info)
def _host_service_restart(self,host_ip): all_config_sets = []
""" """ for config in config_set['config']:
for service in self.role_info['service_name']: config['running_version'] = config['config_version']
for service_detail_name in config.service_map.get(service).split(','): all_config_sets.append(config_set)
cmd = "" registry.update_configs_metadata_by_role_hosts(
if self.role_info['name'] == "CONTROLLER_HA": self.context, all_config_sets)
cmd = "clush -S -w %s [ `systemctl is-active %s` != 'active' ] && systemctl restart %s" % \
(host_ip, service_detail_name, service_detail_name) def _host_service_restart(self, host_ip, components_name):
params = {'limit': '200', 'filters': {}}
try:
services = registry.get_services_detail(self.context,
**params)
components = registry.get_components_detail(self.context,
**params)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg)
components_id = [comp['id'] for comp in components
for comp_name in components_name
if comp['name'] == comp_name]
for service in services:
if service['component_id'] not in components_id:
continue
services_name = role_service.service_map.get(service['name'])
if not services_name:
msg = "Can't find service for '%s'" % service
raise HTTPBadRequest(explanation=msg)
for service_name in services_name.split(','):
active_service = "clush -S -w %s 'systemctl is-active\
%s'" % (host_ip, service_name)
if 0 == utils.simple_subprocess_call(active_service):
restart_service = "clush -S -w %s 'systemctl restart\
%s'" % (host_ip, service_name)
LOG.info("Restart service %s after pushing config"
% service_name)
if 0 != utils.simple_subprocess_call(restart_service):
msg = "Service %s restart failed on host '%s'."\
% (service_name, host_ip)
LOG.error(msg)
# For now there is no way to find the component id from the config file,
# so the caller must pass components_name in; this can be removed once
# the id can be derived automatically.
def push_host_configs(self, host_id, components_name):
"""
Push config to remote host.
:param req: http req
:param host_id: host id
:return:
"""
host_detail = registry.get_host_metadata(self.context, host_id)
if not host_detail.get('config_set_id'):
LOG.info("<<<No config_set configed for host '%s'.>>>"
% host_id)
return
config_set =\
registry.get_config_set_metadata(self.context,
host_detail['config_set_id'])
if not config_set:
LOG.info("<<<Get config_set failed for host '%s'.>>>"
% host_id)
return
else: else:
cmd = "clush -S -w %s systemctl restart %s" % (host_ip, service_detail_name) if 'config' not in config_set:
if 0 != subprocess.call(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE): LOG.info("<<<No configs get for host '%s'.>>>" % host_id)
LOG.error("Service %s restart failed in host:%s." % (service_detail_name, host_ip)) return
config_set['config'] = [config for config in config_set['config']
if config.get('config_version', 0) !=
config.get('running_version', 0)]
if not config_set['config']:
LOG.info("<<<No config need to push for host '%s'.>>>"
% host_id)
return
host_management_ip = ''
for interface in host_detail['interfaces']:
if ('assigned_networks' in interface and
interface['assigned_networks']):
for assigned_network in interface['assigned_networks']:
if (assigned_network['name'] == 'MANAGEMENT' and
'ip' in assigned_network):
host_management_ip = assigned_network['ip']
if not host_management_ip:
msg = "Can't find management ip for host %s"\
% host_detail['host_id']
raise HTTPBadRequest(explanation=msg)
root_passwd = 'ossdbg1'
daisy_cmn.trust_me([host_management_ip], root_passwd)
self._openstack_set_config(host_management_ip, config_set)
self._host_service_restart(host_management_ip, components_name)
all_config_sets = []
for config in config_set['config']:
config['running_version'] = config['config_version']
all_config_sets.append(config_set)
registry.update_configs_metadata_by_role_hosts(self.context,
all_config_sets)
def _role_service_restart(self, role_info, host_ip):
""" """
for service in role_info['service_name']:
services_name = role_service.service_map.get(service)
if not services_name:
msg = "Can't find service for '%s'" % service
raise HTTPBadRequest(explanation=msg)
for service_name in services_name.split(','):
active_service = "clush -S -w %s 'systemctl is-active\
%s'" % (host_ip, service_name)
if 0 == utils.simple_subprocess_call(active_service):
restart_service = "clush -S -w %s 'systemctl restart\
%s'" % (host_ip, service_name)
LOG.info("Restart service %s after pushing config"
% service_name)
if 0 != utils.simple_subprocess_call(restart_service):
msg = "Service %s restart failed on host '%s'."\
% (service_name, host_ip)
LOG.error(msg)
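The two templates introduced above expand into one clush invocation per changed config item. A sketch of that expansion with invented values (openstack-config and clush are the real tools being wrapped; nothing is executed here):

CLUSH_CMD = 'clush -S -w %(management_ip)s "%(sub_command)s"'
SUB_COMMAND_SET = ("openstack-config --set %(config_file)s"
                   " %(section)s %(key)s '%(value)s'")
SUB_COMMAND_DEL = ("openstack-config --del %(config_file)s"
                   " %(section)s %(key)s")

sub = SUB_COMMAND_SET % {'config_file': '/etc/nova/nova.conf',
                         'section': 'DEFAULT', 'key': 'debug',
                         'value': 'True'}
print(CLUSH_CMD % {'management_ip': '192.168.1.10', 'sub_command': sub})
# clush -S -w 192.168.1.10 "openstack-config --set /etc/nova/nova.conf DEFAULT debug 'True'"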
@@ -1,16 +1,24 @@
from daisy.api.configset.clush import config_clushshell from daisy.api.configset.clush import config_clushshell
class configBackend(): class configBackend():
def __init__(self, type, req, role_id):
def __init__(self, type, req):
self.type = type self.type = type
self._instance = None self._instance = None
if type == "clushshell": if type == "clushshell":
self._instance = config_clushshell(req, role_id) self._instance = config_clushshell(req)
elif type == "puppet": elif type == "puppet":
pass pass
def push_config(self): # if push_status = None, we will push configs
self._instance.push_config() # to all hosts in the role
def push_config_by_roles(self, role_ids, push_status=None):
for role_id in role_ids:
self._instance.push_role_configs(role_id, push_status)
def push_config_by_hosts(self, host_ids, component_names=[]):
for host_id in host_ids:
self._instance.push_host_configs(host_id,
component_names)
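A usage sketch for the backend above, with a stub in place of the real clush backend so it runs standalone; the role ids and the 'active' status value are invented:

class StubBackend(object):
    def push_role_configs(self, role_id, push_status):
        print('pushing configs for role %s (hosts in status %r only)'
              % (role_id, push_status))

def push_config_by_roles(instance, role_ids, push_status=None):
    # Mirrors configBackend.push_config_by_roles: one call per role.
    for role_id in role_ids:
        instance.push_role_configs(role_id, push_status)

push_config_by_roles(StubBackend(), ['role-1', 'role-2'], push_status='active')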
@@ -24,10 +24,12 @@ from neutronclient.v2_0 import client as clientv20
from daisy.common import exception from daisy.common import exception
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class network(object): class network(object):
""" """
network config network config
""" """
def __init__(self, req, neutron_host, keystone_host, cluster_id): def __init__(self, req, neutron_host, keystone_host, cluster_id):
registry.configure_registry_client() registry.configure_registry_client()
auth_url = 'http://' + keystone_host + ':35357/v2.0' auth_url = 'http://' + keystone_host + ':35357/v2.0'
@@ -49,8 +51,10 @@ class network(object):
except exception.Invalid as e: except exception.Invalid as e:
LOG.exception(e.msg) LOG.exception(e.msg)
raise HTTPBadRequest(explanation=e.msg, request=req) raise HTTPBadRequest(explanation=e.msg, request=req)
LOG.info("<<<CLUSTER:%s,NEUTRON HOST:%s,KEYSTOEN:%s>>>", cluster, neutron_host, keystone_host) LOG.info("<<<CLUSTER:%s,NEUTRON HOST:%s,KEYSTOEN:%s>>>",
if 'logic_networks' in cluster and cluster['logic_networks'] is not None: cluster, neutron_host, keystone_host)
if 'logic_networks' in cluster and cluster[
'logic_networks'] is not None:
self.nets = cluster['logic_networks'] self.nets = cluster['logic_networks']
# self._flat_network_uniqueness_check() # self._flat_network_uniqueness_check()
if 'routers' in cluster and cluster['routers'] is not None: if 'routers' in cluster and cluster['routers'] is not None:
@@ -83,7 +87,9 @@ class network(object):
for router in self.routers: for router in self.routers:
router_id = self._router_create(router['name']) router_id = self._router_create(router['name'])
if 'external_logic_network' in router: if 'external_logic_network' in router:
body = {'network_id': self.name_mappings[router['external_logic_network']]} body = {
'network_id': self.name_mappings[
router['external_logic_network']]}
self.neutron.add_gateway_router(router_id, body) self.neutron.add_gateway_router(router_id, body)
if 'subnets' in router: if 'subnets' in router:
for i in router['subnets']: for i in router['subnets']:
@@ -92,7 +98,8 @@ class network(object):
def _net_subnet_same_router_check(self, ex_network, subnet): def _net_subnet_same_router_check(self, ex_network, subnet):
for router in self.routers: for router in self.routers:
if 'external_logic_network' in router and router['external_logic_network'] == ex_network: if 'external_logic_network' in router and router[
'external_logic_network'] == ex_network:
if 'subnets' in router: if 'subnets' in router:
for i in router['subnets']: for i in router['subnets']:
if i == subnet: if i == subnet:
@@ -155,18 +162,25 @@ class network(object):
for net in self.nets: for net in self.nets:
body = {} body = {}
if net['type'] == 'external': if net['type'] == 'external':
body['network'] = {'name': net['name'], body['network'] = {
'name': net['name'],
'router:external': True, 'router:external': True,
'provider:network_type': net['segmentation_type']} 'provider:network_type': net['segmentation_type']}
if net['segmentation_type'].strip() == 'flat': if net['segmentation_type'].strip() == 'flat':
body['network']['provider:physical_network'] = net['physnet_name'] body['network']['provider:physical_network'] = net[
'physnet_name']
elif net['segmentation_type'].strip() == 'vxlan': elif net['segmentation_type'].strip() == 'vxlan':
if 'segmentation_id' in net and net['segmentation_id'] is not None: if 'segmentation_id' in net and net[
body['network']['provider:segmentation_id'] = net['segmentation_id'] 'segmentation_id'] is not None:
body['network']['provider:segmentation_id'] = net[
'segmentation_id']
else: else:
if 'segmentation_id' in net and net['segmentation_id'] is not None: if 'segmentation_id' in net and net[
body['network']['provider:segmentation_id'] = net['segmentation_id'] 'segmentation_id'] is not None:
body['network']['provider:physical_network'] = net['physnet_name'] body['network']['provider:segmentation_id'] = net[
'segmentation_id']
body['network']['provider:physical_network'] = net[
'physnet_name']
if net['shared']: if net['shared']:
body['network']['shared'] = True body['network']['shared'] = True
else: else:
@@ -175,21 +189,28 @@ class network(object):
self.name_mappings[net['name']] = external['network']['id'] self.name_mappings[net['name']] = external['network']['id']
last_create_subnet = [] last_create_subnet = []
for subnet in net['subnets']: for subnet in net['subnets']:
if self._net_subnet_same_router_check(net['name'], subnet['name']): if self._net_subnet_same_router_check(
net['name'], subnet['name']):
last_create_subnet.append(subnet) last_create_subnet.append(subnet)
else: else:
subnet_id = self._subnet_check_and_create(external['network']['id'], subnet) subnet_id = self._subnet_check_and_create(
external['network']['id'], subnet)
self.name_mappings[subnet['name']] = subnet_id self.name_mappings[subnet['name']] = subnet_id
for subnet in last_create_subnet: for subnet in last_create_subnet:
subnet_id = self._subnet_check_and_create(external['network']['id'], subnet) subnet_id = self._subnet_check_and_create(
external['network']['id'], subnet)
self.name_mappings[subnet['name']] = subnet_id self.name_mappings[subnet['name']] = subnet_id
else: else:
body['network'] = {'name': net['name'], body['network'] = {
'name': net['name'],
'provider:network_type': net['segmentation_type']} 'provider:network_type': net['segmentation_type']}
if net['segmentation_type'].strip() == 'vlan': if net['segmentation_type'].strip() == 'vlan':
body['network']['provider:physical_network'] = net['physnet_name'] body['network']['provider:physical_network'] = net[
if 'segmentation_id' in net and net['segmentation_id'] is not None: 'physnet_name']
body['network']['provider:segmentation_id'] = net['segmentation_id'] if 'segmentation_id' in net and net[
'segmentation_id'] is not None:
body['network']['provider:segmentation_id'] = net[
'segmentation_id']
if net['shared']: if net['shared']:
body['network']['shared'] = True body['network']['shared'] = True
else: else:
@@ -197,6 +218,7 @@ class network(object):
inner = self.neutron.create_network(body) inner = self.neutron.create_network(body)
self.name_mappings[net['name']] = inner['network']['id'] self.name_mappings[net['name']] = inner['network']['id']
for subnet in net['subnets']: for subnet in net['subnets']:
subnet_id = self._subnet_check_and_create(inner['network']['id'], subnet) subnet_id = self._subnet_check_and_create(
inner['network']['id'], subnet)
self.name_mappings[subnet['name']] = subnet_id self.name_mappings[subnet['name']] = subnet_id
self._router_link() self._router_link()
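The reformatted code above builds the neutron create_network request body field by field. A standalone sketch of that construction for a vlan network (values invented; the body shape follows the Neutron v2.0 provider-network extension):

net = {'name': 'demo_net', 'segmentation_type': 'vlan',
       'physnet_name': 'physnet1', 'segmentation_id': 1001, 'shared': False}

body = {'network': {'name': net['name'],
                    'provider:network_type': net['segmentation_type']}}
if net['segmentation_type'].strip() == 'vlan':
    body['network']['provider:physical_network'] = net['physnet_name']
    if net.get('segmentation_id') is not None:
        body['network']['provider:segmentation_id'] = net['segmentation_id']
if net['shared']:
    body['network']['shared'] = True

print(body)  # would be passed to neutron.create_network(body)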
@@ -13,9 +13,11 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
SUPPORTED_FILTERS = ['name', 'status','cluster_id','id','host_id', 'role_id', 'auto_scale','container_format', 'disk_format', SUPPORTED_FILTERS = ['name', 'status', 'cluster_id', 'id',
'host_id', 'role_id',
'auto_scale', 'container_format', 'disk_format',
'min_ram', 'min_disk', 'size_min', 'size_max', 'min_ram', 'min_disk', 'size_min', 'size_max',
'is_public', 'changes-since', 'protected'] 'is_public', 'changes-since', 'protected', 'type']
SUPPORTED_PARAMS = ('limit', 'marker', 'sort_key', 'sort_dir') SUPPORTED_PARAMS = ('limit', 'marker', 'sort_key', 'sort_dir')
@@ -0,0 +1,312 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/hosts endpoint for Daisy v1 API
"""
import datetime
import os
import subprocess
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden
from daisy import i18n
from daisy import notifier
from daisy.api import policy
import daisy.api.v1
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
import daisy.registry.client.v1.api as registry
from daisy.api.v1 import controller
from daisy.api.v1 import filters
import daisy.api.backends.tecs.common as tecs_cmn
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
BACK_PATH = '/home/daisy_backup/'
class Controller(controller.BaseController):
"""
WSGI controller for hosts resource in Daisy v1 API
The hosts resource API is a RESTful web service for host data. The API
is as follows::
GET /hosts -- Returns a set of brief metadata about hosts
GET /hosts/detail -- Returns a set of detailed metadata about
hosts
HEAD /hosts/<ID> -- Return metadata about an host with id <ID>
GET /hosts/<ID> -- Return host data for host with id <ID>
POST /hosts -- Store host data and return metadata about the
newly-stored host
PUT /hosts/<ID> -- Update host metadata and/or upload host
data for a previously-reserved host
DELETE /hosts/<ID> -- Delete the host with id <ID>
"""
def __init__(self):
self.notifier = notifier.Notifier()
registry.configure_registry_client()
self.policy = policy.Enforcer()
if property_utils.is_property_protection_enabled():
self.prop_enforcer = property_utils.PropertyRules(self.policy)
else:
self.prop_enforcer = None
def _enforce(self, req, action, target=None):
"""Authorize an action against our policies"""
if target is None:
target = {}
try:
self.policy.enforce(req.context, action, target)
except exception.Forbidden:
raise HTTPForbidden()
def _get_filters(self, req):
"""
Return a dictionary of query param filters from the request
:param req: the Request object coming from the wsgi layer
:retval a dict of key/value filters
"""
query_filters = {}
for param in req.params:
if param in SUPPORTED_FILTERS:
query_filters[param] = req.params.get(param)
if not filters.validate(param, query_filters[param]):
raise HTTPBadRequest(_('Bad value passed to filter '
'%(filter)s got %(val)s')
% {'filter': param,
'val': query_filters[param]})
return query_filters
def _get_query_params(self, req):
"""
Extracts necessary query params from request.
:param req: the WSGI Request object
:retval dict of parameters that can be used by registry client
"""
params = {'filters': self._get_filters(req)}
for PARAM in SUPPORTED_PARAMS:
if PARAM in req.params:
params[PARAM] = req.params.get(PARAM)
return params
def hostname(self):
if os.name == 'posix':
host = os.popen('echo $HOSTNAME')
try:
return host.read()
finally:
host.close()
else:
return 'Unknown hostname'
def check_file_format(self, req, file_meta):
if not os.path.exists(file_meta.get('backup_file_path', '')):
msg = 'File does not exist!'
LOG.error(msg)
raise HTTPForbidden(explanation=msg, request=req,
content_type="text/plain")
if not file_meta['backup_file_path'].endswith('.tar.gz'):
msg = 'File format not supported! .tar.gz format is required!'
LOG.error(msg)
raise HTTPForbidden(explanation=msg, request=req,
content_type="text/plain")
@utils.mutating
def backup(self, req):
"""
Backup daisy data.
:param req: The WSGI/Webob Request object
:raises HTTPBadRequest if backup failed
"""
version = self.version(req, {'type': 'internal'})
date_str = filter(lambda x: x.isdigit(),
str(datetime.datetime.now())[:19])
backup_file_name = '{0}_{1}_{2}.tar.gz'.format(
self.hostname().strip(), date_str, version['daisy_version'])
scripts = [
'test -d {0}daisy_tmp||mkdir -p {0}daisy_tmp'.format(BACK_PATH),
'echo {0}>{1}daisy_tmp/version.conf'.format(
version['daisy_version'], BACK_PATH),
'cp /home/daisy_install/daisy.conf {0}/daisy_tmp'.format(
BACK_PATH),
'mysqldump --all-databases > {0}daisy_tmp/database.sql'.format(
BACK_PATH),
'tar -zcvf {0}{1} -C {0} daisy_tmp >/dev/null 2>&1'.format(
BACK_PATH, backup_file_name),
'chmod 777 {0} {0}{1}'.format(BACK_PATH, backup_file_name),
'rm -rf {0}daisy_tmp'.format(BACK_PATH)
]
tecs_cmn.run_scrip(scripts, msg='Backup file failed!')
return {"backup_file": BACK_PATH + backup_file_name}
@utils.mutating
def restore(self, req, file_meta):
"""
Restore daisy data.
:param req: The WSGI/Webob Request object
:param file_meta: The daisy backup file path
:raises HTTPBadRequest if restore failed
"""
self.check_file_format(req, file_meta)
restore_scripts = [
'test -d {0} || mkdir {0}'.format(BACK_PATH),
'test -d {0} || mkdir {0}'.format('/home/daisy_install/'),
'tar -zxvf {1} -C {0}>/dev/null 2>&1'.format(
BACK_PATH, file_meta['backup_file_path']),
'mysql < {0}daisy_tmp/database.sql'.format(BACK_PATH),
'cp {0}daisy_tmp/daisy.conf /home/daisy_install/'.format(
BACK_PATH),
'rm -rf {0}daisy_tmp'.format(BACK_PATH)
]
tecs_cmn.run_scrip(restore_scripts, msg='Restore failed!')
LOG.info('Restore successfully')
@utils.mutating
def get_backup_file_version(self, req, file_meta):
"""
Get version of daisy backup file.
:param req: The WSGI/Webob Request object
:raises HTTPBadRequest if can't get version of backup file
"""
self.check_file_format(req, file_meta)
scripts = [
'test -d {0} || mkdir {0}'.format(BACK_PATH),
'tar -zxvf {0} -C {1}>/dev/null 2>&1'.format(
file_meta['backup_file_path'], BACK_PATH)
]
tecs_cmn.run_scrip(scripts, msg='Decompression file failed!')
try:
version = subprocess.check_output(
'cat {0}daisy_tmp/version.conf'.format(BACK_PATH),
shell=True, stderr=subprocess.STDOUT).strip()
except:
msg = 'Error occurred when running scripts to get version of' \
' backup file!'
LOG.error(msg)
raise HTTPForbidden(explanation=msg, request=req,
content_type="text/plain")
tecs_cmn.run_scrip(['rm -rf {0}daisy_tmp'.format(BACK_PATH)])
return {"backup_file_version": version}
@utils.mutating
def version(self, req, version):
"""
Get version of daisy.
:param req: The WSGI/Webob Request object
:raises HTTPBadRequest if can't get version of daisy
"""
if version.get('type') == 'internal':
scripts = "rpm -q python-daisy | awk -F'-' '{print $3\"-\"$4}'"
else:
# reserve for external version
return {"daisy_version": '1.0.0-1.1.0'}
try:
version = subprocess.check_output(scripts, shell=True,
stderr=subprocess.STDOUT).strip()
except:
msg = 'Error occurred when running scripts to get version of daisy'
LOG.error(msg)
raise HTTPForbidden(explanation=msg, request=req,
content_type="text/plain")
daisy_version = filter(lambda x: not x.isalpha(), version)[:-1]
return {"daisy_version": daisy_version}
class BackupRestoreDeserializer(wsgi.JSONRequestDeserializer):
"""Handles deserialization of specific controller method requests."""
def _deserialize(self, request):
result = {}
result['file_meta'] = utils.get_dict_meta(request)
return result
def backup(self, request):
return {}
def restore(self, request):
return self._deserialize(request)
def get_backup_file_version(self, request):
return self._deserialize(request)
def version(self, request):
result = {}
result['version'] = utils.get_dict_meta(request)
return result
class BackupRestoreSerializer(wsgi.JSONResponseSerializer):
"""Handles serialization of specific controller method responses."""
def __init__(self):
self.notifier = notifier.Notifier()
def backup(self, response, result):
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(result)
return response
def restore(self, response, result):
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(result)
return response
def get_backup_file_version(self, response, result):
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(result)
return response
def version(self, response, result):
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(result)
return response
def create_resource():
"""Version resource factory method"""
deserializer = BackupRestoreDeserializer()
serializer = BackupRestoreSerializer()
return wsgi.Resource(Controller(), deserializer, serializer)
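The backup() handler above derives its archive name from the hostname, the digits of the current timestamp, and the daisy version. A sketch of that naming scheme (hostname and version are invented; backup() shells out for the real values):

import datetime

hostname = 'daisy-server'          # example host name
version = '1.0.0-1.1.0'            # example daisy version
date_str = ''.join(c for c in str(datetime.datetime.now())[:19]
                   if c.isdigit())
backup_file_name = '{0}_{1}_{2}.tar.gz'.format(hostname, date_str, version)
print(backup_file_name)  # e.g. daisy-server_20160701102139_1.0.0-1.1.0.tar.gz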
@@ -38,6 +38,7 @@ from daisy.common import wsgi
from daisy import i18n from daisy import i18n
from daisy import notifier from daisy import notifier
import daisy.registry.client.v1.api as registry import daisy.registry.client.v1.api as registry
from functools import reduce
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
_ = i18n._ _ = i18n._
@@ -53,15 +54,16 @@ CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format')
CONF.import_opt('container_formats', 'daisy.common.config', CONF.import_opt('container_formats', 'daisy.common.config',
group='image_format') group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config') CONF.import_opt('image_property_quota', 'daisy.common.config')
CLUSTER_DEFAULT_NETWORKS = ['PUBLIC', 'DEPLOYMENT', 'PRIVATE', 'EXTERNAL', CLUSTER_DEFAULT_NETWORKS = ['PUBLICAPI', 'DEPLOYMENT', 'DATAPLANE', 'EXTERNAL',
'STORAGE', 'VXLAN', 'MANAGEMENT'] 'STORAGE', 'MANAGEMENT']
class Controller(controller.BaseController): class Controller(controller.BaseController):
""" """
WSGI controller for clusters resource in Daisy v1 API WSGI controller for clusters resource in Daisy v1 API
The clusters resource API is a RESTful web service for cluster data. The API The clusters resource API is a RESTful web service for cluster data.
is as follows:: The API is as follows::
GET /clusters -- Returns a set of brief metadata about clusters GET /clusters -- Returns a set of brief metadata about clusters
GET /clusters -- Returns a set of detailed metadata about GET /clusters -- Returns a set of detailed metadata about
@@ -86,65 +88,83 @@ class Controller(controller.BaseController):
cluster_id = kwargs.get('id', None) cluster_id = kwargs.get('id', None)
errmsg = (_("I'm params checker.")) errmsg = (_("I'm params checker."))
LOG.debug(_("Params check for cluster-add or cluster-update begin!")) LOG.debug(
_("Params check for cluster-add or cluster-update begin!"))
def check_params_range(param, type=None): def check_params_range(param, type=None):
''' '''
param : input a list, such as [start, end] param : input a list, such as [start, end]
check condition: start must be less than end, and values must exist as a pair check condition: start must be less than end,
and values must exist as a pair
return True or False return True or False
''' '''
if len(param) != 2: if len(param) != 2:
msg = '%s range must exist in pairs.' % type msg = '%s range must exist in pairs.' % type
raise HTTPForbidden(explanation=msg) raise HTTPForbidden(explanation=msg)
if param[0] == None or param[0] == '': if param[0] is None or param[0] == '':
msg = 'The start value of %s range can not be None.' % type msg = 'The start value of %s range can not be None.' % type
raise HTTPForbidden(explanation=msg) raise HTTPForbidden(explanation=msg)
if param[1] == None: if param[1] is None:
msg = 'The end value of %s range can not be None.' % type msg = 'The end value of %s range can not be None.' % type
raise HTTPForbidden(explanation=msg) raise HTTPForbidden(explanation=msg)
if int(param[0]) > int(param[1]): if int(param[0]) > int(param[1]):
msg = 'The start value of the %s range must be less than the end value.' % type msg = 'The start value of the %s range must be less ' \
'than the end value.' % type
raise HTTPForbidden(explanation=msg) raise HTTPForbidden(explanation=msg)
if type not in ['vni']: if type not in ['vni']:
if int(param[0]) < 0 or int(param[0]) > 4096: if int(param[0]) < 0 or int(param[0]) > 4096:
msg = 'Invalid value of the start value(%s) of the %s range .' % (param[0], type) msg = 'Invalid value of the start value(%s) of ' \
'the %s range .' % (param[
0], type)
raise HTTPForbidden(explanation=msg) raise HTTPForbidden(explanation=msg)
if int(param[1]) < 0 or int(param[1]) > 4096: if int(param[1]) < 0 or int(param[1]) > 4096:
msg = 'Invalid value of the end value(%s) of the %s range .' % (param[1], type) msg = 'Invalid value of the end value(%s) of ' \
'the %s range .' % (param[
1], type)
raise HTTPForbidden(explanation=msg) raise HTTPForbidden(explanation=msg)
else: else:
if int(param[0]) < 0 or int(param[0]) > 16777216: if int(param[0]) < 0 or int(param[0]) > 16777216:
msg = 'Invalid value of the start value(%s) of the %s range .' % (param[0], type) msg = 'Invalid value of the start value(%s) of ' \
'the %s range .' % (param[
0], type)
raise HTTPForbidden(explanation=msg) raise HTTPForbidden(explanation=msg)
if int(param[1]) < 0 or int(param[1]) > 16777216: if int(param[1]) < 0 or int(param[1]) > 16777216:
msg = 'Invalid value of the end value(%s) of the %s range .' % (param[1], type) msg = 'Invalid value of the end value(%s) of ' \
'the %s range .' % (param[
1], type)
raise HTTPForbidden(explanation=msg) raise HTTPForbidden(explanation=msg)
return True return True
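check_params_range() above enforces that a [start, end] pair is complete, ordered, and inside the bounds for its type (0..4096 for vlan-like ranges, 0..16777216 for vni). A condensed, runnable restatement of those rules, with invented sample ranges:

def check_range(param, upper):
    if len(param) != 2 or param[0] in (None, '') or param[1] is None:
        raise ValueError('range must be a complete [start, end] pair')
    start, end = int(param[0]), int(param[1])
    if start > end:
        raise ValueError('start must be less than end')
    if not (0 <= start <= upper and 0 <= end <= upper):
        raise ValueError('value out of range 0..%d' % upper)
    return True

print(check_range([2, 4094], 4096))          # vlan range -> True
print(check_range([1000, 20000], 16777216))  # vni range  -> True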
def _check_auto_scale(req, cluster_meta): def _check_auto_scale(req, cluster_meta):
if cluster_meta.has_key('auto_scale') and cluster_meta['auto_scale'] =='1': if 'auto_scale' in cluster_meta and cluster_meta[
'auto_scale'] == '1':
meta = {"auto_scale": '1'} meta = {"auto_scale": '1'}
params = {'filters': meta} params = {'filters': meta}
clusters = registry.get_clusters_detail(req.context, **params) clusters = registry.get_clusters_detail(
req.context, **params)
if clusters: if clusters:
if cluster_id: if cluster_id:
temp_cluster = [cluster for cluster in clusters if cluster['id'] !=cluster_id] temp_cluster = [
cluster for cluster in clusters if
cluster['id'] != cluster_id]
if temp_cluster: if temp_cluster:
errmsg = (_("already exist cluster auto_scale is true")) errmsg = (
_("already exist cluster "
"auto_scale is true"))
raise HTTPBadRequest(explanation=errmsg) raise HTTPBadRequest(explanation=errmsg)
else: else:
errmsg = (_("already exist cluster auto_scale is true")) errmsg = (
_("already exist cluster auto_scale is true"))
raise HTTPBadRequest(explanation=errmsg) raise HTTPBadRequest(explanation=errmsg)
def _ip_into_int(ip): def _ip_into_int(ip):
""" """
Convert an IP string to a decimal integer. Convert an IP string to a decimal integer.
:param ip: ip string :param ip: ip string
:return: decimalism integer :return: decimalism integer
""" """
return reduce(lambda x, y: (x<<8)+y, map(int, ip.split('.'))) return reduce(lambda x, y: (x << 8) + y,
map(int, ip.split('.')))
def _is_in_network_range(ip, network): def _is_in_network_range(ip, network):
""" """
@@ -155,9 +175,13 @@ class Controller(controller.BaseController):
""" """
network = network.split('/') network = network.split('/')
mask = ~(2**(32 - int(network[1])) - 1) mask = ~(2**(32 - int(network[1])) - 1)
return (_ip_into_int(ip) & mask) == (_ip_into_int(network[0]) & mask) return (
_ip_into_int(ip) & mask) == (
_ip_into_int(
network[0]) & mask)
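A worked example of the two helpers above: _ip_into_int packs the dotted quad into a 32-bit integer, and _is_in_network_range compares the network bits under the CIDR mask (standalone copies shown here, sample addresses invented):

from functools import reduce

def ip_into_int(ip):
    # '192.168.1.10' -> (((192 << 8) + 168) << 8) + 1 ... = 3232235786
    return reduce(lambda x, y: (x << 8) + y, map(int, ip.split('.')))

def is_in_network_range(ip, network):
    net, prefix = network.split('/')
    mask = ~(2 ** (32 - int(prefix)) - 1)   # /24 -> ...FFFFFF00
    return (ip_into_int(ip) & mask) == (ip_into_int(net) & mask)

print(is_in_network_range('192.168.1.10', '192.168.1.0/24'))  # True
print(is_in_network_range('192.168.2.10', '192.168.1.0/24'))  # False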
def _check_param_nonull_and_valid(values_set, keys_set, valids_set={}): def _check_param_nonull_and_valid(
values_set, keys_set, valids_set={}):
""" """
Check operation params is not null and valid. Check operation params is not null and valid.
:param values_set: Params set. :param values_set: Params set.
@@ -167,10 +191,10 @@ class Controller(controller.BaseController):
""" """
for k in keys_set: for k in keys_set:
v = values_set.get(k, None) v = values_set.get(k, None)
if type(v) == type(True) and v == None: if isinstance(v, type(True)) and v is None:
errmsg = (_("Segment %s can't be None." % k)) errmsg = (_("Segment %s can't be None." % k))
raise HTTPBadRequest(explanation=errmsg) raise HTTPBadRequest(explanation=errmsg)
elif type(v) != type(True) and not v: elif not isinstance(v, type(True)) and not v:
errmsg = (_("Segment %s can't be None." % k)) errmsg = (_("Segment %s can't be None." % k))
raise HTTPBadRequest(explanation=errmsg) raise HTTPBadRequest(explanation=errmsg)
@@ -183,15 +207,18 @@ class Controller(controller.BaseController):
def _get_network_detail(req, cluster_id, networks_list): def _get_network_detail(req, cluster_id, networks_list):
all_network_list = [] all_network_list = []
if cluster_id: if cluster_id:
all_network_list = registry.get_networks_detail(req.context, cluster_id) all_network_list = registry.get_networks_detail(
req.context, cluster_id)
if networks_list: if networks_list:
for net_id in networks_list: for net_id in networks_list:
network_detail = registry.get_network_metadata(req.context, net_id) network_detail = registry.get_network_metadata(
req.context, net_id)
all_network_list.append(network_detail) all_network_list.append(network_detail)
all_private_network_list = \ all_private_network_list = [
[network for network in all_network_list if network['network_type'] == "PRIVATE"] network for network in all_network_list if network[
'network_type'] == "DATAPLANE"]
return all_private_network_list return all_private_network_list
def _check_cluster_add_parameters(req, cluster_meta): def _check_cluster_add_parameters(req, cluster_meta):
@@ -201,54 +228,34 @@ class Controller(controller.BaseController):
:param cluster_meta: params set :param cluster_meta: params set
:return:error message :return:error message
""" """
if cluster_meta.has_key('nodes'): if 'nodes' in cluster_meta:
orig_keys = list(eval(cluster_meta['nodes'])) orig_keys = list(eval(cluster_meta['nodes']))
for host_id in orig_keys: for host_id in orig_keys:
controller._raise_404_if_host_deleted(req, host_id) controller._raise_404_if_host_deleted(req, host_id)
if cluster_meta.has_key('networks'): if 'networks' in cluster_meta:
orig_keys = list(eval(cluster_meta['networks'])) orig_keys = list(eval(cluster_meta['networks']))
network_with_same_name = [] network_with_same_name = []
for network_id in orig_keys: for network_id in orig_keys:
network_name = controller._raise_404_if_network_deleted(req, network_id) network_name = \
controller._raise_404_if_network_deleted(
req, network_id)
if network_name in CLUSTER_DEFAULT_NETWORKS: if network_name in CLUSTER_DEFAULT_NETWORKS:
return (_("Network name %s of %s already exits" return (_("Network name %s of %s already exits"
" in the cluster, please check." % " in the cluster, please check." %
(network_name, network_id))) (network_name, network_id)))
if network_name in network_with_same_name: if network_name in network_with_same_name:
return (_("Network name can't be same with each other in 'networks[]', " return (_("Network name can't be same with "
"each other in 'networks[]', "
"please check.")) "please check."))
network_with_same_name.append(network_name) network_with_same_name.append(network_name)
# checkout network_params-------------------------------------------------- # checkout network_params
if cluster_meta.get('networking_parameters', None): if cluster_meta.get('networking_parameters', None):
networking_parameters = eval(cluster_meta['networking_parameters']) networking_parameters =\
_check_param_nonull_and_valid(networking_parameters, eval(cluster_meta['networking_parameters'])
['segmentation_type'])
segmentation_type_set = networking_parameters['segmentation_type'].split(",")
for segmentation_type in segmentation_type_set:
if segmentation_type not in ['vlan', 'vxlan', 'flat', 'gre']:
return (_("Segmentation_type of networking_parameters is not valid."))
if segmentation_type =='vxlan':
_check_param_nonull_and_valid(networking_parameters,['vni_range'])
elif segmentation_type =='gre':
_check_param_nonull_and_valid(networking_parameters,['gre_id_range'])
vlan_range = networking_parameters.get("vlan_range", None) # check logic_networks
vni_range = networking_parameters.get("vni_range", None)
gre_id_range = networking_parameters.get("gre_id_range", None)
#if (vlan_range and len(vlan_range) != 2) \
# or (vni_range and len(vni_range) != 2) \
# or (gre_id_range and len(gre_id_range) != 2):
# return (_("Range params must be pair."))
if vlan_range:
check_params_range(vlan_range, 'vlan')
if vni_range:
check_params_range(vni_range, 'vni')
if gre_id_range:
check_params_range(gre_id_range, 'gre_id')
# check logic_networks--------------------------------------------------
subnet_name_set = [] # record all subnets's name subnet_name_set = [] # record all subnets's name
logic_network_name_set = [] # record all logic_network's name logic_network_name_set = [] # record all logic_network's name
subnets_in_logic_network = {} subnets_in_logic_network = {}
@@ -256,23 +263,36 @@ class Controller(controller.BaseController):
if cluster_meta.get('logic_networks', None): if cluster_meta.get('logic_networks', None):
# get physnet_name list # get physnet_name list
all_private_cluster_networks_list = _get_network_detail( all_private_cluster_networks_list = _get_network_detail(
req, cluster_id, req, cluster_id, cluster_meta.get(
cluster_meta.get('networks', None) 'networks', None) if not isinstance(
if not isinstance(cluster_meta.get('networks', None), unicode) cluster_meta.get(
else eval(cluster_meta.get('networks', None))) 'networks', None), unicode) else eval(
cluster_meta.get(
'networks', None)))
if not all_private_cluster_networks_list: if not all_private_cluster_networks_list:
LOG.info("Private network is empty in db, it lead logical network config invalid.") LOG.info(
physnet_name_set = [net['name'] for net in all_private_cluster_networks_list] "Private network is empty in db, it lead "
"logical network config invalid.")
physnet_name_set = [net['name']
for net in
all_private_cluster_networks_list]
logic_networks = eval(cluster_meta['logic_networks']) logic_networks = eval(cluster_meta['logic_networks'])
for logic_network in logic_networks: for logic_network in logic_networks:
subnets_in_logic_network[logic_network['name']] = [] subnets_in_logic_network[logic_network['name']] = []
# We force setting the physnet_name of flat logical network to 'flat'. # We force setting the physnet_name of flat logical
if logic_network.get('segmentation_type', None) == "flat": # network to 'flat'.
if logic_network['physnet_name'] != "physnet1" or logic_network['type'] != "external": if logic_network.get(
LOG.info("When 'segmentation_type' is flat the 'physnet_name' and 'type' segmentation" 'segmentation_type', None) == "flat":
"must be 'physnet1'' and 'external'', but got '%s' and '%s'.We have changed" if logic_network['physnet_name'] != "physnet1" or \
logic_network[
'type'] != "external":
LOG.info(
"When 'segmentation_type' is flat the "
"'physnet_name' and 'type' segmentation"
"must be 'physnet1'' and 'external'', "
"but got '%s' and '%s'.We have changed"
"it to the valid value.") "it to the valid value.")
logic_network['physnet_name'] = "physnet1" logic_network['physnet_name'] = "physnet1"
logic_network['type'] = "external" logic_network['type'] = "external"
@@ -280,44 +300,20 @@ class Controller(controller.BaseController):
                 _check_param_nonull_and_valid(
                     logic_network,
-                    ['name', 'type', 'physnet_name', 'segmentation_type', 'shared', 'segmentation_id'],
-                    {'segmentation_type': networking_parameters['segmentation_type'],
+                    ['name', 'type', 'physnet_name',
+                     'segmentation_type', 'shared', 'segmentation_id'],
+                    {'segmentation_type': networking_parameters[
+                        'segmentation_type'],
                      'physnet_name': ','.join(physnet_name_set),
                      'type': ','.join(["external", "internal"])})
                 if logic_network['type'] == "external":
-                    external_logic_network_name.append(logic_network['name'])
+                    external_logic_network_name.append(
+                        logic_network['name'])
                 logic_network_name_set.append(logic_network['name'])
-                # By segmentation_type check segmentation_id is in range
-                segmentation_id = logic_network.get('segmentation_id', None)
-                if segmentation_id:
-                    err = "Segmentation_id is out of private network %s of %s.Vaild range is [%s, %s]."
-                    segmentation_type = logic_network.get('segmentation_type', None)
-                    if 0 == cmp(segmentation_type, "vlan"):
-                        private_vlan_range = \
-                            [(net['vlan_start'], net['vlan_end'])
-                             for net in all_private_cluster_networks_list
-                             if logic_network['physnet_name'] == net['name']]
-                        if private_vlan_range and \
-                                not private_vlan_range[0][0] or \
-                                not private_vlan_range[0][1]:
-                            return (_("Private network plane %s don't config the 'vlan_start' or "
-                                      "'vlan_end' parameter."))
-                        if int(segmentation_id) not in range(private_vlan_range[0][0], private_vlan_range[0][1]):
-                            return (_(err % ("vlan_range", logic_network['physnet_name'],
-                                             private_vlan_range[0][0], private_vlan_range[0][1])))
-                    elif 0 == cmp(segmentation_type, "vxlan") and vni_range:
-                        if int(segmentation_id) not in range(vni_range[0], vni_range[1]):
-                            return (_("Segmentation_id is out of vni_range."))
-                    elif 0 == cmp(segmentation_type, "gre") and gre_id_range:
-                        if int(segmentation_id) not in range(gre_id_range[0], gre_id_range[1]):
-                            return (_("Segmentation_id is out of gre_id_range."))
-                # checkout subnets params--------------------------------------------------
+                # checkout subnets params------------------------------
                 if logic_network.get('subnets', None):
                     subnet_data = logic_network['subnets']
                     for subnet in subnet_data:
@@ -325,49 +321,78 @@ class Controller(controller.BaseController):
                             subnet,
                             ['name', 'cidr'])
                         subnet_name_set.append(subnet['name'])
-                        # By cidr check floating_ranges is in range and not overlap
+                        # By cidr check floating_ranges is in range
+                        # and not overlap
                         # ---------------start-----
-                        if subnet['gateway'] and not _is_in_network_range(subnet['gateway'], subnet['cidr']):
+                        if subnet['gateway'] and not \
+                                _is_in_network_range(
+                                    subnet['gateway'], subnet['cidr']):
                             return (_("Wrong gateway format."))
                         if subnet['floating_ranges']:
-                            inter_ip = lambda x: '.'.join([str(x/(256**i)%256) for i in range(3,-1,-1)])
+                            inter_ip = lambda x: '.'.join(
+                                [str(x / (256**i) % 256) for i in
+                                 range(3, -1, -1)])
                             floating_ranges_with_int_ip = list()
                             sorted_floating_ranges = list()
                             sorted_floating_ranges_with_int_ip = list()
-                            for floating_ip in subnet['floating_ranges']:
+                            for floating_ip in subnet[
+                                    'floating_ranges']:
                                 if len(floating_ip) != 2:
-                                    return (_("Floating ip must be paris."))
+                                    return (
+                                        _("Floating ip must "
+                                          "be paris."))
                                 ip_start = _ip_into_int(floating_ip[0])
                                 ip_end = _ip_into_int(floating_ip[1])
                                 if ip_start > ip_end:
-                                    return (_("Wrong floating ip format."))
-                                floating_ranges_with_int_ip.append([ip_start, ip_end])
-                            sorted_floating_ranges_with_int_ip = sorted(floating_ranges_with_int_ip, key=lambda x: x[0])
-                            for ip_range in sorted_floating_ranges_with_int_ip:
+                                    return (
+                                        _("Wrong floating ip format."))
+                                floating_ranges_with_int_ip.append(
+                                    [ip_start, ip_end])
+                            sorted_floating_ranges_with_int_ip = \
+                                sorted(floating_ranges_with_int_ip,
+                                       key=lambda x: x[0])
+                            for ip_range in \
+                                    sorted_floating_ranges_with_int_ip:
                                 ip_start = inter_ip(ip_range[0])
                                 ip_end = inter_ip(ip_range[1])
-                                sorted_floating_ranges.append([ip_start, ip_end])
+                                sorted_floating_ranges.append(
+                                    [ip_start, ip_end])
                             last_rang_ip = []
                             for floating in sorted_floating_ranges:
-                                if not _is_in_network_range(floating[0], subnet['cidr']) \
-                                        or not _is_in_network_range(floating[1], subnet['cidr']):
-                                    return (_("Floating ip or gateway is out of range cidr."))
+                                if not _is_in_network_range(
+                                        floating[0],
+                                        subnet['cidr']) or not \
+                                        _is_in_network_range(
+                                        floating[1], subnet['cidr']):
+                                    return (
+                                        _("Floating ip or gateway "
+                                          "is out of range cidr."))
-                                err_list = [err for err in last_rang_ip if _ip_into_int(floating[0]) < err]
+                                err_list = [
+                                    err for err in last_rang_ip if
+                                    _ip_into_int(
+                                        floating[0]) < err]
                                 if last_rang_ip and 0 < len(err_list):
-                                    return (_("Between floating ip range can not be overlap."))
-                                last_rang_ip.append(_ip_into_int(floating[1]))
-                            subnets_in_logic_network[logic_network['name']].append(subnet['name'])
+                                    return (
+                                        _("Between floating ip range "
+                                          "can not be overlap."))
+                                last_rang_ip.append(
+                                    _ip_into_int(floating[1]))
+                            subnets_in_logic_network[logic_network[
+                                'name']].append(subnet['name'])
                 # check external logical network uniqueness
                 if len(external_logic_network_name) > 1:
-                    return (_("External logical network is uniqueness in the cluster.Got %s." %
+                    return (_("External logical network is uniqueness "
+                              "in the cluster.Got %s." %
                               ",".join(external_logic_network_name)))
                 # check logic_network_name uniqueness
-                if len(logic_network_name_set) != len(set(logic_network_name_set)):
-                    return (_("Logic network name segment is repetition."))
+                if len(logic_network_name_set) != len(
+                        set(logic_network_name_set)):
+                    return (_("Logic network name segment "
+                              "is repetition."))
                 # check subnet_name uniqueness
                 if len(subnet_name_set) != len(set(subnet_name_set)):
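Editor's note: the floating-range validation above leans on two small conversions -- _ip_into_int (dotted quad to integer) and the inter_ip lambda (integer back to dotted quad) -- plus a sort-then-scan overlap test. A standalone sketch of the same arithmetic under stand-in names (not the module itself); '//' is used so the round trip also holds on Python 3:

from functools import reduce

def ip_into_int(ip):
    # '192.168.1.10' -> 3232235786 (mirrors _ip_into_int in the patch)
    return reduce(lambda acc, octet: acc * 256 + int(octet),
                  ip.split('.'), 0)

def inter_ip(x):
    # integer -> dotted quad, the inverse conversion
    return '.'.join([str(x // (256 ** i) % 256) for i in range(3, -1, -1)])

ranges = [['192.168.1.50', '192.168.1.60'], ['192.168.1.10', '192.168.1.20']]
as_int = sorted(([ip_into_int(a), ip_into_int(b)] for a, b in ranges),
                key=lambda r: r[0])

# After sorting by range start, any overlap shows up as a start that is
# not strictly greater than the previous range's end.
last_end = -1
for start, end in as_int:
    assert start > last_end, "floating ranges overlap"
    last_end = end
print([[inter_ip(s), inter_ip(e)] for s, e in as_int])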
@@ -375,7 +400,7 @@ class Controller(controller.BaseController):
         cluster_meta['logic_networks'] = unicode(logic_networks)
-    # check routers--------------------------------------------------
+    # check routers------------------------------------------------
     subnet_name_set_deepcopy = copy.deepcopy(subnet_name_set)
     router_name_set = []  # record all routers name
     if cluster_meta.get('routers', None):
@@ -384,27 +409,38 @@ class Controller(controller.BaseController):
             _check_param_nonull_and_valid(router, ['name'])
             # check relevance logic_network is valid
-            external_logic_network_data = router.get('external_logic_network', None)
+            external_logic_network_data = router.get(
+                'external_logic_network', None)
             if external_logic_network_data and \
-                    external_logic_network_data not in logic_network_name_set:
-                return (_("Logic_network %s is not valid range." % external_logic_network_data))
+                    external_logic_network_data not in \
+                    logic_network_name_set:
+                return (_("Logic_network %s is not valid range." %
+                          external_logic_network_data))
             router_name_set.append(router['name'])
             # check relevance subnets is valid
             for subnet in router.get('subnets', []):
                 if subnet not in subnet_name_set:
-                    return (_("Subnet %s is not valid range." % subnet))
+                    return (
+                        _("Subnet %s is not valid range." %
+                          subnet))
                 # subnet cann't relate with two routers
                 if subnet not in subnet_name_set_deepcopy:
-                    return (_("The subnet can't be related with multiple routers."))
+                    return (
+                        _("The subnet can't be related with "
+                          "multiple routers."))
                 subnet_name_set_deepcopy.remove(subnet)
             if external_logic_network_data and \
-                    subnets_in_logic_network[external_logic_network_data] and \
-                    set(subnets_in_logic_network[external_logic_network_data]). \
+                    subnets_in_logic_network[
+                        external_logic_network_data] and \
+                    set(subnets_in_logic_network[
+                        external_logic_network_data]). \
                     issubset(set(router['subnets'])):
-                return (_("Logic network's subnets is all related with a router, it's not allowed."))
+                return (
+                    _("Logic network's subnets is all related"
+                      " with a router, it's not allowed."))
         # check subnet_name uniqueness
         if len(router_name_set) != len(set(router_name_set)):
@@ -413,10 +449,13 @@ class Controller(controller.BaseController):
             _check_auto_scale(req, cluster_meta)
             check_result = _check_cluster_add_parameters(req, cluster_meta)
             if 0 != cmp(check_result, errmsg):
-                LOG.exception(_("Params check for cluster-add or cluster-update is failed!"))
+                LOG.exception(
+                    _("Params check for cluster-add or cluster-update "
+                      "is failed!"))
                 raise HTTPBadRequest(explanation=check_result)
-            LOG.debug(_("Params check for cluster-add or cluster-update is done!"))
+            LOG.debug(
+                _("Params check for cluster-add or cluster-update is done!"))
             return f(*args, **kwargs)
         return wrapper
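Editor's note: the wrapper tail above is the familiar decorator shape -- the parameter check runs, then control passes to the wrapped controller method. A stripped-down sketch of that pattern; the validate helper and names are hypothetical, and the real code raises webob's HTTPBadRequest rather than ValueError:

import functools

def check_cluster_params(f):
    """Validate cluster_meta before the wrapped controller method runs."""
    @functools.wraps(f)
    def wrapper(self, req, cluster_meta, *args, **kwargs):
        error = validate(cluster_meta)   # returns '' when everything is fine
        if error:
            raise ValueError(error)
        return f(self, req, cluster_meta, *args, **kwargs)
    return wrapper

def validate(cluster_meta):
    return '' if cluster_meta.get('name') else 'cluster name is required'

class Controller(object):
    @check_cluster_params
    def add_cluster(self, req, cluster_meta):
        return {'cluster_meta': cluster_meta}

print(Controller().add_cluster(None, {'name': 'demo'}))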
@@ -448,7 +487,8 @@ class Controller(controller.BaseController):
     def _raise_404_if_network_deleted(self, req, network_id):
         network = self.get_network_meta_or_404(req, network_id)
         if network['deleted']:
-            msg = _("Network with identifier %s has been deleted.") % network_id
+            msg = _("Network with identifier %s has been deleted.") % \
+                network_id
             raise HTTPNotFound(msg)
         return network.get('name', None)
@@ -502,7 +542,8 @@ class Controller(controller.BaseController):
         cluster_name_split = cluster_name.split('_')
         for cluster_name_info in cluster_name_split:
             if not cluster_name_info.isalnum():
-                raise ValueError('cluster name must be numbers or letters or underscores !')
+                raise ValueError(
+                    'cluster name must be numbers or letters or underscores !')
         if cluster_meta.get('nodes', None):
             orig_keys = list(eval(cluster_meta['nodes']))
             for host_id in orig_keys:
@@ -514,11 +555,15 @@ class Controller(controller.BaseController):
                     raise HTTPForbidden(explanation=msg)
                 if node.get('interfaces', None):
                     interfaces = node['interfaces']
-                    input_host_pxe_info = [interface for interface in interfaces
-                                           if interface.get('is_deployment', None) == 1]
-                    if not input_host_pxe_info and node.get('os_status', None) != 'active':
-                        msg = _("The host %s has more than one dhcp server, "
-                                "please choose one interface for deployment") % host_id
+                    input_host_pxe_info = [
+                        interface for interface in interfaces if interface.get(
+                            'is_deployment', None) == 1]
+                    if not input_host_pxe_info and node.get(
+                            'os_status', None) != 'active':
+                        msg = _(
+                            "The host %s has more than one dhcp server, "
+                            "please choose one interface for deployment") % \
+                            host_id
                         raise HTTPServerError(explanation=msg)
         print cluster_name
         print cluster_meta
@@ -556,7 +601,8 @@ class Controller(controller.BaseController):
                                 request=req,
                                 content_type="text/plain")
         except exception.InUseByStore as e:
-            msg = (_("cluster %(id)s could not be deleted because it is in use: "
+            msg = (_("cluster %(id)s could not be deleted because "
+                     "it is in use: "
                      "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
             LOG.warn(msg)
             raise HTTPConflict(explanation=msg,
@@ -619,26 +665,31 @@ class Controller(controller.BaseController):
         :retval Returns the updated cluster information as a mapping
         """
         self._enforce(req, 'update_cluster')
-        if cluster_meta.has_key('nodes'):
+        if 'nodes' in cluster_meta:
             orig_keys = list(eval(cluster_meta['nodes']))
             for host_id in orig_keys:
                 self._raise_404_if_host_deleted(req, host_id)
                 node = registry.get_host_metadata(req.context, host_id)
                 if node['status'] == 'in-cluster':
-                    host_cluster = registry.get_host_clusters(req.context, host_id)
+                    host_cluster = registry.get_host_clusters(
+                        req.context, host_id)
                     if host_cluster[0]['cluster_id'] != id:
                         msg = _("Forbidden to add host %s with status "
                                 "'in-cluster' in another cluster") % host_id
                         raise HTTPForbidden(explanation=msg)
                 if node.get('interfaces', None):
                     interfaces = node['interfaces']
-                    input_host_pxe_info = [interface for interface in interfaces
-                                           if interface.get('is_deployment', None) == 1]
-                    if not input_host_pxe_info and node.get('os_status', None) != 'active':
-                        msg = _("The host %s has more than one dhcp server, "
-                                "please choose one interface for deployment") % host_id
+                    input_host_pxe_info = [
+                        interface for interface in interfaces if interface.get(
+                            'is_deployment', None) == 1]
+                    if not input_host_pxe_info and node.get(
+                            'os_status', None) != 'active':
+                        msg = _(
+                            "The host %s has more than one dhcp server, "
+                            "please choose one interface for deployment") % \
+                            host_id
                         raise HTTPServerError(explanation=msg)
-        if cluster_meta.has_key('networks'):
+        if 'networks' in cluster_meta:
             orig_keys = list(eval(cluster_meta['networks']))
             for network_id in orig_keys:
                 self._raise_404_if_network_deleted(req, network_id)
@@ -687,6 +738,7 @@ class Controller(controller.BaseController):
         return {'cluster_meta': cluster_meta}
 
+
 class ProjectDeserializer(wsgi.JSONRequestDeserializer):
     """Handles deserialization of specific controller method requests."""
 
@@ -701,6 +753,7 @@ class ProjectDeserializer(wsgi.JSONRequestDeserializer):
     def update_cluster(self, request):
         return self._deserialize(request)
 
+
 class ProjectSerializer(wsgi.JSONResponseSerializer):
     """Handles serialization of specific controller method responses."""
 
@@ -727,6 +780,7 @@ class ProjectSerializer(wsgi.JSONResponseSerializer):
         response.headers['Content-Type'] = 'application/json'
         response.body = self.to_json(dict(cluster=cluster_meta))
         return response
+
     def get_cluster(self, response, result):
         cluster_meta = result['cluster_meta']
         response.status = 201
@@ -734,9 +788,9 @@ class ProjectSerializer(wsgi.JSONResponseSerializer):
         response.body = self.to_json(dict(cluster=cluster_meta))
         return response
 
+
 def create_resource():
     """Projects resource factory method"""
     deserializer = ProjectDeserializer()
     serializer = ProjectSerializer()
     return wsgi.Resource(Controller(), deserializer, serializer)


@@ -52,21 +52,25 @@ CONF.import_opt('container_formats', 'daisy.common.config',
                 group='image_format')
 CONF.import_opt('image_property_quota', 'daisy.common.config')
 
+
 class Controller(controller.BaseController):
     """
     WSGI controller for components resource in Daisy v1 API
 
-    The components resource API is a RESTful web service for component data. The API
-    is as follows::
+    The components resource API is a RESTful web service for component data.
+    The API is as follows::
 
         GET  /components -- Returns a set of brief metadata about components
         GET  /components/detail -- Returns a set of detailed metadata about
                               components
-        HEAD /components/<ID> -- Return metadata about an component with id <ID>
-        GET  /components/<ID> -- Return component data for component with id <ID>
+        HEAD /components/<ID> --
+        Return metadata about an component with id <ID>
+        GET  /components/<ID> --
+        Return component data for component with id <ID>
         POST /components -- Store component data and return metadata about the
                         newly-stored component
-        PUT  /components/<ID> -- Update component metadata and/or upload component
+        PUT  /components/<ID> --
+        Update component metadata and/or upload component
                             data for a previously-reserved component
         DELETE /components/<ID> -- Delete the component with id <ID>
     """
@@ -140,7 +144,8 @@ class Controller(controller.BaseController):
         # print component_owner
         print component_name
         print component_description
-        component_meta = registry.add_component_metadata(req.context, component_meta)
+        component_meta = registry.add_component_metadata(
+            req.context, component_meta)
 
         return {'component_meta': component_meta}
 
@@ -175,7 +180,8 @@ class Controller(controller.BaseController):
                                 request=req,
                                 content_type="text/plain")
         except exception.InUseByStore as e:
-            msg = (_("component %(id)s could not be deleted because it is in use: "
+            msg = (_("component %(id)s could not be "
+                     "deleted because it is in use: "
                      "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
             LOG.warn(msg)
             raise HTTPConflict(explanation=msg,
@@ -280,6 +286,7 @@ class Controller(controller.BaseController):
 
         return {'component_meta': component_meta}
 
+
 class ComponentDeserializer(wsgi.JSONRequestDeserializer):
     """Handles deserialization of specific controller method requests."""
 
@@ -294,6 +301,7 @@ class ComponentDeserializer(wsgi.JSONRequestDeserializer):
     def update_component(self, request):
         return self._deserialize(request)
 
+
 class ComponentSerializer(wsgi.JSONResponseSerializer):
     """Handles serialization of specific controller method responses."""
 
@@ -313,6 +321,7 @@ class ComponentSerializer(wsgi.JSONResponseSerializer):
         response.headers['Content-Type'] = 'application/json'
         response.body = self.to_json(dict(component=component_meta))
         return response
+
     def get_component(self, response, result):
         component_meta = result['component_meta']
         response.status = 201
@@ -320,9 +329,9 @@ class ComponentSerializer(wsgi.JSONResponseSerializer):
         response.body = self.to_json(dict(component=component_meta))
         return response
 
+
 def create_resource():
     """Components resource factory method"""
     deserializer = ComponentDeserializer()
     serializer = ComponentSerializer()
     return wsgi.Resource(Controller(), deserializer, serializer)
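Editor's note: every module in this patch closes with the same create_resource() factory -- a deserializer parses the request, the controller does the work, a serializer renders the response. A schematic sketch of that composition with simplified stand-ins (these are not the actual daisy.common.wsgi classes):

import json

class Deserializer(object):
    def default(self, body):
        # Parse the JSON request body into a metadata dict.
        return json.loads(body)

class Serializer(object):
    def default(self, result):
        # Render the controller's result dict back to JSON.
        return json.dumps(result)

class Resource(object):
    """Pairs a controller method with request/response (de)serialization."""
    def __init__(self, controller, deserializer, serializer):
        self.controller = controller
        self.deserializer = deserializer
        self.serializer = serializer

    def __call__(self, action, body):
        meta = self.deserializer.default(body)
        result = getattr(self.controller, action)(meta)
        return self.serializer.default(result)

class Controller(object):
    def add_component(self, component_meta):
        return {'component_meta': component_meta}

resource = Resource(Controller(), Deserializer(), Serializer())
print(resource('add_component', '{"name": "glance"}'))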


@@ -52,21 +52,28 @@ CONF.import_opt('container_formats', 'daisy.common.config',
                 group='image_format')
 CONF.import_opt('image_property_quota', 'daisy.common.config')
 
+
 class Controller(controller.BaseController):
     """
     WSGI controller for config_files resource in Daisy v1 API
 
-    The config_files resource API is a RESTful web service for config_file data. The API
-    is as follows::
+    The config_files resource API is a RESTful web service
+    for config_file data. The API
+    is as follows::
 
-        GET  /config_files -- Returns a set of brief metadata about config_files
+        GET  /config_files --
+        Returns a set of brief metadata about config_files
         GET  /config_files/detail -- Returns a set of detailed metadata about
                               config_files
-        HEAD /config_files/<ID> -- Return metadata about an config_file with id <ID>
-        GET  /config_files/<ID> -- Return config_file data for config_file with id <ID>
-        POST /config_files -- Store config_file data and return metadata about the
+        HEAD /config_files/<ID> --
+        Return metadata about an config_file with id <ID>
+        GET  /config_files/<ID> --
+        Return config_file data for config_file with id <ID>
+        POST /config_files --
+        Store config_file data and return metadata about the
                         newly-stored config_file
-        PUT  /config_files/<ID> -- Update config_file metadata and/or upload config_file
+        PUT  /config_files/<ID> --
+        Update config_file metadata and/or upload config_file
                             data for a previously-reserved config_file
         DELETE /config_files/<ID> -- Delete the config_file with id <ID>
     """
@@ -138,7 +145,8 @@ class Controller(controller.BaseController):
         # print config_file_id
         print config_file_name
         print config_file_description
-        config_file_meta = registry.add_config_file_metadata(req.context, config_file_meta)
+        config_file_meta = registry.add_config_file_metadata(
+            req.context, config_file_meta)
 
         return {'config_file_meta': config_file_meta}
 
@@ -171,7 +179,8 @@ class Controller(controller.BaseController):
                                 request=req,
                                 content_type="text/plain")
         except exception.InUseByStore as e:
-            msg = (_("config_file %(id)s could not be deleted because it is in use: "
+            msg = (_("config_file %(id)s could not be "
+                     "deleted because it is in use: "
                      "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
             LOG.warn(msg)
             raise HTTPConflict(explanation=msg,
@@ -215,7 +224,8 @@ class Controller(controller.BaseController):
         self._enforce(req, 'get_config_files')
         params = self._get_query_params(req)
         try:
-            config_files = registry.get_config_files_detail(req.context, **params)
+            config_files = registry.get_config_files_detail(
+                req.context, **params)
         except exception.Invalid as e:
             raise HTTPBadRequest(explanation=e.msg, request=req)
         return dict(config_files=config_files)
@@ -241,9 +251,8 @@ class Controller(controller.BaseController):
                                 request=req,
                                 content_type="text/plain")
         try:
-            config_file_meta = registry.update_config_file_metadata(req.context,
-                                                                    id,
-                                                                    config_file_meta)
+            config_file_meta = registry.update_config_file_metadata(
+                req.context, id, config_file_meta)
         except exception.Invalid as e:
             msg = (_("Failed to update config_file metadata. Got error: %s") %
@@ -276,6 +285,7 @@ class Controller(controller.BaseController):
 
         return {'config_file_meta': config_file_meta}
 
+
 class Config_fileDeserializer(wsgi.JSONRequestDeserializer):
     """Handles deserialization of specific controller method requests."""
 
@@ -290,6 +300,7 @@ class Config_fileDeserializer(wsgi.JSONRequestDeserializer):
     def update_config_file(self, request):
         return self._deserialize(request)
 
+
 class Config_fileSerializer(wsgi.JSONResponseSerializer):
     """Handles serialization of specific controller method responses."""
 
@@ -317,9 +328,9 @@ class Config_fileSerializer(wsgi.JSONResponseSerializer):
         response.body = self.to_json(dict(config_file=config_file_meta))
         return response
 
+
 def create_resource():
     """config_files resource factory method"""
     deserializer = Config_fileDeserializer()
     serializer = Config_fileSerializer()
     return wsgi.Resource(Controller(), deserializer, serializer)


@@ -53,21 +53,26 @@ CONF.import_opt('container_formats', 'daisy.common.config',
                 group='image_format')
 CONF.import_opt('image_property_quota', 'daisy.common.config')
 
+
 class Controller(controller.BaseController):
     """
     WSGI controller for config_sets resource in Daisy v1 API
 
-    The config_sets resource API is a RESTful web service for config_set data. The API
-    is as follows::
+    The config_sets resource API is a RESTful web service for config_set data.
+    The API is as follows::
 
         GET  /config_sets -- Returns a set of brief metadata about config_sets
         GET  /config_sets/detail -- Returns a set of detailed metadata about
                               config_sets
-        HEAD /config_sets/<ID> -- Return metadata about an config_set with id <ID>
-        GET  /config_sets/<ID> -- Return config_set data for config_set with id <ID>
-        POST /config_sets -- Store config_set data and return metadata about the
+        HEAD /config_sets/<ID> --
+        Return metadata about an config_set with id <ID>
+        GET  /config_sets/<ID> --
+        Return config_set data for config_set with id <ID>
+        POST /config_sets --
+        Store config_set data and return metadata about the
                         newly-stored config_set
-        PUT  /config_sets/<ID> -- Update config_set metadata and/or upload config_set
+        PUT  /config_sets/<ID> --
+        Update config_set metadata and/or upload config_set
                             data for a previously-reserved config_set
         DELETE /config_sets/<ID> -- Delete the config_set with id <ID>
     """
@@ -125,7 +130,8 @@ class Controller(controller.BaseController):
     def _raise_404_if_cluster_deleted(self, req, cluster_id):
         cluster = self.get_cluster_meta_or_404(req, cluster_id)
         if cluster['deleted']:
-            msg = _("cluster with identifier %s has been deleted.") % cluster_id
+            msg = _("cluster with identifier %s has been deleted.") % \
+                cluster_id
             raise HTTPNotFound(msg)
 
     @utils.mutating
@@ -145,7 +151,8 @@ class Controller(controller.BaseController):
         # print config_set_id
         print config_set_name
         print config_set_description
-        config_set_meta = registry.add_config_set_metadata(req.context, config_set_meta)
+        config_set_meta = registry.add_config_set_metadata(
+            req.context, config_set_meta)
 
         return {'config_set_meta': config_set_meta}
 
@@ -171,14 +178,13 @@ class Controller(controller.BaseController):
                                 request=req,
                                 content_type="text/plain")
         except exception.Forbidden as e:
-            msg = (_("Forbidden to delete config_set: %s") %
-                   utils.exception_to_str(e))
-            LOG.warn(msg)
-            raise HTTPForbidden(explanation=msg,
+            LOG.warn(e)
+            raise HTTPForbidden(explanation=e,
                                 request=req,
                                 content_type="text/plain")
         except exception.InUseByStore as e:
-            msg = (_("config_set %(id)s could not be deleted because it is in use: "
+            msg = (_("config_set %(id)s could not be "
+                     "deleted because it is in use: "
                      "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
             LOG.warn(msg)
             raise HTTPConflict(explanation=msg,
@@ -222,7 +228,8 @@ class Controller(controller.BaseController):
         self._enforce(req, 'get_config_sets')
         params = self._get_query_params(req)
         try:
-            config_sets = registry.get_config_sets_detail(req.context, **params)
+            config_sets = registry.get_config_sets_detail(
+                req.context, **params)
         except exception.Invalid as e:
             raise HTTPBadRequest(explanation=e.msg, request=req)
         return dict(config_sets=config_sets)
@@ -248,9 +255,8 @@ class Controller(controller.BaseController):
                                 request=req,
                                 content_type="text/plain")
         try:
-            config_set_meta = registry.update_config_set_metadata(req.context,
-                                                                  id,
-                                                                  config_set_meta)
+            config_set_meta = registry.update_config_set_metadata(
+                req.context, id, config_set_meta)
         except exception.Invalid as e:
             msg = (_("Failed to update config_set metadata. Got error: %s") %
@@ -289,7 +295,8 @@ class Controller(controller.BaseController):
             roles = registry.get_roles_detail(req.context)
             for role in roles:
                 for role_name in eval(config_set_meta['role']):
-                    if role['cluster_id'] == config_set_meta['cluster'] and role['name'] == role_name:
+                    if role['cluster_id'] == config_set_meta[
+                            'cluster'] and role['name'] == role_name:
                         role_id_list.append(role['id'])
                         break
         except exception.Invalid as e:
@@ -298,26 +305,28 @@ class Controller(controller.BaseController):
 
     @utils.mutating
     def cluster_config_set_update(self, req, config_set_meta):
-        if config_set_meta.has_key('cluster'):
+        if 'cluster' in config_set_meta:
             orig_cluster = str(config_set_meta['cluster'])
             self._raise_404_if_cluster_deleted(req, orig_cluster)
             try:
                 if config_set_meta.get('role', None):
-                    role_id_list=self._raise_404_if_role_exist(req,config_set_meta)
+                    role_id_list = self._raise_404_if_role_exist(
+                        req, config_set_meta)
                     if len(role_id_list) == len(eval(config_set_meta['role'])):
-                        for role_id in role_id_list:
-                            backend=manager.configBackend('clushshell', req, role_id)
-                            backend.push_config()
+                        backend = manager.configBackend('clushshell', req)
+                        backend.push_config_by_roles(role_id_list)
                     else:
                         msg = "the role is not exist"
                         LOG.error(msg)
                         raise HTTPNotFound(msg)
                 else:
                     roles = registry.get_roles_detail(req.context)
+                    role_id_list = []
                     for role in roles:
                         if role['cluster_id'] == config_set_meta['cluster']:
-                            backend=manager.configBackend('clushshell', req, role['id'])
-                            backend.push_config()
+                            role_id_list.append(role['id'])
+                    backend = manager.configBackend('clushshell', req)
+                    backend.push_config_by_roles(role_id_list)
             except exception.Invalid as e:
                 raise HTTPBadRequest(explanation=e.msg, request=req)
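Editor's note: the change above replaces one backend instantiation (and one push_config() call) per role with a single backend that receives the whole role list. A sketch of the resulting batched shape; configBackend and push_config_by_roles are known only from this diff, so the stand-in class below is illustrative:

class ClushShellBackend(object):
    """Stand-in for the 'clushshell' config backend in the patch."""
    def push_config_by_roles(self, role_id_list):
        # One backend setup cost amortized over every role,
        # instead of paying it once per role as the old loop did.
        for role_id in role_id_list:
            print('pushing config for role %s' % role_id)

def cluster_config_set_update(roles, cluster_id):
    # Collect the matching role ids first, then push once.
    role_id_list = [r['id'] for r in roles if r['cluster_id'] == cluster_id]
    backend = ClushShellBackend()
    backend.push_config_by_roles(role_id_list)

cluster_config_set_update(
    [{'id': 'r1', 'cluster_id': 'c1'}, {'id': 'r2', 'cluster_id': 'c2'}],
    'c1')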
@@ -332,18 +341,22 @@ class Controller(controller.BaseController):
     @utils.mutating
     def cluster_config_set_progress(self, req, config_set_meta):
         role_list = []
-        if config_set_meta.has_key('cluster'):
+        if 'cluster' in config_set_meta:
             orig_cluster = str(config_set_meta['cluster'])
             self._raise_404_if_cluster_deleted(req, orig_cluster)
             try:
                 if config_set_meta.get('role', None):
-                    role_id_list=self._raise_404_if_role_exist(req,config_set_meta)
+                    role_id_list = self._raise_404_if_role_exist(
+                        req, config_set_meta)
                     if len(role_id_list) == len(eval(config_set_meta['role'])):
                         for role_id in role_id_list:
                             role_info = {}
-                            role_meta=registry.get_role_metadata(req.context, role_id)
+                            role_meta = registry.get_role_metadata(
+                                req.context, role_id)
                             role_info['role-name'] = role_meta['name']
-                            role_info['config_set_update_progress']=role_meta['config_set_update_progress']
+                            role_info['config_set_update_progress'] = \
+                                role_meta[
+                                    'config_set_update_progress']
                             role_list.append(role_info)
                     else:
                         msg = "the role is not exist"
@@ -355,7 +368,8 @@ class Controller(controller.BaseController):
                         if role['cluster_id'] == config_set_meta['cluster']:
                             role_info = {}
                             role_info['role-name'] = role['name']
-                            role_info['config_set_update_progress']=role['config_set_update_progress']
+                            role_info['config_set_update_progress'] = role[
+                                'config_set_update_progress']
                             role_list.append(role_info)
 
         except exception.Invalid as e:
@@ -367,6 +381,7 @@ class Controller(controller.BaseController):
             LOG.error(msg)
             raise HTTPNotFound(msg)
 
+
 class Config_setDeserializer(wsgi.JSONRequestDeserializer):
     """Handles deserialization of specific controller method requests."""
 
@@ -387,6 +402,7 @@ class Config_setDeserializer(wsgi.JSONRequestDeserializer):
     def cluster_config_set_progress(self, request):
         return self._deserialize(request)
 
+
 class Config_setSerializer(wsgi.JSONResponseSerializer):
     """Handles serialization of specific controller method responses."""
 
@@ -426,9 +442,9 @@ class Config_setSerializer(wsgi.JSONResponseSerializer):
         response.body = self.to_json(dict(config_set=result))
         return response
 
+
 def create_resource():
     """config_sets resource factory method"""
     deserializer = Config_setDeserializer()
     serializer = Config_setSerializer()
     return wsgi.Resource(Controller(), deserializer, serializer)


@@ -52,6 +52,7 @@ CONF.import_opt('container_formats', 'daisy.common.config',
                 group='image_format')
 CONF.import_opt('image_property_quota', 'daisy.common.config')
 
+
 class Controller(controller.BaseController):
     """
     WSGI controller for configs resource in Daisy v1 API
@@ -120,32 +121,40 @@ class Controller(controller.BaseController):
             if PARAM in req.params:
                 params[PARAM] = req.params.get(PARAM)
         return params
+
     def _raise_404_if_config_set_delete(self, req, config_set_id):
         config_set = self.get_config_set_meta_or_404(req, config_set_id)
         if config_set['deleted']:
-            msg = _("config_set with identifier %s has been deleted.") % config_set_id
+            msg = _("config_set with identifier %s has been deleted.") % \
+                config_set_id
             raise HTTPNotFound(msg)
+
     def _raise_404_if_config_file_delete(self, req, config_file_id):
         config_file = self.get_config_file_meta_or_404(req, config_file_id)
         if config_file['deleted']:
-            msg = _("config_file with identifier %s has been deleted.") % config_file_id
+            msg = _(
+                "config_file with identifier %s has been deleted.") % \
+                config_file_id
             raise HTTPNotFound(msg)
+
     def _raise_404_if_role_exist(self, req, config_meta):
         role_id = ""
         try:
             roles = registry.get_roles_detail(req.context)
             for role in roles:
-                if role['cluster_id'] == config_meta['cluster'] and role['name'] == config_meta['role']:
+                if role['cluster_id'] == config_meta[
+                        'cluster'] and role['name'] == config_meta['role']:
                     role_id = role['id']
                     break
         except exception.Invalid as e:
             raise HTTPBadRequest(explanation=e.msg, request=req)
         return role_id
+
     def _raise_404_if_cluster_deleted(self, req, cluster_id):
         cluster = self.get_cluster_meta_or_404(req, cluster_id)
         if cluster['deleted']:
-            msg = _("cluster with identifier %s has been deleted.") % cluster_id
+            msg = _("cluster with identifier %s has been deleted.") % \
+                cluster_id
             raise HTTPNotFound(msg)
 
     @utils.mutating
@@ -160,18 +169,56 @@ class Controller(controller.BaseController):
         """
         self._enforce(req, 'add_config')
-        if config_meta.has_key('cluster'):
+        if ('role' in config_meta and
+                'host_id' in config_meta):
+            msg = "role name and host id only have one"
+            LOG.error(msg)
+            raise HTTPBadRequest(explanation=msg, request=req)
+        elif 'role' in config_meta:
+            # the first way to add config
+            # when have 'role', config_set will be ignore
+            if config_meta.get('cluster'):
                 orig_cluster = str(config_meta['cluster'])
                 self._raise_404_if_cluster_deleted(req, orig_cluster)
-
-        if config_meta.has_key('role'):
+            else:
+                msg = "cluster must be given when add config for role"
+                LOG.error(msg)
+                raise HTTPNotFound(msg)
+            if config_meta['role']:
                 role_id = self._raise_404_if_role_exist(req, config_meta)
                 if not role_id:
                     msg = "the role name is not exist"
                     LOG.error(msg)
                     raise HTTPNotFound(msg)
+            else:
+                msg = "the role name can't be empty"
+                LOG.error(msg)
+                raise HTTPBadRequest(explanation=msg, request=req)
+        elif 'host_id' in config_meta:
+            # the second way to add config
+            # when have 'host_id', config_set will be ignore
+            if config_meta['host_id']:
+                self.get_host_meta_or_404(req, config_meta['host_id'])
+            else:
+                msg = "the host id can't be empty"
+                LOG.error(msg)
+                raise HTTPBadRequest(explanation=msg, request=req)
+        elif 'config_set' in config_meta:
+            # the third way to add config
+            if config_meta['config_set']:
+                self.get_config_set_meta_or_404(req,
+                                                config_meta['config_set'])
+            else:
+                msg = "config set id can't be empty"
+                LOG.error(msg)
+                raise HTTPBadRequest(explanation=msg, request=req)
+        else:
+            msg = "no way to add config"
+            LOG.error(msg)
+            raise HTTPBadRequest(explanation=msg, request=req)
 
-        config_meta = registry.config_interface_metadata(req.context, config_meta)
+        config_meta = registry.config_interface_metadata(
+            req.context, config_meta)
         return config_meta
 
     @utils.mutating
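Editor's note: the rewritten add_config path above dispatches on exactly one of 'role', 'host_id', or 'config_set', rejecting requests that carry both 'role' and 'host_id'. The decision table can be exercised in isolation; a condensed sketch with ValueError standing in for the HTTP errors:

def classify_config_target(config_meta):
    """Return which of the three add-config paths a request takes."""
    if 'role' in config_meta and 'host_id' in config_meta:
        raise ValueError('role name and host id only have one')
    elif 'role' in config_meta:
        if not config_meta.get('cluster'):
            raise ValueError('cluster must be given when add config for role')
        if not config_meta['role']:
            raise ValueError("the role name can't be empty")
        return 'role'
    elif 'host_id' in config_meta:
        if not config_meta['host_id']:
            raise ValueError("the host id can't be empty")
        return 'host_id'
    elif 'config_set' in config_meta:
        if not config_meta['config_set']:
            raise ValueError("config set id can't be empty")
        return 'config_set'
    raise ValueError('no way to add config')

print(classify_config_target({'role': 'CONTROLLER_HA', 'cluster': 'c1'}))
print(classify_config_target({'host_id': 'h1'}))
print(classify_config_target({'config_set': 'cs1'}))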
@@ -204,7 +251,8 @@ class Controller(controller.BaseController):
                                 request=req,
                                 content_type="text/plain")
         except exception.InUseByStore as e:
-            msg = (_("config %(id)s could not be deleted because it is in use: "
+            msg = (_("config %(id)s could not be "
+                     "deleted because it is in use: "
                      "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
             LOG.warn(msg)
             raise HTTPConflict(explanation=msg,
@@ -253,6 +301,7 @@ class Controller(controller.BaseController):
             raise HTTPBadRequest(explanation=e.msg, request=req)
         return dict(configs=configs)
 
+
 class ConfigDeserializer(wsgi.JSONRequestDeserializer):
     """Handles deserialization of specific controller method requests."""
 
@@ -267,6 +316,7 @@ class ConfigDeserializer(wsgi.JSONRequestDeserializer):
     def delete_config(self, request):
         return self._deserialize(request)
 
+
 class ConfigSerializer(wsgi.JSONResponseSerializer):
     """Handles serialization of specific controller method responses."""
 
@@ -293,9 +343,9 @@ class ConfigSerializer(wsgi.JSONResponseSerializer):
         response.body = self.to_json(dict(config=config_meta))
         return response
 
+
 def create_resource():
     """configs resource factory method"""
     deserializer = ConfigDeserializer()
     serializer = ConfigSerializer()
     return wsgi.Resource(Controller(), deserializer, serializer)


@@ -27,6 +27,7 @@ _ = i18n._
 
+
 class BaseController(object):
     def get_image_meta_or_404(self, request, image_id):
         """
         Grabs the image metadata for an image with a supplied
@@ -101,6 +102,7 @@ class BaseController(object):
             raise webob.exc.HTTPForbidden(msg,
                                           request=request,
                                           content_type='text/plain')
+
     def get_component_meta_or_404(self, request, component_id):
         """
         Grabs the component metadata for an component with a supplied


@@ -16,19 +16,15 @@
 """
 /hosts endpoint for Daisy v1 API
 """
-import time
-import traceback
 import ast
-import webob.exc
 
 from oslo_log import log as logging
 from webob.exc import HTTPBadRequest
 from webob.exc import HTTPForbidden
-from webob.exc import HTTPServerError
-from webob.exc import HTTPConflict
+from webob.exc import HTTPNotFound
 from webob import Response
-from threading import Thread
 
 from daisy import i18n
 from daisy import notifier
@@ -43,10 +39,6 @@ import daisy.registry.client.v1.api as registry
 from daisy.api.v1 import controller
 from daisy.api.v1 import filters
 
-try:
-    import simplejson as json
-except ImportError:
-    import json
 
 LOG = logging.getLogger(__name__)
 _ = i18n._
@@ -56,13 +48,15 @@ _LW = i18n._LW
 SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
 SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
 ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
-SERVICE_DISK_SERVICE = ('db', 'glance', 'dbbackup', 'mongodb', 'nova')
-DISK_LOCATION = ('local', 'share')
+SERVICE_DISK_SERVICE = ('db', 'glance', 'db_backup', 'mongodb', 'nova')
+DISK_LOCATION = ('local', 'share', 'share_cluster')
+PROTOCOL_TYPE = ('FIBER', 'ISCSI', 'CEPH')
 CINDER_VOLUME_BACKEND_PARAMS = ('management_ips', 'data_ips', 'pools',
                                 'volume_driver', 'volume_type',
                                 'role_id', 'user_name', 'user_pwd')
 CINDER_VOLUME_BACKEND_DRIVER = ['KS3200_IPSAN', 'KS3200_FCSAN',
-                                'FUJISTU_ETERNUS']
+                                'FUJITSU_ETERNUS', 'HP3PAR_FCSAN']
 
 
 class Controller(controller.BaseController):
     """
@@ -82,6 +76,7 @@ class Controller(controller.BaseController):
         data for a previously-reserved host
         DELETE /hosts/<ID> -- Delete the host with id <ID>
     """
+
     def __init__(self):
         self.notifier = notifier.Notifier()
         registry.configure_registry_client()
@@ -146,31 +141,49 @@ class Controller(controller.BaseController):
     def _raise_404_if_service_disk_deleted(self, req, service_disk_id):
         service_disk = self.get_service_disk_meta_or_404(req, service_disk_id)
         if service_disk is None or service_disk['deleted']:
-            msg = _("service_disk with identifier %s has been deleted.") % service_disk_id
+            msg = _(
+                "service_disk with identifier %s has been deleted.") % \
+                service_disk_id
             raise HTTPNotFound(msg)
 
     def _default_value_set(self, disk_meta):
-        if (not disk_meta.has_key('disk_location') or
+        if ('disk_location' not in disk_meta or
                 not disk_meta['disk_location'] or
                 disk_meta['disk_location'] == ''):
             disk_meta['disk_location'] = 'local'
-        if not disk_meta.has_key('lun'):
+        if 'lun' not in disk_meta:
             disk_meta['lun'] = 0
-        if not disk_meta.has_key('size'):
+        if 'size' not in disk_meta:
            disk_meta['size'] = -1
+        if 'protocol_type' not in disk_meta:
+            disk_meta['protocol_type'] = 'ISCSI'
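Editor's note: the _default_value_set rewrite above now also defaults protocol_type to 'ISCSI'. The same normalization reads more compactly with dict.setdefault; a sketch keeping the patch's default values (the helper name is a stand-in):

def default_value_set(disk_meta):
    # Empty or missing disk_location falls back to 'local',
    # matching the explicit checks in the patch.
    if not disk_meta.get('disk_location'):
        disk_meta['disk_location'] = 'local'
    disk_meta.setdefault('lun', 0)
    disk_meta.setdefault('size', -1)
    disk_meta.setdefault('protocol_type', 'ISCSI')
    return disk_meta

print(default_value_set({'service': 'db'}))
# -> disk_location 'local', lun 0, size -1, protocol_type 'ISCSI'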
     def _unique_service_in_role(self, req, disk_meta):
         params = {'filters': {'role_id': disk_meta['role_id']}}
-        service_disks = registry.list_service_disk_metadata(req.context, **params)
-        for service_disk in service_disks:
-            if service_disk['service'] == disk_meta['service']:
-                msg = "disk service %s has existed in role %s" % (disk_meta['service'], disk_meta['role_id'])
-                raise HTTPBadRequest(explanation=msg,
-                                     request=req,
-                                     content_type="text/plain")
+        service_disks = registry.list_service_disk_metadata(
+            req.context, **params)
+        if disk_meta['disk_location'] == 'share_cluster':
+            for disk in service_disks:
+                if disk['service'] == disk_meta['service'] and \
+                        disk['disk_location'] != 'share_cluster':
+                    id = disk['id']
+                    registry.delete_service_disk_metadata(req.context, id)
+        else:
+            for service_disk in service_disks:
+                if service_disk['disk_location'] == 'share_cluster' and \
+                        service_disk['service'] == disk_meta['service']:
+                    id = service_disk['id']
+                    registry.delete_service_disk_metadata(req.context, id)
+                elif service_disk['service'] == disk_meta['service']:
+                    msg = "disk service %s has existed in role %s" % (
+                        disk_meta['service'], disk_meta['role_id'])
+                    LOG.error(msg)
+                    raise HTTPBadRequest(explanation=msg,
+                                         request=req,
+                                         content_type="text/plain")
 
     def _service_disk_add_meta_valid(self, req, disk_meta):
-        if not disk_meta.has_key('role_id'):
+        if 'role_id' not in disk_meta:
             msg = "'role_id' must be given"
             raise HTTPBadRequest(explanation=msg,
                                  request=req,
@@ -178,7 +191,7 @@ class Controller(controller.BaseController):
         else:
             self._raise_404_if_role_deleted(req, disk_meta['role_id'])
 
-        if not disk_meta.has_key('service'):
+        if 'service' not in disk_meta:
             msg = "'service' must be given"
             raise HTTPBadRequest(explanation=msg,
                                  request=req,
@@ -191,12 +204,14 @@ class Controller(controller.BaseController):
                                  content_type="text/plain")
 
         if disk_meta['disk_location'] not in DISK_LOCATION:
-            msg = "disk_location %s is not supported" % disk_meta['disk_location']
+            msg = "disk_location %s is not supported" % disk_meta[
+                'disk_location']
             raise HTTPBadRequest(explanation=msg,
                                  request=req,
                                  content_type="text/plain")
-        if disk_meta['disk_location'] == 'share' and not disk_meta.has_key('data_ips'):
-            msg = "'data_ips' must be given when disk_location is share"
+        if disk_meta['disk_location'] in ['share', 'share_cluster'] \
+                and 'data_ips' not in disk_meta:
+            msg = "'data_ips' must be given when disk_location was not local"
             raise HTTPBadRequest(explanation=msg,
                                  request=req,
                                  content_type="text/plain")
@@ -219,35 +234,44 @@ class Controller(controller.BaseController):
                                  request=req,
                                  content_type="text/plain")
 
+        if disk_meta.get('protocol_type', None) \
+                and disk_meta['protocol_type'] not in PROTOCOL_TYPE:
+            msg = "protocol type %s is not supported" % disk_meta[
+                'protocol_type']
+            raise HTTPBadRequest(explanation=msg,
+                                 request=req,
+                                 content_type="text/plain")
         self._unique_service_in_role(req, disk_meta)
 
     def _service_disk_update_meta_valid(self, req, id, disk_meta):
         orig_disk_meta = self.get_service_disk_meta_or_404(req, id)
-        if disk_meta.has_key('role_id'):
+        if 'role_id' in disk_meta:
             self._raise_404_if_role_deleted(req, disk_meta['role_id'])
 
-        if disk_meta.has_key('service'):
+        if 'service' in disk_meta:
             if disk_meta['service'] not in SERVICE_DISK_SERVICE:
                 msg = "service '%s' is not supported" % disk_meta['service']
                 raise HTTPBadRequest(explanation=msg,
                                      request=req,
                                      content_type="text/plain")
 
-        if disk_meta.has_key('disk_location'):
+        if 'disk_location' in disk_meta:
             if disk_meta['disk_location'] not in DISK_LOCATION:
-                msg = "disk_location '%s' is not supported" % disk_meta['disk_location']
+                msg = "disk_location '%s' is not supported" % disk_meta[
+                    'disk_location']
                 raise HTTPBadRequest(explanation=msg,
                                      request=req,
                                      content_type="text/plain")
             if (disk_meta['disk_location'] == 'share' and
-                    not disk_meta.has_key('data_ips') and
+                    'data_ips' not in disk_meta and
                     not orig_disk_meta['data_ips']):
                 msg = "'data_ips' must be given when disk_location is share"
                 raise HTTPBadRequest(explanation=msg,
                                      request=req,
                                      content_type="text/plain")
-        if disk_meta.has_key('size'):
+        if 'size' in disk_meta:
             disk_meta['size'] = ast.literal_eval(str(disk_meta['size']))
             if not isinstance(disk_meta['size'], int):
                 msg = "'size' is not integer"
@@ -260,6 +284,14 @@ class Controller(controller.BaseController):
                                      request=req,
                                      content_type="text/plain")
 
+        if disk_meta.get('protocol_type', None) \
+                and disk_meta['protocol_type'] not in PROTOCOL_TYPE:
+            msg = "protocol type %s is not supported" % disk_meta[
+                'protocol_type']
+            raise HTTPBadRequest(explanation=msg,
+                                 request=req,
+                                 content_type="text/plain")
+
     @utils.mutating
     def service_disk_add(self, req, disk_meta):
         """
@@ -273,10 +305,10 @@ class Controller(controller.BaseController):
         self._enforce(req, 'service_disk_add')
         self._default_value_set(disk_meta)
         self._service_disk_add_meta_valid(req, disk_meta)
-        service_disk_meta = registry.add_service_disk_metadata(req.context, disk_meta)
+        service_disk_meta = registry.add_service_disk_metadata(
+            req.context, disk_meta)
         return {'disk_meta': service_disk_meta}
 
     @utils.mutating
     def service_disk_delete(self, req, id):
         """
@@ -305,7 +337,8 @@ class Controller(controller.BaseController):
                                 request=req,
                                 content_type="text/plain")
         except exception.InUseByStore as e:
-            msg = (_("service_disk %(id)s could not be deleted because it is in use: "
+            msg = (_("service_disk %(id)s could not be deleted "
+                     "because it is in use: "
                      "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
             LOG.warn(msg)
             raise HTTPConflict(explanation=msg,
@@ -319,9 +352,8 @@ class Controller(controller.BaseController):
         self._enforce(req, 'service_disk_update')
         self._service_disk_update_meta_valid(req, id, disk_meta)
         try:
-            service_disk_meta = registry.update_service_disk_metadata(req.context,
-                                                                      id,
-                                                                      disk_meta)
+            service_disk_meta = registry.update_service_disk_metadata(
+                req.context, id, disk_meta)
         except exception.Invalid as e:
             msg = (_("Failed to update role metadata. Got error: %s") %
@@ -354,7 +386,6 @@ class Controller(controller.BaseController):
         return {'disk_meta': service_disk_meta}
 
     @utils.mutating
     def service_disk_detail(self, req, id):
         """
@@ -379,36 +410,44 @@ class Controller(controller.BaseController):
             role_id = filters['role_id']
             self._raise_404_if_role_deleted(req, role_id)
         try:
-            service_disks = registry.list_service_disk_metadata(req.context, **params)
+            service_disks = registry.list_service_disk_metadata(
+                req.context, **params)
         except exception.Invalid as e:
             raise HTTPBadRequest(explanation=e.msg, request=req)
return dict(disk_meta=service_disks) return dict(disk_meta=service_disks)
def _cinder_volume_list(self, req, params): def _cinder_volume_list(self, req, params):
try: try:
cinder_volumes = registry.list_cinder_volume_metadata(req.context, **params) cinder_volumes = registry.list_cinder_volume_metadata(
req.context, **params)
except exception.Invalid as e: except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req) raise HTTPBadRequest(explanation=e.msg, request=req)
return cinder_volumes return cinder_volumes
def _is_cinder_volume_repeat(self, req, array_disk_info, update_id=None): def _is_cinder_volume_repeat(self, req, array_disk_info, update_id=None):
cinder_volume_id = None
params = {'filters': {}} params = {'filters': {}}
if update_id: if update_id:
cinder_volume_metal = self.get_cinder_volume_meta_or_404(req, update_id) cinder_volume_metal = self.get_cinder_volume_meta_or_404(
new_management_ips = array_disk_info.get('management_ips', cinder_volume_metal['management_ips']).split(",") req, update_id)
new_pools = array_disk_info.get('pools', cinder_volume_metal['pools']).split(",") new_management_ips = array_disk_info.get(
'management_ips', cinder_volume_metal[
'management_ips']).split(",")
new_pools = array_disk_info.get(
'pools', cinder_volume_metal['pools']).split(",")
else: else:
new_management_ips = array_disk_info['management_ips'].split(",") new_management_ips = array_disk_info['management_ips'].split(",")
new_pools = array_disk_info['pools'].split(",") new_pools = array_disk_info['pools'].split(",")
org_cinder_volumes = self._cinder_volume_list(req, params) org_cinder_volumes = self._cinder_volume_list(req, params)
for cinder_volume in org_cinder_volumes: for cinder_volume in org_cinder_volumes:
if (set(cinder_volume['management_ips'].split(",")) == set(new_management_ips) and if (set(cinder_volume['management_ips'].split(",")) == set(
new_management_ips) and
set(cinder_volume['pools'].split(",")) == set(new_pools)): set(cinder_volume['pools'].split(",")) == set(new_pools)):
if cinder_volume['id'] != update_id: if cinder_volume['id'] != update_id:
msg = 'cinder_volume array disks conflict with cinder_volume %s' % cinder_volume['id'] msg = 'cinder_volume array disks ' \
'conflict with cinder_volume %s' % cinder_volume[
'id']
raise HTTPBadRequest(explanation=msg, request=req) raise HTTPBadRequest(explanation=msg, request=req)
def _get_cinder_volume_backend_index(self, req, disk_array): def _get_cinder_volume_backend_index(self, req, disk_array):
@ -437,7 +476,7 @@ class Controller(controller.BaseController):
:raises HTTPBadRequest if x-install-cluster is missing :raises HTTPBadRequest if x-install-cluster is missing
""" """
self._enforce(req, 'cinder_volume_add') self._enforce(req, 'cinder_volume_add')
if not disk_meta.has_key('role_id'): if 'role_id' not in disk_meta:
msg = "'role_id' must be given" msg = "'role_id' must be given"
raise HTTPBadRequest(explanation=msg, raise HTTPBadRequest(explanation=msg,
request=req, request=req,
@ -454,22 +493,28 @@ class Controller(controller.BaseController):
raise HTTPBadRequest(explanation=msg, raise HTTPBadRequest(explanation=msg,
request=req, request=req,
content_type="text/plain") content_type="text/plain")
if disk_array['volume_driver'] not in CINDER_VOLUME_BACKEND_DRIVER: if disk_array[
msg = "volume_driver %s is not supported" % disk_array['volume_driver'] 'volume_driver'] not in CINDER_VOLUME_BACKEND_DRIVER:
msg = "volume_driver %s is not supported" % disk_array[
'volume_driver']
raise HTTPBadRequest(explanation=msg, raise HTTPBadRequest(explanation=msg,
request=req, request=req,
content_type="text/plain") content_type="text/plain")
if (disk_array['volume_driver'] == 'FUJISTU_ETERNUS' and if (disk_array['volume_driver'] == 'FUJITSU_ETERNUS' and
(not disk_array.has_key('data_ips') or ('data_ips' not in disk_array or
not disk_array['data_ips'])): not disk_array['data_ips'])):
msg = "data_ips must be given when using FUJISTU Disk Array" msg = "data_ips must be given " \
"when using FUJITSU Disk Array"
raise HTTPBadRequest(explanation=msg, raise HTTPBadRequest(explanation=msg,
request=req, request=req,
content_type="text/plain") content_type="text/plain")
self._is_cinder_volume_repeat(req, disk_array) self._is_cinder_volume_repeat(req, disk_array)
disk_array['role_id'] = disk_meta['role_id'] disk_array['role_id'] = disk_meta['role_id']
disk_array['backend_index'] = self._get_cinder_volume_backend_index(req, disk_array) disk_array['backend_index'] = \
cinder_volumes = registry.add_cinder_volume_metadata(req.context, disk_array) self._get_cinder_volume_backend_index(
req, disk_array)
cinder_volumes = registry.add_cinder_volume_metadata(
req.context, disk_array)
return {'disk_meta': cinder_volumes} return {'disk_meta': cinder_volumes}
@utils.mutating @utils.mutating
@ -500,7 +545,8 @@ class Controller(controller.BaseController):
request=req, request=req,
content_type="text/plain") content_type="text/plain")
except exception.InUseByStore as e: except exception.InUseByStore as e:
msg = (_("cindre volume %(id)s could not be deleted because it is in use: " msg = (_("cindre volume %(id)s could not "
"be deleted because it is in use: "
"%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)}) "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
LOG.warn(msg) LOG.warn(msg)
raise HTTPConflict(explanation=msg, raise HTTPConflict(explanation=msg,
@ -510,17 +556,18 @@ class Controller(controller.BaseController):
return Response(body='', status=200) return Response(body='', status=200)
def _is_data_ips_valid(self, req, update_id, update_meta): def _is_data_ips_valid(self, req, update_id, update_meta):
orgin_cinder_volume = self.get_cinder_volume_meta_or_404(req, update_id) orgin_cinder_volume = self.get_cinder_volume_meta_or_404(
req, update_id)
new_driver = update_meta.get('volume_driver', new_driver = update_meta.get('volume_driver',
orgin_cinder_volume['volume_driver']) orgin_cinder_volume['volume_driver'])
if new_driver != 'FUJISTU_ETERNUS': if new_driver != 'FUJITSU_ETERNUS':
return return
new_data_ips = update_meta.get('data_ips', new_data_ips = update_meta.get('data_ips',
orgin_cinder_volume['data_ips']) orgin_cinder_volume['data_ips'])
if not new_data_ips: if not new_data_ips:
msg = "data_ips must be given when using FUJISTU Disk Array" msg = "data_ips must be given when using FUJITSU Disk Array"
raise HTTPBadRequest(explanation=msg, raise HTTPBadRequest(explanation=msg,
request=req, request=req,
content_type="text/plain") content_type="text/plain")
@ -533,11 +580,12 @@ class Controller(controller.BaseController):
raise HTTPBadRequest(explanation=msg, raise HTTPBadRequest(explanation=msg,
request=req, request=req,
content_type="text/plain") content_type="text/plain")
if disk_meta.has_key('role_id'): if 'role_id' in disk_meta:
self._raise_404_if_role_deleted(req, disk_meta['role_id']) self._raise_404_if_role_deleted(req, disk_meta['role_id'])
if (disk_meta.has_key('volume_driver') and if ('volume_driver' in disk_meta and disk_meta[
disk_meta['volume_driver'] not in CINDER_VOLUME_BACKEND_DRIVER): 'volume_driver'] not in CINDER_VOLUME_BACKEND_DRIVER):
msg = "volume_driver %s is not supported" % disk_meta['volume_driver'] msg = "volume_driver %s is not supported" % disk_meta[
'volume_driver']
raise HTTPBadRequest(explanation=msg, raise HTTPBadRequest(explanation=msg,
request=req, request=req,
content_type="text/plain") content_type="text/plain")
@ -546,12 +594,12 @@ class Controller(controller.BaseController):
self._is_data_ips_valid(req, id, disk_meta) self._is_data_ips_valid(req, id, disk_meta)
try: try:
cinder_volume_meta = registry.update_cinder_volume_metadata(req.context, cinder_volume_meta = registry.update_cinder_volume_metadata(
id, req.context, id, disk_meta)
disk_meta)
except exception.Invalid as e: except exception.Invalid as e:
msg = (_("Failed to update cinder_volume metadata. Got error: %s") % msg = (
_("Failed to update cinder_volume metadata. Got error: %s") %
utils.exception_to_str(e)) utils.exception_to_str(e))
LOG.warn(msg) LOG.warn(msg)
raise HTTPBadRequest(explanation=msg, raise HTTPBadRequest(explanation=msg,
@ -627,6 +675,7 @@ class DiskArrayDeserializer(wsgi.JSONRequestDeserializer):
def cinder_volume_update(self, request): def cinder_volume_update(self, request):
return self._deserialize(request) return self._deserialize(request)
class DiskArraySerializer(wsgi.JSONResponseSerializer): class DiskArraySerializer(wsgi.JSONResponseSerializer):
"""Handles serialization of specific controller method responses.""" """Handles serialization of specific controller method responses."""
@ -634,33 +683,30 @@ class DiskArraySerializer(wsgi.JSONResponseSerializer):
self.notifier = notifier.Notifier() self.notifier = notifier.Notifier()
def service_disk_add(self, response, result): def service_disk_add(self, response, result):
disk_meta = result['disk_meta']
response.status = 201 response.status = 201
response.headers['Content-Type'] = 'application/json' response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(result) response.body = self.to_json(result)
return response return response
def service_disk_update(self, response, result): def service_disk_update(self, response, result):
disk_meta = result['disk_meta']
response.status = 201 response.status = 201
response.headers['Content-Type'] = 'application/json' response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(result) response.body = self.to_json(result)
return response return response
def cinder_volume_add(self, response, result): def cinder_volume_add(self, response, result):
disk_meta = result['disk_meta']
response.status = 201 response.status = 201
response.headers['Content-Type'] = 'application/json' response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(result) response.body = self.to_json(result)
return response return response
def cinder_volume_update(self, response, result): def cinder_volume_update(self, response, result):
disk_meta = result['disk_meta']
response.status = 201 response.status = 201
response.headers['Content-Type'] = 'application/json' response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(result) response.body = self.to_json(result)
return response return response
def create_resource(): def create_resource():
"""Image members resource factory method""" """Image members resource factory method"""
deserializer = DiskArrayDeserializer() deserializer = DiskArrayDeserializer()
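For orientation, a sketch of a service_disk payload that would pass the add-time checks in the hunks above. The field names come from the validators in this diff; every value, and the membership of 'glance' in SERVICE_DISK_SERVICE or 'ISCSI' in PROTOCOL_TYPE, is an illustrative assumption, not taken from the commit:

# Hypothetical request body for service_disk_add (values invented)
disk_meta = {
    'role_id': 'b5e3a9c0-0000-0000-0000-000000000001',  # must name a live role
    'service': 'glance',              # must appear in SERVICE_DISK_SERVICE
    'disk_location': 'share',         # 'share'/'share_cluster' require data_ips
    'data_ips': '192.168.1.5,192.168.1.6',
    'size': 100,                      # parsed with ast.literal_eval, must be int
    'protocol_type': 'ISCSI',         # checked against PROTOCOL_TYPE when given
}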

@@ -24,8 +24,8 @@ from webob.exc import HTTPConflict
 from webob.exc import HTTPForbidden
 from webob.exc import HTTPNotFound
 from webob import Response
-import copy
-import json
+# import json

 from daisy.api import policy
 import daisy.api.v1
@@ -41,7 +41,7 @@ import daisy.registry.client.v1.api as registry
 from daisy.registry.api.v1 import template
 import daisy.api.backends.tecs.common as tecs_cmn
-import daisy.api.backends.common as daisy_cmn

 try:
     import simplejson as json
 except ImportError:
@@ -64,21 +64,26 @@ CONF.import_opt('container_formats', 'daisy.common.config',
                 group='image_format')
 CONF.import_opt('image_property_quota', 'daisy.common.config')

 class Controller(controller.BaseController):
     """
     WSGI controller for Templates resource in Daisy v1 API
-    The HostTemplates resource API is a RESTful web Template for Template data. The API
-    is as follows::
+    The HostTemplates resource API is a RESTful web Template for Template data.
+    The API is as follows::

         GET /HostTemplates -- Returns a set of brief metadata about Templates
         GET /HostTemplates/detail -- Returns a set of detailed metadata about
                                      HostTemplates
-        HEAD /HostTemplates/<ID> -- Return metadata about an Template with id <ID>
-        GET /HostTemplates/<ID> -- Return Template data for Template with id <ID>
-        POST /HostTemplates -- Store Template data and return metadata about the
+        HEAD /HostTemplates/<ID> --
+            Return metadata about an Template with id <ID>
+        GET /HostTemplates/<ID> --
+            Return Template data for Template with id <ID>
+        POST /HostTemplates --
+            Store Template data and return metadata about the
             newly-stored Template
-        PUT /HostTemplates/<ID> -- Update Template metadata and/or upload Template
+        PUT /HostTemplates/<ID> --
+            Update Template metadata and/or upload Template
             data for a previously-reserved Template
         DELETE /HostTemplates/<ID> -- Delete the Template with id <ID>
     """
@@ -136,8 +141,9 @@ class Controller(controller.BaseController):
     def _raise_404_if_cluster_deleted(self, req, cluster_id):
         cluster = self.get_cluster_meta_or_404(req, cluster_id)
         if cluster['deleted']:
-            msg = _("Cluster with identifier %s has been deleted.") % cluster_id
-            raise webob.exc.HTTPNotFound(msg)
+            msg = _("Cluster with identifier %s has been deleted.") % \
+                cluster_id
+            raise HTTPNotFound(msg)

     @utils.mutating
     def add_template(self, req, host_template):
@@ -150,9 +156,9 @@ class Controller(controller.BaseController):
         :raises HTTPBadRequest if x-Template-name is missing
         """
         self._enforce(req, 'add_host_template')
-        template_name = host_template["name"]
-        host_template = registry.add_host_template_metadata(req.context, host_template)
+        host_template = registry.add_host_template_metadata(
+            req.context, host_template)
         return {'host_template': template}
@@ -176,9 +182,8 @@ class Controller(controller.BaseController):
                                  content_type="text/plain")
         '''
         try:
-            host_template = registry.update_host_template_metadata(req.context,
-                                                                   template_id,
-                                                                   host_template)
+            host_template = registry.update_host_template_metadata(
+                req.context, template_id, host_template)
         except exception.Invalid as e:
             msg = (_("Failed to update template metadata. Got error: %s") %
@@ -213,41 +218,45 @@ class Controller(controller.BaseController):
     def _filter_params(self, host_meta):
         for key in host_meta.keys():
-            if key=="id" or key=="updated_at" or key=="deleted_at" or key=="created_at" or key=="deleted":
+            if key == "id" or key == "updated_at" or key == "deleted_at" or \
+                    key == "created_at" or key == "deleted":
                 del host_meta[key]
-        if host_meta.has_key("memory"):
+        if "memory" in host_meta:
             del host_meta['memory']
-        if host_meta.has_key("system"):
+        if "system" in host_meta:
             del host_meta['system']
-        if host_meta.has_key("disks"):
+        if "disks" in host_meta:
             del host_meta['disks']
-        if host_meta.has_key("os_status"):
+        if "os_status" in host_meta:
             del host_meta['os_status']
-        if host_meta.has_key("status"):
+        if "status" in host_meta:
             del host_meta['status']
-        if host_meta.has_key("messages"):
+        if "messages" in host_meta:
             del host_meta['messages']
-        if host_meta.has_key("cpu"):
+        if "cpu" in host_meta:
             del host_meta['cpu']
-        if host_meta.has_key("ipmi_addr"):
+        if "ipmi_addr" in host_meta:
             del host_meta['ipmi_addr']
-        if host_meta.has_key("interfaces"):
+        if "interfaces" in host_meta:
             for interface in host_meta['interfaces']:
                 for key in interface.keys():
-                    if key=="id" or key=="updated_at" or key=="deleted_at" \
-                            or key=="created_at" or key=="deleted" or key=="current_speed" \
-                            or key=="max_speed" or key=="host_id" or key=="state":
+                    if key == "id" or key == "updated_at" or \
+                            key == "deleted_at" \
+                            or key == "created_at" or key == "deleted" or \
+                            key == "current_speed" \
+                            or key == "max_speed" or key == "host_id" or \
+                            key == "state":
                         del interface[key]
                 for assigned_network in interface['assigned_networks']:
-                    if assigned_network.has_key("ip"):
+                    if "ip" in assigned_network:
                         assigned_network['ip'] = ""
         return host_meta
@@ -263,7 +272,8 @@ class Controller(controller.BaseController):
         """
         self._enforce(req, 'get_host_template_detail')
         try:
-            host_template = registry.host_template_detail_metadata(req.context, template_id)
+            host_template = registry.host_template_detail_metadata(
+                req.context, template_id)
             return {'host_template': host_template}
         except exception.NotFound as e:
             msg = (_("Failed to find host template: %s") %
@@ -280,8 +290,10 @@ class Controller(controller.BaseController):
                                 request=req,
                                 content_type="text/plain")
         except exception.InUseByStore as e:
-            msg = (_("host template %(id)s could not be get because it is in use: "
-                     "%(exc)s") % {"id": template_id, "exc": utils.exception_to_str(e)})
+            msg = (_("host template %(id)s could not be get "
+                     "because it is in use: "
+                     "%(exc)s") % {"id": template_id,
+                                   "exc": utils.exception_to_str(e)})
             LOG.error(msg)
             raise HTTPConflict(explanation=msg,
                                request=req,
@@ -296,7 +308,8 @@ class Controller(controller.BaseController):
         params = self._get_query_params(req)
         template_meta = {}
         try:
-            host_template_lists = registry.host_template_lists_metadata(req.context, **params)
+            host_template_lists = registry.host_template_lists_metadata(
+                req.context, **params)
             if host_template_lists and host_template_lists[0]:
                 template_meta = json.loads(host_template_lists[0]['hosts'])
             return {'host_template': template_meta}
@@ -315,21 +328,32 @@ class Controller(controller.BaseController):
         """
         self._enforce(req, 'host_to_template')
         if host_template.get('host_id', None):
-            origin_host_meta = self.get_host_meta_or_404(req, host_template['host_id'])
+            origin_host_meta = self.get_host_meta_or_404(
+                req, host_template['host_id'])
             host_meta = self._filter_params(origin_host_meta)
-        if host_template.get('host_template_name', None) and host_template.get('cluster_name', None):
+        if host_template.get(
+                'host_template_name',
+                None) and host_template.get(
+                'cluster_name',
+                None):
             host_meta['name'] = host_template['host_template_name']
-            host_meta['description'] = host_template.get('description', None)
-            params = {'filters':{'cluster_name':host_template['cluster_name']}}
-            templates = registry.host_template_lists_metadata(req.context, **params)
+            host_meta['description'] = host_template.get(
+                'description', None)
+            params = {
+                'filters': {
+                    'cluster_name': host_template['cluster_name']}}
+            templates = registry.host_template_lists_metadata(
+                req.context, **params)
             if templates and templates[0]:
                 had_host_template = False
                 if templates[0]['hosts']:
-                    templates[0]['hosts'] = json.loads(templates[0]['hosts'])
+                    templates[0]['hosts'] = json.loads(
+                        templates[0]['hosts'])
                 else:
                     templates[0]['hosts'] = []
                 for index in range(len(templates[0]['hosts'])):
-                    if host_template['host_template_name'] == templates[0]['hosts'][index]['name']:
+                    if host_template['host_template_name'] == templates[
+                            0]['hosts'][index]['name']:
                         had_host_template = True
                         templates[0]['hosts'][index] = host_meta
                         break
@@ -337,12 +361,15 @@ class Controller(controller.BaseController):
                     host_meta['name'] = host_template['host_template_name']
                     templates[0]['hosts'].append(host_meta)
                 templates[0]['hosts'] = json.dumps(templates[0]['hosts'])
-                host_template = registry.update_host_template_metadata(req.context,
-                                                                       templates[0]['id'],
-                                                                       templates[0])
+                host_template = registry.update_host_template_metadata(
+                    req.context, templates[0]['id'], templates[0])
             else:
-                param = {"cluster_name": host_template['cluster_name'], "hosts":json.dumps([host_meta])}
-                host_template = registry.add_host_template_metadata(req.context, param)
+                param = {
+                    "cluster_name": host_template['cluster_name'],
+                    "hosts": json.dumps(
+                        [host_meta])}
+                host_template = registry.add_host_template_metadata(
+                    req.context, param)
         return {'host_template': host_template}

     @utils.mutating
@@ -351,7 +378,8 @@ class Controller(controller.BaseController):
             msg = "cluster name is null"
             raise HTTPNotFound(explanation=msg)
         params = {'filters': {'cluster_name': host_template['cluster_name']}}
-        templates = registry.host_template_lists_metadata(req.context, **params)
+        templates = registry.host_template_lists_metadata(
+            req.context, **params)
         hosts_param = []
         host_template_used = {}
         if templates and templates[0]:
@@ -362,7 +390,10 @@ class Controller(controller.BaseController):
                     break
         if not host_template_used:
             msg = "not host_template %s" % host_template['host_template_name']
-            raise HTTPNotFound(explanation=msg, request=req, content_type="text/plain")
+            raise HTTPNotFound(
+                explanation=msg,
+                request=req,
+                content_type="text/plain")
         if host_template.get('host_id', None):
             self.get_host_meta_or_404(req, host_template['host_id'])
         else:
@@ -373,53 +404,63 @@ class Controller(controller.BaseController):
         clusters = registry.get_clusters_detail(req.context, **params)
         if clusters and clusters[0]:
             host_template_used['cluster'] = clusters[0]['id']
-        if host_template_used.has_key('role') and host_template_used['role']:
+        if 'role' in host_template_used and host_template_used['role']:
             role_id_list = []
             host_role_list = []
-            if host_template_used.has_key('cluster'):
+            if 'cluster' in host_template_used:
                 params = self._get_query_params(req)
                 role_list = registry.get_roles_detail(req.context, **params)
                 for role_name in role_list:
-                    if role_name['cluster_id'] == host_template_used['cluster']:
+                    if role_name['cluster_id'] == host_template_used[
+                            'cluster']:
                         host_role_list = list(host_template_used['role'])
                         if role_name['name'] in host_role_list:
                             role_id_list.append(role_name['id'])
             host_template_used['role'] = role_id_list
-        if host_template_used.has_key('name'):
+        if 'name' in host_template_used:
             host_template_used.pop('name')
-        if host_template_used.has_key('dmi_uuid'):
+        if 'dmi_uuid' in host_template_used:
             host_template_used.pop('dmi_uuid')
-        if host_template_used.has_key('ipmi_user'):
+        if 'ipmi_user' in host_template_used:
             host_template_used.pop('ipmi_user')
-        if host_template_used.has_key('ipmi_passwd'):
+        if 'ipmi_passwd' in host_template_used:
             host_template_used.pop('ipmi_passwd')
-        if host_template_used.has_key('ipmi_addr'):
+        if 'ipmi_addr' in host_template_used:
             host_template_used.pop('ipmi_addr')
         host_template_interfaces = host_template_used.get('interfaces', None)
         if host_template_interfaces:
-            template_ether_interface = [interface for interface in host_template_interfaces if interface['type'] == "ether" ]
+            template_ether_interface = [
+                interface for interface in host_template_interfaces if
+                interface['type'] == "ether"]
             orig_host_meta = registry.get_host_metadata(req.context, host_id)
             orig_host_interfaces = orig_host_meta.get('interfaces', None)
-            temp_orig_host_interfaces = [ interface for interface in orig_host_interfaces if interface['type'] == "ether" ]
+            temp_orig_host_interfaces = [
+                interface for interface in orig_host_interfaces if
+                interface['type'] == "ether"]
            if len(temp_orig_host_interfaces) != len(template_ether_interface):
                 msg = (_('host_id %s does not match the host_id host_template '
-                         '%s.') % (host_id, host_template['host_template_name']))
+                         '%s.') % (host_id,
+                                   host_template['host_template_name']))
                 raise HTTPBadRequest(explanation=msg)
             interface_match_flag = 0
             for host_template_interface in host_template_interfaces:
                 if host_template_interface['type'] == 'ether':
                     for orig_host_interface in orig_host_interfaces:
-                        if orig_host_interface['pci'] == host_template_interface['pci']:
+                        if orig_host_interface[
+                                'pci'] == host_template_interface['pci']:
                             interface_match_flag += 1
-                            host_template_interface['mac'] = orig_host_interface['mac']
-                            if host_template_interface.has_key('ip'):
+                            host_template_interface[
+                                'mac'] = orig_host_interface['mac']
+                            if 'ip' in host_template_interface:
                                 host_template_interface.pop('ip')
             if interface_match_flag != len(template_ether_interface):
                 msg = (_('host_id %s does not match the host '
-                         'host_template %s.') % (host_id, host_template['host_template_name']))
+                         'host_template %s.') % (
+                    host_id, host_template['host_template_name']))
                 raise HTTPBadRequest(explanation=msg)
             host_template_used['interfaces'] = str(host_template_interfaces)
-        host_template = registry.update_host_metadata(req.context, host_id, host_template_used)
+        host_template = registry.update_host_metadata(
+            req.context, host_id, host_template_used)
         return {"host_template": host_template}

     @utils.mutating
@@ -437,8 +478,11 @@ class Controller(controller.BaseController):
         if not host_template.get('cluster_name', None):
             msg = "cluster name is null"
             raise HTTPNotFound(explanation=msg)
-        params = {'filters':{'cluster_name':host_template['cluster_name']}}
-        host_templates = registry.host_template_lists_metadata(req.context, **params)
+        params = {
+            'filters': {
+                'cluster_name': host_template['cluster_name']}}
+        host_templates = registry.host_template_lists_metadata(
+            req.context, **params)
         template_param = []
         had_host_template = False
         if host_templates and host_templates[0]:
@@ -449,16 +493,18 @@ class Controller(controller.BaseController):
                     had_host_template = True
                     break
             if not had_host_template:
-                msg = "not host template name %s" %host_template['host_template_name']
+                msg = "not host template name %s" % host_template[
+                    'host_template_name']
                 raise HTTPNotFound(explanation=msg)
             else:
                 host_templates[0]['hosts'] = json.dumps(template_param)
-                host_template = registry.update_host_template_metadata(req.context,
-                                                                       host_templates[0]['id'],
-                                                                       host_templates[0])
+                host_template = registry.update_host_template_metadata(
+                    req.context, host_templates[0]['id'],
+                    host_templates[0])
                 return {"host_template": host_template}
         else:
-            msg = "host template cluster name %s is null" %host_template['cluster_name']
+            msg = "host template cluster name %s is null" % host_template[
+                'cluster_name']
             raise HTTPNotFound(explanation=msg)

         except exception.NotFound as e:
@@ -476,8 +522,10 @@ class Controller(controller.BaseController):
                                 request=req,
                                 content_type="text/plain")
         except exception.InUseByStore as e:
-            msg = (_("template %(id)s could not be deleted because it is in use: "
-                     "%(exc)s") % {"id": template_id, "exc": utils.exception_to_str(e)})
+            msg = (_("template %(id)s could not be deleted "
+                     "because it is in use: "
+                     "%(exc)s") % {"id": host_template['host_id'],
+                                   "exc": utils.exception_to_str(e)})
             LOG.error(msg)
             raise HTTPConflict(explanation=msg,
                                request=req,
@@ -485,6 +533,7 @@ class Controller(controller.BaseController):
         else:
             return Response(body='', status=200)

+
 class HostTemplateDeserializer(wsgi.JSONRequestDeserializer):
     """Handles deserialization of specific controller method requests."""
@@ -499,7 +548,6 @@ class HostTemplateDeserializer(wsgi.JSONRequestDeserializer):
     def update_host_template(self, request):
         return self._deserialize(request)
-
     def host_to_template(self, request):
         return self._deserialize(request)
@@ -509,6 +557,7 @@ class HostTemplateDeserializer(wsgi.JSONRequestDeserializer):
     def delete_host_template(self, request):
         return self._deserialize(request)

+
 class HostTemplateSerializer(wsgi.JSONResponseSerializer):
     """Handles serialization of specific controller method responses."""
@@ -528,12 +577,14 @@ class HostTemplateSerializer(wsgi.JSONResponseSerializer):
         response.headers['Content-Type'] = 'application/json'
         response.body = self.to_json(dict(host_template=host_template))
         return response
+
     def get_host_template_detail(self, response, result):
         host_template = result['host_template']
         response.status = 201
         response.headers['Content-Type'] = 'application/json'
         response.body = self.to_json(dict(host_template=host_template))
         return response
+
     def update_host_template(self, response, result):
         host_template = result['host_template']
         response.status = 201
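For orientation, the shape of the body that host_to_template consumes, inferred from the handler above; the key names come from the code, while the values and the placeholder UUID are illustrative assumptions:

# Hypothetical host_to_template body (key names from the handler above)
host_template = {
    'host_id': '<existing-host-uuid>',        # host whose config is snapshotted
    'host_template_name': 'compute-node-a',   # template entry to create/replace
    'cluster_name': 'cluster01',              # template list is keyed by cluster
    'description': 'saved from a deployed compute host',
}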

File diff suppressed because it is too large

code/daisy/daisy/api/v1/hwms.py (new executable file, 347 lines)

@ -0,0 +1,347 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/Hwm endpoint for Daisy v1 API
"""
from oslo_config import cfg
from oslo_log import log as logging
import webob.exc
from webob.exc import HTTPBadRequest
from webob.exc import HTTPConflict
from webob.exc import HTTPForbidden
from webob.exc import HTTPNotFound
from webob import Response
from daisy.api import policy
import daisy.api.v1
from daisy.api.v1 import controller
from daisy.api.v1 import filters
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
from daisy import i18n
from daisy import notifier
import daisy.registry.client.v1.api as registry
from daisy.registry.api.v1 import hwms
import daisy.api.backends.tecs.common as tecs_cmn
daisy_tecs_path = tecs_cmn.daisy_tecs_path
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
SUPPORTED_PARAMS = hwms.SUPPORTED_PARAMS
SUPPORTED_FILTERS = hwms.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
CONF = cfg.CONF
CONF.import_opt('disk_formats', 'daisy.common.config',
group='image_format')
CONF.import_opt('container_formats', 'daisy.common.config',
group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config')
class Controller(controller.BaseController):
"""
WSGI controller for hwms resource in Daisy v1 API
The hwm resource API is a RESTful web service for hwm data.
The API is as follows::
GET /hwm -- Returns a set of brief metadata about hwms
GET /hwm/<ID> -- Return hwm data for hwm with id <ID>
POST /hwm -- Store hwm data and return metadata about the
newly-stored hwm
PUT /hwm/<ID> -- Update hwm metadata for a previously-reserved hwm
DELETE /hwm/<ID> -- Delete the hwm with id <ID>
"""
def __init__(self):
self.notifier = notifier.Notifier()
registry.configure_registry_client()
self.policy = policy.Enforcer()
if property_utils.is_property_protection_enabled():
self.prop_enforcer = property_utils.PropertyRules(self.policy)
else:
self.prop_enforcer = None
def _enforce(self, req, action, target=None):
"""Authorize an action against our policies"""
if target is None:
target = {}
try:
self.policy.enforce(req.context, action, target)
except exception.Forbidden:
raise HTTPForbidden()
def _get_filters(self, req):
"""
Return a dictionary of query param filters from the request
:param req: the Request object coming from the wsgi layer
:retval a dict of key/value filters
"""
query_filters = {}
for param in req.params:
if param in SUPPORTED_FILTERS:
query_filters[param] = req.params.get(param)
if not filters.validate(param, query_filters[param]):
raise HTTPBadRequest(_('Bad value passed to filter '
'%(filter)s got %(val)s')
% {'filter': param,
'val': query_filters[param]})
return query_filters
def _get_query_params(self, req):
"""
Extracts necessary query params from request.
:param req: the WSGI Request object
:retval dict of parameters that can be used by registry client
"""
params = {'filters': self._get_filters(req)}
for PARAM in SUPPORTED_PARAMS:
if PARAM in req.params:
params[PARAM] = req.params.get(PARAM)
return params
def _raise_404_if_cluster_deleted(self, req, cluster_id):
cluster = self.get_cluster_meta_or_404(req, cluster_id)
if cluster['deleted']:
msg = _("Cluster with identifier %s has been deleted.") % \
cluster_id
raise webob.exc.HTTPNotFound(msg)
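# Collect the hwm_ip of every cluster; a hwm whose IP appears in this
# list is still referenced by a deployment and must not be changed.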
def get_clusters_hwm_ip(self, req):
params = self._get_query_params(req)
clusters_hwm_ip = list()
clusters = registry.get_clusters_detail(req.context, **params)
for cluster in clusters:
clusters_hwm_ip.append(cluster.get('hwm_ip'))
return clusters_hwm_ip
@utils.mutating
def add_hwm(self, req, hwm):
"""
Adds a new hwm to Daisy.
:param req: The WSGI/Webob Request object
:param hwm: Mapping of metadata about the hwm
:raises HTTPBadRequest if required hwm metadata is missing
"""
self._enforce(req, 'add_template')
hwm = registry.add_hwm_metadata(req.context, hwm)
return {'hwm': hwm}
@utils.mutating
def update_hwm(self, req, id, hwm):
"""
Updates an existing hwm with the registry.
:param request: The WSGI/Webob Request object
:param id: The opaque hwm identifier
:retval Returns the updated hwm information as a mapping
"""
self._enforce(req, 'update_hwm')
hwm_meta = registry.hwm_detail_metadata(req.context, id)
hwm_ip = hwm_meta['hwm_ip']
clusters_hwm_ip = self.get_clusters_hwm_ip(req)
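# In-use guard: a hwm that any cluster still points at is read-only.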
if hwm_ip in clusters_hwm_ip:
msg = (_("Hwm %s has already used in cluster, "
"it can not be update. " % hwm_ip))
LOG.error(msg)
raise HTTPForbidden(explanation=msg, request=req,
content_type="text/plain")
try:
hwm = registry.update_hwm_metadata(req.context, id, hwm)
except exception.Invalid as e:
msg = (_("Failed to update hwm metadata. Got error: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
except exception.NotFound as e:
msg = (_("Failed to find hwm to update: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to update hwm: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except (exception.Conflict, exception.Duplicate) as e:
LOG.warn(utils.exception_to_str(e))
raise HTTPConflict(body=_('hwm operation conflicts'),
request=req,
content_type='text/plain')
else:
self.notifier.info('hwm.update', hwm)
return {'hwm': hwm}
@utils.mutating
def delete_hwm(self, req, id):
"""
Deletes an existing hwm from the registry.
:param request: The WSGI/Webob Request object
:param id: The opaque hwm identifier
:retval Returns a 200 response on success
"""
self._enforce(req, 'delete_hwm')
hwm_meta = registry.hwm_detail_metadata(req.context, id)
hwm_ip = hwm_meta['hwm_ip']
clusters_hwm_ip = self.get_clusters_hwm_ip(req)
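# Same in-use guard as update_hwm: deleting a referenced hwm is refused.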
if hwm_ip in clusters_hwm_ip:
msg = (_("Hwm %s has already used in cluster, "
"it can not be deleted. " % hwm_ip))
LOG.error(msg)
raise HTTPForbidden(explanation=msg, request=req,
content_type="text/plain")
try:
registry.delete_hwm_metadata(req.context, id)
except exception.NotFound as e:
msg = (_("Failed to find hwm to delete: %s") %
utils.exception_to_str(e))
LOG.error(msg)
raise HTTPNotFound(explanation=msg, request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to delete hwm: %s") %
utils.exception_to_str(e))
LOG.error(msg)
raise HTTPForbidden(explanation=msg, request=req,
content_type="text/plain")
except exception.InUseByStore as e:
msg = (_(
"hwm %(id)s could not be deleted because it is in "
"use:%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
LOG.error(msg)
raise HTTPConflict(explanation=msg, request=req,
content_type="text/plain")
else:
return Response(body='', status=200)
@utils.mutating
def detail(self, req, id):
"""
Returns detail of an existing hwm from the registry.
:param request: The WSGI/Webob Request object
:param id: The opaque hwm identifier
:retval Returns the hwm information as a mapping
"""
self._enforce(req, 'detail')
context = req.context
try:
hwm_meta = registry.hwm_detail_metadata(context, id)
except exception.NotFound:
msg = "Hwm with identifier %s not found" % id
LOG.debug(msg)
raise webob.exc.HTTPNotFound(
msg, request=req, content_type='text/plain')
except exception.Forbidden:
msg = "Forbidden hwm access"
LOG.debug(msg)
raise webob.exc.HTTPForbidden(msg,
request=req,
content_type='text/plain')
return {'hwm': hwm_meta}
@utils.mutating
def list(self, req):
self._enforce(req, 'list')
params = self._get_query_params(req)
try:
hwm_list = registry.hwm_list_metadata(req.context, **params)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return dict(hwm=hwm_list)
class HwmDeserializer(wsgi.JSONRequestDeserializer):
"""Handles deserialization of specific controller method requests."""
def _deserialize(self, request):
result = {}
result["hwm"] = utils.get_hwm_meta(request)
return result
def add_hwm(self, request):
return self._deserialize(request)
def update_hwm(self, request):
return self._deserialize(request)
class HwmSerializer(wsgi.JSONResponseSerializer):
"""Handles serialization of specific controller method responses."""
def __init__(self):
self.notifier = notifier.Notifier()
def add_hwm(self, response, result):
hwm = result['hwm']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(hwm=hwm))
return response
def delete_hwm(self, response, result):
hwm = result['hwm']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(hwm=hwm))
return response
def get_detail(self, response, result):
hwm = result['hwm']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(hwm=hwm))
return response
def update_hwm(self, response, result):
hwm = result['hwm']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(hwm=hwm))
return response
def create_resource():
"""Templates resource factory method"""
deserializer = HwmDeserializer()
serializer = HwmSerializer()
return wsgi.Resource(Controller(), deserializer, serializer)
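For context, a minimal sketch of how this resource factory could be mounted on the API router; the '/hwm' paths, the function name, and the use of a routes Mapper are assumptions for illustration, not taken from this commit:

# Hypothetical router wiring (assumed paths, not part of this commit);
# `mapper` is expected to be a routes.Mapper instance.
from daisy.api.v1 import hwms

def map_hwm_routes(mapper):
    hwm_resource = hwms.create_resource()
    mapper.connect('/hwm', controller=hwm_resource,
                   action='add_hwm', conditions={'method': ['POST']})
    mapper.connect('/hwm', controller=hwm_resource,
                   action='list', conditions={'method': ['GET']})
    mapper.connect('/hwm/{id}', controller=hwm_resource,
                   action='detail', conditions={'method': ['GET']})
    mapper.connect('/hwm/{id}', controller=hwm_resource,
                   action='update_hwm', conditions={'method': ['PUT']})
    mapper.connect('/hwm/{id}', controller=hwm_resource,
                   action='delete_hwm', conditions={'method': ['DELETE']})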

@@ -905,8 +905,9 @@ class Controller(controller.BaseController):
             # Once an image is 'active' only an admin can
             # modify certain core metadata keys
             for key in ACTIVE_IMMUTABLE:
-                if (orig_status == 'active' and image_meta.get(key) is not None
-                        and image_meta.get(key) != orig_image_meta.get(key)):
+                if (orig_status == 'active' and
+                        image_meta.get(key) is not None and
+                        image_meta.get(key) != orig_image_meta.get(key)):
                     msg = _("Forbidden to modify '%s' of active image.") % key
                     raise HTTPForbidden(explanation=msg,
                                         request=req,

@ -17,13 +17,11 @@
/hosts endpoint for Daisy v1 API /hosts endpoint for Daisy v1 API
""" """
import time import time
import traceback
import webob.exc import webob.exc
from oslo_log import log as logging from oslo_log import log as logging
from webob.exc import HTTPBadRequest from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden from webob.exc import HTTPForbidden
from webob.exc import HTTPServerError
from threading import Thread from threading import Thread
@ -44,10 +42,6 @@ import daisy.api.backends.common as daisy_cmn
from daisy.api.backends import driver from daisy.api.backends import driver
from daisy.api.backends import os as os_handle from daisy.api.backends import os as os_handle
try:
import simplejson as json
except ImportError:
import json
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
_ = i18n._ _ = i18n._
@ -68,23 +62,31 @@ BACKENDS_UNINSTALL_ORDER = []
def get_deployment_backends(req, cluster_id, backends_order): def get_deployment_backends(req, cluster_id, backends_order):
cluster_roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id) cluster_roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id)
cluster_backends = set([role['deployment_backend'] for role in cluster_roles if daisy_cmn.get_hosts_of_role(req, role['id'])]) cluster_backends = set([role['deployment_backend']
ordered_backends = [backend for backend in backends_order if backend in cluster_backends] for role in cluster_roles if
other_backends = [backend for backend in cluster_backends if backend not in backends_order] daisy_cmn.get_hosts_of_role(req, role['id'])])
ordered_backends = [
backend for backend in backends_order if backend in cluster_backends]
other_backends = [
backend for backend in cluster_backends if
backend not in backends_order]
deployment_backends = ordered_backends + other_backends deployment_backends = ordered_backends + other_backends
return deployment_backends return deployment_backends
class InstallTask(object): class InstallTask(object):
""" """
Class for install OS and TECS. Class for install OS and TECS.
""" """
""" Definition for install states.""" """ Definition for install states."""
def __init__(self, req, cluster_id): def __init__(self, req, cluster_id):
self.req = req self.req = req
self.cluster_id = cluster_id self.cluster_id = cluster_id
def _backends_install(self): def _backends_install(self):
backends = get_deployment_backends(self.req, self.cluster_id, BACKENDS_INSTALL_ORDER) backends = get_deployment_backends(
self.req, self.cluster_id, BACKENDS_INSTALL_ORDER)
if not backends: if not backends:
LOG.info(_("No backends need to install.")) LOG.info(_("No backends need to install."))
return return
@ -92,6 +94,7 @@ class InstallTask(object):
backend_driver = driver.load_deployment_dirver(backend) backend_driver = driver.load_deployment_dirver(backend)
backend_driver.install(self.req, self.cluster_id) backend_driver.install(self.req, self.cluster_id)
# this will be raise raise all the exceptions of the thread to log file # this will be raise raise all the exceptions of the thread to log file
def run(self): def run(self):
try: try:
self._run() self._run()
@ -104,10 +107,15 @@ class InstallTask(object):
:return: :return:
""" """
# get hosts config which need to install OS # get hosts config which need to install OS
all_hosts_need_os = os_handle.get_cluster_hosts_config(self.req, self.cluster_id) all_hosts_need_os = os_handle.get_cluster_hosts_config(
self.req, self.cluster_id)
if all_hosts_need_os: if all_hosts_need_os:
hosts_with_role_need_os = [host_detail for host_detail in all_hosts_need_os if host_detail['status'] == 'with-role'] hosts_with_role_need_os = [
hosts_without_role_need_os = [host_detail for host_detail in all_hosts_need_os if host_detail['status'] != 'with-role'] host_detail for host_detail in all_hosts_need_os if
host_detail['status'] == 'with-role']
hosts_without_role_need_os = [
host_detail for host_detail in all_hosts_need_os if
host_detail['status'] != 'with-role']
else: else:
LOG.info(_("No host need to install os, begin to install " LOG.info(_("No host need to install os, begin to install "
"backends for cluster %s." % self.cluster_id)) "backends for cluster %s." % self.cluster_id))
@ -115,30 +123,39 @@ class InstallTask(object):
return return
run_once_flag = True run_once_flag = True
# if no hosts with role need os, install backend applications immediately # if no hosts with role need os, install backend applications
# immediately
if not hosts_with_role_need_os: if not hosts_with_role_need_os:
run_once_flag = False run_once_flag = False
role_hosts_need_os = [] role_hosts_need_os = []
LOG.info(_("All of hosts with role is 'active', begin to install " LOG.info(_("All of hosts with role is 'active', begin to install "
"backend applications for cluster %s first." % self.cluster_id)) "backend applications for cluster %s first." %
self.cluster_id))
self._backends_install() self._backends_install()
else: else:
role_hosts_need_os = [host_detail['id'] for host_detail in hosts_with_role_need_os] role_hosts_need_os = [host_detail['id']
for host_detail in hosts_with_role_need_os]
# hosts with role put the head of the list # hosts with role put the head of the list
order_hosts_need_os = hosts_with_role_need_os + hosts_without_role_need_os order_hosts_need_os = hosts_with_role_need_os + \
hosts_without_role_need_os
while order_hosts_need_os: while order_hosts_need_os:
os_install = os_handle.OSInstall(self.req, self.cluster_id) os_install = os_handle.OSInstall(self.req, self.cluster_id)
#all os will be installed batch by batch with max_parallel_os_number which was set in daisy-api.conf # all os will be installed batch by batch with
(order_hosts_need_os,role_hosts_need_os) = os_install.install_os(order_hosts_need_os,role_hosts_need_os) # max_parallel_os_number which was set in daisy-api.conf
# after a batch of os install over, judge if all role hosts install os completely, (order_hosts_need_os, role_hosts_need_os) = os_install.install_os(
order_hosts_need_os, role_hosts_need_os)
# after a batch of os install over, judge if all
# role hosts install os completely,
# if role_hosts_need_os is empty, install TECS immediately # if role_hosts_need_os is empty, install TECS immediately
if run_once_flag and not role_hosts_need_os: if run_once_flag and not role_hosts_need_os:
run_once_flag = False run_once_flag = False
# wait to reboot os after new os installed # wait to reboot os after new os installed
time.sleep(10) time.sleep(10)
LOG.info(_("All hosts with role install successfully, " LOG.info(_("All hosts with role install successfully, "
"begin to install backend applications for cluster %s." % self.cluster_id)) "begin to install backend applications "
"for cluster %s." %
self.cluster_id))
self._backends_install() self._backends_install()
@ -160,6 +177,7 @@ class Controller(controller.BaseController):
data for a previously-reserved host data for a previously-reserved host
DELETE /hosts/<ID> -- Delete the host with id <ID> DELETE /hosts/<ID> -- Delete the host with id <ID>
""" """
def __init__(self): def __init__(self):
self.notifier = notifier.Notifier() self.notifier = notifier.Notifier()
registry.configure_registry_client() registry.configure_registry_client()
@ -181,7 +199,8 @@ class Controller(controller.BaseController):
def _raise_404_if_cluster_deleted(self, req, cluster_id): def _raise_404_if_cluster_deleted(self, req, cluster_id):
cluster = self.get_cluster_meta_or_404(req, cluster_id) cluster = self.get_cluster_meta_or_404(req, cluster_id)
if cluster['deleted']: if cluster['deleted']:
msg = _("Cluster with identifier %s has been deleted.") % cluster_id msg = _("Cluster with identifier %s has been deleted.") % \
cluster_id
raise webob.exc.HTTPNotFound(msg) raise webob.exc.HTTPNotFound(msg)
def _get_filters(self, req): def _get_filters(self, req):
@ -225,15 +244,20 @@ class Controller(controller.BaseController):
:raises HTTPBadRequest if x-install-cluster is missing :raises HTTPBadRequest if x-install-cluster is missing
""" """
if 'deployment_interface' in install_meta:
os_handle.pxe_server_build(req, install_meta)
return {"status": "pxe is installed"}
cluster_id = install_meta['cluster_id'] cluster_id = install_meta['cluster_id']
self._enforce(req, 'install_cluster') self._enforce(req, 'install_cluster')
self._raise_404_if_cluster_deleted(req, cluster_id) self._raise_404_if_cluster_deleted(req, cluster_id)
if install_meta.get("deployment_interface", None): daisy_cmn.set_role_status_and_progress(
os_handle.pxe_server_build(req, install_meta) req, cluster_id, 'install',
return {"status": "pxe is installed"} {'messages': 'Waiting for TECS installation', 'progress': '0'},
'tecs')
# if have hosts need to install os, TECS installataion executed in InstallTask # if have hosts need to install os,
# TECS installataion executed in InstallTask
os_install_obj = InstallTask(req, cluster_id) os_install_obj = InstallTask(req, cluster_id)
os_install_thread = Thread(target=os_install_obj.run) os_install_thread = Thread(target=os_install_obj.run)
os_install_thread.start() os_install_thread.start()
@@ -251,10 +275,13 @@ class Controller(controller.BaseController):
        self._enforce(req, 'uninstall_cluster')
        self._raise_404_if_cluster_deleted(req, cluster_id)

        backends = get_deployment_backends(
            req, cluster_id, BACKENDS_UNINSTALL_ORDER)
        for backend in backends:
            backend_driver = driver.load_deployment_dirver(backend)
            uninstall_thread = Thread(
                target=backend_driver.uninstall, args=(
                    req, cluster_id))
            uninstall_thread.start()
        return {"status": "begin uninstall"}
@@ -264,7 +291,8 @@ class Controller(controller.BaseController):
        self._raise_404_if_cluster_deleted(req, cluster_id)
        all_nodes = {}

        backends = get_deployment_backends(
            req, cluster_id, BACKENDS_UNINSTALL_ORDER)
        if not backends:
            LOG.info(_("No backends need to uninstall."))
            return all_nodes
@@ -274,7 +302,6 @@ class Controller(controller.BaseController):
            all_nodes.update(nodes_process)
        return all_nodes

    @utils.mutating
    def update_cluster(self, req, cluster_id):
        """
@@ -287,13 +314,19 @@ class Controller(controller.BaseController):
        self._enforce(req, 'update_cluster')
        self._raise_404_if_cluster_deleted(req, cluster_id)

        backends = get_deployment_backends(
            req, cluster_id, BACKENDS_UPGRADE_ORDER)
        if not backends:
            LOG.info(_("No backends need to update."))
            return {"status": ""}
        daisy_cmn.set_role_status_and_progress(
            req, cluster_id, 'upgrade',
            {'messages': 'Waiting for TECS upgrading', 'progress': '0'},
            'tecs')
        for backend in backends:
            backend_driver = driver.load_deployment_dirver(backend)
            update_thread = Thread(target=backend_driver.upgrade,
                                   args=(req, cluster_id))
            update_thread.start()
        return {"status": "begin update"}
@@ -302,7 +335,8 @@ class Controller(controller.BaseController):
        self._enforce(req, 'update_progress')
        self._raise_404_if_cluster_deleted(req, cluster_id)

        backends = get_deployment_backends(
            req, cluster_id, BACKENDS_UPGRADE_ORDER)
        all_nodes = {}
        for backend in backends:
            backend_driver = driver.load_deployment_dirver(backend)
@@ -324,7 +358,8 @@ class Controller(controller.BaseController):
        self._raise_404_if_cluster_deleted(req, cluster_id)
        all_config_files = {}

        backends = get_deployment_backends(
            req, cluster_id, BACKENDS_INSTALL_ORDER)
        if not backends:
            LOG.info(_("No backends need to export."))
            return all_config_files
@@ -347,7 +382,8 @@ class Controller(controller.BaseController):
        self._raise_404_if_cluster_deleted(req, cluster_id)
        tecs_backend_name = 'tecs'

        backends = get_deployment_backends(
            req, cluster_id, BACKENDS_UNINSTALL_ORDER)
        if tecs_backend_name not in backends:
            message = "No tecs backend"
            LOG.info(_(message))
@@ -374,6 +410,7 @@ class InstallDeserializer(wsgi.JSONRequestDeserializer):
    def update_disk_array(self, request):
        return {}


class InstallSerializer(wsgi.JSONResponseSerializer):
    """Handles serialization of specific controller method responses."""

@@ -398,6 +435,7 @@ class InstallSerializer(wsgi.JSONResponseSerializer):
        response.body = self.to_json(result)
        return response


def create_resource():
    """Image members resource factory method"""
    deserializer = InstallDeserializer()
View File
@@ -53,7 +53,8 @@ class Controller(controller.BaseController):
    def _raise_404_if_project_deleted(self, req, cluster_id):
        project = self.get_cluster_meta_or_404(req, cluster_id)
        if project['deleted']:
            msg = _("Cluster with identifier %s has been deleted.") % \
                cluster_id
            raise webob.exc.HTTPNotFound(msg)

    # def get_cluster_hosts(self, req, cluster_id, host_id=None):
@@ -73,7 +74,8 @@ class Controller(controller.BaseController):
    #         self._raise_404_if_project_deleted(req, cluster_id)
    #
    #     try:
    #         members = registry.get_cluster_hosts(
    #             req.context, cluster_id, host_id)
    #     except exception.NotFound:
    #         msg = _("Project with identifier %s not found") % cluster_id
    #         LOG.warn(msg)
View File
@@ -36,6 +36,7 @@ from daisy.common import wsgi
from daisy import i18n
from daisy import notifier
import daisy.registry.client.v1.api as registry
from functools import reduce

LOG = logging.getLogger(__name__)
_ = i18n._

@@ -52,8 +53,15 @@ CONF.import_opt('container_formats', 'daisy.common.config',
                group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config')

SUPPORT_NETWORK_TYPE = (
    'PUBLICAPI',
    'DATAPLANE',
    'STORAGE',
    'MANAGEMENT',
    'EXTERNAL',
    'DEPLOYMENT',
    'HEARTBEAT')
SUPPORT_NETWORK_TEMPLATE_TYPE = ('custom', 'template', 'default', 'system')
SUPPORT_ML2_TYPE = ('ovs', 'sriov(direct)', 'sriov(macvtap)',
                    'ovs,sriov(direct)', 'ovs,sriov(macvtap)')
SUPPORT_NETWORK_CAPABILITY = ('high', 'low')
@@ -99,12 +107,15 @@ class Controller(controller.BaseController):
    def _raise_404_if_network_deleted(self, req, network_id):
        network = self.get_network_meta_or_404(req, network_id)
        if network['deleted']:
            msg = _("Network with identifier %s has been deleted.") % \
                network_id
            raise HTTPNotFound(msg)

    def _raise_404_if_cluster_delete(self, req, cluster_id):
        cluster_id = self.get_cluster_meta_or_404(req, cluster_id)
        if cluster_id['deleted']:
            msg = _("cluster_id with identifier %s has been deleted.") % \
                cluster_id
            raise HTTPNotFound(msg)

    def _get_network_name_by_cluster_id(self, context, cluster_id):
@@ -114,7 +125,6 @@ class Controller(controller.BaseController):
            network_name_list.append(network['name'])
        return network_name_list

    def _get_filters(self, req):
        """
        Return a dictionary of query param filters from the request
@@ -157,10 +167,10 @@ class Controller(controller.BaseController):
        '10.43.1789', invalid format
        '''
        valid_fromat = False
        if ip_str.count('.') == 3 and all(num.isdigit() and 0 <= int(
                num) < 256 for num in ip_str.rstrip().split('.')):
            valid_fromat = True
        if not valid_fromat:
            msg = (_("%s invalid ip format!") % ip_str)
            LOG.warn(msg)
            raise HTTPForbidden(msg)
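validate_ip_format accepts a dotted quad only when it contains exactly three dots and every field parses as an integer in 0..255, so both examples from the docstring behave as documented. The core predicate, exercised on its own (a sketch, not the Daisy method itself):

def is_valid_ipv4(ip_str):
    parts = ip_str.rstrip().split('.')
    return (ip_str.count('.') == 3 and
            all(p.isdigit() and 0 <= int(p) < 256 for p in parts))

assert is_valid_ipv4('192.168.1.5')
assert not is_valid_ipv4('10.43.1789')      # too few dots
assert not is_valid_ipv4('192.168.1.256')   # octet out of range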
@@ -182,9 +192,13 @@ class Controller(controller.BaseController):
        """
        network = network.split('/')
        mask = ~(2**(32 - int(network[1])) - 1)
        return (
            self._ip_into_int(ip) & mask) == (
            self._ip_into_int(
                network[0]) & mask)
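_is_in_network_range masks both addresses with the network mask derived from the prefix length and compares the network parts; for 192.168.1.37 against 192.168.1.0/24 the mask keeps the top 24 bits, both sides reduce to 192.168.1.0, and the check passes. The same arithmetic as a standalone sketch:

def ip_to_int(ip):
    a, b, c, d = (int(x) for x in ip.split('.'))
    return (a << 24) | (b << 16) | (c << 8) | d

def in_cidr(ip, cidr):
    net, prefix = cidr.split('/')
    mask = ~(2 ** (32 - int(prefix)) - 1)   # keeps the top <prefix> bits
    return (ip_to_int(ip) & mask) == (ip_to_int(net) & mask)

assert in_cidr('192.168.1.37', '192.168.1.0/24')
assert not in_cidr('192.168.2.1', '192.168.1.0/24')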
    def _verify_uniqueness_of_network_name(
            self, req, network_list, network_meta, is_update=False):
        """
        Network name is match case and uniqueness in cluster.
        :param req:
@@ -192,8 +206,10 @@ class Controller(controller.BaseController):
        :param network_meta: network plane need be verified
        :return:
        """
        if not network_list or not network_meta or not network_meta.get(
                'name', None):
            msg = _("Input params invalid for verifying uniqueness of "
                    "network name.")
            raise HTTPBadRequest(msg, request=req, content_type="text/plain")

        network_name = network_meta['name']
@@ -205,37 +221,122 @@ class Controller(controller.BaseController):
        # network name don't match case
        network_name_list = [network['name'].lower() for network in
                             network_list['networks'] if
                             network.get('name', None)]
        if network_name.lower() in network_name_list:
            msg = _(
                "Name of network isn't match case and %s already exists "
                "in the cluster." %
                network_name)
            raise HTTPConflict(msg, request=req, content_type="text/plain")

        if not is_update:
            # Input network type can't be the same as a db record
            # which already exists,
            # except DATAPLANE/STORAGE/HEARTBEAT networks.
            network_type_exist_list = \
                [network['network_type'] for network in
                 network_list['networks']
                 if network.get('network_type', None) and
                 network['network_type'] != "DATAPLANE" and
                 network['network_type'] != "STORAGE" and
                 network['network_type'] != "HEARTBEAT"]
            if network_meta.get(
                    "network_type",
                    None) in network_type_exist_list:
                msg = _(
                    "The %s network plane %s must be unique, "
                    "except DATAPLANE/STORAGE/HEARTBEAT network." %
                    (network_meta['network_type'], network_name))
                raise HTTPConflict(msg, request=req, content_type="text/plain")
    def _valid_network_range(self, req, network_meta):
        if (('vlan_start' in network_meta and 'vlan_end' not in
                network_meta) or (
                'vlan_start' not in network_meta and
                'vlan_end' in network_meta)):
            msg = "vlan_start and vlan_end must appear "\
                  "at the same time"
            LOG.error(msg)
            raise HTTPBadRequest(explanation=msg, request=req)
        if 'vlan_start' in network_meta:
            if not (int(network_meta['vlan_start']) >= 1 and
                    int(network_meta['vlan_start']) <= 4094):
                msg = "vlan_start must be an integer in 1~4094"
                LOG.error(msg)
                raise HTTPBadRequest(explanation=msg, request=req)
        if 'vlan_end' in network_meta:
            if not (int(network_meta['vlan_end']) >= 1 and
                    int(network_meta['vlan_end']) <= 4094):
                msg = "vlan_end must be an integer in 1~4094"
                LOG.error(msg)
                raise HTTPBadRequest(explanation=msg, request=req)
            if int(network_meta['vlan_start']) > int(network_meta['vlan_end']):
                msg = "vlan_start must be less than vlan_end"
                LOG.error(msg)
                raise HTTPBadRequest(explanation=msg, request=req)
        if (('vni_start' in network_meta and 'vni_end' not in
                network_meta) or (
                'vni_start' not in network_meta and
                'vni_end' in network_meta)):
            msg = "vni_start and vni_end must appear at the same time"
            LOG.error(msg)
            raise HTTPBadRequest(explanation=msg, request=req)
        if 'vni_start' in network_meta:
            if not (int(network_meta['vni_start']) >= 1 and
                    int(network_meta['vni_start']) <= 16777216):
                msg = "vni_start must be an integer in 1~16777216"
                LOG.error(msg)
                raise HTTPBadRequest(explanation=msg, request=req)
        if 'vni_end' in network_meta:
            if not (int(network_meta['vni_end']) >= 1 and
                    int(network_meta['vni_end']) <= 16777216):
                msg = "vni_end must be an integer in 1~16777216"
                LOG.error(msg)
                raise HTTPBadRequest(explanation=msg, request=req)
            if int(network_meta['vni_start']) > int(network_meta['vni_end']):
                msg = "vni_start must be less than vni_end"
                LOG.error(msg)
                raise HTTPBadRequest(explanation=msg, request=req)
        if (('gre_id_start' in network_meta and 'gre_id_end' not in
                network_meta) or (
                'gre_id_start' not in network_meta and
                'gre_id_end' in network_meta)):
            msg = "gre_id_start and gre_id_end must "\
                  "appear at the same time"
            LOG.error(msg)
            raise HTTPBadRequest(explanation=msg, request=req)
        if 'gre_id_start' in network_meta:
            if not (int(network_meta['gre_id_start']) >= 1 and
                    int(network_meta['gre_id_start']) <= 4094):
                msg = "gre_id_start must be an integer in 1~4094"
                LOG.error(msg)
                raise HTTPBadRequest(explanation=msg, request=req)
        if 'gre_id_end' in network_meta:
            if not (int(network_meta['gre_id_end']) >= 1 and
                    int(network_meta['gre_id_end']) <= 4094):
                msg = "gre_id_end must be an integer in 1~4094"
                LOG.error(msg)
                raise HTTPBadRequest(explanation=msg, request=req)
            if int(network_meta['gre_id_start']) >\
                    int(network_meta['gre_id_end']):
                msg = "gre_id_start must be less than gre_id_end"
                LOG.error(msg)
                raise HTTPBadRequest(explanation=msg, request=req)
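vlan, vni, and gre id pairs all pass through the same three checks: both ends present together, each end within the bounds for its type, and start not greater than end. A table-driven sketch that collapses the repetition (bounds as used above: 1~4094 for vlan and gre ids, 1~16777216 for vni):

RANGE_BOUNDS = {'vlan': (1, 4094), 'vni': (1, 16777216), 'gre_id': (1, 4094)}

def check_range(meta, kind):
    lo, hi = RANGE_BOUNDS[kind]
    start, end = meta.get(kind + '_start'), meta.get(kind + '_end')
    if (start is None) != (end is None):
        raise ValueError('%s_start and %s_end must appear together'
                         % (kind, kind))
    if start is None:
        return
    if not (lo <= int(start) <= hi and lo <= int(end) <= hi):
        raise ValueError('%s ids must be integers in %d~%d' % (kind, lo, hi))
    if int(start) > int(end):
        raise ValueError('%s_start must not exceed %s_end' % (kind, kind))

check_range({'vlan_start': '1', 'vlan_end': '100'}, 'vlan')  # passes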
    def _verify_heartbeat_network(self, req, network_list, network_meta):
        heartbeat_networks = [
            network for network in network_list['networks'] if network.get(
                'network_type',
                None) and network['network_type'] == "HEARTBEAT"]
        if len(heartbeat_networks) >= 2:
            raise HTTPBadRequest(
                explanation="HEARTBEAT network plane number must be "
                            "less than two",
                request=req)
    @utils.mutating
    def add_network(self, req, network_meta):
@@ -252,42 +353,58 @@ class Controller(controller.BaseController):
        if cluster_id:
            self._raise_404_if_cluster_delete(req, cluster_id)
            network_list = self.detail(req, cluster_id)
            self._verify_uniqueness_of_network_name(
                req, network_list, network_meta)
            if 'network_type' in network_meta and network_meta[
                    'network_type'] == "HEARTBEAT":
                self._verify_heartbeat_network(req, network_list,
                                               network_meta)
        # else:
        #     if network_meta.get('type',None) != "template":
        #         raise HTTPBadRequest(explanation="cluster id must be given",
        #                              request=req)
        network_name = network_meta.get('name', None)
        network_name_split = network_name.split('_')
        for network_name_info in network_name_split:
            if not network_name_info.isalnum():
                raise ValueError(
                    'network name must be numbers or letters or underscores !')
        if 'network_type' not in network_meta:
            raise HTTPBadRequest(
                explanation="network-type must be given",
                request=req)
        if network_meta['network_type'] not in SUPPORT_NETWORK_TYPE:
            raise HTTPBadRequest(
                explanation="unsupported network-type",
                request=req)

        if ('type' in network_meta and
                network_meta['type'] not in SUPPORT_NETWORK_TEMPLATE_TYPE):
            raise HTTPBadRequest(explanation="unsupported type", request=req)

        if ('capability' in network_meta and
                network_meta['capability'] not in SUPPORT_NETWORK_CAPABILITY):
            raise HTTPBadRequest(
                explanation="unsupported capability type",
                request=req)

        self._valid_network_range(req, network_meta)

        if network_meta.get('ip_ranges', None) and \
                eval(network_meta['ip_ranges']):
            cidr = None
            if 'cidr' not in network_meta:
                msg = (
                    _("When ip range was specified, the CIDR parameter "
                      "can not be empty."))
                LOG.warn(msg)
                raise HTTPForbidden(msg)
            else:
                cidr = network_meta['cidr']
                cidr_division = cidr.split('/')
                if len(cidr_division) != 2 or (
                        cidr_division[1] and int(
                            cidr_division[1]) > 32 or int(
                            cidr_division[1]) < 0):
                    msg = (_("Wrong CIDR format."))
                    LOG.warn(msg)
                    raise HTTPForbidden(msg)
@@ -299,7 +416,9 @@ class Controller(controller.BaseController):
            sorted_int_ip_ranges_list = list()
            for ip_pair in ip_ranges:
                if ['start', 'end'] != ip_pair.keys():
                    msg = (
                        _("IP range was not start with 'start:' or "
                          "end with 'end:'."))
                    LOG.warn(msg)
                    raise HTTPForbidden(msg)
                ip_start = ip_pair['start']
@@ -308,16 +427,21 @@ class Controller(controller.BaseController):
                self.validate_ip_format(ip_end)

                if not self._is_in_network_range(ip_start, cidr):
                    msg = (
                        _("IP address %s was not in the range "
                          "of CIDR %s." % (ip_start, cidr)))
                    LOG.warn(msg)
                    raise HTTPForbidden(msg)

                if not self._is_in_network_range(ip_end, cidr):
                    msg = (
                        _("IP address %s was not in the range "
                          "of CIDR %s." % (ip_end, cidr)))
                    LOG.warn(msg)
                    raise HTTPForbidden(msg)

                # transform ip format to int when the string format is
                # valid
                int_ip_start = self._ip_into_int(ip_start)
                int_ip_end = self._ip_into_int(ip_end)

@@ -326,12 +450,16 @@ class Controller(controller.BaseController):
                    LOG.warn(msg)
                    raise HTTPForbidden(msg)
                int_ip_ranges_list.append([int_ip_start, int_ip_end])
            sorted_int_ip_ranges_list = sorted(
                int_ip_ranges_list, key=lambda x: x[0])

            for int_ip_range in sorted_int_ip_ranges_list:
                if last_ip_range_end and last_ip_range_end >= int_ip_range[
                        0]:
                    msg = (_("Between ip ranges can not be overlap."))
                    # such as "[10, 15], [12, 16]", last_ip_range_end >=
                    # int_ip_range[0], this ip ranges were overlap
                    LOG.warn(msg)
                    raise HTTPForbidden(msg)
                else:
                    last_ip_range_end = int_ip_range[1]
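Sorting the ranges by start address reduces overlap detection to one pass in which each range is compared only against the end of its predecessor. The same idea on bare integer pairs (the sketch tests last_end against None, which also keeps a legitimate end value of 0 from being skipped):

def ranges_overlap(ranges):
    last_end = None
    for start, end in sorted(ranges, key=lambda r: r[0]):
        if last_end is not None and start <= last_end:
            return True
        last_end = end
    return False

assert ranges_overlap([(10, 15), (12, 16)])      # 12 <= 15 -> overlap
assert not ranges_overlap([(10, 15), (16, 20)])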
@@ -353,16 +481,33 @@ class Controller(controller.BaseController):
                        'have the same cidr'))
                raise HTTPBadRequest(explanation=msg)
        if network_meta.get(
                'gateway',
                None) and network_meta.get(
                'cidr',
                None):
            gateway = network_meta['gateway']
            cidr = network_meta['cidr']

            self.validate_ip_format(gateway)
            return_flag = self._is_in_network_range(gateway, cidr)
            if not return_flag:
                msg = (
                    _(
                        'The gateway %s was not in the same segment '
                        'with the cidr %s of management network.' %
                        (gateway, cidr)))
                raise HTTPBadRequest(explanation=msg)

        if network_meta.get('cluster_id') and network_meta.get('gateway'):
            networks = registry.get_networks_detail(req.context, cluster_id)
            gateways = [network['gateway'] for network in networks
                        if network['name'] != network_meta['name'] and
                        network['gateway']]
            if gateways:
                msg = (_('More than one gateway found in cluster.'))
                LOG.error(msg)
                raise HTTPConflict(explanation=msg)

        network_meta = registry.add_network_metadata(req.context, network_meta)
        return {'network_meta': network_meta}
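ip_ranges arrives as a string and is rebuilt here with eval; for untrusted API input, ast.literal_eval is a drop-in alternative that only accepts Python literals. A sketch of the parse step under that substitution, using the payload shape implied above:

import ast

raw = "[{'start': '192.168.1.10', 'end': '192.168.1.50'}]"
ip_ranges = ast.literal_eval(raw)   # same result as eval for literal input
for ip_pair in ip_ranges:
    # mirrors the ['start', 'end'] key check above
    assert sorted(ip_pair.keys()) == ['end', 'start']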
@@ -381,10 +526,12 @@ class Controller(controller.BaseController):
        # self._raise_404_if_network_deleted(req, network_id)
        network = self.get_network_meta_or_404(req, network_id)
        if network['deleted']:
            msg = _("Network with identifier %s has been deleted.") % \
                network_id
            raise HTTPNotFound(msg)
        if network['type'] != 'custom':
            msg = _("Type of network was not custom, can not "
                    "delete this network.")
            raise HTTPForbidden(msg)
        try:
            registry.delete_network_metadata(req.context, network_id)
@@ -403,7 +550,8 @@ class Controller(controller.BaseController):
                        request=req,
                        content_type="text/plain")
            except exception.InUseByStore as e:
                msg = (_("Network %(id)s could not be deleted "
                         "because it is in use: "
                         "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
                LOG.warn(msg)
                raise HTTPConflict(explanation=msg,
@@ -436,10 +584,19 @@ class Controller(controller.BaseController):
        """
        self._enforce(req, 'get_all_network')
        params = self._get_query_params(req)
        filters = params.get('filters')
        if filters and filters.get('type'):
            if filters['type'] not in SUPPORT_NETWORK_TEMPLATE_TYPE:
                msg = "type '%s' is not supported." % filters['type']
                LOG.error(msg)
                raise HTTPBadRequest(explanation=msg, request=req)
        try:
            networks = registry.get_all_networks(req.context, **params)
        except Exception:
            raise HTTPBadRequest(
                explanation="Get all networks failed.",
                request=req)
        return dict(networks=networks)
    def detail(self, req, id):
@@ -458,7 +615,7 @@ class Controller(controller.BaseController):
              'deleted_at': <TIMESTAMP>|<NONE>,}, ...
        ]}
        """
        self._raise_404_if_cluster_delete(req, id)
        self._enforce(req, 'get_networks')
        params = self._get_query_params(req)
        try:
@@ -477,12 +634,14 @@ class Controller(controller.BaseController):
        :retval Returns the updated image information as a mapping
        """
        if 'name' in network_meta:
            network_name = network_meta.get('name', None)
            network_name_split = network_name.split('_')
            for network_name_info in network_name_split:
                if not network_name_info.isalnum():
                    raise ValueError(
                        'network name must be numbers or '
                        'letters or underscores !')
        self._enforce(req, 'update_network')
        # orig_cluster_meta = self.get_cluster_meta_or_404(req, cluster_id)
        orig_network_meta = self.get_network_meta_or_404(req, network_id)
@@ -492,23 +651,27 @@ class Controller(controller.BaseController):
            raise HTTPForbidden(explanation=msg,
                                request=req,
                                content_type="text/plain")
        if ('network_type' in network_meta and
                network_meta['network_type'] not in SUPPORT_NETWORK_TYPE):
            raise HTTPBadRequest(
                explanation="unsupported network-type",
                request=req)
        if ('type' in network_meta and
                network_meta['type'] not in SUPPORT_NETWORK_TEMPLATE_TYPE):
            raise HTTPBadRequest(explanation="unsupported type", request=req)
        if ('type' in network_meta and
                network_meta['type'] == 'template'):
            raise HTTPBadRequest(
                explanation="network template type is not allowed to update",
                request=req)

        if ('capability' in network_meta and
                network_meta['capability'] not in SUPPORT_NETWORK_CAPABILITY):
            raise HTTPBadRequest(
                explanation="unsupported capability type",
                request=req)

        self._valid_network_range(req, network_meta)

        network_name = network_meta.get('name', None)
        cluster_id = orig_network_meta['cluster_id']
@@ -516,17 +679,20 @@ class Controller(controller.BaseController):
            network_updated = copy.deepcopy(network_meta)
            network_updated['id'] = network_id
            network_type = network_meta.get('network_type', None)
            network_updated['network_type'] = orig_network_meta[
                'network_type'] if not network_type else network_type
            network_list = self.detail(req, cluster_id)
            self._verify_uniqueness_of_network_name(
                req, network_list, network_updated, True)
        cidr = network_meta.get('cidr', orig_network_meta['cidr'])
        vlan_id = network_meta.get('vlan_id', orig_network_meta['vlan_id'])
        if cidr:
            cidr_division = cidr.split('/')
            if len(cidr_division) != 2 or (
                    cidr_division[1] and int(
                        cidr_division[1]) > 32 or int(
                        cidr_division[1]) < 0):
                msg = (_("Wrong CIDR format."))
                LOG.warn(msg)
                raise HTTPForbidden(msg)
@@ -549,9 +715,12 @@ class Controller(controller.BaseController):
                        'have the same cidr'))
                raise HTTPBadRequest(explanation=msg)

        if network_meta.get('ip_ranges', None) and \
                eval(network_meta['ip_ranges']):
            if not cidr:
                msg = (
                    _("When ip range was specified, "
                      "the CIDR parameter can not be empty."))
                LOG.warn(msg)
                raise HTTPForbidden(msg)
            ip_ranges = eval(network_meta['ip_ranges'])
@@ -560,7 +729,9 @@ class Controller(controller.BaseController):
            sorted_int_ip_ranges_list = list()
            for ip_pair in ip_ranges:
                if ['start', 'end'] != ip_pair.keys():
                    msg = (
                        _("IP range was not start with 'start:' "
                          "or end with 'end:'."))
                    LOG.warn(msg)
                    raise HTTPForbidden(msg)
                ip_start = ip_pair['start']
ip_start = ip_pair['start'] ip_start = ip_pair['start']
@ -569,12 +740,16 @@ class Controller(controller.BaseController):
self.validate_ip_format(ip_end) self.validate_ip_format(ip_end)
if not self._is_in_network_range(ip_start, cidr): if not self._is_in_network_range(ip_start, cidr):
msg = (_("IP address %s was not in the range of CIDR %s." % (ip_start, cidr))) msg = (
_("IP address %s was not in the "
"range of CIDR %s." % (ip_start, cidr)))
LOG.warn(msg) LOG.warn(msg)
raise HTTPForbidden(msg) raise HTTPForbidden(msg)
if not self._is_in_network_range(ip_end, cidr): if not self._is_in_network_range(ip_end, cidr):
msg = (_("IP address %s was not in the range of CIDR %s." % (ip_end, cidr))) msg = (
_("IP address %s was not in the "
"range of CIDR %s." % (ip_end, cidr)))
LOG.warn(msg) LOG.warn(msg)
raise HTTPForbidden(msg) raise HTTPForbidden(msg)
@ -587,26 +762,48 @@ class Controller(controller.BaseController):
LOG.warn(msg) LOG.warn(msg)
raise HTTPForbidden(msg) raise HTTPForbidden(msg)
int_ip_ranges_list.append([int_ip_start, int_ip_end]) int_ip_ranges_list.append([int_ip_start, int_ip_end])
sorted_int_ip_ranges_list = sorted(int_ip_ranges_list, key=lambda x : x[0]) sorted_int_ip_ranges_list = sorted(
int_ip_ranges_list, key=lambda x: x[0])
LOG.warn("sorted_int_ip_ranges_list: " % sorted_int_ip_ranges_list) LOG.warn("sorted_int_ip_ranges_list: " % sorted_int_ip_ranges_list)
# check ip ranges overlap # check ip ranges overlap
for int_ip_range in sorted_int_ip_ranges_list: for int_ip_range in sorted_int_ip_ranges_list:
if last_ip_range_end and last_ip_range_end >= int_ip_range[0]: if last_ip_range_end and last_ip_range_end >= int_ip_range[0]:
msg = (_("Between ip ranges can not be overlap.")) msg = (_("Between ip ranges can not be overlap."))
LOG.warn(msg) # such as "[10, 15], [12, 16]", last_ip_range_end >= int_ip_range[0], this ip ranges were overlap # such as "[10, 15], [12, 16]", last_ip_range_end >=
# int_ip_range[0], this ip ranges were overlap
LOG.warn(msg)
raise HTTPForbidden(msg) raise HTTPForbidden(msg)
else: else:
last_ip_range_end = int_ip_range[1] last_ip_range_end = int_ip_range[1]
        if network_meta.get(
                'gateway',
                orig_network_meta['gateway']) and network_meta.get(
                'cidr',
                orig_network_meta['cidr']):
            gateway = network_meta.get('gateway',
                                       orig_network_meta['gateway'])
            cidr = network_meta.get('cidr', orig_network_meta['cidr'])

            self.validate_ip_format(gateway)
            return_flag = self._is_in_network_range(gateway, cidr)
            if not return_flag:
                msg = (
                    _(
                        'The gateway %s was not in the same '
                        'segment with the cidr %s of management network.' %
                        (gateway, cidr)))
                raise HTTPBadRequest(explanation=msg)

        # allow one gateway in one cluster
        if network_meta.get('cluster_id') and (network_meta.get('gateway')):
            networks = registry.get_networks_detail(req.context, cluster_id)
            gateways = [network['gateway'] for network in networks
                        if network['name'] != orig_network_meta['name'] and
                        network['gateway']]
            if gateways:
                msg = (_('More than one gateway found in cluster.'))
                LOG.error(msg)
                raise HTTPConflict(explanation=msg)
        try:
            network_meta = registry.update_network_metadata(req.context,
                                                            network_id,
@@ -626,12 +823,8 @@ class Controller(controller.BaseController):
                                request=req,
                                content_type="text/plain")
        except exception.Forbidden as e:
            LOG.warn(e)
            raise HTTPForbidden(e)
        except (exception.Conflict, exception.Duplicate) as e:
            LOG.warn(utils.exception_to_str(e))
            raise HTTPConflict(body=_('Network operation conflicts'),
@@ -642,6 +835,7 @@ class Controller(controller.BaseController):
        return {'network_meta': network_meta}


class HostDeserializer(wsgi.JSONRequestDeserializer):
    """Handles deserialization of specific controller method requests."""

@@ -656,6 +850,7 @@ class HostDeserializer(wsgi.JSONRequestDeserializer):
    def update_network(self, request):
        return self._deserialize(request)


class HostSerializer(wsgi.JSONResponseSerializer):
    """Handles serialization of specific controller method responses."""

@@ -683,9 +878,9 @@ class HostSerializer(wsgi.JSONResponseSerializer):
        response.body = self.to_json(dict(network=network_meta))
        return response


def create_resource():
    """Hosts resource factory method"""
    deserializer = HostDeserializer()
    serializer = HostSerializer()
    return wsgi.Resource(Controller(), deserializer, serializer)
View File
@@ -46,8 +46,16 @@ SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE

SUPPORTED_DEPLOYMENT_BACKENDS = ('tecs', 'zenic', 'proton')
SUPPORTED_ROLE = (
    'CONTROLLER_LB',
    'CONTROLLER_HA',
    'COMPUTER',
    'ZENIC_CTL',
    'ZENIC_NFM',
    'ZENIC_MDB',
    'PROTON',
    'CHILD_CELL_1_COMPUTER',
    'CONTROLLER_CHILD_CELL_1')
SUPPORT_DISK_LOCATION = ('local', 'share')

CONF = cfg.CONF
@@ -56,6 +64,7 @@ CONF.import_opt('container_formats', 'daisy.common.config',
                group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config')


class Controller(controller.BaseController):
    """
    WSGI controller for roles resource in Daisy v1 API
@@ -130,26 +139,33 @@ class Controller(controller.BaseController):
        if host['deleted']:
            msg = _("Node with identifier %s has been deleted.") % host_id
            raise HTTPNotFound(msg)

    def _raise_404_if_service_deleted(self, req, service_id):
        service = self.get_service_meta_or_404(req, service_id)
        if service['deleted']:
            msg = _("Service with identifier %s has been deleted.") % \
                service_id
            raise HTTPNotFound(msg)

    def _raise_404_if_config_set_deleted(self, req, config_set_id):
        config_set = self.get_config_set_meta_or_404(req, config_set_id)
        if config_set['deleted']:
            msg = _("Config_Set with identifier %s has been deleted.") % \
                config_set_id
            raise HTTPNotFound(msg)

    def _raise_404_if_cluster_deleted(self, req, cluster_id):
        cluster = self.get_cluster_meta_or_404(req, cluster_id)
        if cluster['deleted']:
            msg = _("cluster with identifier %s has been deleted.") % \
                cluster_id
            raise HTTPNotFound(msg)

    def _get_service_name_list(self, req, role_service_id_list):
        service_name_list = []
        for service_id in role_service_id_list:
            service_meta = registry.get_service_metadata(
                req.context, service_id)
            service_name_list.append(service_meta['name'])
        return service_name_list
@@ -160,7 +176,7 @@ class Controller(controller.BaseController):
        '''
        # import pdb;pdb.set_trace()
        host_disk_except_os_disk_lists = 0
        os_disk_m = host_info.get('root_lv_size', 102400)
        swap_size_m = host_info.get('swap_lv_size', None)
        if swap_size_m:
            swap_size_m = (swap_size_m / 4) * 4
@@ -169,7 +185,7 @@ class Controller(controller.BaseController):
        boot_partition_m = 400
        redundant_partiton_m = 600
        if not os_disk_m:
            os_disk_m = 102400
        # host_disk = 1024
        host_disks = host_info.get('disks', None)
        host_disk_size_m = 0
@@ -181,8 +197,11 @@ class Controller(controller.BaseController):
                disk_size_b_int = int(disk_size_b_str)
                disk_size_m = disk_size_b_int // (1024 * 1024)
                host_disk_size_m = host_disk_size_m + disk_size_m
        host_disk_except_os_disk_lists = host_disk_size_m - os_disk_m - \
            swap_size_m - boot_partition_m - redundant_partiton_m
        LOG.warn(
            '----start----host_disk_except_os_disk_lists: %s -----end--' %
            host_disk_except_os_disk_lists)
        return host_disk_except_os_disk_lists
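With the default root LV raised to 102400 MB, a host with one 300 GB disk and 4 GB of swap keeps 300*1024 - 102400 - 4096 - 400 - 600 = 199704 MB for the remaining LVs; swap is first rounded down to a multiple of 4 MB. The same arithmetic as a sketch (usable_disk_m is a hypothetical helper, not Daisy code):

def usable_disk_m(disk_sizes_b, root_lv_m=102400, swap_m=4096):
    swap_m = (swap_m // 4) * 4          # align swap down to 4 MB
    total_m = sum(b // (1024 * 1024) for b in disk_sizes_b)
    return total_m - root_lv_m - swap_m - 400 - 600  # boot + redundant

# one 300 GB disk, 4 GB swap
print(usable_disk_m([300 * 1024 ** 3]))  # 199704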
    def _check_host_validity(self, **paras):
@@ -195,7 +214,8 @@ class Controller(controller.BaseController):
            disk_size_m = int(disk_size)
        else:
            disk_size_m = 0
        if disk_size_m == 0:  # Host hard disk size was 0,
            # think that the host does not need to install the system
            return  # Don't need to check the validity of hard disk size

        db_lv_size_m = paras.get('db_lv_size', 300)
@@ -221,7 +241,9 @@ class Controller(controller.BaseController):
        db_lv_size_m = (db_lv_size_m / 4) * 4
        nova_lv_size_m = (nova_lv_size_m / 4) * 4
        if glance_lv_size_m + db_lv_size_m + nova_lv_size_m > disk_size_m:
            msg = _("There isn't enough disk space to specify database or "
                    "glance or nova disk, please specify database or "
                    "glance or nova disk size again")
            LOG.debug(msg)
            raise HTTPForbidden(msg)
@@ -236,7 +258,12 @@ class Controller(controller.BaseController):
    def _check_config_set_id_exist(self, req, config_set_id):
        self._raise_404_if_config_set_deleted(req, config_set_id)

    def _check_glance_lv_value(
            self,
            req,
            glance_lv_value,
            role_name,
            service_name_list):
        if int(glance_lv_value) < 0 and int(glance_lv_value) != -1:
            msg = _("glance_lv_size can't be negative except -1.")
            raise HTTPForbidden(explanation=msg,
@@ -254,8 +281,10 @@ class Controller(controller.BaseController):
            msg = _("The size of database disk can't be negative except -1.")
            LOG.debug(msg)
            raise HTTPForbidden(msg)
        # Only a role that includes a database service may specify
        # the size of the database.
        if 'mariadb' not in service_name_list and 'mongodb' not in \
                service_name_list:
            msg = _('The role without database service is unable '
                    'to specify the size of the database!')
            LOG.debug(msg)
            raise HTTPForbidden(msg)
@@ -286,29 +315,43 @@ class Controller(controller.BaseController):
                                          host_id_list, cluster_id, argws):
        if db_lv_size or glance_lv_size or nova_lv_size:
            for host_id in host_id_list:
                host_disk_db_glance_nova_size = \
                    self.get_host_disk_db_glance_nova_size(
                        req, host_id, cluster_id)
                if host_disk_db_glance_nova_size['db_lv_size'] and \
                        db_lv_size and int(
                        db_lv_size) < int(host_disk_db_glance_nova_size[
                            'db_lv_size']):
                    argws['db_lv_size'] = host_disk_db_glance_nova_size[
                        'db_lv_size']
                else:
                    argws['db_lv_size'] = db_lv_size
                if host_disk_db_glance_nova_size['glance_lv_size'] and \
                        glance_lv_size and int(
                        glance_lv_size) < int(host_disk_db_glance_nova_size[
                            'glance_lv_size']):
                    argws['glance_lv_size'] = host_disk_db_glance_nova_size[
                        'glance_lv_size']
                else:
                    argws['glance_lv_size'] = glance_lv_size
                if host_disk_db_glance_nova_size['nova_lv_size'] and \
                        nova_lv_size and int(
                        nova_lv_size) < int(host_disk_db_glance_nova_size[
                            'nova_lv_size']):
                    argws['nova_lv_size'] = host_disk_db_glance_nova_size[
                        'nova_lv_size']
                else:
                    argws['nova_lv_size'] = nova_lv_size
                argws['disk_size'] = host_disk_db_glance_nova_size['disk_size']
                LOG.warn(
                    '--------host(%s) check_host_validity argws:----- %s' %
                    (host_id, argws))
                self._check_host_validity(**argws)

    def _check_deployment_backend(self, req, deployment_backend):
        if deployment_backend not in SUPPORTED_DEPLOYMENT_BACKENDS:
            msg = "deployment backend '%s' is not supported." % \
                deployment_backend
            raise HTTPBadRequest(explanation=msg,
                                 request=req,
                                 content_type="text/plain")
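For each LV the code above keeps the larger of the size already required by existing roles on the host and the newly requested one, and only then validates the combined layout. The per-key resolution, condensed into a sketch with hypothetical dict arguments:

def resolve_lv_sizes(requested, existing):
    """Pick the larger of requested vs. already-required LV sizes.

    requested/existing map 'db_lv_size' etc. to MB values (or None);
    hypothetical helper, not part of the Daisy API.
    """
    argws = {}
    for key in ('db_lv_size', 'glance_lv_size', 'nova_lv_size'):
        req_v, old_v = requested.get(key), existing.get(key)
        if old_v and req_v and int(req_v) < int(old_v):
            argws[key] = old_v   # existing role already demands more
        else:
            argws[key] = req_v
    return argws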
@@ -319,13 +362,15 @@ class Controller(controller.BaseController):
            LOG.debug(msg)
            raise HTTPForbidden(msg)

    def _check_cluster_id_in_role_update(
            self, req, role_cluster, orig_role_meta):
        if orig_role_meta['type'].lower() == 'template':
            msg = _("The template role does not belong to any cluster.")
            LOG.debug(msg)
            raise HTTPForbidden(msg)
        orig_role_cluster = orig_role_meta['cluster_id']
        if orig_role_cluster != role_cluster:  # Can not change the cluster
            # which the role belongs to
            msg = _("Can't update the cluster of the role.")
            LOG.debug(msg)
            raise HTTPForbidden(msg)
@@ -340,8 +385,8 @@ class Controller(controller.BaseController):
        else:  # role type was template, cluster id was None
            self.check_template_role_name_repetition(req, role_name)

    def _check_all_lv_size_of_nodes_with_role_in_role_update(
            self, req, role_meta, orig_role_meta, role_host_id_list):
        # check host with this role at the same time
        cluster_id = role_meta.get('cluster_id', None)
        if not cluster_id:  # role with cluster
@@ -351,19 +396,19 @@ class Controller(controller.BaseController):
            LOG.debug(msg)
            raise HTTPForbidden(msg)
        argws = dict()
        if 'db_lv_size' in role_meta:
            db_lv_size = role_meta['db_lv_size']
        else:  # The db_lv_size has been specified before.
            db_lv_size = orig_role_meta.get('db_lv_size')
        if 'glance_lv_size' in role_meta:
            glance_lv_size = role_meta['glance_lv_size']
        else:
            glance_lv_size = orig_role_meta.get('glance_lv_size')
        if 'nova_lv_size' in role_meta:
            nova_lv_size = role_meta['nova_lv_size']
        else:
            nova_lv_size = orig_role_meta.get('nova_lv_size')
        if 'nodes' in role_meta:
            host_id_list = list(eval(role_meta['nodes'])) + role_host_id_list
        else:
            host_id_list = role_host_id_list
@@ -375,18 +420,20 @@ class Controller(controller.BaseController):
            msg = 'The role %s need no ntp_server' % role_name
            raise HTTPForbidden(explanation=msg)

    def _check_role_type_in_role_add(self, req, role_meta):
        # role_type == None or not template, cluster id must not be None
        role_type = role_meta['type']
        if role_type.lower() != 'template':
            role_cluster_id = role_meta.get('cluster_id', None)
            if not role_cluster_id:  # add role without cluster id parameter,
                # raise error
                msg = _(
                    "The cluster_id parameter can not be None "
                    "if role was not a template type.")
                LOG.debug(msg)
                raise HTTPForbidden(msg)
        else:  # role_type == template, cluster id is not necessary
            if 'cluster_id' in role_meta:
                msg = _("The template role cannot be added to any cluster.")
                LOG.debug(msg)
                raise HTTPForbidden(msg)
@@ -418,12 +465,14 @@ class Controller(controller.BaseController):
        db_lv_size = list()
        glance_lv_size = list()
        nova_lv_size = list()
        # disk_size = list()
        host_info = self.get_host_meta_or_404(req, host_id)
        if host_info:
            if 'deleted' in host_info and host_info['deleted']:
                msg = _("Node with identifier %s has been deleted.") % \
                    host_info['id']
                LOG.debug(msg)
                raise HTTPNotFound(msg)
            # get host disk information
@@ -431,58 +480,73 @@ class Controller(controller.BaseController):
            host_disk_db_glance_nova_size['disk_size'] = host_disk
            # get role_host db/glance/nova information
            cluster_info = self.get_cluster_meta_or_404(req, cluster_id)
            if 'cluster' in host_info:  # host with cluster
                if host_info['cluster'] != cluster_info['name']:
                    # type(host_info['cluster']) = list,
                    # type(cluster_info['name']) = str
                    msg = _("Role and hosts belong to different cluster.")
                    LOG.debug(msg)
                    raise HTTPNotFound(msg)
                else:
                    all_roles = registry.get_roles_detail(req.context)
                    cluster_roles = [
                        role for role in all_roles if role['cluster_id'] ==
                        cluster_id]
                    # roles information saved in cluster_roles
                    if 'role' in host_info and host_info[
                            'role']:  # host with role
                        for role in cluster_roles:
                            if role['name'] in host_info[
                                    'role'] and cluster_roles:
                                db_lv_size.append(role.get('db_lv_size', None))
                                glance_lv_size.append(
                                    role.get('glance_lv_size', None))
                                nova_lv_size.append(
                                    role.get('nova_lv_size', None))
            if db_lv_size:
                host_disk_db_glance_nova_size['db_lv_size'] = max(db_lv_size)
            else:  # host without cluster
                host_disk_db_glance_nova_size['db_lv_size'] = 0
            if glance_lv_size:
                host_disk_db_glance_nova_size[
                    'glance_lv_size'] = max(glance_lv_size)
            else:
                host_disk_db_glance_nova_size['glance_lv_size'] = 0
            if nova_lv_size:
                host_disk_db_glance_nova_size[
                    'nova_lv_size'] = max(nova_lv_size)
            else:
                host_disk_db_glance_nova_size['nova_lv_size'] = 0
        LOG.warn('--------host(%s)disk_db_glance_nova_size:----- %s' %
                 (host_id, host_disk_db_glance_nova_size))
        return host_disk_db_glance_nova_size
def check_cluster_role_name_repetition(self, req, role_name, cluster_id): def check_cluster_role_name_repetition(self, req, role_name, cluster_id):
all_roles = registry.get_roles_detail(req.context) all_roles = registry.get_roles_detail(req.context)
cluster_roles = [role for role in all_roles if role['cluster_id'] == cluster_id] cluster_roles = [role for role in all_roles if role[
'cluster_id'] == cluster_id]
cluster_roles_name = [role['name'].lower() for role in cluster_roles] cluster_roles_name = [role['name'].lower() for role in cluster_roles]
if role_name.lower() in cluster_roles_name: if role_name.lower() in cluster_roles_name:
msg = _("The role %s has already been in the cluster %s!" % (role_name, cluster_id)) msg = _(
"The role %s has already been in the cluster %s!" %
(role_name, cluster_id))
LOG.debug(msg) LOG.debug(msg)
raise HTTPForbidden(msg) raise HTTPForbidden(msg)
def check_template_role_name_repetition(self, req, role_name): def check_template_role_name_repetition(self, req, role_name):
all_roles = registry.get_roles_detail(req.context) all_roles = registry.get_roles_detail(req.context)
template_roles = [role for role in all_roles if role['cluster_id'] == None] template_roles = [
role for role in all_roles if role['cluster_id'] is None]
template_roles_name = [role['name'].lower() for role in template_roles] template_roles_name = [role['name'].lower() for role in template_roles]
if role_name.lower() in template_roles_name: if role_name.lower() in template_roles_name:
msg = _("The role %s has already been in the the template role." % role_name) msg = _(
"The role %s has already been in the the template role." %
role_name)
LOG.debug(msg) LOG.debug(msg)
raise HTTPForbidden(msg) raise HTTPForbidden(msg)
def _check_disk_parameters(self, req, role_meta): def _check_disk_parameters(self, req, role_meta):
if (role_meta.has_key('disk_location') and if ('disk_location' in role_meta and
role_meta['disk_location'] not in SUPPORT_DISK_LOCATION): role_meta['disk_location'] not in SUPPORT_DISK_LOCATION):
msg = _("value of disk_location is not supported.") msg = _("value of disk_location is not supported.")
raise HTTPForbidden(msg) raise HTTPForbidden(msg)
@ -496,69 +560,82 @@ class Controller(controller.BaseController):
role_service_id_list, role_host_id_list): role_service_id_list, role_host_id_list):
role_name = orig_role_meta['name'] role_name = orig_role_meta['name']
if role_meta.get('type', None): if role_meta.get('type', None):
self._check_role_type_in_update_role(req, role_meta['type'], orig_role_meta) self._check_role_type_in_update_role(
if role_meta.has_key('ntp_server'): req, role_meta['type'], orig_role_meta)
if 'ntp_server' in role_meta:
self._check_ntp_server(req, role_name) self._check_ntp_server(req, role_name)
if role_meta.has_key('nodes'): if 'nodes' in role_meta:
self._check_nodes_exist(req, list(eval(role_meta['nodes']))) self._check_nodes_exist(req, list(eval(role_meta['nodes'])))
if role_meta.has_key('services'): if 'services' in role_meta:
self._check_services_exist(req, list(eval(role_meta['services']))) self._check_services_exist(req, list(eval(role_meta['services'])))
role_service_id_list.extend(list(eval(role_meta['services']))) role_service_id_list.extend(list(eval(role_meta['services'])))
if role_meta.has_key('config_set_id'): if 'config_set_id' in role_meta:
self._check_config_set_id_exist(req, str(role_meta['config_set_id'])) self._check_config_set_id_exist(
if role_meta.has_key('cluster_id'): req, str(role_meta['config_set_id']))
self._check_cluster_id_in_role_update(req, str(role_meta['cluster_id']), orig_role_meta) if 'cluster_id' in role_meta:
if role_meta.has_key('name'): self._check_cluster_id_in_role_update(
self._check_role_name_in_role_update(req, role_meta, orig_role_meta) req, str(role_meta['cluster_id']), orig_role_meta)
service_name_list = self._get_service_name_list(req, role_service_id_list) if 'name' in role_meta:
glance_lv_value = role_meta.get('glance_lv_size', orig_role_meta['glance_lv_size']) self._check_role_name_in_role_update(
req, role_meta, orig_role_meta)
service_name_list = self._get_service_name_list(
req, role_service_id_list)
glance_lv_value = role_meta.get(
'glance_lv_size', orig_role_meta['glance_lv_size'])
if glance_lv_value: if glance_lv_value:
self._check_glance_lv_value(req, glance_lv_value, role_name, service_name_list) self._check_glance_lv_value(
req, glance_lv_value, role_name, service_name_list)
if role_meta.get('db_lv_size', None) and role_meta['db_lv_size']: if role_meta.get('db_lv_size', None) and role_meta['db_lv_size']:
self._check_db_lv_size(req, role_meta['db_lv_size'], service_name_list) self._check_db_lv_size(
req, role_meta['db_lv_size'], service_name_list)
if role_meta.get('nova_lv_size', None): if role_meta.get('nova_lv_size', None):
self._check_nova_lv_size(req, role_meta['nova_lv_size'], role_name) self._check_nova_lv_size(req, role_meta['nova_lv_size'], role_name)
if role_meta.has_key('nodes') or role_host_id_list: if 'nodes' in role_meta or role_host_id_list:
self._check_all_lv_size_of_nodes_with_role_in_role_update(req, role_meta, orig_role_meta, self._check_all_lv_size_of_nodes_with_role_in_role_update(
role_host_id_list) req, role_meta, orig_role_meta, role_host_id_list)
self._check_disk_parameters(req, role_meta) self._check_disk_parameters(req, role_meta)
if role_meta.has_key('deployment_backend'): if 'deployment_backend' in role_meta:
self._check_deployment_backend(req, role_meta['deployment_backend']) self._check_deployment_backend(
req, role_meta['deployment_backend'])
if role_meta.get('role_type', None): if role_meta.get('role_type', None):
self._check_type_role_reasonable(req, role_meta) self._check_type_role_reasonable(req, role_meta)
def _check_role_add_parameters(self, req, role_meta, role_service_id_list): def _check_role_add_parameters(self, req, role_meta, role_service_id_list):
role_type = role_meta.get('type', None)
role_name = role_meta.get('name', None) role_name = role_meta.get('name', None)
if role_meta.get('type', None): if role_meta.get('type', None):
self._check_role_type_in_role_add(req, role_meta) self._check_role_type_in_role_add(req, role_meta)
if role_meta.has_key('nodes'): if 'nodes' in role_meta:
self._check_nodes_exist(req, list(eval(role_meta['nodes']))) self._check_nodes_exist(req, list(eval(role_meta['nodes'])))
if role_meta.has_key('services'): if 'services' in role_meta:
self._check_services_exist(req, list(eval(role_meta['services']))) self._check_services_exist(req, list(eval(role_meta['services'])))
role_service_id_list.extend(list(eval(role_meta['services']))) role_service_id_list.extend(list(eval(role_meta['services'])))
if role_meta.has_key('config_set_id'): if 'config_set_id' in role_meta:
self._check_config_set_id_exist(req, str(role_meta['config_set_id'])) self._check_config_set_id_exist(
if role_meta.has_key('cluster_id'): req, str(role_meta['config_set_id']))
if 'cluster_id' in role_meta:
orig_cluster = str(role_meta['cluster_id']) orig_cluster = str(role_meta['cluster_id'])
self._raise_404_if_cluster_deleted(req, orig_cluster) self._raise_404_if_cluster_deleted(req, orig_cluster)
self.check_cluster_role_name_repetition(req, role_name, orig_cluster) self.check_cluster_role_name_repetition(
req, role_name, orig_cluster)
else: else:
self.check_template_role_name_repetition(req, role_name) self.check_template_role_name_repetition(req, role_name)
service_name_list = self._get_service_name_list(req, role_service_id_list) service_name_list = self._get_service_name_list(
req, role_service_id_list)
glance_lv_value = role_meta.get('glance_lv_size', None) glance_lv_value = role_meta.get('glance_lv_size', None)
if glance_lv_value: if glance_lv_value:
self._check_glance_lv_value(req, glance_lv_value, role_name, service_name_list) self._check_glance_lv_value(
req, glance_lv_value, role_name, service_name_list)
if role_meta.get('db_lv_size', None) and role_meta['db_lv_size']: if role_meta.get('db_lv_size', None) and role_meta['db_lv_size']:
self._check_db_lv_size(req, role_meta['db_lv_size'], service_name_list) self._check_db_lv_size(
req, role_meta['db_lv_size'], service_name_list)
if role_meta.get('nova_lv_size', None): if role_meta.get('nova_lv_size', None):
self._check_nova_lv_size(req, role_meta['nova_lv_size'], role_name) self._check_nova_lv_size(req, role_meta['nova_lv_size'], role_name)
if role_meta.has_key('nodes'): if 'nodes' in role_meta:
self._check_all_lv_size_with_role_in_role_add(req, role_meta) self._check_all_lv_size_with_role_in_role_add(req, role_meta)
self._check_disk_parameters(req, role_meta) self._check_disk_parameters(req, role_meta)
if role_meta.has_key('deployment_backend'): if 'deployment_backend' in role_meta:
self._check_deployment_backend(req, role_meta['deployment_backend']) self._check_deployment_backend(
req, role_meta['deployment_backend'])
else: else:
role_meta['deployment_backend'] = 'tecs' role_meta['deployment_backend'] = 'tecs'
if role_meta.get('role_type', None): if role_meta.get('role_type', None):
@ -684,10 +761,25 @@ class Controller(controller.BaseController):
""" """
orig_role_meta = self.get_role_meta_or_404(req, id) orig_role_meta = self.get_role_meta_or_404(req, id)
role_service_list = registry.get_role_services(req.context, id) role_service_list = registry.get_role_services(req.context, id)
role_service_id_list = [ role_service['service_id'] for role_service in role_service_list ] role_service_id_list = [role_service['service_id']
for role_service in role_service_list]
role_host_info_list = registry.get_role_host_metadata(req.context, id) role_host_info_list = registry.get_role_host_metadata(req.context, id)
role_host_id_list = [role_host['host_id'] for role_host in role_host_info_list] role_host_id_list = [role_host['host_id']
self._check_role_update_parameters(req, role_meta, orig_role_meta, role_service_id_list, role_host_id_list) for role_host in role_host_info_list]
self._check_role_update_parameters(
req,
role_meta,
orig_role_meta,
role_service_id_list,
role_host_id_list)
if orig_role_meta['role_type'] == "CONTROLLER_HA":
cluster_meta = {}
cluster_meta['public_vip'] = role_meta.get(
'public_vip') or role_meta.get('vip')
if cluster_meta['public_vip']:
cluster_meta = registry.update_cluster_metadata(
req.context, orig_role_meta['cluster_id'], cluster_meta)
self._enforce(req, 'modify_image') self._enforce(req, 'modify_image')
# orig_role_meta = self.get_role_meta_or_404(req, id) # orig_role_meta = self.get_role_meta_or_404(req, id)
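The new `CONTROLLER_HA` branch above propagates a role-level VIP up to the owning cluster: when the updated role is the HA controller role, `public_vip` (falling back to `vip`) is written into the cluster record through `registry.update_cluster_metadata`. A hedged sketch of the same logic in isolation; the function and plain-dict arguments here are illustrative, not the registry API:

    def propagate_public_vip(role_type, role_meta, cluster_meta):
        # Mirror of the added branch: copy the HA role's VIP onto the
        # cluster dict. In the patch the write goes through
        # registry.update_cluster_metadata() instead of a plain dict.
        if role_type == "CONTROLLER_HA":
            public_vip = role_meta.get('public_vip') or role_meta.get('vip')
            if public_vip:
                cluster_meta['public_vip'] = public_vip
        return cluster_meta

    # Updating the HA role's vip also updates the cluster record:
    print(propagate_public_vip("CONTROLLER_HA", {'vip': '192.160.0.225'}, {}))
    # -> {'public_vip': '192.160.0.225'}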
@@ -735,6 +827,7 @@ class Controller(controller.BaseController):
         return {'role_meta': role_meta}

+
 class RoleDeserializer(wsgi.JSONRequestDeserializer):
     """Handles deserialization of specific controller method requests."""

@@ -749,6 +842,7 @@ class RoleDeserializer(wsgi.JSONRequestDeserializer):
     def update_role(self, request):
         return self._deserialize(request)

+
 class RoleSerializer(wsgi.JSONResponseSerializer):
     """Handles serialization of specific controller method responses."""

@@ -768,6 +862,7 @@ class RoleSerializer(wsgi.JSONResponseSerializer):
         response.headers['Content-Type'] = 'application/json'
         response.body = self.to_json(dict(role=role_meta))
         return response
+
     def get_role(self, response, result):
         role_meta = result['role_meta']
         response.status = 201

@@ -775,6 +870,7 @@ class RoleSerializer(wsgi.JSONResponseSerializer):
         response.body = self.to_json(dict(role=role_meta))
         return response

+
 def create_resource():
     """Roles resource factory method"""
     deserializer = RoleDeserializer()
View File
@@ -29,14 +29,17 @@ from daisy.api.v1 import networks
 from daisy.api.v1 import install
 from daisy.api.v1 import disk_array
 from daisy.api.v1 import host_template
+from daisy.api.v1 import hwms
 from daisy.common import wsgi
+from daisy.api.v1 import backup_restore


 class API(wsgi.Router):

     """WSGI router for Glance v1 API requests."""

     def __init__(self, mapper):
-        reject_method_resource = wsgi.Resource(wsgi.RejectMethodController())
+        wsgi.Resource(wsgi.RejectMethodController())

         '''images_resource = images.create_resource()

@@ -126,7 +129,6 @@ class API(wsgi.Router):
                        controller=members_resource,
                        action="index_shared_images")'''

         hosts_resource = hosts.create_resource()
-
         mapper.connect("/nodes",

@@ -150,6 +152,12 @@ class API(wsgi.Router):
                        controller=hosts_resource,
                        action='get_host',
                        conditions={'method': ['GET']})
+
+        mapper.connect("/hwm_nodes",
+                       controller=hosts_resource,
+                       action='update_hwm_host',
+                       conditions={'method': ['POST']})
+
         mapper.connect("/discover_host/",
                        controller=hosts_resource,
                        action='discover_host',

@@ -180,6 +188,40 @@ class API(wsgi.Router):
                        action='get_discover_host_detail',
                        conditions={'method': ['GET']})
+
+        mapper.connect("/pxe_discover/nodes",
+                       controller=hosts_resource,
+                       action='add_pxe_host',
+                       conditions={'method': ['POST']})
+
+        mapper.connect("/pxe_discover/nodes/{id}",
+                       controller=hosts_resource,
+                       action='update_pxe_host',
+                       conditions={'method': ['PUT']})
+
+        hwms_resource = hwms.create_resource()
+
+        mapper.connect("/hwm",
+                       controller=hwms_resource,
+                       action='add_hwm',
+                       conditions={'method': ['POST']})
+        mapper.connect("/hwm/{id}",
+                       controller=hwms_resource,
+                       action='delete_hwm',
+                       conditions={'method': ['DELETE']})
+        mapper.connect("/hwm/{id}",
+                       controller=hwms_resource,
+                       action='update_hwm',
+                       conditions={'method': ['PUT']})
+        mapper.connect("/hwm",
+                       controller=hwms_resource,
+                       action='list',
+                       conditions={'method': ['GET']})
+        mapper.connect("/hwm/{id}",
+                       controller=hwms_resource,
+                       action='detail',
+                       conditions={'method': ['GET']})

         clusters_resource = clusters.create_resource()
         mapper.connect("/clusters",
@@ -205,7 +247,6 @@ class API(wsgi.Router):
                        action='get_cluster',
                        conditions={'method': ['GET']})
-
         mapper.connect("/clusters/{id}",
                        controller=clusters_resource,
                        action='update_cluster',

@@ -222,7 +263,6 @@ class API(wsgi.Router):
                        action='update_template',
                        conditions={'method': ['PUT']})
-
         mapper.connect("/template/{template_id}",
                        controller=template_resource,
                        action='delete_template',

@@ -253,7 +293,6 @@ class API(wsgi.Router):
                        action='import_template_to_db',
                        conditions={'method': ['POST']})
-
         host_template_resource = host_template.create_resource()
         mapper.connect("/host_template",
                        controller=host_template_resource,

@@ -569,6 +608,22 @@ class API(wsgi.Router):
                        action='cinder_volume_detail',
                        conditions={'method': ['GET']})
+
+        backup_restore_resource = backup_restore.create_resource()
+        mapper.connect("/backup",
+                       controller=backup_restore_resource,
+                       action='backup',
+                       conditions={'method': ['POST']})
+        mapper.connect("/restore",
+                       controller=backup_restore_resource,
+                       action='restore',
+                       conditions={'method': ['POST']})
+        mapper.connect("/backup_file_version",
+                       controller=backup_restore_resource,
+                       action='get_backup_file_version',
+                       conditions={'method': ['POST']})
+        mapper.connect("/version",
+                       controller=backup_restore_resource,
+                       action='version',
+                       conditions={'method': ['POST']})

         super(API, self).__init__(mapper)
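All four backup/restore routes are registered as POST, including the two read-style calls (`get_backup_file_version`, `version`), presumably because they carry a JSON body. Assuming the API is reachable on its usual endpoint, a client call would look roughly like the sketch below; the base URL and payload fields are assumptions, only the paths and methods come from the diff:

    import json
    import urllib2  # Python 2, matching the codebase; urllib.request on Py3

    DAISY = 'http://127.0.0.1:19292/v1'  # assumed daisy-api endpoint

    def post(path, body):
        # urlopen() issues a POST whenever a request body is supplied.
        req = urllib2.Request(DAISY + path, data=json.dumps(body),
                              headers={'Content-Type': 'application/json'})
        return json.load(urllib2.urlopen(req))

    # POST /backup -> action 'backup'; POST /version -> action 'version'
    # print(post('/backup', {}))
    # print(post('/version', {'type': 'tecs'}))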
View File
@@ -52,12 +52,13 @@ CONF.import_opt('container_formats', 'daisy.common.config',
                 group='image_format')
 CONF.import_opt('image_property_quota', 'daisy.common.config')

+
 class Controller(controller.BaseController):
     """
     WSGI controller for services resource in Daisy v1 API

-    The services resource API is a RESTful web service for service data. The API
-    is as follows::
+    The services resource API is a RESTful web service for service data.
+    The API is as follows::

         GET /services -- Returns a set of brief metadata about services
         GET /services/detail -- Returns a set of detailed metadata about

@@ -124,7 +125,8 @@ class Controller(controller.BaseController):
     def _raise_404_if_component_deleted(self, req, component_id):
         component = self.get_component_meta_or_404(req, component_id)
         if component['deleted']:
-            msg = _("Component with identifier %s has been deleted.") % component_id
+            msg = _("Component with identifier %s has been deleted.") % \
+                component_id
             raise HTTPNotFound(msg)

     @utils.mutating

@@ -141,7 +143,7 @@ class Controller(controller.BaseController):
         service_name = service_meta["name"]
         service_description = service_meta["description"]

-        if service_meta.has_key('component_id'):
+        if 'component_id' in service_meta:
             orig_component_id = str(service_meta['component_id'])
             self._raise_404_if_component_deleted(req, orig_component_id)

@@ -182,7 +184,8 @@ class Controller(controller.BaseController):
                                 request=req,
                                 content_type="text/plain")
         except exception.InUseByStore as e:
-            msg = (_("service %(id)s could not be deleted because it is in use: "
+            msg = (_("service %(id)s could not be deleted "
+                     "because it is in use: "
                      "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
             LOG.warn(msg)
             raise HTTPConflict(explanation=msg,

@@ -287,6 +290,7 @@ class Controller(controller.BaseController):
         return {'service_meta': service_meta}

+
 class ServiceDeserializer(wsgi.JSONRequestDeserializer):
     """Handles deserialization of specific controller method requests."""

@@ -301,6 +305,7 @@ class ServiceDeserializer(wsgi.JSONRequestDeserializer):
     def update_service(self, request):
         return self._deserialize(request)

+
 class ServiceSerializer(wsgi.JSONResponseSerializer):
     """Handles serialization of specific controller method responses."""

@@ -320,6 +325,7 @@ class ServiceSerializer(wsgi.JSONResponseSerializer):
         response.headers['Content-Type'] = 'application/json'
         response.body = self.to_json(dict(service=service_meta))
         return response
+
     def get_service(self, response, result):
         service_meta = result['service_meta']
         response.status = 201

@@ -327,6 +333,7 @@ class ServiceSerializer(wsgi.JSONResponseSerializer):
         response.body = self.to_json(dict(service=service_meta))
         return response

+
 def create_resource():
     """Services resource factory method"""
     deserializer = ServiceDeserializer()
View File
@@ -42,10 +42,6 @@ from daisy.registry.api.v1 import template
 import daisy.api.backends.tecs.common as tecs_cmn
 import daisy.api.backends.common as daisy_cmn

-try:
-    import simplejson as json
-except ImportError:
-    import json

 daisy_tecs_path = tecs_cmn.daisy_tecs_path

@@ -64,12 +60,13 @@ CONF.import_opt('container_formats', 'daisy.common.config',
                 group='image_format')
 CONF.import_opt('image_property_quota', 'daisy.common.config')

+
 class Controller(controller.BaseController):
     """
     WSGI controller for Templates resource in Daisy v1 API

-    The Templates resource API is a RESTful web Template for Template data. The API
-    is as follows::
+    The Templates resource API is a RESTful web Template for Template data.
+    The API is as follows::

         GET /Templates -- Returns a set of brief metadata about Templates
         GET /Templates/detail -- Returns a set of detailed metadata about

@@ -136,8 +133,9 @@ class Controller(controller.BaseController):
     def _raise_404_if_cluster_deleted(self, req, cluster_id):
         cluster = self.get_cluster_meta_or_404(req, cluster_id)
         if cluster['deleted']:
-            msg = _("Cluster with identifier %s has been deleted.") % cluster_id
-            raise webob.exc.HTTPNotFound(msg)
+            msg = _("Cluster with identifier %s has been deleted.") % \
+                cluster_id
+            raise HTTPNotFound(msg)

     @utils.mutating
     def add_template(self, req, template):

@@ -150,7 +148,6 @@ class Controller(controller.BaseController):
         :raises HTTPBadRequest if x-Template-name is missing
         """
         self._enforce(req, 'add_template')
-        template_name = template["name"]

         template = registry.add_template_metadata(req.context, template)

@@ -202,6 +199,7 @@ class Controller(controller.BaseController):
         self.notifier.info('template.update', template)
         return {'template': template}

+
     @utils.mutating
     def delete_template(self, req, template_id):
         """

@@ -230,8 +228,10 @@ class Controller(controller.BaseController):
                                 request=req,
                                 content_type="text/plain")
         except exception.InUseByStore as e:
-            msg = (_("template %(id)s could not be deleted because it is in use: "
-                     "%(exc)s") % {"id": template_id, "exc": utils.exception_to_str(e)})
+            msg = (_("template %(id)s could not be deleted "
+                     "because it is in use: "
+                     "%(exc)s") % {"id": template_id,
+                                   "exc": utils.exception_to_str(e)})
             LOG.error(msg)
             raise HTTPConflict(explanation=msg,
                                request=req,

@@ -260,6 +260,26 @@ class Controller(controller.BaseController):
         del cluster['base_mac']
         del cluster['name']

+    def _get_cinder_volumes(self, req, role):
+        cinder_volume_params = {'filters': {'role_id': role['id']}}
+        cinder_volumes = registry.list_cinder_volume_metadata(
+            req.context, **cinder_volume_params)
+        for cinder_volume in cinder_volumes:
+            if cinder_volume.get('role_id', None):
+                cinder_volume['role_id'] = role['name']
+            self._del_general_params(cinder_volume)
+        return cinder_volumes
+
+    def _get_services_disk(self, req, role):
+        params = {'filters': {'role_id': role['id']}}
+        services_disk = registry.list_service_disk_metadata(
+            req.context, **params)
+        for service_disk in services_disk:
+            if service_disk.get('role_id', None):
+                service_disk['role_id'] = role['name']
+            self._del_general_params(service_disk)
+        return services_disk
+
     @utils.mutating
     def export_db_to_json(self, req, template):
         """
@@ -273,6 +293,7 @@ class Controller(controller.BaseController):
         template_name = template.get('template_name', None)
         self._enforce(req, 'export_db_to_json')
         cinder_volume_list = []
+        service_disk_list = []
         template_content = {}
         template_json = {}
         template_id = ""

@@ -285,22 +306,26 @@ class Controller(controller.BaseController):
             else:
                 msg = "the cluster %s is not exist" % cluster_name
                 LOG.error(msg)
-                raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain")
+                raise HTTPForbidden(
+                    explanation=msg,
+                    request=req,
+                    content_type="text/plain")

             params = {'filters': {'cluster_id': cluster_id}}
-            cluster = registry.get_cluster_metadata(req.context, cluster_id)
+            cluster = registry.get_cluster_metadata(
+                req.context, cluster_id)
             roles = registry.get_roles_detail(req.context, **params)
-            networks = registry.get_networks_detail(req.context, cluster_id,**params)
+            networks = registry.get_networks_detail(
+                req.context, cluster_id, **params)
             for role in roles:
-                cinder_volume_params = {'filters': {'role_id':role['id']}}
-                cinder_volumes = registry.list_cinder_volume_metadata(req.context, **cinder_volume_params)
-                for cinder_volume in cinder_volumes:
-                    if cinder_volume.get('role_id',None):
-                        cinder_volume['role_id'] = role['name']
-                    self._del_general_params(cinder_volume)
-                    cinder_volume_list.append(cinder_volume)
+                cinder_volumes = self._get_cinder_volumes(req, role)
+                cinder_volume_list += cinder_volumes
+                services_disk = self._get_services_disk(req, role)
+                service_disk_list += services_disk
                 if role.get('config_set_id', None):
-                    config_set = registry.get_config_set_metadata(req.context, role['config_set_id'])
+                    config_set = registry.get_config_set_metadata(
+                        req.context, role['config_set_id'])
                     role['config_set_id'] = config_set['name']
                 del role['cluster_id']
                 del role['status']

@@ -309,7 +334,8 @@ class Controller(controller.BaseController):
                 del role['config_set_update_progress']
                 self._del_general_params(role)
             for network in networks:
-                network_detail = registry.get_network_metadata(req.context, network['id'])
+                network_detail = registry.get_network_metadata(
+                    req.context, network['id'])
                 if network_detail.get('ip_ranges', None):
                     network['ip_ranges'] = network_detail['ip_ranges']
                 del network['cluster_id']

@@ -334,39 +360,48 @@ class Controller(controller.BaseController):
             template_content['roles'] = roles
             template_content['networks'] = networks
             template_content['cinder_volumes'] = cinder_volume_list
+            template_content['services_disk'] = service_disk_list
             template_json['content'] = json.dumps(template_content)
             template_json['type'] = 'tecs'
             template_json['name'] = template_name
             template_json['description'] = description

             template_host_params = {'cluster_name': cluster_name}
-            template_hosts = registry.host_template_lists_metadata(req.context, **template_host_params)
+            template_hosts = registry.host_template_lists_metadata(
+                req.context, **template_host_params)
             if template_hosts:
                 template_json['hosts'] = template_hosts[0]['hosts']
             else:
                 template_json['hosts'] = "[]"

             template_params = {'filters': {'name': template_name}}
-            template_list = registry.template_lists_metadata(req.context, **template_params)
+            template_list = registry.template_lists_metadata(
+                req.context, **template_params)
             if template_list:
-                update_template = registry.update_template_metadata(req.context, template_list[0]['id'], template_json)
+                registry.update_template_metadata(
+                    req.context, template_list[0]['id'], template_json)
                 template_id = template_list[0]['id']
             else:
-                add_template = registry.add_template_metadata(req.context, template_json)
+                add_template = registry.add_template_metadata(
+                    req.context, template_json)
                 template_id = add_template['id']

             if template_id:
-                template_detail = registry.template_detail_metadata(req.context, template_id)
+                template_detail = registry.template_detail_metadata(
+                    req.context, template_id)
                 self._del_general_params(template_detail)
-                template_detail['content'] = json.loads(template_detail['content'])
+                template_detail['content'] = json.loads(
+                    template_detail['content'])
                 if template_detail['hosts']:
-                    template_detail['hosts'] = json.loads(template_detail['hosts'])
+                    template_detail['hosts'] = json.loads(
+                        template_detail['hosts'])

                 tecs_json = daisy_tecs_path + "%s.json" % template_name
                 cmd = 'rm -rf %s' % (tecs_json,)
                 daisy_cmn.subprocess_call(cmd)
                 with open(tecs_json, "w+") as fp:
-                    fp.write(json.dumps(template_detail))
+                    json.dump(template_detail, fp, indent=2)
         except exception.Invalid as e:
             raise HTTPBadRequest(explanation=e.msg, request=req)
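The last change in this hunk swaps `fp.write(json.dumps(template_detail))` for `json.dump(template_detail, fp, indent=2)`: `json.dump` serializes straight to the file object instead of building the whole string in memory first, and `indent=2` pretty-prints the exported template. The two forms are otherwise equivalent:

    import json

    template_detail = {'name': 'tpl-1', 'content': {'roles': []}}

    with open('/tmp/tpl_old.json', 'w+') as fp:
        fp.write(json.dumps(template_detail))     # old: compact, in-memory

    with open('/tmp/tpl_new.json', 'w+') as fp:
        json.dump(template_detail, fp, indent=2)  # new: streamed, readable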
@@ -381,22 +416,28 @@ class Controller(controller.BaseController):
         template_params = {'filters': {'name': template_name}}
         try:
             if template_cluster.get('content', None):
-                template_cluster['content'] = json.dumps(template_cluster['content'])
+                template_cluster['content'] = json.dumps(
+                    template_cluster['content'])
             if template_cluster.get('hosts', None):
-                template_cluster['hosts'] = json.dumps(template_cluster['hosts'])
+                template_cluster['hosts'] = json.dumps(
+                    template_cluster['hosts'])
             else:
                 template_cluster['hosts'] = "[]"

-            template_list = registry.template_lists_metadata(req.context, **template_params)
+            template_list = registry.template_lists_metadata(
+                req.context, **template_params)
             if template_list:
-                update_template_cluster = registry.update_template_metadata(req.context, template_list[0]['id'], template_cluster)
+                registry.update_template_metadata(
+                    req.context, template_list[0]['id'], template_cluster)
                 template_id = template_list[0]['id']
             else:
-                add_template_cluster = registry.add_template_metadata(req.context, template_cluster)
+                add_template_cluster = registry.add_template_metadata(
+                    req.context, template_cluster)
                 template_id = add_template_cluster['id']

             if template_id:
-                template_detail = registry.template_detail_metadata(req.context, template_id)
+                template_detail = registry.template_detail_metadata(
+                    req.context, template_id)
                 del template_detail['deleted']
                 del template_detail['deleted_at']

@@ -405,6 +446,42 @@ class Controller(controller.BaseController):
         return {"template": template_detail}

+    def _import_cinder_volumes_to_db(self, req,
+                                     template_cinder_volumes, roles):
+        for template_cinder_volume in template_cinder_volumes:
+            has_template_role = False
+            for role in roles:
+                if template_cinder_volume['role_id'] == role['name']:
+                    has_template_role = True
+                    template_cinder_volume['role_id'] = role['id']
+                    break
+            if has_template_role:
+                registry.add_cinder_volume_metadata(req.context,
+                                                    template_cinder_volume)
+            else:
+                msg = "can't find role %s in new cluster when\
+                       import cinder_volumes from template"\
+                       % template_cinder_volume['role_id']
+                raise HTTPBadRequest(explanation=msg, request=req)
+
+    def _import_services_disk_to_db(self, req,
+                                    template_services_disk, roles):
+        for template_service_disk in template_services_disk:
+            has_template_role = False
+            for role in roles:
+                if template_service_disk['role_id'] == role['name']:
+                    has_template_role = True
+                    template_service_disk['role_id'] = role['id']
+                    break
+            if has_template_role:
+                registry.add_service_disk_metadata(req.context,
+                                                   template_service_disk)
+            else:
+                msg = "can't find role %s in new cluster when\
+                       import service_disks from template"\
+                       % template_service_disk['role_id']
+                raise HTTPBadRequest(explanation=msg, request=req)
+
     @utils.mutating
     def import_template_to_db(self, req, template):
         cluster_id = ""

@@ -414,58 +491,93 @@ class Controller(controller.BaseController):
         template_name = template_meta.get('name', None)
         cluster_name = template_meta.get('cluster', None)
         template_params = {'filters': {'name': template_name}}
-        template_list = registry.template_lists_metadata(req.context, **template_params)
+        template_list = registry.template_lists_metadata(
+            req.context, **template_params)
         if template_list:
             template_cluster = template_list[0]
         else:
             msg = "the template %s is not exist" % template_name
             LOG.error(msg)
-            raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain")
+            raise HTTPForbidden(
+                explanation=msg,
+                request=req,
+                content_type="text/plain")

         try:
             template_content = json.loads(template_cluster['content'])
             template_content_cluster = template_content['cluster']
             template_content_cluster['name'] = cluster_name
-            template_content_cluster['networking_parameters'] = str(template_content_cluster['networking_parameters'])
-            template_content_cluster['logic_networks'] = str(template_content_cluster['logic_networks'])
-            template_content_cluster['logic_networks'] = template_content_cluster['logic_networks'].replace("\'true\'","True")
-            template_content_cluster['routers'] = str(template_content_cluster['routers'])
+            template_content_cluster['networking_parameters'] = str(
+                template_content_cluster['networking_parameters'])
+            template_content_cluster['logic_networks'] = str(
+                template_content_cluster['logic_networks'])
+            template_content_cluster['logic_networks'] = \
+                template_content_cluster[
+                    'logic_networks'].replace("\'true\'", "True")
+            template_content_cluster['routers'] = str(
+                template_content_cluster['routers'])

             if template_cluster['hosts']:
                 template_hosts = json.loads(template_cluster['hosts'])
                 template_host_params = {'cluster_name': cluster_name}
-                template_host_list = registry.host_template_lists_metadata(req.context, **template_host_params)
+                template_host_list = registry.host_template_lists_metadata(
+                    req.context, **template_host_params)
                 if template_host_list:
-                    update_template_meta = {"cluster_name": cluster_name, "hosts":json.dumps(template_hosts)}
-                    registry.update_host_template_metadata(req.context, template_host_list[0]['id'], update_template_meta)
+                    update_template_meta = {
+                        "cluster_name": cluster_name,
+                        "hosts": json.dumps(template_hosts)}
+                    registry.update_host_template_metadata(
+                        req.context, template_host_list[0]['id'],
+                        update_template_meta)
                 else:
-                    template_meta = {"cluster_name": cluster_name, "hosts":json.dumps(template_hosts)}
-                    registry.add_host_template_metadata(req.context, template_meta)
+                    template_meta = {
+                        "cluster_name": cluster_name,
+                        "hosts": json.dumps(template_hosts)}
+                    registry.add_host_template_metadata(
+                        req.context, template_meta)

             cluster_params = {'filters': {'name': cluster_name}}
-            clusters = registry.get_clusters_detail(req.context, **cluster_params)
+            clusters = registry.get_clusters_detail(
+                req.context, **cluster_params)
             if clusters:
                 msg = "the cluster %s is exist" % clusters[0]['name']
                 LOG.error(msg)
-                raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain")
+                raise HTTPForbidden(
+                    explanation=msg,
+                    request=req,
+                    content_type="text/plain")
             else:
-                cluster_meta = registry.add_cluster_metadata(req.context, template_content['cluster'])
+                if template_content_cluster.get('auto_scale', None) == 1:
+                    params = {'filters': ''}
+                    clusters_list = registry.get_clusters_detail(
+                        req.context, **params)
+                    for cluster in clusters_list:
+                        if cluster.get('auto_scale', None) == 1:
+                            template_content_cluster['auto_scale'] = 0
+                            break
+                cluster_meta = registry.add_cluster_metadata(
+                    req.context, template_content['cluster'])
                 cluster_id = cluster_meta['id']

             params = {'filters': {}}
-            networks = registry.get_networks_detail(req.context, cluster_id,**params)
+            networks = registry.get_networks_detail(
+                req.context, cluster_id, **params)
             template_content_networks = template_content['networks']
             for template_content_network in template_content_networks:
-                template_content_network['ip_ranges'] = str(template_content_network['ip_ranges'])
+                template_content_network['ip_ranges'] = str(
+                    template_content_network['ip_ranges'])
                 network_exist = 'false'
                 for network in networks:
                     if template_content_network['name'] == network['name']:
-                        update_network_meta = registry.update_network_metadata(req.context, network['id'], template_content_network)
+                        registry.update_network_metadata(
+                            req.context, network['id'],
+                            template_content_network)
                         network_exist = 'true'

                 if network_exist == 'false':
                     template_content_network['cluster_id'] = cluster_id
-                    add_network_meta = registry.add_network_metadata(req.context, template_content_network)
+                    registry.add_network_metadata(
+                        req.context, template_content_network)

             params = {'filters': {'cluster_id': cluster_id}}
             roles = registry.get_roles_detail(req.context, **params)
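The added `auto_scale` block enforces a singleton rule at import time: if the imported cluster requests `auto_scale == 1` but some existing cluster already auto-scales, the imported one is demoted to 0 before it is created. The same rule in isolation (plain dicts stand in for the registry rows):

    def demote_auto_scale(new_cluster, existing_clusters):
        # Only one cluster may keep auto_scale == 1; the newcomer yields.
        if new_cluster.get('auto_scale') == 1:
            if any(c.get('auto_scale') == 1 for c in existing_clusters):
                new_cluster['auto_scale'] = 0
        return new_cluster

    print(demote_auto_scale({'auto_scale': 1},
                            [{'auto_scale': 1}, {'auto_scale': 0}]))
    # -> {'auto_scale': 0}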
@@ -475,29 +587,20 @@ class Controller(controller.BaseController):
                 del template_content_role['config_set_id']
                 for role in roles:
                     if template_content_role['name'] == role['name']:
-                        update_role_meta = registry.update_role_metadata(req.context, role['id'], template_content_role)
+                        registry.update_role_metadata(
+                            req.context, role['id'], template_content_role)
                         role_exist = 'true'

                 if role_exist == 'false':
                     template_content_role['cluster_id'] = cluster_id
-                    add_role_meta = registry.add_role_metadata(req.context, template_content_role)
+                    registry.add_role_metadata(
+                        req.context, template_content_role)

-            cinder_volumes = registry.list_cinder_volume_metadata(req.context, **params)
-            template_content_cinder_volumes = template_content['cinder_volumes']
-            for template_content_cinder_volume in template_content_cinder_volumes:
-                cinder_volume_exist = 'false'
-                roles = registry.get_roles_detail(req.context, **params)
-                for role in roles:
-                    if template_content_cinder_volume['role_id'] == role['name']:
-                        template_content_cinder_volume['role_id'] = role['id']
-
-                for cinder_volume in cinder_volumes:
-                    if template_content_cinder_volume['role_id'] == cinder_volume['role_id']:
-                        update_cinder_volume_meta = registry.update_cinder_volume_metadata(req.context, cinder_volume['id'], template_content_cinder_volume)
-                        cinder_volume_exist = 'true'
-
-                if cinder_volume_exist == 'false':
-                    add_cinder_volumes = registry.add_cinder_volume_metadata(req.context, template_content_cinder_volume)
+            self._import_cinder_volumes_to_db(
+                req, template_content['cinder_volumes'], roles)
+            self._import_services_disk_to_db(req,
+                                             template_content['services_disk'],
+                                             roles)

         except exception.Invalid as e:
             raise HTTPBadRequest(explanation=e.msg, request=req)

@@ -513,7 +616,8 @@ class Controller(controller.BaseController):
         """
         self._enforce(req, 'get_template_detail')
         try:
-            template = registry.template_detail_metadata(req.context, template_id)
+            template = registry.template_detail_metadata(
+                req.context, template_id)
             return {'template': template}
         except exception.NotFound as e:
             msg = (_("Failed to find template: %s") %

@@ -531,7 +635,8 @@ class Controller(controller.BaseController):
                                 content_type="text/plain")
         except exception.InUseByStore as e:
             msg = (_("template %(id)s could not be get because it is in use: "
-                     "%(exc)s") % {"id": template_id, "exc": utils.exception_to_str(e)})
+                     "%(exc)s") % {"id": template_id,
+                                   "exc": utils.exception_to_str(e)})
             LOG.error(msg)
             raise HTTPConflict(explanation=msg,
                                request=req,

@@ -544,11 +649,13 @@ class Controller(controller.BaseController):
         self._enforce(req, 'get_template_lists')
         params = self._get_query_params(req)
         try:
-            template_lists = registry.template_lists_metadata(req.context, **params)
+            template_lists = registry.template_lists_metadata(
+                req.context, **params)
         except exception.Invalid as e:
             raise HTTPBadRequest(explanation=e.msg, request=req)
         return dict(template=template_lists)

+
 class TemplateDeserializer(wsgi.JSONRequestDeserializer):
     """Handles deserialization of specific controller method requests."""

@@ -572,6 +679,7 @@ class TemplateDeserializer(wsgi.JSONRequestDeserializer):
     def import_template_to_db(self, request):
         return self._deserialize(request)

+
 class TemplateSerializer(wsgi.JSONResponseSerializer):
     """Handles serialization of specific controller method responses."""

@@ -591,12 +699,14 @@ class TemplateSerializer(wsgi.JSONResponseSerializer):
         response.headers['Content-Type'] = 'application/json'
         response.body = self.to_json(dict(template=template))
         return response
+
     def get_template_detail(self, response, result):
         template = result['template']
         response.status = 201
         response.headers['Content-Type'] = 'application/json'
         response.body = self.to_json(dict(template=template))
         return response
+
     def update_template(self, response, result):
         template = result['template']
         response.status = 201

@@ -622,6 +732,7 @@ class TemplateSerializer(wsgi.JSONResponseSerializer):
         response.body = self.to_json(result)
         return response

+
 def create_resource():
     """Templates resource factory method"""
     deserializer = TemplateDeserializer()
View File
@@ -32,6 +32,7 @@ _LI = i18n._LI

 class ImageActionsController(object):
+
     def __init__(self, db_api=None, policy_enforcer=None, notifier=None,
                  store_api=None):
         self.db_api = db_api or daisy.db.get_api()
View File
@@ -33,6 +33,7 @@ _LE = i18n._LE

 class ImageDataController(object):
+
     def __init__(self, db_api=None, store_api=None,
                  policy_enforcer=None, notifier=None,
                  gateway=None):
View File
@@ -38,6 +38,7 @@ _ = i18n._

 class ImageMembersController(object):
+
     def __init__(self, db_api=None, policy_enforcer=None, notifier=None,
                  store_api=None):
         self.db_api = db_api or daisy.db.get_api()

@@ -250,6 +251,7 @@ class RequestDeserializer(wsgi.JSONRequestDeserializer):

 class ResponseSerializer(wsgi.JSONResponseSerializer):
+
     def __init__(self, schema=None):
         super(ResponseSerializer, self).__init__()
         self.schema = schema or get_schema()
View File
@@ -31,6 +31,7 @@ _ = i18n._

 class Controller(object):
+
     def __init__(self, db_api=None, policy_enforcer=None, notifier=None,
                  store_api=None):
         self.db_api = db_api or daisy.db.get_api()

@@ -85,6 +86,7 @@ class Controller(object):

 class ResponseSerializer(wsgi.JSONResponseSerializer):
+
     def update(self, response, result):
         response.status_int = 204
View File
@@ -46,6 +46,7 @@ CONF.import_opt('container_formats', 'daisy.common.config',

 class ImagesController(object):
+
     def __init__(self, db_api=None, policy_enforcer=None, notifier=None,
                  store_api=None):
         self.db_api = db_api or daisy.db.get_api()

@@ -188,8 +189,8 @@ class ImagesController(object):
                 self._do_add_locations(image, path[1], value)
             else:
                 if ((hasattr(image, path_root) or
-                        path_root in image.extra_properties)
-                        and json_schema_version == 4):
+                        path_root in image.extra_properties) and
+                        json_schema_version == 4):
                     msg = _("Property %s already present.")
                     raise webob.exc.HTTPConflict(msg % path_root)
                 if hasattr(image, path_root):
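This rewrap moves the binary operator to the end of the first line instead of the start of the continuation, matching the pycodestyle default of the time (W503 flagged a break *before* a binary operator). The same style on a toy condition:

    # Break after the operator, not before it (avoids the then-default W503).
    extra_properties = {'visibility': 'public'}
    json_schema_version = 4

    if (('visibility' in extra_properties) and
            json_schema_version == 4):
        print('property already present')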
@@ -681,6 +682,7 @@ class RequestDeserializer(wsgi.JSONRequestDeserializer):

 class ResponseSerializer(wsgi.JSONResponseSerializer):
+
     def __init__(self, schema=None):
         super(ResponseSerializer, self).__init__()
         self.schema = schema or get_schema()
View File
@@ -48,6 +48,7 @@ CONF = cfg.CONF

 class NamespaceController(object):
+
     def __init__(self, db_api=None, policy_enforcer=None, notifier=None):
         self.db_api = db_api or daisy.db.get_api()
         self.policy = policy_enforcer or policy.Enforcer()

@@ -486,6 +487,7 @@ class RequestDeserializer(wsgi.JSONRequestDeserializer):

 class ResponseSerializer(wsgi.JSONResponseSerializer):
+
     def __init__(self, schema=None):
         super(ResponseSerializer, self).__init__()
         self.schema = schema
View File
@@ -42,6 +42,7 @@ CONF = cfg.CONF

 class MetadefObjectsController(object):
+
     def __init__(self, db_api=None, policy_enforcer=None, notifier=None):
         self.db_api = db_api or daisy.db.get_api()
         self.policy = policy_enforcer or policy.Enforcer()

@@ -294,6 +295,7 @@ class RequestDeserializer(wsgi.JSONRequestDeserializer):

 class ResponseSerializer(wsgi.JSONResponseSerializer):
+
     def __init__(self, schema=None):
         super(ResponseSerializer, self).__init__()
         self.schema = schema or get_schema()
View File
@@ -40,6 +40,7 @@ _LI = i18n._LI

 class NamespacePropertiesController(object):
+
     def __init__(self, db_api=None, policy_enforcer=None, notifier=None):
         self.db_api = db_api or daisy.db.get_api()
         self.policy = policy_enforcer or policy.Enforcer()

@@ -213,6 +214,7 @@ class RequestDeserializer(wsgi.JSONRequestDeserializer):

 class ResponseSerializer(wsgi.JSONResponseSerializer):
+
     def __init__(self, schema=None):
         super(ResponseSerializer, self).__init__()
         self.schema = schema
View File
@@ -40,6 +40,7 @@ _LI = i18n._LI

 class ResourceTypeController(object):
+
     def __init__(self, db_api=None, policy_enforcer=None, notifier=None):
         self.db_api = db_api or daisy.db.get_api()
         self.policy = policy_enforcer or policy.Enforcer()

@@ -167,6 +168,7 @@ class RequestDeserializer(wsgi.JSONRequestDeserializer):

 class ResponseSerializer(wsgi.JSONResponseSerializer):
+
     def __init__(self, schema=None):
         super(ResponseSerializer, self).__init__()
         self.schema = schema
View File
@@ -229,8 +229,8 @@ class ResponseSerializer(wsgi.JSONResponseSerializer):
     def __init__(self, task_schema=None, partial_task_schema=None):
         super(ResponseSerializer, self).__init__()
         self.task_schema = task_schema or get_task_schema()
-        self.partial_task_schema = (partial_task_schema
-                                    or _get_partial_task_schema())
+        self.partial_task_schema = (partial_task_schema or
+                                    _get_partial_task_schema())

     def _inject_location_header(self, response, task):
         location = self._get_task_location(task)
View File
@ -27,6 +27,17 @@ import sys
import eventlet
from daisy.common import utils
import glance_store
from oslo_config import cfg
from oslo_log import log as logging
import osprofiler.notifier
import osprofiler.web
from daisy.common import config
from daisy.common import exception
from daisy.common import wsgi
from daisy import notifier
from daisy.openstack.common import systemd
# Monkey patch socket, time, select, threads
eventlet.patcher.monkey_patch(all=False, socket=True, time=True,
@ -40,17 +51,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
import glance_store
from oslo_config import cfg
from oslo_log import log as logging
import osprofiler.notifier
import osprofiler.web
from daisy.common import config
from daisy.common import exception
from daisy.common import wsgi
from daisy import notifier
from daisy.openstack.common import systemd
CONF = cfg.CONF
CONF.import_group("profiler", "daisy.common.wsgi")
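Most of the entry-point hunks in this commit follow the same pattern: the third-party and daisy imports are hoisted above the sys.path manipulation block, which is what a flake8 E402 ("module level import not at top of file") cleanup produces. A minimal sketch of the resulting shape (the imports here are illustrative, not a specific file from the commit):

# All imports first, then the path fix-up runs after them.
import os
import sys

from oslo_config import cfg  # third-party import now at the top

# sys.path manipulation happens after all module-level imports
possible_topdir = os.path.normpath(
    os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'daisy', '__init__.py')):
    sys.path.insert(0, possible_topdir)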

View File

@ -31,8 +31,9 @@ period, we automatically sweep it up.
import os
import sys
from oslo_log import log as logging
from daisy.common import config
from daisy.image_cache import cleaner
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
@ -42,8 +43,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
from daisy.common import config
from daisy.image_cache import cleaner
CONF = config.CONF
logging.register_options(CONF)

View File

@ -19,16 +19,16 @@
A simple cache management utility for daisy.
"""
from __future__ import print_function
import functools
import optparse
import os
import sys
import time
from oslo_utils import timeutils
from daisy.common import utils
from daisy.common import exception
import daisy.image_cache.client
from daisy.version import version_info as version
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
@ -38,10 +38,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
from daisy.common import exception
import daisy.image_cache.client
from daisy.version import version_info as version
SUCCESS = 0
FAILURE = 1

View File

@ -24,6 +24,11 @@ images to be pretched.
import os
import sys
import glance_store
from oslo_log import log as logging
from daisy.common import config
from daisy.image_cache import prefetcher
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
@ -33,11 +38,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
import glance_store
from oslo_log import log as logging
from daisy.common import config
from daisy.image_cache import prefetcher
CONF = config.CONF
logging.register_options(CONF)

View File

@ -25,6 +25,8 @@ import os
import sys
from oslo_log import log as logging
from daisy.common import config
from daisy.image_cache import pruner
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
@ -34,8 +36,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
from daisy.common import config
from daisy.image_cache import pruner
CONF = config.CONF
logging.register_options(CONF)

View File

@ -30,6 +30,12 @@ import subprocess
import sys
import tempfile
import time
from oslo_config import cfg
from oslo_utils import units
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
from daisy.common import config
from daisy import i18n
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
@ -39,13 +45,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
from oslo_config import cfg
from oslo_utils import units
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
from daisy.common import config
from daisy import i18n
_ = i18n._

View File

@ -29,15 +29,6 @@ from __future__ import print_function
import os
import sys
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'daisy', '__init__.py')):
sys.path.insert(0, possible_topdir)
from oslo_config import cfg
from oslo_db.sqlalchemy import migration
from oslo_log import log as logging
@ -52,6 +43,14 @@ from daisy.db.sqlalchemy import api as db_api
from daisy.db.sqlalchemy import metadata
from daisy import i18n
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'daisy', '__init__.py')):
sys.path.insert(0, possible_topdir)
CONF = cfg.CONF
LOG = logging.getLogger(__name__)

View File

@ -23,8 +23,14 @@ Reference implementation server for Daisy orchestration
import os
import sys
import eventlet
from oslo_config import cfg
from oslo_log import log as logging
from daisy.common import exception
from daisy.common import config
from daisy.openstack.common import loopingcall
from daisy.orchestration.manager import OrchestrationManager
import six
# Monkey patch socket and time
eventlet.patcher.monkey_patch(all=False, socket=True, time=True, thread=True)
@ -37,23 +43,12 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'daisy', '__init__.py')):
sys.path.insert(0, possible_topdir)
from oslo_config import cfg
from oslo_log import log as logging
import osprofiler.notifier
import osprofiler.web
from daisy.common import exception
from daisy.common import config
from daisy.common import utils
from daisy.common import wsgi
from daisy import notifier
from daisy.openstack.common import systemd
from daisy.openstack.common import loopingcall
from daisy.orchestration.manager import OrchestrationManager
CONF = cfg.CONF
scale_opts = [
cfg.StrOpt('auto_scale_interval', default=60,
help='Number of seconds between two checkings to compute auto scale status'),
help='Number of seconds between two '
'checkings to compute auto scale status'),
]
CONF.register_opts(scale_opts, group='orchestration')
logging.register_options(CONF)
@ -62,6 +57,7 @@ logging.register_options(CONF)
def fail(returncode, e):
sys.stderr.write("ERROR: %s\n" % six.text_type(e))
def main():
try:
config.parse_args()

View File

@ -25,6 +25,16 @@ import os
import sys
import eventlet
from oslo_config import cfg
from oslo_log import log as logging
import osprofiler.notifier
import osprofiler.web
from daisy.common import config
from daisy.common import utils
from daisy.common import wsgi
from daisy import notifier
from daisy.openstack.common import systemd
# Monkey patch socket and time
eventlet.patcher.monkey_patch(all=False, socket=True, time=True, thread=True)
@ -37,16 +47,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'daisy', '__init__.py')):
sys.path.insert(0, possible_topdir)
from oslo_config import cfg
from oslo_log import log as logging
import osprofiler.notifier
import osprofiler.web
from daisy.common import config
from daisy.common import utils
from daisy.common import wsgi
from daisy import notifier
from daisy.openstack.common import systemd
CONF = cfg.CONF
CONF.import_group("profiler", "daisy.common.wsgi")

View File

@ -21,6 +21,13 @@ Glance Scrub Service
import os
import sys
import glance_store
from oslo_config import cfg
from oslo_log import log as logging
from daisy.common import config
from daisy.openstack.common import systemd
from daisy import scrubber
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
@ -30,14 +37,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
import glance_store
from oslo_config import cfg
from oslo_log import log as logging
from daisy.common import config
from daisy.openstack.common import systemd
from daisy import scrubber
CONF = cfg.CONF
logging.register_options(CONF)

View File

@ -27,6 +27,15 @@ import sys
import eventlet
from daisy.common import utils
from oslo.config import cfg
from oslo_log import log as logging
import osprofiler.notifier
import osprofiler.web
from daisy.common import config
from daisy.common import exception
from daisy.common import wsgi
from daisy import notifier
# Monkey patch socket, time, select, threads
eventlet.patcher.monkey_patch(socket=True, time=True, select=True,
@ -40,15 +49,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
from oslo.config import cfg
from oslo_log import log as logging
import osprofiler.notifier
import osprofiler.web
from daisy.common import config
from daisy.common import exception
from daisy.common import wsgi
from daisy import notifier
CONF = cfg.CONF
CONF.import_group("profiler", "daisy.common.wsgi")

View File

@ -399,6 +399,7 @@ class PropertyDefinition(AttributeDefinition):
class RelationDefinition(AttributeDefinition):
"""A base class for Attributes defining cross-artifact relations"""
def __init__(self, internal=False, **kwargs):
self.internal = internal
kwargs.setdefault('mutable', False)
@ -482,6 +483,7 @@ class ArtifactPropertyDescriptor(object):
class ArtifactAttributes(object):
"""A container class storing description of Artifact Type attributes"""
def __init__(self):
self.properties = {}
self.dependencies = {}

View File

@ -477,6 +477,7 @@ class ArtifactReferenceList(declarative.ListAttributeDefinition,
class Blob(object):
"""A Binary object being part of the Artifact"""
def __init__(self, size=0, locations=None, checksum=None, item_key=None):
"""Initializes a new Binary Object for an Artifact

View File

@ -48,6 +48,7 @@ CONF.register_opts(plugins_opts)
class ArtifactsPluginLoader(object):
def __init__(self, namespace):
self.mgr = enabled.EnabledExtensionManager(
check_func=self._gen_check_func(),

View File

@ -44,6 +44,7 @@ _ = i18n._
class BaseStrategy(object):
def __init__(self):
self.auth_token = None
# TODO(sirp): Should expose selecting public/internal/admin URL.
@ -62,6 +63,7 @@ class BaseStrategy(object):
class NoAuthStrategy(BaseStrategy):
def authenticate(self):
pass

View File

@ -27,6 +27,7 @@ _FATAL_EXCEPTION_FORMAT_ERRORS = False
class RedirectException(Exception):
def __init__(self, url):
self.url = urlparse.urlparse(url)
@ -336,13 +337,16 @@ class TaskException(DaisyException):
class BadTaskConfiguration(DaisyException):
message = _("Task was not configured properly")
class InstallException(DaisyException):
message = _("Cluster installation raised exception")
class InstallTimeoutException(DaisyException):
message = _(
"Time out, during install TECS components to cluster %(cluster_id)s")
class TaskNotFound(TaskException, NotFound):
message = _("Task with the given id %(task_id)s was not found")
@ -566,23 +570,32 @@ class InvalidJsonPatchPath(JsonPatchException):
class InvalidNetworkConfig(DaisyException):
pass
class InvalidIP(DaisyException):
pass
class OSInstallFailed(DaisyException):
message = _("os installation failed.")
class IMPIOprationFailed(DaisyException):
message = _("ipmi command failed.")
class ThreadBinException(DaisyException):
def __init__(self, *args):
super(ThreadBinException, self).__init__(*args)
class SubprocessCmdFailed(DaisyException):
message = _("subprocess command failed.")
class DeleteConstrainted(DaisyException):
message = _("delete is not allowed.")
class TrustMeFailed(DaisyException):
message = _("Trust me script failed.")

View File

@ -251,8 +251,8 @@ class RPCClient(client.BaseClient):
# checking if content contains the '_error' key,
# verify if it is an instance of dict - since the
# RPC call may have returned something different.
if self.raise_exc and (isinstance(content, dict)
and '_error' in content):
if self.raise_exc and (isinstance(content, dict) and
'_error' in content):
error = content['_error']
try:
exc_cls = imp.import_class(error['cls'])
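Another recurring cleanup in this commit is how long boolean conditions are wrapped: the break is moved so the and/or operator ends the first line instead of starting the continuation, matching the style checker's line-break-around-binary-operator rule (W503/W504 family). A tiny runnable illustration of the two shapes:

content = {'_error': 'boom'}
raise_exc = True

# old style: the operator opens the continuation line
if raise_exc and (isinstance(content, dict)
                  and '_error' in content):
    pass

# new style: the operator closes the first line
if raise_exc and (isinstance(content, dict) and
                  '_error' in content):
    print("error payload present")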

View File

@ -12,11 +12,6 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__all__ = [
'run',
]
from oslo_concurrency import lockutils
from oslo_log import log as logging
from oslo_utils import excutils
@ -28,6 +23,9 @@ from daisy.common.scripts import utils as script_utils
from daisy.common import store_utils
from daisy.common import utils as common_utils
from daisy import i18n
__all__ = [
'run',
]
LOG = logging.getLogger(__name__)

View File

@ -12,7 +12,12 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib2
from oslo_log import log as logging
from daisy.common import exception
from daisy import i18n
__all__ = [
'get_task',
'unpack_task_input',
@ -22,14 +27,6 @@ __all__ = [
]
import urllib2
from oslo_log import log as logging
from daisy.common import exception
from daisy import i18n
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
@ -100,7 +97,7 @@ def validate_location_uri(location):
"source of image data.")
# NOTE: raise Exception and let the encompassing block save
# the error msg in the task.message.
raise StandardError(msg)
raise Exception(msg)
else:
# TODO(nikhil): add other supported uris
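The StandardError replacement above is a Python 3 portability fix: StandardError exists only on Python 2 and was removed in Python 3, so the built-in Exception is the portable base class to raise. A quick illustrative check:

# StandardError is Python 2 only; Exception works on both.
try:
    StandardError
except NameError:
    # On Python 3 the old name is gone.
    pass

msg = "Invalid location uri"
try:
    raise Exception(msg)
except Exception as e:
    print(e)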

View File

@ -25,6 +25,7 @@ _ = i18n._
class DBVersion(object):
def __init__(self, components_long, prerelease, build):
"""
Creates a DBVersion object out of 3 component fields. This initializer
@ -54,8 +55,8 @@ class DBVersion(object):
other.version == self.version)
def __ne__(self, other):
return (not isinstance(other, DBVersion)
or self.version != other.version)
return (not isinstance(other, DBVersion) or
self.version != other.version)
def __composite_values__(self):
long_version = _version_to_long(self.version)

View File

@ -63,6 +63,7 @@ def is_multiple_swift_store_accounts_enabled():
class SwiftParams(object):
def __init__(self):
if is_multiple_swift_store_accounts_enabled():
self.params = self._load_config()
@ -71,8 +72,8 @@ class SwiftParams(object):
def _form_default_params(self):
default = {}
if (CONF.swift_store_user and CONF.swift_store_key
and CONF.swift_store_auth_address):
if (CONF.swift_store_user and CONF.swift_store_key and
CONF.swift_store_auth_address):
default['user'] = CONF.swift_store_user
default['key'] = CONF.swift_store_key
default['auth_address'] = CONF.swift_store_auth_address

View File

@ -21,6 +21,7 @@ System-level utilities and helper functions.
""" """
import errno import errno
from functools import reduce
try: try:
from eventlet import sleep from eventlet import sleep
@ -46,9 +47,11 @@ from oslo_utils import netutils
from oslo_utils import strutils from oslo_utils import strutils
import six import six
from webob import exc from webob import exc
import ConfigParser
from daisy.common import exception from daisy.common import exception
from daisy import i18n from daisy import i18n
from ironicclient import client as ironic_client
CONF = cfg.CONF CONF = cfg.CONF
@ -73,6 +76,11 @@ IMAGE_META_HEADERS = ['x-image-meta-location', 'x-image-meta-size',
DAISY_TEST_SOCKET_FD_STR = 'DAISY_TEST_SOCKET_FD' DAISY_TEST_SOCKET_FD_STR = 'DAISY_TEST_SOCKET_FD'
DISCOVER_DEFAULTS = {
'listen_port': '5050',
'ironic_url': 'http://127.0.0.1:6385/v1',
}
def chunkreadable(iter, chunk_size=65536): def chunkreadable(iter, chunk_size=65536):
""" """
@ -135,6 +143,7 @@ MAX_COOP_READER_BUFFER_SIZE = 134217728 # 128M seems like a sane buffer limit
class CooperativeReader(object): class CooperativeReader(object):
""" """
An eventlet thread friendly class for reading in image data. An eventlet thread friendly class for reading in image data.
@ -144,6 +153,7 @@ class CooperativeReader(object):
starvation, ie allows all threads to be scheduled periodically rather than starvation, ie allows all threads to be scheduled periodically rather than
having the same thread be continuously active. having the same thread be continuously active.
""" """
def __init__(self, fd): def __init__(self, fd):
""" """
:param fd: Underlying image file object :param fd: Underlying image file object
@ -223,10 +233,12 @@ class CooperativeReader(object):
class LimitingReader(object): class LimitingReader(object):
""" """
Reader designed to fail when reading image data past the configured Reader designed to fail when reading image data past the configured
allowable amount. allowable amount.
""" """
def __init__(self, data, limit): def __init__(self, data, limit):
""" """
:param data: Underlying image data object :param data: Underlying image data object
@ -330,72 +342,91 @@ def get_image_meta_from_headers(response):
result[key] = strutils.bool_from_string(result[key]) result[key] = strutils.bool_from_string(result[key])
return result return result
def get_host_meta(response): def get_host_meta(response):
result = {} result = {}
for key, value in response.json.items(): for key, value in response.json.items():
result[key] = value result[key] = value
return result return result
def get_hwm_meta(response):
result = {}
for key, value in response.json.items():
result[key] = value
return result
def get_cluster_meta(response): def get_cluster_meta(response):
result = {} result = {}
for key, value in response.json.items(): for key, value in response.json.items():
result[key] = value result[key] = value
return result return result
def get_component_meta(response): def get_component_meta(response):
result = {} result = {}
for key, value in response.json.items(): for key, value in response.json.items():
result[key] = value result[key] = value
return result return result
def get_service_meta(response): def get_service_meta(response):
result = {} result = {}
for key, value in response.json.items(): for key, value in response.json.items():
result[key] = value result[key] = value
return result return result
def get_template_meta(response): def get_template_meta(response):
result = {} result = {}
for key, value in response.json.items(): for key, value in response.json.items():
result[key] = value result[key] = value
return result return result
def get_role_meta(response): def get_role_meta(response):
result = {} result = {}
for key, value in response.json.items(): for key, value in response.json.items():
result[key] = value result[key] = value
return result return result
def get_config_file_meta(response): def get_config_file_meta(response):
result = {} result = {}
for key, value in response.json.items(): for key, value in response.json.items():
result[key] = value result[key] = value
return result return result
def get_config_set_meta(response): def get_config_set_meta(response):
result = {} result = {}
for key, value in response.json.items(): for key, value in response.json.items():
result[key] = value result[key] = value
return result return result
def get_config_meta(response): def get_config_meta(response):
result = {} result = {}
for key, value in response.json.items(): for key, value in response.json.items():
result[key] = value result[key] = value
return result return result
def get_network_meta(response): def get_network_meta(response):
result = {} result = {}
for key, value in response.json.items(): for key, value in response.json.items():
result[key] = value result[key] = value
return result return result
def get_dict_meta(response): def get_dict_meta(response):
result = {} result = {}
for key, value in response.json.items(): for key, value in response.json.items():
result[key] = value result[key] = value
return result return result
def create_mashup_dict(image_meta): def create_mashup_dict(image_meta):
""" """
Returns a dictionary-like mashup of the image core properties Returns a dictionary-like mashup of the image core properties
@ -434,6 +465,7 @@ def safe_remove(path):
class PrettyTable(object): class PrettyTable(object):
"""Creates an ASCII art table for use in bin/glance """Creates an ASCII art table for use in bin/glance
Example: Example:
@ -442,6 +474,7 @@ class PrettyTable(object):
--- ----------------- ------------ ----- --- ----------------- ------------ -----
122 image 22 0 122 image 22 0
""" """
def __init__(self): def __init__(self):
self.columns = [] self.columns = []
@ -507,7 +540,8 @@ def get_terminal_size():
try: try:
height_width = struct.unpack('hh', fcntl.ioctl(sys.stderr.fileno(), height_width = struct.unpack('hh', fcntl.ioctl(sys.stderr.fileno(),
termios.TIOCGWINSZ, termios.TIOCGWINSZ,
struct.pack('HH', 0, 0))) struct.pack(
'HH', 0, 0)))
except Exception: except Exception:
pass pass
@ -802,3 +836,254 @@ def get_search_plugins():
ext_manager = stevedore.extension.ExtensionManager( ext_manager = stevedore.extension.ExtensionManager(
namespace, invoke_on_load=True) namespace, invoke_on_load=True)
return ext_manager.extensions return ext_manager.extensions
def get_host_min_mac(host_interfaces):
if not isinstance(host_interfaces, list):
host_interfaces = eval(host_interfaces)
macs = [interface['mac'] for interface in host_interfaces
if interface['type'] == 'ether' and interface['mac']]
min_mac = min(macs)
return min_mac
def ip_into_int(ip):
"""
Switch ip string to decimalism integer..
:param ip: ip string
:return: decimalism integer
"""
return reduce(lambda x, y: (x << 8) + y, map(int, ip.split('.')))
def is_ip_in_cidr(ip, cidr):
"""
Check ip is in cidr
:param ip: Ip will be checked, like:192.168.1.2.
:param cidr: Ip range,like:192.168.0.0/24.
:return: If ip in cidr, return True, else return False.
"""
network = cidr.split('/')
mask = ~(2**(32 - int(network[1])) - 1)
return (ip_into_int(ip) & mask) == (ip_into_int(network[0]) & mask)
def is_ip_in_ranges(ip, ip_ranges):
"""
Check ip is in range
: ip: Ip will be checked, like:192.168.1.2.
: ip_ranges : Ip ranges, like:
[{'start':'192.168.0.10', 'end':'192.168.0.20'}
{'start':'192.168.0.50', 'end':'192.168.0.60'}]
:return: If ip in ip_ranges, return True, else return False.
"""
for ip_range in ip_ranges:
start_ip_int = ip_into_int(ip_range['start'])
end_ip_int = ip_into_int(ip_range['end'])
ip_int = ip_into_int(ip)
if ip_int >= start_ip_int and ip_int <= end_ip_int:
return True
return False
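The three IP helpers above are plain integer arithmetic: an address is packed into a 32-bit integer, a CIDR mask is built from the prefix length, and range membership reduces to integer comparison. An illustrative usage, assuming the functions above are in scope:

print(ip_into_int('192.168.1.2'))                      # 3232235778
print(is_ip_in_cidr('192.168.1.2', '192.168.0.0/16'))  # True
print(is_ip_in_cidr('10.0.0.1', '192.168.0.0/16'))     # False
ranges = [{'start': '192.168.0.10', 'end': '192.168.0.20'}]
print(is_ip_in_ranges('192.168.0.15', ranges))         # True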
def get_ironicclient(): # pragma: no cover
"""Get Ironic client instance."""
config_discoverd = ConfigParser.ConfigParser(defaults=DISCOVER_DEFAULTS)
config_discoverd.read("/etc/ironic-discoverd/discoverd.conf")
ironic_url = config_discoverd.get("discoverd", "ironic_url")
args = {'os_auth_token': 'fake',
'ironic_url': ironic_url}
return ironic_client.get_client(1, **args)
def get_host_hw_info(host_interface):
host_hw_config = {}
ironicclient = get_ironicclient()
if host_interface:
min_mac = get_host_min_mac(host_interface)
try:
host_obj = ironicclient.physical_node.get(min_mac)
host_hw_config = dict([(f, getattr(host_obj, f, ''))
for f in ['system', 'memory', 'cpu',
'disks', 'interfaces',
'pci', 'devices']])
except Exception:
LOG.exception(_LE("Unable to find ironic data %s")
% Exception)
return host_hw_config
def get_dvs_interfaces(host_interfaces):
dvs_interfaces = []
if not isinstance(host_interfaces, list):
host_interfaces = eval(host_interfaces)
for interface in host_interfaces:
if not isinstance(interface, dict):
interface = eval(interface)
if ('vswitch_type' in interface and
interface['vswitch_type'] == 'dvs'):
dvs_interfaces.append(interface)
return dvs_interfaces
def get_clc_pci_info(pci_info):
clc_pci = []
flag1 = 'Intel Corporation Coleto Creek PCIe Endpoint'
flag2 = '8086:0435'
for pci in pci_info:
if flag1 in pci or flag2 in pci:
clc_pci.append(pci.split()[0])
return clc_pci
def cpu_str_to_list(spec):
"""Parse a CPU set specification.
:param spec: cpu set string eg "1-4,^3,6"
Each element in the list is either a single
CPU number, a range of CPU numbers, or a
caret followed by a CPU number to be excluded
from a previous range.
:returns: a set of CPU indexes
"""
cpusets = []
if not spec:
return cpusets
cpuset_ids = set()
cpuset_reject_ids = set()
for rule in spec.split(','):
rule = rule.strip()
# Handle multi ','
if len(rule) < 1:
continue
# Note the count limit in the .split() call
range_parts = rule.split('-', 1)
if len(range_parts) > 1:
# So, this was a range; start by converting the parts to ints
try:
start, end = [int(p.strip()) for p in range_parts]
except ValueError:
raise exception.Invalid(_("Invalid range expression %r")
% rule)
# Make sure it's a valid range
if start > end:
raise exception.Invalid(_("Invalid range expression %r")
% rule)
# Add available CPU ids to set
cpuset_ids |= set(range(start, end + 1))
elif rule[0] == '^':
# Not a range, the rule is an exclusion rule; convert to int
try:
cpuset_reject_ids.add(int(rule[1:].strip()))
except ValueError:
raise exception.Invalid(_("Invalid exclusion "
"expression %r") % rule)
else:
# OK, a single CPU to include; convert to int
try:
cpuset_ids.add(int(rule))
except ValueError:
raise exception.Invalid(_("Invalid inclusion "
"expression %r") % rule)
# Use sets to handle the exclusion rules for us
cpuset_ids -= cpuset_reject_ids
cpusets = list(cpuset_ids)
cpusets.sort()
return cpusets
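A worked example for the parser above (assuming cpu_str_to_list is in scope): "1-4" adds CPUs 1 through 4, "^3" excludes 3, and "6" adds a single CPU.

print(cpu_str_to_list("1-4,^3,6"))   # [1, 2, 4, 6]
print(cpu_str_to_list(""))           # []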
def cpu_list_to_str(cpu_list):
"""Parse a CPU list to string.
:param cpu_list: eg "[1,2,3,4,6,7]"
:returns: a string of CPU ranges, eg 1-4,6,7
"""
spec = ''
if not cpu_list:
return spec
cpu_list.sort()
count = 0
group_cpus = []
tmp_cpus = []
for cpu in cpu_list:
if count == 0:
init = cpu
tmp_cpus.append(cpu)
else:
if cpu == (init + count):
tmp_cpus.append(cpu)
else:
group_cpus.append(tmp_cpus)
tmp_cpus = []
count = 0
init = cpu
tmp_cpus.append(cpu)
count += 1
group_cpus.append(tmp_cpus)
for group in group_cpus:
if len(group) > 2:
group_spec = ("%s-%s" % (group[0], group[0]+len(group)-1))
else:
group_str = [str(num) for num in group]
group_spec = ','.join(group_str)
if spec:
spec += ',' + group_spec
else:
spec = group_spec
return spec
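And the inverse direction, matching the docstring: runs of three or more consecutive CPUs collapse into a range, while shorter runs stay comma-separated (assuming cpu_list_to_str is in scope):

print(cpu_list_to_str([1, 2, 3, 4, 6, 7]))   # "1-4,6,7"
print(cpu_list_to_str([0]))                  # "0"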
def simple_subprocess_call(cmd):
return_code = subprocess.call(cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return return_code
def translate_quotation_marks_for_shell(orig_str):
translated_str = ''
quotation_marks = '"'
quotation_marks_count = orig_str.count(quotation_marks)
if quotation_marks_count > 0:
replace_marks = '\\"'
translated_str = orig_str.replace(quotation_marks, replace_marks)
else:
translated_str = orig_str
return translated_str
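A quick example of the quote escaping above (assuming the function is in scope), useful when the string is later embedded in a double-quoted shell command:

print(translate_quotation_marks_for_shell('echo "hello"'))
# echo \"hello\"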
def get_numa_node_cpus(host_cpu):
numa = {}
if 'numa_node0' in host_cpu:
numa['numa_node0'] = cpu_str_to_list(host_cpu['numa_node0'])
if 'numa_node1' in host_cpu:
numa['numa_node1'] = cpu_str_to_list(host_cpu['numa_node1'])
return numa
def get_numa_node_from_cpus(numa, str_cpus):
numa_nodes = []
cpu_list = cpu_str_to_list(str_cpus)
for cpu in cpu_list:
if cpu in numa['numa_node0']:
numa_nodes.append(0)
if cpu in numa['numa_node1']:
numa_nodes.append(1)
numa_nodes = list(set(numa_nodes))
numa_nodes.sort()
return numa_nodes
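The two NUMA helpers round-trip through the same CPU-set string format, e.g. (assuming the functions above are in scope):

host_cpu = {'numa_node0': '0-3', 'numa_node1': '4-7'}
numa = get_numa_node_cpus(host_cpu)
print(numa)
# {'numa_node0': [0, 1, 2, 3], 'numa_node1': [4, 5, 6, 7]}
print(get_numa_node_from_cpus(numa, '2,5'))  # [0, 1]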

View File

@ -0,0 +1,392 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2014 SoftLayer Technologies, Inc.
# Copyright 2015 Mirantis, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
from oslo_config import cfg
from oslo_log import log as logging
from webob import exc
from daisy.common import utils
from daisy import i18n
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
def get_total_cpus_for_numa(numa_cpus):
all_cpus = []
for value in numa_cpus.values():
all_cpus.extend(value)
return all_cpus
def get_default_os_num(host_roles_name):
if (('CONTROLLER_LB' in host_roles_name or
'CONTROLLER_HA' in host_roles_name) and
'COMPUTER' in host_roles_name):
# host with role of CONTROLLER and COMPUTER,
# isolate 4 cpu cores default for OS and TECS
os_cpu_num = 4
elif 'COMPUTER' in host_roles_name:
# host with role of COMPUTER only,
# isolate 2 cpu cores default for OS and TECS
os_cpu_num = 2
elif ('CONTROLLER_LB' in host_roles_name or
'CONTROLLER_HA' in host_roles_name):
# host with role of CONTROLLER only,
# don't isolate cpu for OS and TECS
os_cpu_num = 0
else:
os_cpu_num = 0
return os_cpu_num
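So the default reservation is 4 cores for a combined controller+compute node, 2 for a pure compute node, and none otherwise. For instance (assuming the function above is in scope):

print(get_default_os_num(['CONTROLLER_HA', 'COMPUTER']))  # 4
print(get_default_os_num(['COMPUTER']))                   # 2
print(get_default_os_num(['CONTROLLER_LB']))              # 0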
def pci_get_cpu_sets(numa_cpus, pci_info, device_numa_node):
high_pci_cpu_set = {}
msg = ''
return_code = 0
status = {'rc': 0, 'msg': ''}
if not numa_cpus:
msg = "The architecture of CPU does not supported"
LOG.info(msg)
return_code = 0
status['rc'] = return_code
status['msg'] = msg
return (status, high_pci_cpu_set)
# get Intel Corporation Coleto Creek PCIe Endpoint
clc_pci_lines = utils.get_clc_pci_info(pci_info)
if not clc_pci_lines:
msg = "No CLC card in system"
LOG.info(msg)
return_code = 0
status['rc'] = return_code
status['msg'] = msg
return (status, high_pci_cpu_set)
high_pci_cpusets = []
for clc_pci_line in clc_pci_lines:
numa_node = device_numa_node['0000:' + clc_pci_line]
numa_key = 'numa_node' + str(numa_node)
if numa_key not in numa_cpus:
msg = "Can't find numa_node '%s' for CLC" % numa_node
return_code = 1
status['rc'] = return_code
status['msg'] = msg
return (status, high_pci_cpu_set)
high_pci_cpusets += numa_cpus[numa_key]
high_pci_cpu_set['high'] = list(set(high_pci_cpusets))
total_cpus = get_total_cpus_for_numa(numa_cpus)
high_pci_cpu_set['low'] =\
list(set(total_cpus) - set(high_pci_cpu_set['high']))
LOG.debug("high_pci_cpu_set:%s" % high_pci_cpu_set)
return (status, high_pci_cpu_set)
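An illustrative call with invented inputs: one CLC endpoint on NUMA node 0 makes all node-0 CPUs "high" and the rest "low". The PCI strings and NUMA layout below are made up for the example, and list order may vary since the sets are unordered:

numa_cpus = {'numa_node0': [0, 1, 2, 3], 'numa_node1': [4, 5, 6, 7]}
pci_info = ['04:00.0 Intel Corporation Coleto Creek PCIe Endpoint']
device_numa_node = {'0000:04:00.0': 0}
status, cpu_set = pci_get_cpu_sets(numa_cpus, pci_info, device_numa_node)
print(status)    # {'rc': 0, 'msg': ''}
print(cpu_set)   # {'high': [0, 1, 2, 3], 'low': [4, 5, 6, 7]}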
# if numa codes are not same, return -1
def get_numa_by_nic(nic_info, device_numa_node):
numa = []
try:
for nic in nic_info:
numa.append(device_numa_node[nic['bus']])
numa = list(set(numa))
numa_info = (-1 if len(numa) > 1 else numa[0])
except Exception:
numa_info = -1
return numa_info
def dvs_get_cpu_sets(dic_numas, nic_info, device_numa_node, cpu_num=4):
dvs_cpu_set = []
total_cpus = []
high_cpu_set = []
low_cpu_set = []
cpu_set = {}
msg = ''
return_code = 0
status = {}
if not dic_numas:
msg = "The architecture of CPU not supported"
LOG.info(msg)
return_code = 1
status['rc'] = return_code
status['msg'] = msg
return (status, cpu_set)
numa_node = get_numa_by_nic(nic_info, device_numa_node)
if numa_node < 0:
msg = 'Invalid numa node:%s' % numa_node
LOG.info(msg)
return_code = 3
status['rc'] = return_code
status['msg'] = msg
return (status, cpu_set)
numa_key = "numa_node%s" % numa_node
if numa_key not in dic_numas:
msg = "Can't find numa node '%s' for DVS" % numa_node
return_code = 4
status['rc'] = return_code
status['msg'] = msg
return (status, cpu_set)
if len(dic_numas[numa_key]) < (cpu_num + 1):
msg = "CPU on %s is not enough" % numa_key
LOG.info(msg)
return_code = 5
status['rc'] = return_code
status['msg'] = msg
return (status, cpu_set)
total_cpus = get_total_cpus_for_numa(dic_numas)
LOG.debug("total_cpu:%s" % total_cpus)
# sort
dic_numas[numa_key] = sorted(dic_numas[numa_key], reverse=True)
for i in dic_numas[numa_key][0:cpu_num]:
dvs_cpu_set.append(i)
high_cpu_set = dic_numas[numa_key]
low_cpu_set =\
list(set(total_cpus).difference(set(dic_numas[numa_key])))
LOG.debug("cpu used by dvs:%s" % dvs_cpu_set)
LOG.debug("low_cpu_set:%s" % low_cpu_set)
LOG.debug("high_cpu_set:%s" % high_cpu_set)
cpu_set['dvs'] = dvs_cpu_set
cpu_set['high'] = high_cpu_set
cpu_set['low'] = low_cpu_set
LOG.debug("cpu_set:%s" % cpu_set)
msg = 'Success'
status['rc'] = return_code
status['msg'] = msg
LOG.debug("status:%s" % status)
return (status, cpu_set)
def get_dvs_cpusets(numa_cpus, host_detail, host_hw_info):
dvs_nics_name = []
dvs_interfaces = utils.get_dvs_interfaces(host_detail['interfaces'])
for dvs_interface in dvs_interfaces:
if dvs_interface['type'] == 'ether':
dvs_nics_name.append(dvs_interface['name'])
if dvs_interface['type'] == 'bond':
if dvs_interface.get('slaves', None):
dvs_nics_name.extend(dvs_interface['slaves'])
elif dvs_interface.get('slave1', None) and \
dvs_interface.get('slave2', None):
slave_list = []
slave_list.append(dvs_interface['slave1'])
slave_list.append(dvs_interface['slave2'])
dvs_nics_name.extend(slave_list)
dvs_cpusets = {}
if dvs_nics_name:
nics_info = [{'name': nic_name, 'bus': interface['pci']}
for nic_name in dvs_nics_name
for interface in host_hw_info['interfaces'].values()
if nic_name == interface['name']]
dvs_cpu_num = 4
device_numa = {}
for device in host_hw_info['devices'].values():
device_numa.update(device)
LOG.info("DVS netcard info: '%s'" % nics_info)
(status, dvs_cpusets) = \
dvs_get_cpu_sets(numa_cpus,
nics_info,
device_numa,
dvs_cpu_num)
if status['rc'] != 0:
msg = "Get dvs cpu sets for host '%s' failed,\
detail error is '%s'"\
% (host_detail['name'], status['msg'])
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
return dvs_cpusets
def get_pci_cpusets(numa_cpus, host_hw_info):
device_numa = {}
for device in host_hw_info['devices'].values():
device_numa.update(device)
(status, pci_cpusets) = pci_get_cpu_sets(numa_cpus,
host_hw_info['pci'].values(),
device_numa)
if status['rc'] != 0:
LOG.error(status['msg'])
raise exc.HTTPBadRequest(explanation=status['msg'])
return pci_cpusets
def allocate_os_cpus(roles_name, pci_cpusets, dvs_cpusets):
os_cpus = []
if not dvs_cpusets and not pci_cpusets:
return os_cpus
os_cpu_num = get_default_os_num(roles_name)
if os_cpu_num == 0:
return os_cpus
os_available_cpuset = []
if ((pci_cpusets and pci_cpusets.get('high')) and
(not dvs_cpusets or not dvs_cpusets.get('high'))):
# if only pci exist, get OS cores from pci lowset
cpus_low = pci_cpusets.get('low', [])
cpus_high = pci_cpusets.get('high', [])
if dvs_cpusets and dvs_cpusets.get('high'):
# if only dvs exist, get OS cores from dvs lowset.
# if pci and dvs exist at the same time,
# get OS cores from lowset from dvs lowset.
cpus_low = list(set(dvs_cpusets.get('low', [])) -
set(dvs_cpusets.get('dvs', [])))
cpus_high = list(set(dvs_cpusets.get('high', [])) -
set(dvs_cpusets.get('dvs', [])))
cpus_low.sort()
cpus_high.sort()
os_available_cpuset = cpus_low + cpus_high
if not os_available_cpuset:
return os_cpus
if (len(os_available_cpuset) < os_cpu_num):
msg = 'cpus are not enough'
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
# cpu core 0 must give OS
cpu0 = 0
if cpu0 in os_available_cpuset:
os_available_cpuset.remove(cpu0)
os_available_cpuset = [cpu0] + os_available_cpuset
os_cpus = os_available_cpuset[:os_cpu_num]
return os_cpus
# when config role 'COMPUTER', allocate cpus for CLC
def allocate_clc_cpus(host_detail):
pci_cpu_sets = {}
if 'COMPUTER' not in host_detail.get('role', []):
return pci_cpu_sets
host_interfaces = host_detail.get('interfaces')
if host_interfaces:
host_hw_info = utils.get_host_hw_info(host_interfaces)
else:
return pci_cpu_sets
host_id = host_detail.get('id')
clc_pci = utils.get_clc_pci_info(host_hw_info['pci'].values())
if not clc_pci:
return pci_cpu_sets
else:
LOG.info("CLC card pci number: '%s'" % clc_pci)
numa_cpus = utils.get_numa_node_cpus(host_hw_info.get('cpu', {}))
if not numa_cpus or not numa_cpus['numa_node0']:
msg = "No NUMA CPU found from of host '%s'" % host_id
LOG.info(msg)
return pci_cpu_sets
LOG.info("Get CLC cpusets of host '%s'" % host_id)
pci_cpu_sets = get_pci_cpusets(numa_cpus, host_hw_info)
if not pci_cpu_sets or not pci_cpu_sets.get('high'):
msg = "Can't get CLC cpusets of host '%s'" % host_id
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
return pci_cpu_sets
# when config DVS on network plane mapping, allocate cpus for dvs
def allocate_dvs_cpus(host_detail):
dvs_cpu_sets = {}
host_interfaces = host_detail.get('interfaces')
if not host_interfaces:
return dvs_cpu_sets
dvs_interfaces = utils.get_dvs_interfaces(host_interfaces)
if not dvs_interfaces:
return dvs_cpu_sets
host_id = host_detail.get('id')
host_hw_info = utils.get_host_hw_info(host_interfaces)
numa_cpus = utils.get_numa_node_cpus(host_hw_info.get('cpu', {}))
if not numa_cpus or not numa_cpus['numa_node0']:
msg = "No NUMA CPU found from of host '%s'" % host_id
LOG.info(msg)
return dvs_cpu_sets
LOG.info("Get dvs cpusets of host '%s'" % host_id)
dvs_cpu_sets = get_dvs_cpusets(numa_cpus,
host_detail,
host_hw_info)
if not dvs_cpu_sets or not dvs_cpu_sets.get('high'):
msg = "Can't get dvs high cpusets of host '%s'" % host_id
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
return dvs_cpu_sets
def allocate_cpus(host_detail):
host_cpu_sets = {'dvs_high_cpuset': '',
'pci_high_cpuset': '',
'suggest_dvs_cpus': '',
'suggest_os_cpus': ''}
dvs_cpusets = allocate_dvs_cpus(host_detail)
pci_cpusets = allocate_clc_cpus(host_detail)
# no CLC and no DVS
if (not pci_cpusets and not dvs_cpusets):
return host_cpu_sets
host_roles_name = host_detail.get('role', [])
os_cpus = allocate_os_cpus(host_roles_name,
pci_cpusets,
dvs_cpusets)
host_cpu_sets['dvs_high_cpuset'] =\
utils.cpu_list_to_str(dvs_cpusets.get('high', []))
host_cpu_sets['pci_high_cpuset'] =\
utils.cpu_list_to_str(pci_cpusets.get('high', []))
host_cpu_sets['suggest_dvs_cpus'] =\
utils.cpu_list_to_str(dvs_cpusets.get('dvs', []))
host_cpu_sets['suggest_os_cpus'] = utils.cpu_list_to_str(os_cpus)
LOG.info("NUMA CPU usage for host %s: %s"
% (host_detail['id'], host_cpu_sets))
return host_cpu_sets

View File

@ -244,6 +244,7 @@ class Server(object):
This class requires initialize_glance_store set to True if
glance store needs to be initialized.
"""
def __init__(self, threads=1000, initialize_glance_store=False):
os.umask(0o27)  # ensure files are created with the correct privileges
self._logger = logging.getLogger("eventlet.wsgi.server")
@ -638,6 +639,7 @@ class APIMapper(routes.Mapper):
class RejectMethodController(object):
def reject(self, req, allowed_methods, *args, **kwargs):
LOG.debug("The method %s is not allowed for this resource" %
req.environ['REQUEST_METHOD'])

View File

@ -40,7 +40,7 @@ class WSMEModelTransformer(object):
for name in names:
value = getattr(db_entity, name, None)
if value is not None:
if type(value) == datetime:
if isinstance(value, datetime):
iso_datetime_value = timeutils.isotime(value)
values.update({name: iso_datetime_value})
else:
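Replacing the type() comparison with isinstance also accepts subclasses, which matters for datetime-like values. A runnable illustration:

from datetime import datetime

class TimeStamp(datetime):
    pass

value = TimeStamp(2016, 7, 1)
print(type(value) == datetime)      # False: exact type check misses the subclass
print(isinstance(value, datetime))  # True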

View File

@ -607,4 +607,3 @@ def artifact_publish(client, artifact_id,
return client.artifact_publish(artifact_id=artifact_id,
type_name=type_name,
type_version=type_version)

View File

@ -272,8 +272,8 @@ def _filter_images(images, filters, context,
elif visibility == 'private':
if image['is_public']:
continue
if not (has_ownership or (context.is_admin
and not admin_as_user)):
if not (has_ownership or (context.is_admin and not
admin_as_user)):
continue
elif visibility == 'shared':
if not is_member:
@ -387,8 +387,8 @@ def _image_get(context, image_id, force_show_deleted=False, status=None):
LOG.warn(_LW('Could not find image %s') % image_id)
raise exception.NotFound()
if image['deleted'] and not (force_show_deleted
or context.can_see_deleted):
if image['deleted'] and not (force_show_deleted or
context.can_see_deleted):
LOG.warn(_LW('Unable to get deleted image'))
raise exception.NotFound()
@ -625,7 +625,7 @@ def _image_locations_delete_all(context, image_id, delete_time=None):
delete_time=delete_time)
for i, loc in enumerate(DATA['locations']):
if image_id == loc['image_id'] and loc['deleted'] == False:
if image_id == loc['image_id'] and loc['deleted'] is False:
del DATA['locations'][i]

File diff suppressed because it is too large

View File

@ -184,8 +184,8 @@ def _populate_metadata(meta, metadata_path=None, merge=False,
json_schema_files = [metadata_path]
else:
json_schema_files = [f for f in os.listdir(metadata_path)
if isfile(join(metadata_path, f))
and f.endswith('.json')]
if isfile(join(metadata_path, f)) and
f.endswith('.json')]
except OSError as e:
LOG.error(utils.exception_to_str(e))
return

View File

@ -12,11 +12,11 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
from sqlalchemy.schema import (Column, ForeignKey, Index, MetaData, Table) from sqlalchemy.schema import (Column, ForeignKey, MetaData, Table)
from daisy.db.sqlalchemy.migrate_repo.schema import ( from daisy.db.sqlalchemy.migrate_repo.schema import (
BigInteger, Boolean, DateTime, Integer, Numeric, String, Text, BigInteger, Boolean, DateTime, Integer, String, Text,
create_tables) # noqa create_tables) # noqa
@ -58,15 +58,18 @@ def define_hosts_table(meta):
return hosts return hosts
def define_discover_hosts_table(meta): def define_discover_hosts_table(meta):
discover_hosts = Table('discover_hosts', discover_hosts = Table('discover_hosts',
meta, meta,
Column('id', String(36), primary_key=True, Column('id', String(36), primary_key=True,
nullable=False), nullable=False),
Column('ip', String(255), nullable=False), Column('ip', String(255), nullable=True),
Column('user', String(36)), Column('user', String(36)),
Column('passwd', String(36), nullable=False), Column('passwd', String(36), nullable=True),
Column('status', String(255), default='init', nullable=True), Column(
'status', String(255), default='init',
nullable=True),
Column('created_at', DateTime(), nullable=True), Column('created_at', DateTime(), nullable=True),
Column('updated_at', DateTime(), nullable=True), Column('updated_at', DateTime(), nullable=True),
Column('deleted_at', DateTime()), Column('deleted_at', DateTime()),
@ -80,12 +83,14 @@ def define_discover_hosts_table(meta):
return discover_hosts
def define_clusters_table(meta):
clusters = Table('clusters',
meta,
Column('id', String(36), primary_key=True,
nullable=False),
Column('name', String(255), default='TECS', nullable=False),
Column(
'name', String(255), default='TECS', nullable=False),
Column('owner', String(255)),
Column('description', Text()),
Column('net_l23_provider', String(64)),
@ -98,7 +103,8 @@ def define_clusters_table(meta):
Column('vni_end', BigInteger()),
Column('public_vip', String(128)),
Column('segmentation_type', String(64)),
Column('auto_scale', Integer(), nullable=False, default=0),
Column(
'auto_scale', Integer(), nullable=False, default=0),
Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime(), nullable=False),
Column('deleted_at', DateTime()),
@ -112,6 +118,7 @@ def define_clusters_table(meta):
return clusters
def define_cluster_hosts_table(meta):
cluster_hosts = Table('cluster_hosts',
meta,
@ -135,6 +142,7 @@ def define_cluster_hosts_table(meta):
return cluster_hosts
def define_networks_table(meta):
networks = Table('networks',
meta,
@ -145,15 +153,19 @@ def define_networks_table(meta):
Column('cluster_id', String(36)),
Column('cidr', String(255)),
Column('vlan_id', String(36)),
Column('vlan_start', Integer(),nullable=False, default=1),
Column('vlan_end', Integer(),nullable=False, default=4094),
Column(
'vlan_start', Integer(), nullable=False, default=1),
Column(
'vlan_end', Integer(), nullable=False, default=4094),
Column('ip', String(256)),
Column('gateway', String(128)),
Column('type', String(36), nullable=False, default='default'),
Column(
'type', String(36), nullable=False,
default='default'),
Column('ml2_type', String(36)),
Column('network_type', String(36), nullable=False),
Column('physnet_name', String(108)),
Column('capability', String(36)),
Column('capability', String(36), default='high'),
Column('mtu', Integer(), nullable=False, default=1500),
Column('alias', String(255)),
Column('created_at', DateTime(), nullable=False),
@ -169,6 +181,7 @@ def define_networks_table(meta):
return networks
def define_ip_ranges_table(meta):
ip_ranges = Table('ip_ranges',
meta,
@ -190,6 +203,7 @@ def define_ip_ranges_table(meta):
return ip_ranges
def define_host_interfaces_table(meta):
host_interfaces = Table('host_interfaces',
meta,
@ -204,7 +218,9 @@ def define_host_interfaces_table(meta):
Column('gateway', String(256)),
Column('mac', String(256)),
Column('pci', String(32)),
Column('type', String(32),nullable=False, default='ether'),
Column(
'type', String(32), nullable=False,
default='ether'),
Column('slave1', String(32)),
Column('slave2', String(32)),
Column('mode', String(36)),
@ -222,6 +238,7 @@ def define_host_interfaces_table(meta):
return host_interfaces
def define_host_roles_table(meta):
host_roles = Table('host_roles',
meta,
@ -235,7 +252,9 @@ def define_host_roles_table(meta):
String(36),
ForeignKey('roles.id'),
nullable=False),
Column('status', String(32), nullable=False, default='init'),
Column(
'status', String(32), nullable=False,
default='init'),
Column('progress', Integer(), default=0),
Column('messages', Text()),
Column('created_at', DateTime(), nullable=False),
@ -274,7 +293,9 @@ def define_roles_table(meta):
Column('db_lv_size', Integer()),
Column('glance_lv_size', Integer()),
Column('nova_lv_size', Integer(), default=0),
Column('disk_location', String(255), nullable=False, default='local'),
Column(
'disk_location', String(255), nullable=False,
default='local'),
Column('deployment_backend', String(36)),
Column('config_set_update_progress', Integer(), default=0),
Column('ntp_server', String(255)),
@ -291,6 +312,7 @@ def define_roles_table(meta):
return roles return roles
def define_service_roles_table(meta): def define_service_roles_table(meta):
service_roles = Table('service_roles', service_roles = Table('service_roles',
meta, meta,
@ -298,7 +320,9 @@ def define_service_roles_table(meta):
nullable=False), nullable=False),
Column('role_id', String(36), ForeignKey('roles.id'), Column('role_id', String(36), ForeignKey('roles.id'),
nullable=False), nullable=False),
Column('service_id', String(36), ForeignKey('services.id'), nullable=False), Column(
'service_id', String(36), ForeignKey(
'services.id'), nullable=False),
Column('created_at', DateTime(), nullable=False), Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=False),
Column('deleted_at', DateTime()), Column('deleted_at', DateTime()),
@ -312,15 +336,19 @@ def define_service_roles_table(meta):
return service_roles return service_roles
def define_services_table(meta): def define_services_table(meta):
services = Table('services', services = Table('services',
meta, meta,
Column('id', String(36), primary_key=True, Column('id', String(36), primary_key=True,
nullable=False), nullable=False),
Column('name', String(255), nullable=False), Column('name', String(255), nullable=False),
Column('component_id', String(36), ForeignKey('components.id'), nullable=True), Column('component_id', String(36), ForeignKey(
'components.id'), nullable=True),
Column('description', Text()), Column('description', Text()),
Column('backup_type', String(32), nullable=False, default='none'), Column(
'backup_type', String(32), nullable=False,
default='none'),
Column('created_at', DateTime(), nullable=False), Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=False),
Column('deleted_at', DateTime()), Column('deleted_at', DateTime()),
@ -334,6 +362,7 @@ def define_services_table(meta):
return services return services
def define_components_table(meta): def define_components_table(meta):
components = Table('components', components = Table('components',
meta, meta,
@ -354,6 +383,7 @@ def define_components_table(meta):
return components return components
def define_config_sets_table(meta): def define_config_sets_table(meta):
config_sets = Table('config_sets', config_sets = Table('config_sets',
meta, meta,
@ -374,6 +404,7 @@ def define_config_sets_table(meta):
return config_sets return config_sets
def define_configs_table(meta): def define_configs_table(meta):
configs = Table('configs', configs = Table('configs',
meta, meta,
@ -382,7 +413,8 @@ def define_configs_table(meta):
Column('section', String(255)), Column('section', String(255)),
Column('key', String(255), nullable=False), Column('key', String(255), nullable=False),
Column('value', String(255)), Column('value', String(255)),
Column('config_file_id', String(36), ForeignKey('config_files.id'), nullable=False), Column('config_file_id', String(36), ForeignKey(
'config_files.id'), nullable=False),
Column('config_version', Integer(), default=0), Column('config_version', Integer(), default=0),
Column('running_version', Integer(), default=0), Column('running_version', Integer(), default=0),
Column('description', Text()), Column('description', Text()),
@ -399,6 +431,7 @@ def define_configs_table(meta):
return configs return configs
def define_config_files_table(meta): def define_config_files_table(meta):
config_files = Table('config_files', config_files = Table('config_files',
meta, meta,
@ -419,14 +452,17 @@ def define_config_files_table(meta):
return config_files return config_files
def define_config_set_items_table(meta): def define_config_set_items_table(meta):
config_set_items = Table('config_set_items', config_set_items = Table('config_set_items',
meta, meta,
Column('id', String(36), primary_key=True, Column('id', String(36), primary_key=True,
nullable=False), nullable=False),
Column('config_set_id', String(36), ForeignKey('config_sets.id'), Column('config_set_id', String(36),
ForeignKey('config_sets.id'),
nullable=False), nullable=False),
Column('config_id', String(36), ForeignKey('configs.id'), nullable=False), Column('config_id', String(36), ForeignKey(
'configs.id'), nullable=False),
Column('created_at', DateTime(), nullable=False), Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=False),
Column('deleted_at', DateTime()), Column('deleted_at', DateTime()),
@ -440,6 +476,7 @@ def define_config_set_items_table(meta):
return config_set_items return config_set_items
def define_config_historys_table(meta): def define_config_historys_table(meta):
config_historys = Table('config_historys', config_historys = Table('config_historys',
meta, meta,
@ -461,6 +498,7 @@ def define_config_historys_table(meta):
return config_historys return config_historys
def define_tasks_table(meta): def define_tasks_table(meta):
tasks = Table('tasks', tasks = Table('tasks',
meta, meta,
@ -482,6 +520,7 @@ def define_tasks_table(meta):
return tasks return tasks
def define_task_infos_table(meta): def define_task_infos_table(meta):
task_infos = Table('task_infos', task_infos = Table('task_infos',
meta, meta,
@ -502,10 +541,13 @@ def define_task_infos_table(meta):
return task_infos return task_infos
def define_repositorys_table(meta): def define_repositorys_table(meta):
repositorys = Table('repositorys', repositorys = Table('repositorys',
meta, meta,
Column('id', String(36), primary_key=True, nullable=False), Column(
'id', String(36), primary_key=True,
nullable=False),
Column('url', String(255)), Column('url', String(255)),
Column('description', Text()), Column('description', Text()),
Column('created_at', DateTime(), nullable=False), Column('created_at', DateTime(), nullable=False),
@ -545,6 +587,7 @@ def define_users_table(meta):
return users return users
def define_versions_table(meta): def define_versions_table(meta):
versions = Table('versions', versions = Table('versions',
meta, meta,
@ -571,6 +614,7 @@ def define_versions_table(meta):
return versions return versions
def define_assigned_networks_table(meta): def define_assigned_networks_table(meta):
assigned_networks = Table('assigned_networks', assigned_networks = Table('assigned_networks',
meta, meta,
@ -594,6 +638,7 @@ def define_assigned_networks_table(meta):
return assigned_networks return assigned_networks
def define_logic_networks_table(meta): def define_logic_networks_table(meta):
logic_networks = Table('logic_networks', logic_networks = Table('logic_networks',
meta, meta,
@ -602,9 +647,12 @@ def define_logic_networks_table(meta):
Column('name', String(255), nullable=False), Column('name', String(255), nullable=False),
Column('type', String(36)), Column('type', String(36)),
Column('physnet_name', String(255)), Column('physnet_name', String(255)),
Column('cluster_id', String(36), ForeignKey('clusters.id'), nullable=False), Column('cluster_id', String(36), ForeignKey(
'clusters.id'), nullable=False),
Column('segmentation_id', BigInteger()), Column('segmentation_id', BigInteger()),
Column('segmentation_type', String(64), nullable=False), Column(
'segmentation_type', String(64),
nullable=False),
Column('shared', Boolean(), default=False), Column('shared', Boolean(), default=False),
Column('created_at', DateTime(), nullable=False), Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=False),
@ -618,6 +666,7 @@ def define_logic_networks_table(meta):
extend_existing=True) extend_existing=True)
return logic_networks return logic_networks
def define_subnets_table(meta): def define_subnets_table(meta):
subnets = Table('subnets', subnets = Table('subnets',
meta, meta,
@ -625,7 +674,8 @@ def define_subnets_table(meta):
nullable=False), nullable=False),
Column('cidr', String(128)), Column('cidr', String(128)),
Column('gateway', String(128)), Column('gateway', String(128)),
Column('logic_network_id', String(36), ForeignKey('logic_networks.id'), nullable=False), Column('logic_network_id', String(36), ForeignKey(
'logic_networks.id'), nullable=False),
Column('name', String(255), nullable=False), Column('name', String(255), nullable=False),
Column('router_id', String(36), ForeignKey('routers.id')), Column('router_id', String(36), ForeignKey('routers.id')),
Column('created_at', DateTime(), nullable=False), Column('created_at', DateTime(), nullable=False),
@ -640,6 +690,7 @@ def define_subnets_table(meta):
extend_existing=True) extend_existing=True)
return subnets return subnets
def define_float_ip_ranges_table(meta): def define_float_ip_ranges_table(meta):
float_ip_ranges = Table('float_ip_ranges', float_ip_ranges = Table('float_ip_ranges',
meta, meta,
@ -647,7 +698,8 @@ def define_float_ip_ranges_table(meta):
nullable=False), nullable=False),
Column('start', String(128)), Column('start', String(128)),
Column('end', String(128)), Column('end', String(128)),
Column('subnet_id', String(36), ForeignKey('subnets.id'), nullable=False), Column('subnet_id', String(36), ForeignKey(
'subnets.id'), nullable=False),
Column('created_at', DateTime(), nullable=False), Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=False),
Column('deleted_at', DateTime()), Column('deleted_at', DateTime()),
@ -661,13 +713,16 @@ def define_float_ip_ranges_table(meta):
return float_ip_ranges return float_ip_ranges
def define_dns_nameservers_table(meta): def define_dns_nameservers_table(meta):
dns_nameservers = Table('dns_nameservers', dns_nameservers = Table('dns_nameservers',
meta, meta,
Column('id', String(36), primary_key=True, Column('id', String(36), primary_key=True,
nullable=False), nullable=False),
Column('dns', String(128)), Column('dns', String(128)),
Column('subnet_id', String(36), ForeignKey('subnets.id'), nullable=False), Column(
'subnet_id', String(36),
ForeignKey('subnets.id'), nullable=False),
Column('created_at', DateTime(), nullable=False), Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=False),
Column('deleted_at', DateTime()), Column('deleted_at', DateTime()),
@ -681,6 +736,7 @@ def define_dns_nameservers_table(meta):
return dns_nameservers return dns_nameservers
def define_routers_table(meta): def define_routers_table(meta):
routers = Table('routers', routers = Table('routers',
meta, meta,
@ -688,7 +744,8 @@ def define_routers_table(meta):
nullable=False), nullable=False),
Column('name', String(255)), Column('name', String(255)),
Column('description', Text()), Column('description', Text()),
Column('cluster_id', String(36), ForeignKey('clusters.id'), nullable=False), Column('cluster_id', String(36), ForeignKey(
'clusters.id'), nullable=False),
Column('external_logic_network', String(255)), Column('external_logic_network', String(255)),
Column('created_at', DateTime(), nullable=False), Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=False),
@ -703,6 +760,7 @@ def define_routers_table(meta):
return routers return routers
def define_service_disks_table(meta): def define_service_disks_table(meta):
disks = Table('service_disks', disks = Table('service_disks',
meta, meta,
@ -713,7 +771,9 @@ def define_service_disks_table(meta):
String(36), String(36),
ForeignKey('roles.id'), ForeignKey('roles.id'),
nullable=False), nullable=False),
Column('disk_location', String(255), nullable=False, default='local'), Column(
'disk_location', String(255),
nullable=False, default='local'),
Column('lun', Integer()), Column('lun', Integer()),
Column('data_ips', String(255)), Column('data_ips', String(255)),
Column('size', Integer()), Column('size', Integer()),
@ -730,6 +790,7 @@ def define_service_disks_table(meta):
return disks return disks
def define_cinder_volumes_table(meta): def define_cinder_volumes_table(meta):
disks = Table('cinder_volumes', disks = Table('cinder_volumes',
meta, meta,
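For orientation, these define_*_table(meta) helpers follow the usual sqlalchemy-migrate layout: the version script's upgrade() binds one MetaData to the engine, lets each helper attach its Table, and creates the tables in dependency order. The upgrade() body below is a hedged sketch of that convention only; the actual body is not shown in this diff:

from sqlalchemy import MetaData


def upgrade(migrate_engine):
    # One MetaData shared by every define_*_table helper in this module.
    meta = MetaData()
    meta.bind = migrate_engine

    # Referenced tables (e.g. roles, clusters) must exist before the
    # tables that declare foreign keys against them.
    tables = [
        define_networks_table(meta),
        define_ip_ranges_table(meta),
        define_host_interfaces_table(meta),
    ]  # ...plus the remaining define_*_table helpers
    for table in tables:
        table.create()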
Some files were not shown because too many files have changed in this diff.