Delete all rpms and major upgrade from inside...

Change-Id: I12d6307464cc03664b6113807a35c396b652add9
Signed-off-by: Zhijiang Hu <hu.zhijiang@zte.com.cn>
This commit is contained in:
Zhijiang Hu 2016-07-01 10:21:39 +08:00
parent 8698f3a15e
commit 2d2da98b35
574 changed files with 28824 additions and 22613 deletions

View File

@@ -1,3 +0,0 @@
# Ignore everything in this directory
*
# Except this file
!.gitignore

View File

@@ -1,166 +0,0 @@
## HA dual-node configuration
# Each HA system has its own configuration file. The naming convention is: a single HA system uses HA_1.conf; two HA systems use HA_2_1.conf and HA_2_2.conf, and so on.
# It is recommended to copy this template and rename it before editing. If editing with vi, run export LC_ALL="zh_CN.GB2312" first to avoid garbled characters, and run unset LC_ALL afterwards.
[DEFAULT]
# OpenCOS components installed under HA. Valid values are loadbalance,database,amqp,keystone,neutron,glance,cinder,nova,horizon,heat,ceilometer,ironic, matching the keys of the component service lists below.
# Any comma-separated combination of them may be given; all of them can be abbreviated as all. Order does not matter. haproxy means configuring the LB.
# Note: HA is installed via the conf method, which does not support installing ironic. If ironic is configured here, install ironic separately via the custom method before the whole installation flow.
# This option is mandatory.
components=database,amqp,keystone,neutron,glance,cinder,nova,horizon,heat,ceilometer
# Component services managed by HA (can be trimmed); separate multiple services with commas.
# If no services are added or removed, the options below usually need no changes; unused components need not be commented out, since whether a component is enabled is decided by "components".
loadbalance = haproxy
database=mariadb
amqp=rabbitmq-server
keystone=openstack-keystone
#neutron-metadata-agent,neutron-lbaas-agent are not included by default
neutron=neutron-server,neutron-l3-agent,neutron-dhcp-agent
#openstack-glance-scrubber is not included by default
glance=openstack-glance-api,openstack-glance-registry
#openstack-cinder-backup is not included by default
cinder=openstack-cinder-api,openstack-cinder-scheduler,openstack-cinder-volume
nova=openstack-nova-api,openstack-nova-conductor,openstack-nova-scheduler,openstack-nova-cert,openstack-nova-consoleauth,openstack-nova-novncproxy
horizon=httpd,opencos-alarmmanager,opencos-alarmagent
heat=openstack-heat-api,openstack-heat-engine,openstack-heat-api-cfn,openstack-heat-api-cloudwatch
ceilometer=openstack-ceilometer-api,openstack-ceilometer-central,openstack-ceilometer-alarm-evaluator,openstack-ceilometer-alarm-notifier,openstack-ceilometer-notification,openstack-ceilometer-collector
ironic=openstack-ironic-api,openstack-ironic-conductor
# Add clone service resources (run on every node) as needed; give the service names without the .service suffix, separated by commas. Optional.
#clone_service=
# Name of the guard service
guard=tfg-guard
# HA cluster heartbeat links: at least one, three recommended. Each is a pair of IPs separated by a comma.
# If LB and HA run on the same servers, the heartbeat links need not be filled in here again.
# First heartbeat link (external-network IPs in this example). Mandatory.
heartbeat_link1=10.43.179.221,10.43.179.222
# Second heartbeat link; must not share any IP with the other heartbeat links. Optional.
heartbeat_link2=
# Third heartbeat link; must not share any IP with the other heartbeat links. Optional.
heartbeat_link3=
# The node that runs the HA script is the local node; the others are remote nodes. This is the root password for ssh login to the remote nodes. Mandatory.
remote_node_password=ossdbg1
# haproxy floating IP address; mandatory when configuring LB.
#loadbalance_fip=192.160.0.226
#loadbalance_nic=ens33
#loadbalance_netmask=23
#############DB################
# Database floating IP; may be the same as the LB floating IP. Mandatory.
# Floating IP address
#database_fip=192.160.0.225
# NIC that carries the floating IP
#database_nic=baseleft
# Netmask, as a CIDR prefix length
#database_netmask=23
# Full device path of the database shared disk; mandatory if the component is present.
# An LV device is recommended; when using an LV, configure the logical volume device name.
#database_device=/dev/mapper/vg_mysql-lv_mysql
# Filesystem type
#database_fs_type=ext4
# Full device path of the database backup shared disk; must differ from the other shared disks (not yet supported). Optional.
#backup_database_device=/dev/mapper/vg_mysqlbackup-lv_mysqlbackup
#backup_database_fs_type=ext4
##############AMQP################
# AMQP floating IP; may be the same as the LB floating IP. Mandatory.
#amqp_fip=192.160.0.225
#amqp_nic=baseleft
#amqp_netmask=23
##############keystone################
# keystone floating IP; not needed when configured behind LB, otherwise mandatory if the component is present.
#keystone_fip=192.160.0.225
#keystone_nic=baseleft
#keystone_netmask=23
##############neutron################
# neutron floating IP; not needed when configured behind LB, otherwise mandatory if the component is present.
#neutron_fip=192.160.0.225
#neutron_nic=baseleft
#neutron_netmask=23
##############glance################
# glance floating IP; not needed when configured behind LB, otherwise mandatory if the component is present.
#glance_fip=192.160.0.225
#glance_nic=baseleft
#glance_netmask=23
# Image shared disk settings; must differ from the other shared disks. Mandatory if the component is present.
# glance_device_type may be drbd or iscsi
#glance_device_type=drbd
#glance_device=/dev/mapper/vg_glance-lv_glance
#glance_fs_type=ext4
##############cinder################
# cinder floating IP; not needed when configured behind LB, otherwise mandatory if the component is present.
#cinder_fip=192.160.0.225
#cinder_nic=baseleft
#cinder_netmask=23
# Management IP(s) of the disk array used by VM block devices; separate multiple IPs with spaces. Optional.
#cinder_ping_ip=192.160.0.7
##############nova################
# nova floating IP; not needed when configured behind LB, otherwise mandatory if the component is present.
#nova_fip=192.160.0.225
#nova_nic=baseleft
#nova_netmask=23
##############horizon################
# Floating IP used to log in to the TECS dashboard; not needed when configured behind LB, otherwise mandatory if the component is present.
# Components with different floating IPs may run on different nodes; to keep this one on
# the same node as another component, configure location_constraint.
#horizon_fip=10.43.179.230
#horizon_nic=kmportv1
#horizon_netmask=23
##############ironic################
# ironic floating IP; not needed when configured behind LB, otherwise mandatory if the component is present.
#ironic_fip=192.160.0.225
#ironic_nic=baseleft
#ironic_netmask=23
##############heat################
# heat floating IP; not needed when configured behind LB, otherwise mandatory if the component is present.
#heat_fip=192.160.0.225
#heat_nic=baseleft
#heat_netmask=23
##############ceilometer################
# ceilometer floating IP; not needed when configured behind LB, otherwise mandatory if the component is present.
#ceilometer_fip=192.160.0.225
#ceilometer_nic=baseleft
#ceilometer_netmask=23
# Full device path of the mongod database shared disk; recommended.
#mongod_device=/dev/mapper/vg_mongodb-lv_mongodb
# Filesystem type
#mongod_fs_type=ext4
# Set to local if the mongod database uses a local disk; otherwise leave empty.
mongod_local=local
# The following option describes the disk array that hosts the shared disks; for now all shared disks used in this file must be on the same disk array. Optional.
# Format: (primary controller service-port IP, primary controller iqn),(standby controller service-port IP, standby controller iqn)
# If both controllers use the same iqn, it may be written as (primary controller service-port IP, primary controller iqn)
#iscsi_storage=(172.32.1.1,iqn.2099-01.cn.com.zte:usp.spr11-4c:09:b4:b0:56:8b),(172.32.1.2,iqn.2099-01.cn.com.zte:usp.spr11-4c:09:b4:b0:56:8c)

View File

@@ -1,159 +0,0 @@
#!/bin/bash
dhcp_ip="127.0.0.1"
DISCOVERD_URL="http://$dhcp_ip:5050/v1/continue"
function update() {
jq "$1" data.json > temp.json || echo "Error: update $1 to json failed"
mv temp.json data.json
}
function get_system_info(){
PRODUCT=$(dmidecode -s system-product-name)
FAMILY=$(dmidecode -t system|grep "Family"|cut -d ":" -f2)
VERSION=$(dmidecode -s system-version)
SERIAL=$(dmidecode -s system-serial-number)
MANUFACTURER=$(dmidecode -s system-manufacturer)
UUID=$(dmidecode -s system-uuid)
FQDN=$(hostname -f)
echo '{"system":{}}' > data.json
update ".system[\"product\"] = \"$PRODUCT\""
update ".system[\"family\"] = \"$FAMILY\""
update ".system[\"fqdn\"] = \"$FQDN\""
update ".system[\"version\"] = \"$VERSION\""
update ".system[\"serial\"] = \"$SERIAL\""
update ".system[\"manufacturer\"] = \"$MANUFACTURER\""
update ".system[\"uuid\"] = \"$UUID\""
}
function get_cpu_info(){
REAL=$(cat /proc/cpuinfo |grep "physical id"|sort |uniq|wc -l)
TOTAL=$(cat /proc/cpuinfo |grep "processor"|wc -l)
update ".cpu[\"real\"] = $REAL"
update ".cpu[\"total\"] = $TOTAL"
for i in $(seq $TOTAL)
do
if [ ! -z "$i" ]; then
SPEC_MODEL=$(cat /proc/cpuinfo | grep name | cut -f2 -d:|sed -n $i"p")
SPEC_FRE=$(cat /proc/cpuinfo | grep MHz | cut -f2 -d:|sed -n $i"p")
update ".cpu[\"spec_$i\"] = {model:\"$SPEC_MODEL\", frequency:$SPEC_FRE}"
fi
done
}
function get_memory_info(){
PHY_NUM=$(dmidecode -t memory|grep "Physical Memory Array"|wc -l)
TOTAL_MEM=$(cat /proc/meminfo |grep MemTotal |cut -d ":" -f2)
update ".memory[\"total\"] = \"$TOTAL_MEM\""
for num in $(seq $PHY_NUM)
do
SLOTS=$(dmidecode -t memory |grep "Number Of Devices" |cut -d ":" -f2|sed -n $num"p")
MAX_CAP=$(dmidecode -t memory |grep "Maximum Capacity" |cut -d ":" -f2|sed -n $num"p")
update ".memory[\"phy_memory_$num\"] = {slots:\"$SLOTS\", maximum_capacity:\"$MAX_CAP\"}"
for i in $(seq $SLOTS)
do
if [ ! -z "$i" ]; then
DEVICE_FRE=$(dmidecode -t memory |grep "Speed" |cut -d ":" -f2|sed -n $i"p")
DEVICE_TYPE=$(dmidecode -t memory |grep 'Type:' |grep -v "Error Correction Type"|cut -d ":" -f2|sed -n $i"p")
DEVICE_SIZE=$(dmidecode -t memory |grep Size |cut -d ":" -f2|sed -n $i"p")
update ".memory[\"phy_memory_$num\"][\"devices_$i\"] = {frequency:\"$DEVICE_FRE\", type:\"$DEVICE_TYPE\", size:\"$DEVICE_SIZE\"}"
fi
done
done
}
function get_net_info(){
physical_networks=`ls -l /sys/class/net/ | grep -v lo |grep "pci"|awk -F 'net/' '{print $2}'`
if [ -f "/sys/class/net/bonding_masters" ]; then
bond_network=$(cat /sys/class/net/bonding_masters)
if [ ! -z "$bond_network" ];then
physical_networks+=" $bond_network"
fi
fi
for iface in $physical_networks
do
NAME=$iface
MAC=$(ip link show $iface | awk '/ether/ {print $2}')
IP=$(ip addr show $iface | awk '/inet / { sub(/\/.*/, "", $2); print $2 }')
NETMASK=$(ifconfig $iface | grep netmask | awk '{print $4}')
STATE=$(ip link show $iface | awk '/mtu/ {print $3}')
PCI=$(ethtool -i $iface|grep "bus-info"|cut -d " " -f2)
CURRENT_SPEED=$(ethtool $iface |grep Speed |awk -F " " '{print $2}')
LINE=$(ethtool $iface|grep -n "Supported pause frame use"|awk -F ":" '{print $1}')
LINE=$[ LINE - 1 ]
LINE_SPEED=$(ethtool $iface|grep -n "Supported link modes"|awk -F ":" '{print $1}')
BOND=$(ifconfig $iface | grep MASTER)
if [ $LINE -eq $LINE_SPEED ]; then
MAX_SPEED=$(ethtool $iface|grep "Supported link modes"|cut -d ":" -f2)
else
MAX_SPEED=$(ethtool $iface |sed -n $LINE"p"|awk -F " " '{print $1}')
fi
UP="UP"
if [[ "$STATE" =~ "$UP" ]]; then
STATE="up"
else
STATE="down"
fi
if [ -z "$BOND" ]; then
TYPE="ether"
else
TYPE="bond"
SLAVES=$(find /etc/sysconfig/network-scripts/ -name "ifcfg-*" |xargs grep "MASTER=$iface"|awk -F 'ifcfg-' '{print $2}'|awk -F ':' '{print $1}')
fi
if [ ! -z "$MAC" ]; then
update ".interfaces[\"$iface\"] = {mac: \"$MAC\", ip: \"$IP\", netmask: \"$NETMASK\", name: \"$iface\", max_speed: \"$MAX_SPEED\", state: \"$STATE\", pci: \"$PCI\", current_speed: \"$CURRENT_SPEED\", type: \"$TYPE\", slaves:\"$SLAVES\"}"
fi
done
}
function get_disk_info(){
for disk in $(fdisk -l|grep Disk|grep "/dev" |cut -d ":" -f1|awk -F "/" '{print $NF}')
do
DISK_NAME=$disk
DISK_SIZE=$(fdisk -l|grep Disk|grep "/dev" |grep -w $disk|cut -d "," -f2)
DISK_DISK=$(ls -l /dev/disk/by-path/|grep $disk"$"|awk '{print $9}')
DISK_EXTRA_1=$(ls -l /dev/disk/by-id/|grep $disk"$"|awk '{print $9}'|sed -n 1p)
DISK_EXTRA_2=$(ls -l /dev/disk/by-id/|grep $disk"$"|awk '{print $9}'|sed -n 2p)
MODEL=$(hdparm -I /dev/sda |grep Model | cut -d ":" -f2)
REMOVABLE=$(hdparm -I /dev/sda |grep removable|awk '{print $4}')
update ".disk[\"$disk\"] = {name: \"$DISK_NAME\", size: \"$DISK_SIZE\", disk: \"$DISK_DISK\", model: \"$MODEL\", removable: \"$REMOVABLE\",extra: [\"$DISK_EXTRA_1\", \"$DISK_EXTRA_2\"]}"
done
}
function main(){
get_system_info
get_cpu_info
get_memory_info
get_net_info
get_disk_info
}
main
BMC_ADDRESS=$(ipmitool lan print | grep -e "IP Address [^S]" | awk '{ print $4 }')
if [ -z "$BMC_ADDRESS" ]; then
BMC_ADDRESS=$(ipmitool lan print 3| grep -e "IP Address [^S]" | awk '{ print $4 }')
fi
update ".ipmi_address = \"$BMC_ADDRESS\""
update ".data_name = \"baremetal_source\""
update ".os_status = \"active\""
echo Collected:
cat data.json
RESULT=$(eval curl -i -X POST \
"-H 'Accept: application/json'" \
"-H 'Content-Type: application/json'" \
"-d @data.json" \
"$DISCOVERD_URL")
if echo $RESULT | grep "HTTP/1.0 4"; then
echo "Ironic API returned error: $RESULT"
fi
echo "Node is now discovered! Halting..."
sleep 5

Binary file not shown.

View File

@@ -1,39 +0,0 @@
[
{
"protocol_type": "ISCSI",
"service": "glance",
"lun": "0",
"data_ips": [
"10.43.177.159"
],
"lvm_config": {
"size": 100,
"vg_name": "VolGroupHAImage",
"lv_name": "lvHAImage",
"fs_type": "ext4"
}
},
{
"protocol_type": "ISCSI",
"service": "db",
"lun": "1",
"data_ips": [
"162.1.1.101"
],
"lvm_config": {
"size": 100,
"vg_name": "VolGroupHAMysql",
"lv_name": "lvHAMysql",
"fs_type": "ext4"
}
},
{
"protocol_type": "CEPH",
"rbd_config": {
"size": 100,
"rbd_pool": "mysql",
"rbd_volume": "mysql",
"fs_type": "ext4" # can be none
}
}
]

View File

@@ -1,39 +0,0 @@
[
{
"protocol_type": "ISCSI",
"service": "glance",
"lun": "0",
"data_ips": [
"10.43.177.159"
],
"lvm_config": {
"size": 100,
"vg_name": "VolGroupHAImage",
"lv_name": "lvHAImage",
"fs_type": "ext4"
}
},
{
"protocol_type": "ISCSI",
"service": "db",
"lun": "1",
"data_ips": [
"162.1.1.101"
],
"lvm_config": {
"size": 100,
"vg_name": "VolGroupHAMysql",
"lv_name": "lvHAMysql",
"fs_type": "ext4"
}
},
{
"protocol_type": "CEPH",
"rbd_config": {
"size": 100,
"rbd_pool": "mysql",
"rbd_volume": "mysql",
"fs_type": "ext4" # can be none
}
}
]

View File

@@ -1,144 +0,0 @@
# This is a basic configuration file with some examples, for device mapper
# multipath. Use user_friendly_names instead of using WWIDs as names.
defaults {
user_friendly_names yes
queue_without_daemon no
# find_multipaths yes
}
##
## Here is an example of how to configure some standard options.
##
#
#defaults {
# udev_dir /dev
# polling_interval 10
# selector "round-robin 0"
# path_grouping_policy multibus
# getuid_callout "/lib/udev/scsi_id --whitelisted --device=/dev/%n"
# prio alua
# path_checker readsector0
# rr_min_io 100
# max_fds 8192
# rr_weight priorities
# failback immediate
# no_path_retry fail
# user_friendly_names yes
#}
##
## The wwid line in the following blacklist section is shown as an example
## of how to blacklist devices by wwid. The 2 devnode lines are the
## compiled in default blacklist. If you want to blacklist entire types
## of devices, such as all scsi devices, you should use a devnode line.
## However, if you want to blacklist specific devices, you should use
## a wwid line. Since there is no guarantee that a specific device will
## not change names on reboot (from /dev/sda to /dev/sdb for example)
## devnode lines are not recommended for blacklisting specific devices.
##
#blacklist {
# wwid 26353900f02796769
# devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*"
# devnode "^hd[a-z]"
#}
#multipaths {
# multipath {
# wwid 3600508b4000156d700012000000b0000
# alias yellow
# path_grouping_policy multibus
# path_checker readsector0
# path_selector "round-robin 0"
# failback manual
# rr_weight priorities
# no_path_retry 5
# }
# multipath {
# wwid 1DEC_____321816758474
# alias red
# }
#}
#devices {
# device {
# vendor "COMPAQ "
# product "HSV110 (C)COMPAQ"
# path_grouping_policy multibus
# getuid_callout "/lib/udev/scsi_id --whitelisted --device=/dev/%n"
# path_checker readsector0
# path_selector "round-robin 0"
# hardware_handler "0"
# failback 15
# rr_weight priorities
# no_path_retry queue
# }
# device {
# vendor "COMPAQ "
# product "MSA1000 "
# path_grouping_policy multibus
# }
#}
devices {
device {
vendor "FUJITSU"
product "ETERNUS_DXL"
prio alua
path_grouping_policy group_by_prio
path_selector "round-robin 0"
failback immediate
no_path_retry 0
path_checker tur
dev_loss_tmo 2097151
fast_io_fail_tmo 1
}
device {
vendor "FUJITSU"
product "ETERNUS_DXM"
prio alua
path_grouping_policy group_by_prio
path_selector "round-robin 0"
failback immediate
no_path_retry 0
path_checker tur
dev_loss_tmo 2097151
fast_io_fail_tmo 1
}
device {
vendor "FUJITSU"
product "ETERNUS_DX400"
prio alua
path_grouping_policy group_by_prio
path_selector "round-robin 0"
failback immediate
no_path_retry 0
path_checker tur
dev_loss_tmo 2097151
fast_io_fail_tmo 1
}
device {
vendor "FUJITSU"
product "ETERNUS_DX8000"
prio alua
path_grouping_policy group_by_prio
path_selector "round-robin 0"
failback immediate
no_path_retry 0
path_checker tur
dev_loss_tmo 2097151
fast_io_fail_tmo 1
}
device {
vendor "ZTE"
product "ZXUSP"
path_grouping_policy group_by_prio
path_checker tur
prio alua
path_selector "round-robin 0"
hardware_handler "1 alua"
failback immediate
rr_weight priorities
no_path_retry 0
rr_min_io_rq 1
flush_on_last_del yes
}
}
blacklist {
}

View File

@@ -1,281 +0,0 @@
import uuid
from utils import *
from xml.etree.ElementTree import ElementTree, Element
class BaseConfig():
_CINDER_CONF_PATH = "/etc/cinder/cinder.conf"
SET_CONFIG = \
"openstack-config --set {config_file} {section} {key} {value}"
GET_CONFIG = \
"openstack-config --get {config_file} {section} {key}"
instance = None
def __init__(self):
self._BACKEND_MAPPING = {
'KS3200_IPSAN': ZTEBackendConfig,
'KS3200_FCSAN': ZTEBackendConfig,
'FUJISTU_ETERNUS': FUJISTUBackendConfig,
'LVM': None,
'CEPH': CEPHBackendConfig,
}
self.instance_mapping = {}
def __get_backend_instance(self, backend_type):
if not backend_type or \
backend_type not in self._BACKEND_MAPPING.keys():
print_or_raise("Volume driver type '%s' is not valid." %
backend_type,
ScriptInnerError)
backend_instance = self.instance_mapping.get(backend_type, BaseConfig)
if isinstance(backend_instance, self._BACKEND_MAPPING[backend_type]):
return backend_instance
else:
self.instance_mapping.update(
{backend_type: self._BACKEND_MAPPING[backend_type]()})
return self.instance_mapping[backend_type]
@classmethod
def single_instance(cls):
if not BaseConfig.instance:
BaseConfig.instance = BaseConfig()
return BaseConfig.instance
def _construct_particular_cinder_data(self, backend, backend_data):
print_or_raise("Backend _construct_particular_cinder_data method no "
"implement!", ScriptInnerError)
def _write_xml(self, fp_xml, **backend_device_args):
self.backend_instance._write_xml(fp_xml, **backend_device_args)
def _construct_commonality_cinder_data(self, backend, backend_data):
backend_pools, xml_path = \
self.backend_instance._construct_particular_cinder_data(
backend, backend_data)
backend_data['volume_backend_name'] = \
backend_data.pop('volume_type')
set_backend = lambda x, y: self.SET_CONFIG.format(
config_file=self._CINDER_CONF_PATH,
section=backend,
key=x, value=y)
backend_config_list = list()
backend_config_list += map(
set_backend, backend_data.keys(), backend_data.values())
get_bakcends = \
self.GET_CONFIG.format(config_file=self._CINDER_CONF_PATH,
section="DEFAULT",
key="enabled_backends")
out, err = execute(get_bakcends, check_exit_code=[0, 1])
exist_backends = out.split("\n")[0] if out else ""
enabled_backends = \
exist_backends if backend in exist_backends else \
"%s" % backend if not out else "%s,%s" % \
(exist_backends, backend)
set_bakcends = \
self.SET_CONFIG.format(config_file=self._CINDER_CONF_PATH,
section="DEFAULT",
key="enabled_backends",
value=enabled_backends)
# write to cinder.conf
config_set_all = set_bakcends + ";" + ";".join(backend_config_list)
execute(config_set_all)
return backend_pools, xml_path
def is_needed_generate_backend_xml(self, backend_driver):
if backend_driver in ['KS3200_IPSAN', 'KS3200_FCSAN',
'FUJISTU_ETERNUS']:
return True
else:
return False
def config_backend(self, backend_cinder_args, **backend_device_args):
"""
Public entry point for the backend configuration flow.
:param backend_device_args: device config
:param backend_cinder_args: backend config
:return:
"""
backend_data = backend_cinder_args[1]
backend_driver = backend_data.get('volume_driver', None)
self.backend_instance = self.__get_backend_instance(backend_driver)
# config cinder.conf
backend_pools, xml_path = \
self._construct_commonality_cinder_data(backend_cinder_args[0],
backend_data)
# config xml
if self.is_needed_generate_backend_xml(backend_driver):
backend_device_args.update({'pools': backend_pools})
with open(xml_path, "w+") as fp_xml:
self._write_xml(fp_xml, **backend_device_args)
execute("chown cinder:cinder %s" % xml_path)
def update_xml_node(self, element_obj, node_path, content):
node_list = element_obj.findall(node_path)
if node_list:
node_list[0].text = content
else:
new_element = Element(node_path.split('/')[-1])
new_element.text = content
parent_node = element_obj.findall(node_path.split('/')[0])
parent_node[0].append(new_element)
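# Usage sketch (illustrative note, not part of the original file): update_xml_node
# overwrites the text of an existing node, or appends the missing leaf under the
# first path segment. Assuming a template root such as
#   <config><Storage><UserName>old</UserName></Storage></config>
# the calls
#   cfg = BaseConfig.single_instance()
#   cfg.update_xml_node(root, "Storage/UserName", "admin")      # existing node updated
#   cfg.update_xml_node(root, "Storage/UserPassword", "secret") # new leaf added under Storage
# Note that only single-level parent/leaf paths are handled when a node has to be created.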
class ZTEBackendConfig(BaseConfig):
_DEFAULT_USERNAME = "admin"
_DEFAULT_USERPWD = "admin"
_DEFAULT_XML_FILE_PREFIX = "cinder_zte_conf_file"
_DEFAULT_XML_TEMPLATE_PATH = "/etc/cinder/cinder_zte_conf.xml"
_ISCSI_DRIVER = 'cinder.volume.drivers.zte.zte_ks.ZteISCSIDriver'
_FC_DRIVER = 'cinder.volume.drivers.zte.zte_ks.ZteFCDriver'
def _construct_particular_cinder_data(self, backend, backend_data):
# construct commonality data in cinder.conf
backend_data['volume_driver'] = \
self._ISCSI_DRIVER \
if "KS3200_IPSAN" == backend_data['volume_driver'] \
else self._FC_DRIVER
backend_data[self._DEFAULT_XML_FILE_PREFIX] = \
backend_data.pop('backend_config_file') \
if backend_data.get('backend_config_file', None) \
else "/etc/cinder/%s_%s.xml" % (self._DEFAULT_XML_FILE_PREFIX,
backend)
backend_data['use_multipath_for_image_xfer'] = \
backend_data.get('multipath_tool', True)
backend_pools = backend_data.pop('pools')
return backend_pools, backend_data[self._DEFAULT_XML_FILE_PREFIX]
def _write_xml(self, fp, **backend_device_args):
if not os.path.exists(self._DEFAULT_XML_TEMPLATE_PATH):
print_or_raise("XML file template %s not exists,can't load defult "
"params." % self._DEFAULT_XML_TEMPLATE_PATH,
ScriptInnerError)
mgnt_ips = backend_device_args['management_ips']
user_name = backend_device_args['user_name']
user_pwd = backend_device_args['user_pwd']
cinder_host_ip = backend_device_args['cinder_host_ip']
pools = backend_device_args['pools']
xml_fp = fp
tree = ElementTree()
elements = tree.parse(self._DEFAULT_XML_TEMPLATE_PATH)
for index in range(len(mgnt_ips)):
self.update_xml_node(
elements,
"Storage/ControllerIP" + str(index), mgnt_ips[index])
if cinder_host_ip:
self.update_xml_node(elements, "Storage/LocalIP", cinder_host_ip)
self.update_xml_node(elements, "Storage/UserName", user_name)
self.update_xml_node(elements, "Storage/UserPassword", user_pwd)
# del all StoragePool and StorageVd node
pool_parent_node = elements.findall("LUN")
pool_child_nodes = elements.findall("LUN/StoragePool")
vd_child_nodes = elements.findall("LUN/StorageVd")
map(pool_parent_node[0].remove, pool_child_nodes + vd_child_nodes)
# add StoragePool node base on pools
for pool in pools:
element = Element("StoragePool")
element.text = pool
element.tail = "\n\t"
pool_parent_node[0].insert(0, element)
tree.write(xml_fp, encoding="utf-8", xml_declaration=True)
class FUJISTUBackendConfig(BaseConfig):
_DEFAULT_USERNAME = "root"
_DEFAULT_USERPWD = "root"
_DEFAULT_XML_FILE_PREFIX = "cinder_eternus_config_file"
_DEFAULT_XML_TEMPLATE_PATH = \
"/etc/cinder/cinder_fujitsu_eternus_dx.xml"
FUJISTU_DRIVER = \
"cinder.volume.drivers.fujitsu.eternus_dx_iscsi.FJDXISCSIDriver"
def _construct_particular_cinder_data(self, backend, backend_data):
# construct commonality data in cinder.conf
backend_data['volume_driver'] = self.FUJISTU_DRIVER
backend_data[self._DEFAULT_XML_FILE_PREFIX] = \
backend_data.pop('backend_config_file') \
if backend_data.get('backend_config_file', None) \
else "/etc/cinder/%s_%s.xml" % (self._DEFAULT_XML_FILE_PREFIX,
backend)
backend_data['use_multipath_for_image_xfer'] = \
backend_data.get('multipath_tool', True)
backend_data['use_fujitsu_image_volume'] = \
backend_data.get('use_fujitsu_image_volume', True)
backend_data['fujitsu_min_image_volume_per_storage'] = \
backend_data.get('fujitsu_min_image_volume_per_storage', 1)
backend_data['fujitsu_image_management_dir'] = \
backend_data.get('fujitsu_image_management_dir',
'/var/lib/glance/conversion')
backend_pools = backend_data.pop('pools')
return backend_pools, backend_data[self._DEFAULT_XML_FILE_PREFIX]
def _write_xml(self, fp, **backend_device_args):
if not os.path.exists(self._DEFAULT_XML_TEMPLATE_PATH):
print_or_raise("XML file template %s not exists,can't load defult "
"params." % self._DEFAULT_XML_TEMPLATE_PATH,
ScriptInnerError)
mgnt_ip = backend_device_args['management_ips'][0]
data_ips = backend_device_args['data_ips']
user_name = backend_device_args['user_name']
user_pwd = backend_device_args['user_pwd']
pool = backend_device_args['pools'][0]
xml_fp = fp
tree = ElementTree()
elements = tree.parse(self._DEFAULT_XML_TEMPLATE_PATH)
self.update_xml_node(elements, "EternusIP", mgnt_ip)
self.update_xml_node(elements, "EternusUser", user_name)
self.update_xml_node(elements, "EternusPassword", user_pwd)
self.update_xml_node(elements, "EternusPool", pool)
self.update_xml_node(elements, "EternusSnapPool", pool)
root = tree.getroot()
map(root.remove, root.findall("EternusISCSIIP"))
for ip in data_ips:
element = Element("EternusISCSIIP")
element.text = ip
element.tail = "\n"
root.insert(4, element)
# root.append(element)
tree.write(xml_fp, encoding="utf-8", xml_declaration=True)
class CEPHBackendConfig(BaseConfig):
NOVA_CONF_FILE = "/etc/nova/nova.conf"
GLANCE_API_CONF_FILE = "/etc/glance/glance-api.conf"
_RBD_STORE_USER = "cinder"
_RBD_POOL = "volumes"
_RBD_MAX_CLONE_DEPTH = 5
_RBD_FLATTEN_VOLUME_FROM_SNAPSHOT = "False"
_RBD_CEPH_CONF = "/etc/ceph/ceph.conf"
_RBD_DRIVER = 'cinder.volume.drivers.rbd.RBDDriver'
def _construct_particular_cinder_data(self, backend, backend_data):
backend_data['volume_driver'] = self._RBD_DRIVER
backend_data['rbd_pool'] = self._RBD_POOL
backend_data['rbd_max_clone_depth'] = self._RBD_MAX_CLONE_DEPTH
backend_data['rbd_flatten_volume_from_snapshot'] = \
self._RBD_FLATTEN_VOLUME_FROM_SNAPSHOT
backend_data['rbd_ceph_conf'] = self._RBD_CEPH_CONF
uuid_instance = uuid.uuid3(uuid.NAMESPACE_DNS, "zte.com.cn")
backend_data['rbd_secret_uuid'] = uuid_instance.urn.split(":")[2]
return [], []
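# Note (illustrative, not part of the original file): uuid.uuid3 builds a name-based
# MD5 UUID, so every node that runs this code derives the same rbd_secret_uuid for
# the libvirt Ceph secret without any extra coordination, e.g.
#   import uuid
#   secret = uuid.uuid3(uuid.NAMESPACE_DNS, "zte.com.cn").urn.split(":")[2]
# config_nova_with_rbd() in storage_auto_config recomputes the identical value when
# it writes rbd_secret_uuid into nova.conf.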

View File

@@ -1,312 +0,0 @@
from utils import *
class BaseShareDisk():
instance = None
def __init__(self):
self._PROTOCOL_MAPPING = {
'ISCSI': ISCSIShareDisk,
'CEPH': CEPHShareDisk
}
self.instance_mapping = {}
def __get_protocol_instance(self, protocol_type):
if not protocol_type or \
protocol_type not in self._PROTOCOL_MAPPING.keys():
print_or_raise("Protocol type '%s' is not valid." % protocol_type,
ScriptInnerError)
protocol_instance = self.instance_mapping.get(protocol_type,
BaseShareDisk)
if isinstance(protocol_instance,
self._PROTOCOL_MAPPING[protocol_type]):
return protocol_instance
else:
self.instance_mapping.update(
{protocol_type: self._PROTOCOL_MAPPING[protocol_type]()})
return self.instance_mapping[protocol_type]
@classmethod
def single_instance(cls):
if not BaseShareDisk.instance:
BaseShareDisk.instance = BaseShareDisk()
return BaseShareDisk.instance
def deploy_share_disk(self, item, host_name):
protocol_instance = self.__get_protocol_instance(
item.get('protocol_type', 'ISCSI'))
protocol_instance.deploy_share_disk(item, host_name)
class ISCSIShareDisk(BaseShareDisk):
_LV_DEFAULT_NAME = {
'glance': ("VolGroupHAImage", "lvHAImage", 254),
'db': ("VolGroupHAMysql", "lvHAMysql", 253),
'db_backup': ("VolGroupHABakMysql", "lvHABakMysql", 252),
'mongodb': ("VolGroupHAMongodb", "lvHAMongodb", 251),
}
def _get_iscsi_configs(self, record_list):
raid_config = {}
for record in record_list:
discovery_media_ip = record.split(" ")[0].split(":")[0]
discovery_media_iqn = record.split(" ")[1]
try:
execute("ping -c 1 -W 2 %s" % discovery_media_ip)
except ProcessExecutionError:
execute("iscsiadm -m node -T %s -p %s -o delete" %
(discovery_media_iqn, discovery_media_ip),
check_exit_code=[0, 1])
continue
if discovery_media_ip in raid_config.get(discovery_media_iqn, []):
execute("iscsiadm -m node -T %s -p %s -R" %
(discovery_media_iqn, discovery_media_ip),
check_exit_code=[0, 1])
elif discovery_media_iqn in raid_config.keys():
raid_config[discovery_media_iqn] += [discovery_media_ip]
else:
raid_config[discovery_media_iqn] = [discovery_media_ip]
print_or_raise("Raid config is:\n%s" % str(raid_config))
return raid_config
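# Example (illustrative, not part of the original file): a discovery record such as
#   "10.43.177.159:3260,1 iqn.2099-01.cn.com.zte:usp.spr11-4c:09:b4:b0:56:8b"
# yields ip "10.43.177.159" and the IQN after the space. Unreachable portals are
# deleted, an ip already seen for the same IQN only triggers a rescan (-R), and the
# remainder is grouped into raid_config as {iqn: [ip, ...]}.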
def _lv_reentrant_check(
self, vg_name, lv_name, iscsi_session_setup, lun=None,
data_ips=[]):
"""
Check if share disk operation is reentrant.
:return: True, continue with the follow-up actions; False, do nothing.
"""
lv_device_path = "/dev/%s/%s" % (vg_name, lv_name)
if not os.path.exists(lv_device_path):
return True
if not iscsi_session_setup:
exist_volumes = \
[sd for sd in self._ls_sd_path() if "-lun-" + lun in sd
for ip in data_ips if "ip-" + ip in sd]
if not exist_volumes:
print_or_raise("Lvm %s is exist, but no sd device match!" %
lv_device_path, ScriptInnerError)
return False
def _lv_rollback(self, lv, vg, block_device):
try:
execute("lvremove -y -ff /dev/%s/%s" % (lv, vg),
check_exit_code=[0, 1, 5])
execute("vgremove -y -ff %s" % vg, check_exit_code=[0, 1, 5])
execute("pvremove -y -ff %s" % block_device,
check_exit_code=[0, 1, 5])
except Exception as e:
print_or_raise("Rollback lvm resource failed!", e)
def _establish_iscsi_session(self, available_data_ips):
# discovery
discovery_ret = ""
for ip in available_data_ips:
out, err = execute(
"iscsiadm -m discovery -t st -p %s:3260" % ip)
discovery_ret += out
# if('0' != err) or ('0\n' != err ) or err:
# print_or_raise("Discovery ip:%s failed,continue.." % ip)
if not discovery_ret:
print_or_raise("No discovery record!", ScriptInnerError)
record_list = list(set(discovery_ret.split('\n')[:-1]))
print_or_raise(
"Discovery successful! Record:\n%s" % "\n".join(record_list))
# get iqn and ip like {iqn1: ip1, iqn2:ip2}
raid_config = self._get_iscsi_configs(record_list)
# auto config & login
login_cmd = \
lambda x, y: "iscsiadm -m node -T %s -p %s:3260 -l" % (x, y)
auto_cmd = \
lambda x, y: "iscsiadm -m node -T %s -p %s -o update -n " \
"node.startup -v automatic" % (x, y)
login = []
auto_config = []
for index in range(len(raid_config.keys())):
k = raid_config.keys()[index]
v = raid_config[k]
login += map(login_cmd, [k] * len(v), v)
auto_config += map(auto_cmd, [k] * len(v), v)
execute(";".join(login))
execute(";".join(auto_config))
print_or_raise("Login successful!")
return raid_config
def _modify_host_iqn(self, host_name):
# modify host IQN
host_iqn, err = execute("cat /etc/iscsi/initiatorname.iscsi")
md5_str, err = execute("echo -n %s | openssl md5" % host_name)
host_iqn = host_iqn.split("=")[1].strip()
wish_iqn = "iqn.opencos.rh:" + md5_str.split("=")[1].strip()
if wish_iqn != host_iqn:
print_or_raise(
"The host iqn is:%s, but wish iqn is %s, it will be modified."
% (host_iqn, wish_iqn))
with open("/etc/iscsi/initiatorname.iscsi", "w") as fp:
fp.write("InitiatorName=" + wish_iqn + "\n")
execute("systemctl restart iscsid.service")
def _ls_sd_path(self):
out, err = execute("ls /dev/disk/by-path")
return out.split("\n")[:-1]
def _find_multipath_by_sd(self, iqns, lun_id):
sd_path = []
attemps = 0
while not sd_path:
sd_path = \
[sd for sd in self._ls_sd_path()
if filter(lambda complex_sd_path: complex_sd_path in sd,
[iqn + "-lun-" + str(lun_id) for iqn in iqns])]
attemps += 1
if attemps == 5:
execute("iscsiadm -m node -R")
elif attemps > 10:
print_or_raise(
"After login successful,"
"there is no local sd device match with block device.",
ScriptInnerError)
time.sleep(2)
sd_path = "/dev/disk/by-path/" + sd_path[0]
sd_real_path = os.path.realpath(sd_path)
attemps = 0
multipath_path = ""
while not os.path.exists(multipath_path):
multipath_device, err = execute("multipath -l %s" % sd_real_path)
# if not multipath_device or ('0' != err) or ('0\n' != err) or err:
# continue
multipath_path = "/dev/mapper/" + \
multipath_device.split("\n")[0].split(" ")[0]
attemps += 1
if attemps > 5:
print_or_raise(
"No multipath match with local sd device:%s." %
sd_real_path,
ScriptInnerError)
time.sleep(2)
return multipath_path
def _create_lv_by_multipath_device(
self, multipath, vg_name, lv_name, size, fs_type):
try:
# create lvm base on block device
execute("pvcreate -y -ff %s" % multipath,
check_exit_code=[0, 1, 5])
execute("vgcreate -y -ff %s %s" % (vg_name, multipath),
check_exit_code=[0, 1, 5])
if size == -1:
lvcreate = "lvcreate -W y -l 100%%FREE -n %s %s" % \
(lv_name, vg_name)
else:
lvcreate = "lvcreate -W y -L %sG -n %s %s" % \
(round(size * 0.95, 2), lv_name, vg_name)
execute(lvcreate, check_exit_code=[0, 1, 5])
execute("pvscan --cache --activate ay")
# make filesystem
execute("mkfs.%s /dev/%s/%s" % (fs_type, vg_name, lv_name))
except Exception as e:
self._lv_rollback(lv_name, vg_name, multipath)
print_or_raise("LVM create failed, resource has been rollbacked.",
e)
def deploy_share_disk(self, item, host_name):
config_computer()
self._modify_host_iqn(host_name)
service = item['service']
if service not in ['glance', 'db', 'db_backup', 'mongodb']:
print_or_raise("Service name '%s' is not valid." % service)
# check ip
available_data_ips, invalid_ips = \
get_available_data_ip(item['data_ips'])
if not available_data_ips:
print_or_raise("No valid data ips,please check.", ScriptInnerError)
raid_config = self._establish_iscsi_session(available_data_ips)
lv_config = item.get('lvm_config', None)
vg_name = lv_config.get('vg_name', self._LV_DEFAULT_NAME[service][0])
lv_name = lv_config.get('lv_name', self._LV_DEFAULT_NAME[service][1])
if not self._lv_reentrant_check(vg_name, lv_name, True):
return
multipath = self._find_multipath_by_sd(
raid_config.keys(),
item.get('lun', self._LV_DEFAULT_NAME[service][2]))
self._create_lv_by_multipath_device(multipath,
vg_name,
lv_name,
lv_config.get('size', -1),
lv_config.get('fs_type', 'ext4'))
class CEPHShareDisk(BaseShareDisk):
def __init__(self):
self.monitor_ip = ''
self.monitor_passwd = ''
def deploy_share_disk(self, item, host_name):
self.monitor_ip = item.get('monitor_ip', '')
self.monitor_passwd = item.get('monitor_passwd', '')
rbd_pool = item['rbd_config']['rbd_pool']
rbd_img = item['rbd_config']['rbd_volume']
img_size = int(item['rbd_config']['size'])*1024
fs_type = item['rbd_config'].get('fs_type', 'ext4')
cmd_create = 'sshpass -p %s ssh %s rbd create -p %s --size %s %s ' % \
(self.monitor_passwd,
self.monitor_ip,
rbd_pool,
img_size,
rbd_img)
cmd_query = 'sshpass -p %s ssh %s rbd ls -l %s' % (
self.monitor_passwd, self.monitor_ip, rbd_pool)
image_in_monitor = []
print_or_raise("Create image %s in pool %s at monitor %s." %
(rbd_img, rbd_pool, self.monitor_ip))
try:
out, err = execute(cmd_query)
if out:
for line in out.splitlines():
image_in_monitor.append(line.split()[0])
if rbd_img not in image_in_monitor:
execute(cmd_create)
except Exception as e:
print_or_raise("Query pool %s in monitor error or create image %s "
"in pool %s." % (rbd_pool, rbd_img, rbd_pool), e)
execute("systemctl stop rbdmap")
rbd_map = '%s/%s id=admin,' \
'keyring=/etc/ceph/ceph.client.admin.keyring' % (rbd_pool,
rbd_img)
rbd_map_need_to_write = True
print_or_raise("Write rbdmap.")
with open("/etc/ceph/rbdmap", "a+") as fp:
for line in fp:
if line == rbd_map + "\n":
rbd_map_need_to_write = False
if rbd_map_need_to_write is True:
fp.write(rbd_map + "\n")
execute("chmod 777 /etc/ceph/rbdmap")
execute("systemctl enable rbdmap")
execute("systemctl start rbdmap")
execute("mkfs.%s /dev/rbd/%s/%s" % (fs_type, rbd_pool, rbd_img))

View File

@@ -1,231 +0,0 @@
import subprocess
import random
import shlex
import signal
import time
import os
import logging
LOG = logging.getLogger()
formatter = "%(asctime)s %(name)s %(levelname)s %(message)s"
logging.basicConfig(format=formatter,
filename="storage_auto_config.log",
filemode="a",
level=logging.DEBUG)
stream_log = logging.StreamHandler()
stream_log.setLevel(logging.DEBUG)
stream_log.setFormatter(logging.Formatter(formatter))
LOG.addHandler(stream_log)
def print_or_raise(msg, exc=None):
if not exc:
LOG.debug(msg)
else:
if isinstance(exc, Exception):
LOG.error(msg)
raise exc
elif issubclass(exc, Exception):
raise exc(msg)
class ScriptInnerError(Exception):
def __init__(self, message=None):
super(ScriptInnerError, self).__init__(message)
class UnknownArgumentError(Exception):
def __init__(self, message=None):
super(UnknownArgumentError, self).__init__(message)
class NoRootWrapSpecified(Exception):
def __init__(self, message=None):
super(NoRootWrapSpecified, self).__init__(message)
class ProcessExecutionError(Exception):
def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
description=None):
self.exit_code = exit_code
self.stderr = stderr
self.stdout = stdout
self.cmd = cmd
self.description = description
if description is None:
description = "Unexpected error while running command."
if exit_code is None:
exit_code = '-'
message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r"
% (description, cmd, exit_code, stdout, stderr))
super(ProcessExecutionError, self).__init__(message)
def execute(cmd, **kwargs):
"""Helper method to shell out and execute a command through subprocess.
Allows optional retries.
:param cmd: Passed to subprocess.Popen.
:type cmd: string
TODO:param process_input: Send to opened process.
:type process_input: string
TODO:param check_exit_code: Single bool, int, or list of allowed exit
codes. Defaults to [0]. Raise
:class:`ProcessExecutionError` unless
program exits with one of these codes.
:type check_exit_code: boolean, int, or [int]
:param delay_on_retry: True | False. Defaults to True. If set to True,
wait a short amount of time before retrying.
:type delay_on_retry: boolean
:param attempts: How many times to retry cmd.
:type attempts: int
TODO:param run_as_root: True | False. Defaults to False. If set to True,
the command is prefixed by the command specified
in the root_helper kwarg.
:type run_as_root: boolean
:param root_helper: command to prefix to commands called with
run_as_root=True
:type root_helper: string
TODO:param shell: whether or not there should be a shell used to
execute this command. Defaults to True.
:type shell: boolean
:param loglevel: log level for execute commands.
:type loglevel: int. (Should be logging.DEBUG or logging.INFO)
:returns: (stdout, stderr) from process execution
:raises: :class:`UnknownArgumentError` on
receiving unknown arguments
:raises: :class:`ProcessExecutionError`
"""
def _subprocess_setup():
# Python installs a SIGPIPE handler by default.
# This is usually not what non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
# stdin
process_input = kwargs.pop('process_input', None)
check_exit_code = kwargs.pop('check_exit_code', [0])
ignore_exit_code = False
delay_on_retry = kwargs.pop('delay_on_retry', True)
attempts = kwargs.pop('attempts', 1)
run_as_root = kwargs.pop('run_as_root', False)
root_helper = kwargs.pop('root_helper', '')
shell = kwargs.pop('shell', True)
silent = kwargs.pop('silent', False)
# loglevel = kwargs.pop('loglevel', logging.DEBUG)
if isinstance(check_exit_code, bool):
ignore_exit_code = not check_exit_code
check_exit_code = [0]
elif isinstance(check_exit_code, int):
check_exit_code = [check_exit_code]
if kwargs:
raise UnknownArgumentError(
'Got unknown keyword args to utils.execute: %r' % kwargs)
if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
if not root_helper:
raise NoRootWrapSpecified(
message=('Command requested root, but did not specify a root '
'helper.'))
cmd = shlex.split(root_helper) + list(cmd)
while attempts > 0:
attempts -= 1
try:
if not silent:
print_or_raise('Running cmd (subprocess): %s' % cmd)
# windows
if os.name == 'nt':
preexec_fn = None
close_fds = False
else:
preexec_fn = _subprocess_setup
close_fds = True
obj = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=close_fds,
preexec_fn=preexec_fn,
shell=shell)
if process_input is not None:
result = obj.communicate(process_input)
else:
result = obj.communicate()
obj.stdin.close()
_returncode = obj.returncode
if not silent:
print_or_raise('Result was %s' % _returncode)
if not ignore_exit_code and _returncode not in check_exit_code:
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=_returncode,
stdout=stdout,
stderr=stderr,
cmd=cmd)
# cmd=sanitized_cmd)
return result
except ProcessExecutionError:
if not attempts:
raise
else:
if not silent:
print_or_raise('%r failed. Retrying.' % cmd)
if delay_on_retry:
time.sleep(random.randint(20, 200) / 100.0)
finally:
time.sleep(0)
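# Usage sketch (illustrative, not part of the original module):
#   out, err = execute("ls /dev/disk/by-path")                      # raises unless rc == 0
#   out, err = execute("iscsiadm -m node", check_exit_code=[0, 21]) # tolerate "no records"
#   execute("ping -c 1 -W 2 %s" % ip, attempts=3, delay_on_retry=True)
# Commands run through a shell by default (shell=True); on failure the raised
# ProcessExecutionError carries the command, exit code, stdout and stderr.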
def get_available_data_ip(media_ips):
unavailable_ip = []
for media_ip in media_ips:
try:
execute("ping -c 1 -W 2 %s" % media_ip)
except ProcessExecutionError:
unavailable_ip.append(media_ip)
continue
return list(set(media_ips) - set(unavailable_ip)), unavailable_ip
def clear_host_iscsi_resource():
out, err = execute("iscsiadm -m node", check_exit_code=[0, 21])
if not out:
return
sd_ips_list = map(lambda x: x.split(":3260")[0], out.split("\n")[:-1])
if not sd_ips_list:
return
valid_ips, invalid_ips = get_available_data_ip(sd_ips_list)
clear_resource = ""
for ip in invalid_ips:
logout_session = "iscsiadm -m node -p %s -u;" % ip
del_node = "iscsiadm -m node -p %s -o delete;" % ip
# manual_startup = "iscsiadm -m node -p %s -o update -n node.startup "
# "-v manual;" % ip
clear_resource += (logout_session + del_node)
execute(clear_resource, check_exit_code=[0, 21], silent=True)
# _execute("multipath -F")
def config_computer():
# remove exist iscsi resource
clear_host_iscsi_resource()
config_multipath()
def config_multipath():
if os.path.exists("/etc/multipath.conf"):
execute("echo y|mv /etc/multipath.conf /etc/multipath.conf.bak",
check_exit_code=[0, 1])
execute("cp -p base/multipath.conf /etc/")
execute("systemctl enable multipathd.service;"
"systemctl restart multipathd.service")

View File

@@ -1,168 +0,0 @@
###############################################################################
# Author: CG
# Description:
# 1.The script should be copied to the host before running it.
# 2.The script is not thread safe.
# 3.Example for script call:
# [config share disk]:
# python storage_auto_config share_disk <host_pxe_mac>,
# we use host_pxe_mac to generate host IQN by md5 and write it to
# '/etc/iscsi/initiatorname.iscsi'
# [config cinder]: python storage_auto_config cinder_conf 10.43.177.129,
# the second parameter for cinder_conf is the cinder <host_ip>.
# If the backend is CEPH, you should run the following commands:
# python storage_auto_config glance_rbd_conf at glance node &
# python storage_auto_config nova_rbd_conf at nova node.
# [config multipath]:python storage_auto_config check_multipath.
# 4.Before running the script, the cinder.json and control.json files
# must be configured.
###############################################################################
import sys
import uuid
import traceback
from common.utils import *
from common.cinder_conf import BaseConfig, CEPHBackendConfig
from common.share_disk import BaseShareDisk
try:
import simplejson as json
except ImportError:
import json
def _set_config_file(file, section, key, value):
set_config = BaseConfig.SET_CONFIG.format(
config_file=file,
section=section,
key=key,
value=value)
execute(set_config)
def config_share_disk(config, host_name):
# deploy share_disk
for item in config:
BaseShareDisk.single_instance().deploy_share_disk(item, host_name)
def config_cinder(config, cinder_host_ip=""):
# config xml and cinder.conf
for config in config['disk_array']:
# load disk array global config
backends = config['backend']
for item in backends.items():
BaseConfig.single_instance().config_backend(
item,
management_ips=config.get('management_ips', []),
data_ips=config.get('data_ips', []),
user_name=config.get('user_name', []),
user_pwd=config.get('user_pwd', []),
cinder_host_ip=cinder_host_ip)
# config multipath
config_computer()
# enable config
execute("systemctl restart openstack-cinder-volume.service")
def config_nova_with_rbd(config):
# config xml and cinder.conf
for config in config['disk_array']:
# load disk array global config
backends = config['backend']
for key, value in backends.items():
if value.get('volume_driver') == 'CEPH':
uuid_instance = uuid.uuid3(uuid.NAMESPACE_DNS, "zte.com.cn")
uuid_str = uuid_instance.urn.split(":")[2]
_set_config_file(CEPHBackendConfig.NOVA_CONF_FILE,
'libvirt',
'images_type',
'rbd')
_set_config_file(CEPHBackendConfig.NOVA_CONF_FILE,
'libvirt',
'rbd_secret_uuid',
uuid_str)
return
# enable config
execute("systemctl restart openstack-nova-compute.service")
def config_glance_with_rbd(config):
# config xml and cinder.conf
for config in config['disk_array']:
# load disk array global config
backends = config['backend']
for key, value in backends.items():
if value.get('volume_driver') == 'CEPH':
_set_config_file(CEPHBackendConfig.GLANCE_API_CONF_FILE,
'DEFAULT',
'show_image_direct_url',
'True')
_set_config_file(CEPHBackendConfig.GLANCE_API_CONF_FILE,
'glance_store',
'default_store',
'rbd')
return
# enable config
execute("systemctl restart openstack-glance-api.service")
def _launch_script():
def subcommand_launcher(args, valid_args_len, json_path, oper_type):
if len(args) < valid_args_len:
print_or_raise("Too few parameter is given,please check.",
ScriptInnerError)
with open(json_path, "r") as fp_json:
params = json.load(fp_json)
print_or_raise("-----Begin config %s, params is %s.-----" %
(oper_type, params))
return params
oper_type = sys.argv[1] if len(sys.argv) > 1 else ""
try:
if oper_type == "share_disk":
share_disk_config = \
subcommand_launcher(sys.argv, 3, "base/control.json",
oper_type)
config_share_disk(share_disk_config, sys.argv[2])
elif oper_type == "cinder_conf":
cinder_backend_config = subcommand_launcher(sys.argv, 3,
"base/cinder.json",
oper_type)
config_cinder(cinder_backend_config, sys.argv[2])
elif oper_type == "nova_rbd_conf":
nova_rbd_config = subcommand_launcher(sys.argv, 1,
"base/cinder.json",
oper_type)
config_nova_with_rbd(nova_rbd_config)
elif oper_type == "glance_rbd_conf":
glance_rbd_config = subcommand_launcher(sys.argv, 1,
"base/cinder.json",
oper_type)
config_glance_with_rbd(glance_rbd_config)
elif oper_type == "check_multipath":
print_or_raise("-----Begin config %s.-----")
config_computer()
elif oper_type == "debug":
pass
else:
print_or_raise("Script operation is not given,such as:share_disk,"
"cinder_conf,nova_rbd_conf,glance_rbd_conf,"
"check_multipath.", ScriptInnerError)
except Exception as e:
print_or_raise("----------Operation %s is Failed.----------\n"
"Exception call chain as follow,%s" %
(oper_type, traceback.format_exc()))
raise e
else:
print_or_raise("----------Operation %s is done!----------" %
oper_type)
if __name__ == "__main__":
_launch_script()

File diff suppressed because it is too large

View File

@@ -1,9 +0,0 @@
#!/bin/bash
scriptsdir=$(cd $(dirname $0) && pwd)
ISODIR=`mktemp -d /mnt/TFG_ISOXXXXXX`
mount -o loop $scriptsdir/*CGSL_VPLAT*.iso ${ISODIR}
cp ${ISODIR}/*CGSL_VPLAT*.bin $scriptsdir
umount ${ISODIR}
[ -e ${ISODIR} ] && rm -rf ${ISODIR}
$scriptsdir/*CGSL_VPLAT*.bin upgrade reboot

View File

@@ -1,93 +0,0 @@
#!/bin/sh
# Make a host fully trust me, so that later ssh logins to it need no password
# Check that the arguments are valid
logfile=/var/log/trustme.log
function print_log
{
local promt="$1"
echo -e "$promt"
echo -e "`date -d today +"%Y-%m-%d %H:%M:%S"` $promt" >> $logfile
}
ip=$1
if [ -z $ip ]; then
print_log "Usage: `basename $0` ipaddr passwd"
exit 1
fi
passwd=$2
if [ -z $passwd ]; then
print_log "Usage: `basename $0` ipaddr passwd"
exit 1
fi
rpm -qi sshpass >/dev/null
if [ $? != 0 ]; then
print_log "Please install sshpass first"
exit 1
fi
# Check whether the peer can be reached by ping
unreachable=`ping $ip -c 1 -W 3 | grep -c "100% packet loss"`
if [ $unreachable -eq 1 ]; then
print_log "host $ip is unreachable"
exit 1
fi
# Generate an ssh public key on this host if there is none yet
if [ ! -e ~/.ssh/id_dsa.pub ]; then
print_log "generating ssh public key ..."
ssh-keygen -t dsa -f /root/.ssh/id_dsa -N ""
if [ $? != 0 ]; then
print_log "ssh-keygen failed"
exit 1
fi
fi
# First remove the previously stored trusted public key on the peer
user=`whoami`
host=`hostname`
keyend="$user@$host"
print_log "my keyend = $keyend"
cmd="sed '/$keyend$/d' -i ~/.ssh/authorized_keys"
#echo cmd:$cmd
print_log "clear my old pub key on $ip ..."
sshpass -p $passwd ssh -o StrictHostKeyChecking=no $ip "rm -rf /root/.ssh/known_hosts"
if [ $? != 0 ]; then
print_log "ssh $ip to delete known_hosts failed"
exit 1
fi
sshpass -p $passwd ssh -o StrictHostKeyChecking=no $ip "touch ~/.ssh/authorized_keys"
if [ $? != 0 ]; then
print_log "ssh $ip to create file authorized_keys failed"
exit 1
fi
sshpass -p $passwd ssh -o StrictHostKeyChecking=no $ip "$cmd"
if [ $? != 0 ]; then
print_log "ssh $ip to edit authorized_keys failed"
exit 1
fi
# Copy the newly generated public key to the peer
print_log "copy my public key to $ip ..."
tmpfile=/tmp/`hostname`.key.pub
sshpass -p $passwd scp -o StrictHostKeyChecking=no ~/.ssh/id_dsa.pub $ip:$tmpfile
if [ $? != 0 ]; then
print_log "scp file to $ip failed"
exit 1
fi
# Append it to authorized_keys on the peer
print_log "on $ip, append my public key to ~/.ssh/authorized_keys ..."
sshpass -p $passwd ssh -o StrictHostKeyChecking=no $ip "cat $tmpfile >> ~/.ssh/authorized_keys"
if [ $? != 0 ]; then
print_log "ssh $ip to add public key for authorized_keys failed"
exit 1
fi
print_log "rm tmp file $ip:$tmpfile"
sshpass -p $passwd ssh -o StrictHostKeyChecking=no $ip "rm $tmpfile"
if [ $? != 0 ]; then
print_log "ssh $ip to delete tmp file failed"
exit 1
fi
print_log "trustme ok!"

View File

@@ -1,62 +0,0 @@
#!/bin/sh
# Make a host fully trust me, so that later ssh logins to it need no password
# Check that the arguments are valid
ip=$1
if [ -z $ip ]; then
echo "Usage: `basename $0` ipaddr passwd" >&2
exit 1
fi
passwd=$2
if [ -z $passwd ]; then
echo "Usage: `basename $0` ipaddr passwd" >&2
exit 1
fi
rpm -qi sshpass >/dev/null
if [ $? != 0 ]; then
echo "Please install sshpass first!" >&2
exit 1
fi
# Check whether the peer can be reached by ping
unreachable=`ping $ip -c 1 -W 3 | grep -c "100% packet loss"`
if [ $unreachable -eq 1 ]; then
echo "host $ip is unreachable!!!"
exit 1
fi
# Generate an ssh public key on this host if there is none yet
if [ ! -e ~/.ssh/id_dsa.pub ]; then
echo "generating ssh public key ..."
ssh-keygen -t dsa -f /root/.ssh/id_dsa -N ""
fi
# First remove the previously stored trusted public key on the peer
user=`whoami`
host=`hostname`
keyend="$user@$host"
echo "my keyend = $keyend"
cmd="sed '/$keyend$/d' -i ~/.ssh/authorized_keys"
#echo cmd:$cmd
echo "clear my old pub key on $ip ..."
sshpass -p $passwd ssh $ip "rm -rf /root/.ssh/known_hosts"
sshpass -p $passwd ssh $ip "touch ~/.ssh/authorized_keys"
sshpass -p $passwd ssh $ip "$cmd"
# Copy the newly generated public key to the peer
echo "copy my public key to $ip ..."
tmpfile=/tmp/`hostname`.key.pub
sshpass -p $passwd scp ~/.ssh/id_dsa.pub $ip:$tmpfile
# Append it to authorized_keys on the peer
echo "on $ip, append my public key to ~/.ssh/authorized_keys ..."
sshpass -p $passwd ssh $ip "cat $tmpfile >> ~/.ssh/authorized_keys"
echo "rm tmp file $ip:$tmpfile"
sshpass -p $passwd ssh $ip "rm $tmpfile"
echo "trustme ok!"

View File

@@ -1,17 +0,0 @@
[general]
nodeip=192.168.3.1
nodeid=1
hostname=sdn59
needzamp=y
zbpips=192.168.3.1
zbp_node_num=1
zbpnodelist=1,256
zampips=192.168.3.1
zamp_node_num=1
mongodbips=192.168.3.1
mongodb_node_num=1
zamp_vip=
mongodb_vip=
MacName=eth1
netid=1234
memmode=tiny

View File

@@ -1,899 +0,0 @@
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from daisy.common import exception
import daisy.domain.proxy
from daisy import i18n
_ = i18n._
def is_image_mutable(context, image):
"""Return True if the image is mutable in this context."""
if context.is_admin:
return True
if image.owner is None or context.owner is None:
return False
return image.owner == context.owner
def proxy_image(context, image):
if is_image_mutable(context, image):
return ImageProxy(image, context)
else:
return ImmutableImageProxy(image, context)
def is_member_mutable(context, member):
"""Return True if the image is mutable in this context."""
if context.is_admin:
return True
if context.owner is None:
return False
return member.member_id == context.owner
def proxy_member(context, member):
if is_member_mutable(context, member):
return member
else:
return ImmutableMemberProxy(member)
def is_task_mutable(context, task):
"""Return True if the task is mutable in this context."""
if context.is_admin:
return True
if context.owner is None:
return False
return task.owner == context.owner
def is_task_stub_mutable(context, task_stub):
"""Return True if the task stub is mutable in this context."""
if context.is_admin:
return True
if context.owner is None:
return False
return task_stub.owner == context.owner
def proxy_task(context, task):
if is_task_mutable(context, task):
return task
else:
return ImmutableTaskProxy(task)
def proxy_task_stub(context, task_stub):
if is_task_stub_mutable(context, task_stub):
return task_stub
else:
return ImmutableTaskStubProxy(task_stub)
class ImageRepoProxy(daisy.domain.proxy.Repo):
def __init__(self, image_repo, context):
self.context = context
self.image_repo = image_repo
proxy_kwargs = {'context': self.context}
super(ImageRepoProxy, self).__init__(image_repo,
item_proxy_class=ImageProxy,
item_proxy_kwargs=proxy_kwargs)
def get(self, image_id):
image = self.image_repo.get(image_id)
return proxy_image(self.context, image)
def list(self, *args, **kwargs):
images = self.image_repo.list(*args, **kwargs)
return [proxy_image(self.context, i) for i in images]
class ImageMemberRepoProxy(daisy.domain.proxy.Repo):
def __init__(self, member_repo, image, context):
self.member_repo = member_repo
self.image = image
self.context = context
super(ImageMemberRepoProxy, self).__init__(member_repo)
def get(self, member_id):
if (self.context.is_admin or
self.context.owner in (self.image.owner, member_id)):
member = self.member_repo.get(member_id)
return proxy_member(self.context, member)
else:
message = _("You cannot get image member for %s")
raise exception.Forbidden(message % member_id)
def list(self, *args, **kwargs):
members = self.member_repo.list(*args, **kwargs)
if (self.context.is_admin or
self.context.owner == self.image.owner):
return [proxy_member(self.context, m) for m in members]
for member in members:
if member.member_id == self.context.owner:
return [proxy_member(self.context, member)]
message = _("You cannot get image member for %s")
raise exception.Forbidden(message % self.image.image_id)
def remove(self, image_member):
if (self.image.owner == self.context.owner or
self.context.is_admin):
self.member_repo.remove(image_member)
else:
message = _("You cannot delete image member for %s")
raise exception.Forbidden(message
% self.image.image_id)
def add(self, image_member):
if (self.image.owner == self.context.owner or
self.context.is_admin):
self.member_repo.add(image_member)
else:
message = _("You cannot add image member for %s")
raise exception.Forbidden(message
% self.image.image_id)
def save(self, image_member, from_state=None):
if (self.context.is_admin or
self.context.owner == image_member.member_id):
self.member_repo.save(image_member, from_state=from_state)
else:
message = _("You cannot update image member %s")
raise exception.Forbidden(message % image_member.member_id)
class ImageFactoryProxy(daisy.domain.proxy.ImageFactory):
def __init__(self, image_factory, context):
self.image_factory = image_factory
self.context = context
kwargs = {'context': self.context}
super(ImageFactoryProxy, self).__init__(image_factory,
proxy_class=ImageProxy,
proxy_kwargs=kwargs)
def new_image(self, **kwargs):
owner = kwargs.pop('owner', self.context.owner)
if not self.context.is_admin:
if owner is None or owner != self.context.owner:
message = _("You are not permitted to create images "
"owned by '%s'.")
raise exception.Forbidden(message % owner)
return super(ImageFactoryProxy, self).new_image(owner=owner, **kwargs)
class ImageMemberFactoryProxy(object):
def __init__(self, image_member_factory, context):
self.image_member_factory = image_member_factory
self.context = context
def new_image_member(self, image, member_id):
owner = image.owner
if not self.context.is_admin:
if owner is None or owner != self.context.owner:
message = _("You are not permitted to create image members "
"for the image.")
raise exception.Forbidden(message)
if image.visibility == 'public':
message = _("Public images do not have members.")
raise exception.Forbidden(message)
return self.image_member_factory.new_image_member(image, member_id)
def _immutable_attr(target, attr, proxy=None):
def get_attr(self):
value = getattr(getattr(self, target), attr)
if proxy is not None:
value = proxy(value)
return value
def forbidden(self, *args, **kwargs):
resource = getattr(self, 'resource_name', 'resource')
message = _("You are not permitted to modify '%(attr)s' on this "
"%(resource)s.")
raise exception.Forbidden(message % {'attr': attr,
'resource': resource})
return property(get_attr, forbidden, forbidden)
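# Illustrative sketch (not part of the original module): _immutable_attr returns a
# property whose getter reads base.<attr> (optionally wrapped by a proxy class) and
# whose setter/deleter raise Forbidden, e.g.
#   proxy = ImmutableMemberProxy(member)
#   proxy.status              # delegates to member.status
#   proxy.status = "accepted" # raises exception.Forbidden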
class ImmutableLocations(list):
def forbidden(self, *args, **kwargs):
message = _("You are not permitted to modify locations "
"for this image.")
raise exception.Forbidden(message)
def __deepcopy__(self, memo):
return ImmutableLocations(copy.deepcopy(list(self), memo))
append = forbidden
extend = forbidden
insert = forbidden
pop = forbidden
remove = forbidden
reverse = forbidden
sort = forbidden
__delitem__ = forbidden
__delslice__ = forbidden
__iadd__ = forbidden
__imul__ = forbidden
__setitem__ = forbidden
__setslice__ = forbidden
class ImmutableProperties(dict):
def forbidden_key(self, key, *args, **kwargs):
message = _("You are not permitted to modify '%s' on this image.")
raise exception.Forbidden(message % key)
def forbidden(self, *args, **kwargs):
message = _("You are not permitted to modify this image.")
raise exception.Forbidden(message)
__delitem__ = forbidden_key
__setitem__ = forbidden_key
pop = forbidden
popitem = forbidden
setdefault = forbidden
update = forbidden
class ImmutableTags(set):
def forbidden(self, *args, **kwargs):
message = _("You are not permitted to modify tags on this image.")
raise exception.Forbidden(message)
add = forbidden
clear = forbidden
difference_update = forbidden
intersection_update = forbidden
pop = forbidden
remove = forbidden
symmetric_difference_update = forbidden
update = forbidden
class ImmutableImageProxy(object):
def __init__(self, base, context):
self.base = base
self.context = context
self.resource_name = 'image'
name = _immutable_attr('base', 'name')
image_id = _immutable_attr('base', 'image_id')
status = _immutable_attr('base', 'status')
created_at = _immutable_attr('base', 'created_at')
updated_at = _immutable_attr('base', 'updated_at')
visibility = _immutable_attr('base', 'visibility')
min_disk = _immutable_attr('base', 'min_disk')
min_ram = _immutable_attr('base', 'min_ram')
protected = _immutable_attr('base', 'protected')
locations = _immutable_attr('base', 'locations', proxy=ImmutableLocations)
checksum = _immutable_attr('base', 'checksum')
owner = _immutable_attr('base', 'owner')
disk_format = _immutable_attr('base', 'disk_format')
container_format = _immutable_attr('base', 'container_format')
size = _immutable_attr('base', 'size')
virtual_size = _immutable_attr('base', 'virtual_size')
extra_properties = _immutable_attr('base', 'extra_properties',
proxy=ImmutableProperties)
tags = _immutable_attr('base', 'tags', proxy=ImmutableTags)
def delete(self):
message = _("You are not permitted to delete this image.")
raise exception.Forbidden(message)
def get_member_repo(self):
member_repo = self.base.get_member_repo()
return ImageMemberRepoProxy(member_repo, self, self.context)
def get_data(self, *args, **kwargs):
return self.base.get_data(*args, **kwargs)
def set_data(self, *args, **kwargs):
message = _("You are not permitted to upload data for this image.")
raise exception.Forbidden(message)
class ImmutableMemberProxy(object):
def __init__(self, base):
self.base = base
self.resource_name = 'image member'
id = _immutable_attr('base', 'id')
image_id = _immutable_attr('base', 'image_id')
member_id = _immutable_attr('base', 'member_id')
status = _immutable_attr('base', 'status')
created_at = _immutable_attr('base', 'created_at')
updated_at = _immutable_attr('base', 'updated_at')
class ImmutableTaskProxy(object):
def __init__(self, base):
self.base = base
self.resource_name = 'task'
task_id = _immutable_attr('base', 'task_id')
type = _immutable_attr('base', 'type')
status = _immutable_attr('base', 'status')
owner = _immutable_attr('base', 'owner')
expires_at = _immutable_attr('base', 'expires_at')
created_at = _immutable_attr('base', 'created_at')
updated_at = _immutable_attr('base', 'updated_at')
input = _immutable_attr('base', 'input')
message = _immutable_attr('base', 'message')
result = _immutable_attr('base', 'result')
def run(self, executor):
self.base.run(executor)
def begin_processing(self):
message = _("You are not permitted to set status on this task.")
raise exception.Forbidden(message)
def succeed(self, result):
message = _("You are not permitted to set status on this task.")
raise exception.Forbidden(message)
def fail(self, message):
message = _("You are not permitted to set status on this task.")
raise exception.Forbidden(message)
class ImmutableTaskStubProxy(object):
def __init__(self, base):
self.base = base
self.resource_name = 'task stub'
task_id = _immutable_attr('base', 'task_id')
type = _immutable_attr('base', 'type')
status = _immutable_attr('base', 'status')
owner = _immutable_attr('base', 'owner')
expires_at = _immutable_attr('base', 'expires_at')
created_at = _immutable_attr('base', 'created_at')
updated_at = _immutable_attr('base', 'updated_at')
class ImageProxy(daisy.domain.proxy.Image):
def __init__(self, image, context):
self.image = image
self.context = context
super(ImageProxy, self).__init__(image)
def get_member_repo(self, **kwargs):
if self.image.visibility == 'public':
message = _("Public images do not have members.")
raise exception.Forbidden(message)
else:
member_repo = self.image.get_member_repo(**kwargs)
return ImageMemberRepoProxy(member_repo, self, self.context)
class TaskProxy(daisy.domain.proxy.Task):
def __init__(self, task):
self.task = task
super(TaskProxy, self).__init__(task)
class TaskFactoryProxy(daisy.domain.proxy.TaskFactory):
def __init__(self, task_factory, context):
self.task_factory = task_factory
self.context = context
super(TaskFactoryProxy, self).__init__(
task_factory,
task_proxy_class=TaskProxy)
def new_task(self, **kwargs):
owner = kwargs.get('owner', self.context.owner)
# NOTE(nikhil): Unlike Images, Tasks are expected to have owner.
# We currently do not allow even admins to set the owner to None.
if owner is not None and (owner == self.context.owner
or self.context.is_admin):
return super(TaskFactoryProxy, self).new_task(**kwargs)
else:
message = _("You are not permitted to create this task with "
"owner as: %s")
raise exception.Forbidden(message % owner)
class TaskRepoProxy(daisy.domain.proxy.TaskRepo):
def __init__(self, task_repo, context):
self.task_repo = task_repo
self.context = context
super(TaskRepoProxy, self).__init__(task_repo)
def get(self, task_id):
task = self.task_repo.get(task_id)
return proxy_task(self.context, task)
class TaskStubRepoProxy(daisy.domain.proxy.TaskStubRepo):
def __init__(self, task_stub_repo, context):
self.task_stub_repo = task_stub_repo
self.context = context
super(TaskStubRepoProxy, self).__init__(task_stub_repo)
def list(self, *args, **kwargs):
task_stubs = self.task_stub_repo.list(*args, **kwargs)
return [proxy_task_stub(self.context, t) for t in task_stubs]
# Metadef Namespace classes
def is_namespace_mutable(context, namespace):
"""Return True if the namespace is mutable in this context."""
if context.is_admin:
return True
if context.owner is None:
return False
return namespace.owner == context.owner
def proxy_namespace(context, namespace):
if is_namespace_mutable(context, namespace):
return namespace
else:
return ImmutableMetadefNamespaceProxy(namespace)
class ImmutableMetadefNamespaceProxy(object):
def __init__(self, base):
self.base = base
self.resource_name = 'namespace'
namespace_id = _immutable_attr('base', 'namespace_id')
namespace = _immutable_attr('base', 'namespace')
display_name = _immutable_attr('base', 'display_name')
description = _immutable_attr('base', 'description')
owner = _immutable_attr('base', 'owner')
visibility = _immutable_attr('base', 'visibility')
protected = _immutable_attr('base', 'protected')
created_at = _immutable_attr('base', 'created_at')
updated_at = _immutable_attr('base', 'updated_at')
def delete(self):
message = _("You are not permitted to delete this namespace.")
raise exception.Forbidden(message)
def save(self):
message = _("You are not permitted to update this namespace.")
raise exception.Forbidden(message)
class MetadefNamespaceProxy(daisy.domain.proxy.MetadefNamespace):
def __init__(self, namespace):
self.namespace_input = namespace
super(MetadefNamespaceProxy, self).__init__(namespace)
class MetadefNamespaceFactoryProxy(
daisy.domain.proxy.MetadefNamespaceFactory):
def __init__(self, meta_namespace_factory, context):
self.meta_namespace_factory = meta_namespace_factory
self.context = context
super(MetadefNamespaceFactoryProxy, self).__init__(
meta_namespace_factory,
meta_namespace_proxy_class=MetadefNamespaceProxy)
def new_namespace(self, **kwargs):
owner = kwargs.pop('owner', self.context.owner)
if not self.context.is_admin:
if owner is None or owner != self.context.owner:
message = _("You are not permitted to create namespace "
"owned by '%s'")
raise exception.Forbidden(message % (owner))
return super(MetadefNamespaceFactoryProxy, self).new_namespace(
owner=owner, **kwargs)
class MetadefNamespaceRepoProxy(daisy.domain.proxy.MetadefNamespaceRepo):
def __init__(self, namespace_repo, context):
self.namespace_repo = namespace_repo
self.context = context
super(MetadefNamespaceRepoProxy, self).__init__(namespace_repo)
def get(self, namespace):
namespace_obj = self.namespace_repo.get(namespace)
return proxy_namespace(self.context, namespace_obj)
def list(self, *args, **kwargs):
namespaces = self.namespace_repo.list(*args, **kwargs)
return [proxy_namespace(self.context, namespace) for
namespace in namespaces]
# Metadef Object classes
def is_object_mutable(context, object):
"""Return True if the object is mutable in this context."""
if context.is_admin:
return True
if context.owner is None:
return False
return object.namespace.owner == context.owner
def proxy_object(context, object):
if is_object_mutable(context, object):
return object
else:
return ImmutableMetadefObjectProxy(object)
class ImmutableMetadefObjectProxy(object):
def __init__(self, base):
self.base = base
self.resource_name = 'object'
object_id = _immutable_attr('base', 'object_id')
name = _immutable_attr('base', 'name')
required = _immutable_attr('base', 'required')
description = _immutable_attr('base', 'description')
properties = _immutable_attr('base', 'properties')
created_at = _immutable_attr('base', 'created_at')
updated_at = _immutable_attr('base', 'updated_at')
def delete(self):
message = _("You are not permitted to delete this object.")
raise exception.Forbidden(message)
def save(self):
message = _("You are not permitted to update this object.")
raise exception.Forbidden(message)
class MetadefObjectProxy(daisy.domain.proxy.MetadefObject):
def __init__(self, meta_object):
self.meta_object = meta_object
super(MetadefObjectProxy, self).__init__(meta_object)
class MetadefObjectFactoryProxy(daisy.domain.proxy.MetadefObjectFactory):
def __init__(self, meta_object_factory, context):
self.meta_object_factory = meta_object_factory
self.context = context
super(MetadefObjectFactoryProxy, self).__init__(
meta_object_factory,
meta_object_proxy_class=MetadefObjectProxy)
def new_object(self, **kwargs):
owner = kwargs.pop('owner', self.context.owner)
if not self.context.is_admin:
if owner is None or owner != self.context.owner:
message = _("You are not permitted to create object "
"owned by '%s'")
raise exception.Forbidden(message % (owner))
return super(MetadefObjectFactoryProxy, self).new_object(**kwargs)
class MetadefObjectRepoProxy(daisy.domain.proxy.MetadefObjectRepo):
def __init__(self, object_repo, context):
self.object_repo = object_repo
self.context = context
super(MetadefObjectRepoProxy, self).__init__(object_repo)
def get(self, namespace, object_name):
meta_object = self.object_repo.get(namespace, object_name)
return proxy_object(self.context, meta_object)
def list(self, *args, **kwargs):
objects = self.object_repo.list(*args, **kwargs)
return [proxy_object(self.context, meta_object) for
meta_object in objects]
# Metadef ResourceType classes
def is_meta_resource_type_mutable(context, meta_resource_type):
"""Return True if the meta_resource_type is mutable in this context."""
if context.is_admin:
return True
if context.owner is None:
return False
# (lakshmiS): a resource type can exist without an association with a
# namespace, and resource types cannot be created/updated/deleted directly
# (they have to be associated/de-associated from a namespace)
if meta_resource_type.namespace:
return meta_resource_type.namespace.owner == context.owner
else:
return False
def proxy_meta_resource_type(context, meta_resource_type):
if is_meta_resource_type_mutable(context, meta_resource_type):
return meta_resource_type
else:
return ImmutableMetadefResourceTypeProxy(meta_resource_type)
class ImmutableMetadefResourceTypeProxy(object):
def __init__(self, base):
self.base = base
self.resource_name = 'meta_resource_type'
namespace = _immutable_attr('base', 'namespace')
name = _immutable_attr('base', 'name')
prefix = _immutable_attr('base', 'prefix')
properties_target = _immutable_attr('base', 'properties_target')
created_at = _immutable_attr('base', 'created_at')
updated_at = _immutable_attr('base', 'updated_at')
def delete(self):
message = _("You are not permitted to delete this meta_resource_type.")
raise exception.Forbidden(message)
class MetadefResourceTypeProxy(daisy.domain.proxy.MetadefResourceType):
def __init__(self, meta_resource_type):
self.meta_resource_type = meta_resource_type
super(MetadefResourceTypeProxy, self).__init__(meta_resource_type)
class MetadefResourceTypeFactoryProxy(
daisy.domain.proxy.MetadefResourceTypeFactory):
def __init__(self, resource_type_factory, context):
self.meta_resource_type_factory = resource_type_factory
self.context = context
super(MetadefResourceTypeFactoryProxy, self).__init__(
resource_type_factory,
resource_type_proxy_class=MetadefResourceTypeProxy)
def new_resource_type(self, **kwargs):
owner = kwargs.pop('owner', self.context.owner)
if not self.context.is_admin:
if owner is None or owner != self.context.owner:
message = _("You are not permitted to create resource_type "
"owned by '%s'")
raise exception.Forbidden(message % (owner))
return super(MetadefResourceTypeFactoryProxy, self).new_resource_type(
**kwargs)
class MetadefResourceTypeRepoProxy(
daisy.domain.proxy.MetadefResourceTypeRepo):
def __init__(self, meta_resource_type_repo, context):
self.meta_resource_type_repo = meta_resource_type_repo
self.context = context
super(MetadefResourceTypeRepoProxy, self).__init__(
meta_resource_type_repo)
def list(self, *args, **kwargs):
meta_resource_types = self.meta_resource_type_repo.list(
*args, **kwargs)
return [proxy_meta_resource_type(self.context, meta_resource_type) for
meta_resource_type in meta_resource_types]
def get(self, *args, **kwargs):
meta_resource_type = self.meta_resource_type_repo.get(*args, **kwargs)
return proxy_meta_resource_type(self.context, meta_resource_type)
# Metadef namespace properties classes
def is_namespace_property_mutable(context, namespace_property):
"""Return True if the object is mutable in this context."""
if context.is_admin:
return True
if context.owner is None:
return False
return namespace_property.namespace.owner == context.owner
def proxy_namespace_property(context, namespace_property):
if is_namespace_property_mutable(context, namespace_property):
return namespace_property
else:
return ImmutableMetadefPropertyProxy(namespace_property)
class ImmutableMetadefPropertyProxy(object):
def __init__(self, base):
self.base = base
self.resource_name = 'namespace_property'
property_id = _immutable_attr('base', 'property_id')
name = _immutable_attr('base', 'name')
schema = _immutable_attr('base', 'schema')
def delete(self):
message = _("You are not permitted to delete this property.")
raise exception.Forbidden(message)
def save(self):
message = _("You are not permitted to update this property.")
raise exception.Forbidden(message)
class MetadefPropertyProxy(daisy.domain.proxy.MetadefProperty):
def __init__(self, namespace_property):
self.meta_object = namespace_property
super(MetadefPropertyProxy, self).__init__(namespace_property)
class MetadefPropertyFactoryProxy(daisy.domain.proxy.MetadefPropertyFactory):
def __init__(self, namespace_property_factory, context):
self.meta_object_factory = namespace_property_factory
self.context = context
super(MetadefPropertyFactoryProxy, self).__init__(
namespace_property_factory,
property_proxy_class=MetadefPropertyProxy)
def new_namespace_property(self, **kwargs):
owner = kwargs.pop('owner', self.context.owner)
if not self.context.is_admin:
if owner is None or owner != self.context.owner:
message = _("You are not permitted to create property "
"owned by '%s'")
raise exception.Forbidden(message % (owner))
return super(MetadefPropertyFactoryProxy, self).new_namespace_property(
**kwargs)
class MetadefPropertyRepoProxy(daisy.domain.proxy.MetadefPropertyRepo):
def __init__(self, namespace_property_repo, context):
self.namespace_property_repo = namespace_property_repo
self.context = context
super(MetadefPropertyRepoProxy, self).__init__(namespace_property_repo)
def get(self, namespace, object_name):
namespace_property = self.namespace_property_repo.get(namespace,
object_name)
return proxy_namespace_property(self.context, namespace_property)
def list(self, *args, **kwargs):
namespace_properties = self.namespace_property_repo.list(
*args, **kwargs)
return [proxy_namespace_property(self.context, namespace_property) for
namespace_property in namespace_properties]
# Metadef Tag classes
def is_tag_mutable(context, tag):
"""Return True if the tag is mutable in this context."""
if context.is_admin:
return True
if context.owner is None:
return False
return tag.namespace.owner == context.owner
def proxy_tag(context, tag):
if is_tag_mutable(context, tag):
return tag
else:
return ImmutableMetadefTagProxy(tag)
class ImmutableMetadefTagProxy(object):
def __init__(self, base):
self.base = base
self.resource_name = 'tag'
tag_id = _immutable_attr('base', 'tag_id')
name = _immutable_attr('base', 'name')
created_at = _immutable_attr('base', 'created_at')
updated_at = _immutable_attr('base', 'updated_at')
def delete(self):
message = _("You are not permitted to delete this tag.")
raise exception.Forbidden(message)
def save(self):
message = _("You are not permitted to update this tag.")
raise exception.Forbidden(message)
class MetadefTagProxy(daisy.domain.proxy.MetadefTag):
pass
class MetadefTagFactoryProxy(daisy.domain.proxy.MetadefTagFactory):
def __init__(self, meta_tag_factory, context):
self.meta_tag_factory = meta_tag_factory
self.context = context
super(MetadefTagFactoryProxy, self).__init__(
meta_tag_factory,
meta_tag_proxy_class=MetadefTagProxy)
def new_tag(self, **kwargs):
owner = kwargs.pop('owner', self.context.owner)
if not self.context.is_admin:
if owner is None:
message = _("Owner must be specified to create a tag.")
raise exception.Forbidden(message)
elif owner != self.context.owner:
message = _("You are not permitted to create a tag"
" in the namespace owned by '%s'")
raise exception.Forbidden(message % (owner))
return super(MetadefTagFactoryProxy, self).new_tag(**kwargs)
class MetadefTagRepoProxy(daisy.domain.proxy.MetadefTagRepo):
def __init__(self, tag_repo, context):
self.tag_repo = tag_repo
self.context = context
super(MetadefTagRepoProxy, self).__init__(tag_repo)
def get(self, namespace, tag_name):
meta_tag = self.tag_repo.get(namespace, tag_name)
return proxy_tag(self.context, meta_tag)
def list(self, *args, **kwargs):
tags = self.tag_repo.list(*args, **kwargs)
return [proxy_tag(self.context, meta_tag) for
meta_tag in tags]
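# Illustrative sketch, not part of the original module: every proxy_* helper
# above applies the same ownership rule -- admins and owners get the mutable
# domain object back, while all other callers get an Immutable*Proxy whose
# mutators raise Forbidden. The helper below is hypothetical and only shows
# how the metadef tag repo proxy would be consumed.
def _example_delete_tag(tag_repo, context, namespace, tag_name):
    # Namespace owners (or admins) receive the real tag and may delete it;
    # any other caller receives ImmutableMetadefTagProxy, so the delete()
    # call below raises exception.Forbidden for them.
    tag = MetadefTagRepoProxy(tag_repo, context).get(namespace, tag_name)
    tag.delete()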

View File

@ -1,235 +1,367 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
/install endpoint for tecs API
"""
import subprocess
import time

from oslo_log import log as logging
from webob.exc import HTTPBadRequest

from daisy import i18n

from daisy.common import exception
import daisy.registry.client.v1.api as registry

LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW

daisy_path = '/var/lib/daisy/'
tecs_backend_name = "tecs"
zenic_backend_name = "zenic"
proton_backend_name = "proton"
os_install_start_time = 0.0


def subprocess_call(command, file=None):
    if file:
        return_code = subprocess.call(command,
                                      shell=True,
                                      stdout=file,
                                      stderr=file)
    else:
        return_code = subprocess.call(command,
                                      shell=True,
                                      stdout=open('/dev/null', 'w'),
                                      stderr=subprocess.STDOUT)
    if return_code != 0:
        msg = "execute '%s' failed by subprocess call." % command
        raise exception.SubprocessCmdFailed(msg)


def get_host_detail(req, host_id):
    try:
        host_detail = registry.get_host_metadata(req.context, host_id)
    except exception.Invalid as e:
        raise HTTPBadRequest(explanation=e.msg, request=req)
    return host_detail


def get_roles_detail(req):
    try:
        roles = registry.get_roles_detail(req.context)
    except exception.Invalid as e:
        raise HTTPBadRequest(explanation=e.msg, request=req)
    return roles


def get_cluster_roles_detail(req, cluster_id):
    try:
        params = {'cluster_id': cluster_id}
        roles = registry.get_roles_detail(req.context, **params)
    except exception.Invalid as e:
        raise HTTPBadRequest(explanation=e.msg, request=req)
    return roles


def get_hosts_of_role(req, role_id):
    try:
        hosts = registry.get_role_host_metadata(req.context, role_id)
    except exception.Invalid as e:
        raise HTTPBadRequest(explanation=e.msg, request=req)
    return hosts


def get_role_detail(req, role_id):
    try:
        role = registry.get_role_metadata(req.context, role_id)
    except exception.Invalid as e:
        raise HTTPBadRequest(explanation=e.msg, request=req)
    return role


def get_cluster_configs_list(req, cluster_id):
    roles = get_cluster_roles_detail(req, cluster_id)
    config_set_list = [role['config_set_id'] for role in roles]
    cluster_configs_list = []
    for config_set_id in config_set_list:
        config_set_metadata = registry.get_config_set_metadata(req.context,
                                                               config_set_id)
        if config_set_metadata.get('config', None):
            cluster_configs_list.extend(config_set_metadata['config'])
    return cluster_configs_list


def update_role(req, role_id, role_meta):
    try:
        registry.update_role_metadata(req.context, role_id, role_meta)
    except exception.Invalid as e:
        raise HTTPBadRequest(explanation=e.msg, request=req)


def update_role_host(req, role_id, role_host):
    try:
        registry.update_role_host_metadata(req.context, role_id, role_host)
    except exception.Invalid as e:
        raise HTTPBadRequest(explanation=e.msg, request=req)


def set_role_status_and_progress(req, cluster_id, opera, status,
                                 backend_name='tecs'):
    """
    set information in role of some backend.
    :status: key in host_role tables, such as:
        {'messages': 'Waiting', 'progress': '0'}
    """
    roles = get_cluster_roles_detail(req, cluster_id)
    for role in roles:
        if role.get('deployment_backend') == backend_name:
            role_hosts = get_hosts_of_role(req, role['id'])
            for role_host in role_hosts:
                if (opera == 'upgrade' and role_host['status'] in ['active']) \
                        or (opera == 'install' and role_host['status'] not in
                            ['active', 'updating', 'update-failed']):
                    update_role_host(req, role_host['id'], status)


def delete_role_hosts(req, role_id):
    try:
        registry.delete_role_host_metadata(req.context, role_id)
    except exception.Invalid as e:
        raise HTTPBadRequest(explanation=e.msg, request=req)


def get_cluster_networks_detail(req, cluster_id):
    try:
        networks = registry.get_networks_detail(req.context, cluster_id)
    except exception.Invalid as e:
        raise HTTPBadRequest(explanation=e.msg, request=req)
    return networks


def get_assigned_network(req, host_interface_id, network_id):
    try:
        assigned_network = registry.get_assigned_network(
            req.context, host_interface_id, network_id)
    except exception.Invalid as e:
        raise HTTPBadRequest(explanation=e.msg, request=req)
    return assigned_network


def _ping_hosts_test(ips):
    ping_cmd = 'fping'
    for ip in set(ips):
        ping_cmd = ping_cmd + ' ' + ip
    obj = subprocess.Popen(
        ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdoutput, erroutput) = obj.communicate()
    _returncode = obj.returncode
    if _returncode == 0 or _returncode == 1:
        ping_result = stdoutput.split('\n')
        unreachable_hosts = [result.split(
        )[0] for result in ping_result if result and
            result.split()[2] != 'alive']
    else:
        msg = "ping failed beaceuse there is invlid ip in %s" % ips
        raise exception.InvalidIP(msg)
    return unreachable_hosts


def check_ping_hosts(ping_ips, max_ping_times):
    if not ping_ips:
        LOG.info(_("no ip got for ping test"))
        return ping_ips
    ping_count = 0
    time_step = 5
    LOG.info(_("begin ping test for %s" % ','.join(ping_ips)))
    while True:
        if ping_count == 0:
            ips = _ping_hosts_test(ping_ips)
        else:
            ips = _ping_hosts_test(ips)

        ping_count += 1
        if ips:
            LOG.debug(
                _("ping host %s for %s times" % (','.join(ips), ping_count)))
            if ping_count >= max_ping_times:
                LOG.info(_("ping host %s timeout for %ss" %
                           (','.join(ips), ping_count * time_step)))
                return ips
            time.sleep(time_step)
        else:
            LOG.info(_("ping %s successfully" % ','.join(ping_ips)))
            return ips


def _ping_reachable_to_unreachable_host_test(ip, max_ping_times):
    ping_cmd = 'fping'
    ping_cmd = ping_cmd + ' ' + ip
    ping_count = 0
    time_step = 5
    while True:
        obj = subprocess.Popen(
            ping_cmd, shell=True, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        (stdoutput, erroutput) = obj.communicate()
        _returncode = obj.returncode
        if _returncode != 0:
            return True
        ping_count += 1
        if ping_count >= max_ping_times:
            LOG.info(
                _("ping host %s timeout for %ss"
                  % (ip, ping_count * time_step)))
            return False
        time.sleep(time_step)
    return False
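# Illustrative sketch, not part of the original module: check_ping_hosts()
# re-runs fping every 5 seconds and returns the addresses that never
# answered (an empty list means every host replied). A hypothetical caller
# that aborts when management IPs stay unreachable could look like this.
def _example_wait_for_hosts(host_ips):
    unreachable = check_ping_hosts(host_ips, 30)
    if unreachable:
        raise exception.NotFound(
            message="hosts %s ping failed" % ','.join(unreachable))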
def _ping_unreachable_to_reachable_host_test(ip, max_ping_times):
ping_count = 0
time_step = 5
ping_cmd = 'fping'
ping_cmd = ping_cmd + ' ' + ip
while True:
obj = subprocess.Popen(
ping_cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdoutput, erroutput) = obj.communicate()
_returncode = obj.returncode
if _returncode == 0:
return True
ping_count += 1
if ping_count >= max_ping_times:
LOG.info(
_("ping host %s timeout for %ss"
% (ip, ping_count * time_step)))
return False
time.sleep(time_step)
return False
def check_reboot_ping(ip):
# ha host reboot may spend 20 min,so timeout time is 30min
stop_max_ping_times = 360
start_max_ping_times = 60
_ping_reachable_to_unreachable_host_test(ip, stop_max_ping_times)
_ping_unreachable_to_reachable_host_test(ip, start_max_ping_times)
time.sleep(5)
def cidr_to_netmask(cidr):
ip_netmask = cidr.split('/')
if len(ip_netmask) != 2 or not ip_netmask[1]:
raise exception.InvalidNetworkConfig("cidr is not valid")
cidr_end = ip_netmask[1]
mask = ~(2 ** (32 - int(cidr_end)) - 1)
inter_ip = lambda x: '.'.join(
[str(x / (256 ** i) % 256) for i in range(3, -1, -1)])
netmask = inter_ip(mask)
return netmask
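# Worked example, not part of the original module: for '172.16.0.0/23' the
# prefix gives mask = ~(2 ** (32 - 23) - 1) = ~511 = -512, i.e. 0xFFFFFE00
# as an unsigned 32-bit value, and the octet-by-octet rendering (which relies
# on Python 2 floor division) yields '255.255.254.0'.
#   cidr_to_netmask('172.16.0.0/23')   # -> '255.255.254.0'
#   cidr_to_netmask('192.168.1.0/24')  # -> '255.255.255.0'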
def get_rpm_package_by_name(path, rpm_name):
cmd = "ls %s | grep ^%s.*\.rpm" % (path, rpm_name)
try:
rpm_name = subprocess.check_output(
cmd, shell=True, stderr=subprocess.STDOUT).split('\n')[0]
except subprocess.CalledProcessError:
msg = _("Get rpm %s failed in %s!" % (rpm_name, path))
raise exception.SubprocessCmdFailed(message=msg)
return rpm_name
def remote_remove_rpm(rpm_name, dest_ip):
remove_cmd = 'clush -S -w %s "rpm -q %s && rpm -e %s"' % (dest_ip,
rpm_name,
rpm_name)
subprocess.call(remove_cmd,
shell=True,
stdout=open('/dev/null', 'w'),
stderr=subprocess.STDOUT)
def remote_install_rpm(rpm_name, rpm_src_path, rpm_dest_path, dest_ips):
rpm_package = get_rpm_package_by_name(rpm_src_path, rpm_name)
for dest_ip in dest_ips:
scp_rpm = "scp -o ConnectTimeout=10 %s/%s root@%s:%s" \
% (rpm_src_path, rpm_package, dest_ip, rpm_dest_path)
subprocess_call(scp_rpm)
remote_remove_rpm(rpm_name, dest_ip)
install_cmd = 'clush -S -w %s "rpm -i %s/%s"' % (dest_ip,
rpm_dest_path,
rpm_package)
subprocess_call(install_cmd)
def remote_upgrade_rpm(rpm_name, rpm_src_path, rpm_dest_path, dest_ip):
rpm_package = get_rpm_package_by_name(rpm_src_path, rpm_name)
scp_rpm = "scp -o ConnectTimeout=10 %s/%s root@%s:%s" \
% (rpm_src_path, rpm_package, dest_ip, rpm_dest_path)
subprocess_call(scp_rpm)
upgrade_cmd = 'clush -S -w %s "rpm -U %s/%s"' % (dest_ip,
rpm_dest_path,
rpm_package)
subprocess.call(upgrade_cmd,
shell=True,
stdout=open('/dev/null', 'w'),
stderr=subprocess.STDOUT)
def trust_me(host_ips, root_passwd):
for host_ip in host_ips:
count = 0
try_times = 10
while count < try_times:
try:
trust_me_cmd = "/var/lib/daisy/tecs/trustme.sh\
%s %s" % (host_ip, root_passwd)
subprocess_call(trust_me_cmd)
except:
count += 1
LOG.info("Trying to trust '%s' for %s times" %
(host_ip, count))
time.sleep(2)
if count >= try_times:
message = "Setup trust for '%s' failed,"\
"see '/var/log/trustme.log' please" % (host_ip)
raise exception.TrustMeFailed(message=message)
else:
message = "Setup trust to '%s' successfully" % (host_ip)
LOG.info(message)
break
def calc_host_iqn(min_mac):
cmd = "echo -n %s |openssl md5" % min_mac
obj = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdoutput, erroutput) = obj.communicate()
iqn = ""
if stdoutput:
get_uuid = stdoutput.split('=')[1]
iqn = "iqn.opencos.rh:" + get_uuid.strip()
return iqn
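# Illustrative equivalent, not part of the original module: calc_host_iqn()
# pipes the MAC through "openssl md5" and keeps the hex digest after the '='
# sign. The same iqn can be derived in pure Python; the function name below
# is hypothetical.
import hashlib


def _example_calc_host_iqn(min_mac):
    # md5 hex digest of the lowest MAC, prefixed with the opencos namespace
    return "iqn.opencos.rh:" + hashlib.md5(min_mac).hexdigest()
# e.g. _example_calc_host_iqn('00:0c:29:ab:cd:ef')
#      -> 'iqn.opencos.rh:<32-character md5 hex digest>'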

View File

@ -17,18 +17,15 @@
""" """
Driver base-classes: Driver base-classes:
(Beginning of) the contract that deployment backends drivers must follow, and shared (Beginning of) the contract that deployment backends drivers must follow,
types that support that contract and shared types that support that contract
""" """
import sys
from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
from oslo_utils import importutils from oslo_utils import importutils
from daisy import i18n from daisy import i18n
from daisy.common import exception
_ = i18n._ _ = i18n._
_LE = i18n._LE _LE = i18n._LE
@ -36,10 +33,13 @@ _LI = i18n._LI
_LW = i18n._LW _LW = i18n._LW
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class DeploymentDriver(object): class DeploymentDriver(object):
"""base class for deployment interface. """base class for deployment interface.
""" """
def install(self, req, cluster_id): def install(self, req, cluster_id):
raise NotImplementedError() raise NotImplementedError()
@ -48,11 +48,11 @@ class DeploymentDriver(object):
def uninstall(self, req, cluster_id): def uninstall(self, req, cluster_id):
raise NotImplementedError() raise NotImplementedError()
def uninstall_progress(self, req, cluster_id): def uninstall_progress(self, req, cluster_id):
LOG.info(_("driver no interface for 'uninstall_progress'")) LOG.info(_("driver no interface for 'uninstall_progress'"))
return {} return {}
def upgrade_progress(self, req, cluster_id): def upgrade_progress(self, req, cluster_id):
LOG.info(_("driver no interface for 'upgrade_progress'")) LOG.info(_("driver no interface for 'upgrade_progress'"))
return {} return {}
@ -60,17 +60,19 @@ class DeploymentDriver(object):
def exprot_db(self, req, cluster_id): def exprot_db(self, req, cluster_id):
LOG.info(_("driver no interface for 'exprot_db'")) LOG.info(_("driver no interface for 'exprot_db'"))
return {} return {}
def update_disk_array(self, req, cluster_id): def update_disk_array(self, req, cluster_id):
LOG.info(_("driver no interface for 'update_disk_array'")) LOG.info(_("driver no interface for 'update_disk_array'"))
return {} return {}
def check_isinstance(obj, cls): def check_isinstance(obj, cls):
"""Checks that obj is of type cls, and lets PyLint infer types.""" """Checks that obj is of type cls, and lets PyLint infer types."""
if isinstance(obj, cls): if isinstance(obj, cls):
return obj return obj
raise Exception(_('Expected object of type: %s') % (str(cls))) raise Exception(_('Expected object of type: %s') % (str(cls)))
def load_deployment_dirver(backend_name): def load_deployment_dirver(backend_name):
"""Load a cluster backend installation driver. """Load a cluster backend installation driver.
""" """
@ -78,8 +80,11 @@ def load_deployment_dirver(backend_name):
LOG.info(_("Loading deployment backend '%s'") % backend_driver) LOG.info(_("Loading deployment backend '%s'") % backend_driver)
try: try:
driver = importutils.import_object_ns('daisy.api.backends',backend_driver) driver = importutils.import_object_ns(
'daisy.api.backends', backend_driver)
return check_isinstance(driver, DeploymentDriver) return check_isinstance(driver, DeploymentDriver)
except ImportError: except ImportError:
LOG.exception(_("Error, unable to load the deployment backends '%s'" % backend_driver)) LOG.exception(
_("Error, unable to load the deployment backends '%s'"
% backend_driver))
return None return None
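# Illustrative sketch, not part of the original module: a concrete backend
# subclasses DeploymentDriver and overrides only the operations it supports;
# load_deployment_dirver() imports it by name under the daisy.api.backends
# namespace (the exact dotted path is built in the lines elided above) and
# verifies the type with check_isinstance(). The backend below is a
# hypothetical example, not a real daisy backend.
class _ExampleBackendAPI(DeploymentDriver):
    def install(self, req, cluster_id):
        LOG.info(_("example backend: nothing to install"))
# usage (at request time, not at import time):
#   driver = load_deployment_dirver('tecs')  # a backend API() instance,
#                                            # or None if the import fails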

File diff suppressed because it is too large

View File

@ -54,7 +54,6 @@ def get_proton_ip(req, role_hosts):
    return proton_ip_list


def get_proton_hosts(req, cluster_id):
    all_roles = proton_cmn.get_roles_detail(req)
    for role in all_roles:

View File

@ -1,382 +1,427 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
/install endpoint for tecs API
"""
import subprocess
import commands

from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest

import threading

from daisy import i18n

from daisy.common import exception
from daisy.api.backends.tecs import config
from daisy.api.backends import driver
import daisy.api.backends.os as os_handle
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.tecs.common as tecs_cmn
import daisy.api.backends.tecs.install as instl
import daisy.api.backends.tecs.uninstall as unstl
import daisy.api.backends.tecs.upgrade as upgrd
import daisy.api.backends.tecs.disk_array as disk_array
from daisy.api.backends.tecs import write_configs
import daisy.registry.client.v1.api as registry

LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
CONF = cfg.CONF
upgrade_opts = [
    cfg.StrOpt('max_parallel_os_upgrade_number', default=10,
               help='Maximum number of hosts upgrade os at the same time.'),
]
CONF.register_opts(upgrade_opts)
tecs_state = tecs_cmn.TECS_STATE
daisy_tecs_path = tecs_cmn.daisy_tecs_path


class API(driver.DeploymentDriver):
    """
    The hosts API is a RESTful web service for host data. The API
    is as follows::

        GET  /hosts -- Returns a set of brief metadata about hosts
        GET  /hosts/detail -- Returns a set of detailed metadata about
                              hosts
        HEAD /hosts/<ID> -- Return metadata about an host with id <ID>
        GET  /hosts/<ID> -- Return host data for host with id <ID>
        POST /hosts -- Store host data and return metadata about the
                       newly-stored host
        PUT  /hosts/<ID> -- Update host metadata and/or upload host
                            data for a previously-reserved host
        DELETE /hosts/<ID> -- Delete the host with id <ID>
    """

    def __init__(self):
        super(API, self).__init__()
        return

    def install(self, req, cluster_id):
        """
        Install TECS to a cluster.

        param req: The WSGI/Webob Request object
        cluster_id:cluster id
        """
        write_configs.update_configset(req, cluster_id)

        tecs_install_task = instl.TECSInstallTask(req, cluster_id)
        tecs_install_task.start()

    def _get_roles_and_hosts_ip_list(self, req, cluster_id):
        role_host_ips = {'ha': set(), 'lb': set(), 'all': set()}
        role_id_list = set()
        hosts_id_list = []
        hosts_list = []
        tecs_install_failed_list = set()

        roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id)
        cluster_networks = daisy_cmn.get_cluster_networks_detail(
            req, cluster_id)
        for role in roles:
            if role['deployment_backend'] != daisy_cmn.tecs_backend_name:
                continue
            role_hosts = daisy_cmn.get_hosts_of_role(req, role['id'])
            if role_hosts:
                for role_host in role_hosts:
                    host = daisy_cmn.get_host_detail(req, role_host['host_id'])
                    host_ip = tecs_cmn.get_host_network_ip(
                        req, host, cluster_networks, 'MANAGEMENT')
                    if role['name'] == "CONTROLLER_HA":
                        role_host_ips['ha'].add(host_ip)
                    if role['name'] == "CONTROLLER_LB":
                        role_host_ips['lb'].add(host_ip)
                    role_host_ips['all'].add(host_ip)
                    hosts_id_list.append({host['id']: host_ip})
                    if role_host['status'] == tecs_state['INSTALL_FAILED']:
                        tecs_install_failed_list.add(host_ip)
                role_id_list.add(role['id'])
        for host in hosts_id_list:
            if host not in hosts_list:
                hosts_list.append(host)
        return (role_id_list, role_host_ips,
                hosts_list, tecs_install_failed_list)

    def _query_progress(self, req, cluster_id, action=""):
        nodes_list = []
        roles = daisy_cmn.get_roles_detail(req)
        (role_id_list, role_host_ips, hosts_list, tecs_install_failed_list) =\
            self._get_roles_and_hosts_ip_list(req, cluster_id)
        for host in hosts_list:
            node = {}
            host_id = host.keys()[0]
            host = daisy_cmn.get_host_detail(req, host_id)
            node['id'] = host['id']
            node['name'] = host['name']

            if 0 == cmp("upgrade", action):
                node['os-progress'] = host['os_progress']
                node['os-status'] = host['os_status']
                node['os-messages'] = host['messages']

            if host['status'] == "with-role":
                host_roles = [role for role in roles if role['name'] in host[
                    'role'] and role['cluster_id'] == cluster_id]
                if host_roles:
                    node['role-status'] = host_roles[0]['status']
                    node['role-progress'] = str(host_roles[0]['progress'])
                    # node['role-message'] = host_roles[0]['messages']
            nodes_list.append(node)
        if nodes_list:
            return {'tecs_nodes': nodes_list}
        else:
            return {'tecs_nodes': "TECS uninstall successfully,\
                    the host has been removed from the host_roles table"}

    def _modify_running_version_of_configs(self, req,
                                           running_version, cluster_id):
        cluster_configs_list = daisy_cmn.get_cluster_configs_list(req,
                                                                  cluster_id)
        if cluster_configs_list:
            for cluster_config in cluster_configs_list:
                registry.update_config_metadata(req.context,
                                                cluster_config['id'],
                                                {'running_version':
                                                    running_version})

    def uninstall(self, req, cluster_id):
        """
        Uninstall TECS to a cluster.

        :param req: The WSGI/Webob Request object
        :raises HTTPBadRequest if x-install-cluster is missing
        """
        (role_id_list, role_host_ips, hosts_list, tecs_install_failed_list) =\
            self._get_roles_and_hosts_ip_list(req, cluster_id)
        if role_id_list:
            if not role_host_ips['all']:
                msg = _("there is no host in cluster %s") % cluster_id
                raise exception.ThreadBinException(msg)

            unstl.update_progress_to_db(
                req, role_id_list, tecs_state['UNINSTALLING'], hosts_list)

            threads = []
            for host_ip in role_host_ips['all']:
                t = threading.Thread(
                    target=unstl.thread_bin, args=(req, host_ip, role_id_list,
                                                   hosts_list))
                t.setDaemon(True)
                t.start()
                threads.append(t)
            LOG.info(_("Uninstall threads have started, please waiting...."))

            try:
                for t in threads:
                    t.join()
            except:
                LOG.warn(_("Join uninstall thread %s failed!" % t))
            else:
                uninstall_failed_flag = False
                for role_id in role_id_list:
                    role_hosts = daisy_cmn.get_hosts_of_role(req, role_id)
                    for role_host in role_hosts:
                        if role_host['status'] ==\
                                tecs_state['UNINSTALL_FAILED']:
                            unstl.update_progress_to_db(
                                req, role_id_list, tecs_state[
                                    'UNINSTALL_FAILED'], hosts_list)
                            uninstall_failed_flag = True
                            break
                if not uninstall_failed_flag:
                    LOG.info(
                        _("All uninstall threads have done,\
                          set all roles status to 'init'!"))
                    unstl.update_progress_to_db(
                        req, role_id_list, tecs_state['INIT'], hosts_list)
                    LOG.info(_("modify the running_version of configs to 0"))
                    running_version = 0
                    self._modify_running_version_of_configs(
                        req, running_version, cluster_id)
            tecs_cmn.inform_provider_cloud_state(req.context, cluster_id,
                                                 operation='delete')
        try:
            (status, output) = commands.getstatusoutput('rpm -e --nodeps openstack-packstack\
                                                         openstack-packstack-puppet \
                                                         openstack-puppet-modules puppet')
        except exception.Invalid as e:
            raise HTTPBadRequest(explanation=e.msg, request=req)

    def uninstall_progress(self, req, cluster_id):
        return self._query_progress(req, cluster_id, "uninstall")

    def upgrade(self, req, cluster_id):
        """
        update TECS to a cluster.

        :param req: The WSGI/Webob Request object
        :raises HTTPBadRequest if x-install-cluster is missing
        """
        # daisy_update_path = '/home/daisy_update/'
        (role_id_list, role_host_ips, hosts_list, tecs_install_failed_list) =\
            self._get_roles_and_hosts_ip_list(req, cluster_id)
        if role_id_list:
            if not role_host_ips['all']:
                msg = _("there is no host in cluster %s") % cluster_id
                raise exception.ThreadBinException(msg)
            unreached_hosts = daisy_cmn.check_ping_hosts(
                role_host_ips['all'], 1)
            if unreached_hosts:
                self.message = "hosts %s ping failed" % unreached_hosts
                raise exception.NotFound(message=self.message)
            daisy_cmn.subprocess_call('rm -rf /root/.ssh/known_hosts')
            if os_handle.check_tfg_exist():
                os_handle.upgrade_os(req, hosts_list)
                unreached_hosts = daisy_cmn.check_ping_hosts(
                    role_host_ips['all'], 30)
                if unreached_hosts:
                    self.message = "hosts %s ping failed after tfg upgrade" \
                        % unreached_hosts
                    raise exception.NotFound(message=self.message)
            # check and get TECS version
            tecs_version_pkg_file = tecs_cmn.check_and_get_tecs_version(
                tecs_cmn.daisy_tecs_path)
            if not tecs_version_pkg_file:
                self.state = tecs_state['UPDATE_FAILED']
                self.message = "TECS version file not found in %s"\
                    % tecs_cmn.daisy_tecs_path
                raise exception.NotFound(message=self.message)
            threads = []
            LOG.info(
                _("Begin to update TECS controller nodes, please waiting...."))
            upgrd.update_progress_to_db(
                req, role_id_list, tecs_state['UPDATING'], hosts_list)
            for host_ip in role_host_ips['ha']:
                if host_ip in tecs_install_failed_list:
                    continue
                LOG.info(_("Update TECS controller node %s..." % host_ip))
                rc = upgrd.thread_bin(req, role_id_list, host_ip, hosts_list)
                if rc == 0:
                    LOG.info(_("Update TECS for %s successfully" % host_ip))
                else:
                    LOG.info(
                        _("Update TECS failed for %s, return %s"
                          % (host_ip, rc)))
                    return

            LOG.info(_("Begin to update TECS other nodes, please waiting...."))
            max_parallel_upgrade_number = int(
                CONF.max_parallel_os_upgrade_number)
            compute_ip_list = role_host_ips[
                'all'] - role_host_ips['ha'] - tecs_install_failed_list
            while compute_ip_list:
                threads = []
                if len(compute_ip_list) > max_parallel_upgrade_number:
                    upgrade_hosts = compute_ip_list[
                        :max_parallel_upgrade_number]
                    compute_ip_list = compute_ip_list[
                        max_parallel_upgrade_number:]
                else:
                    upgrade_hosts = compute_ip_list
                    compute_ip_list = []
                for host_ip in upgrade_hosts:
                    t = threading.Thread(
                        target=upgrd.thread_bin,
                        args=(req, role_id_list, host_ip, hosts_list))
                    t.setDaemon(True)
                    t.start()
                    threads.append(t)
                try:
                    for t in threads:
                        t.join()
                except:
                    LOG.warn(_("Join update thread %s failed!" % t))

            for role_id in role_id_list:
                role_hosts = daisy_cmn.get_hosts_of_role(req, role_id)
                for role_host in role_hosts:
                    if (role_host['status'] == tecs_state['UPDATE_FAILED'] or
                            role_host['status'] == tecs_state['UPDATING']):
                        role_id = [role_host['role_id']]
                        upgrd.update_progress_to_db(req,
                                                    role_id,
                                                    tecs_state[
                                                        'UPDATE_FAILED'],
                                                    hosts_list)
                        break
                    elif role_host['status'] == tecs_state['ACTIVE']:
                        role_id = [role_host['role_id']]
                        upgrd.update_progress_to_db(req,
                                                    role_id,
                                                    tecs_state['ACTIVE'],
                                                    hosts_list)

    def upgrade_progress(self, req, cluster_id):
        return self._query_progress(req, cluster_id, "upgrade")

    def export_db(self, req, cluster_id):
        """
        Export daisy db data to tecs.conf and HA.conf.

        :param req: The WSGI/Webob Request object
        :raises HTTPBadRequest if x-install-cluster is missing
        """
        tecs_config =\
            instl.get_cluster_tecs_config(req, cluster_id)

        config_files = {'tecs_conf': '', 'ha_conf': ''}
        tecs_install_path = "/home/tecs_install"
        if tecs_config:
            cluster_conf_path = tecs_install_path + "/" + cluster_id
            create_cluster_conf_path =\
                "rm -rf %s;mkdir %s" % (cluster_conf_path, cluster_conf_path)
            daisy_cmn.subprocess_call(create_cluster_conf_path)
            config.update_tecs_config(tecs_config, cluster_conf_path)
if share_disk_info: get_tecs_conf = "ls %s|grep tecs.conf" % cluster_conf_path
ha_error_msg =\ obj = subprocess.Popen(get_tecs_conf,
disk_array.config_ha_share_disk(share_disk_info, shell=True,
controller_ha_nodes) stdout=subprocess.PIPE,
if ha_error_msg: stderr=subprocess.PIPE)
return ha_error_msg (stdoutput, erroutput) = obj.communicate()
else: tecs_conf_file = ""
LOG.info(_("Config Disk Array for HA nodes successfully")) if stdoutput:
tecs_conf_file = stdoutput.split('\n')[0]
if volume_disk_info: config_files['tecs_conf'] =\
cinder_error_msg =\ cluster_conf_path + "/" + tecs_conf_file
disk_array.config_ha_cinder_volume(volume_disk_info,
controller_ha_nodes.keys()) get_ha_conf_cmd = "ls %s|grep HA_1.conf" % cluster_conf_path
if cinder_error_msg: obj = subprocess.Popen(get_ha_conf_cmd,
return cinder_error_msg shell=True,
else: stdout=subprocess.PIPE,
LOG.info(_("Config cinder volume for HA nodes successfully")) stderr=subprocess.PIPE)
(stdoutput, erroutput) = obj.communicate()
return 'update successfully' ha_conf_file = ""
if stdoutput:
ha_conf_file = stdoutput.split('\n')[0]
config_files['ha_conf'] =\
cluster_conf_path + "/" + ha_conf_file
else:
LOG.info(_("No TECS config files generated."))
return config_files
def update_disk_array(self, req, cluster_id):
(share_disk_info, volume_disk_info) =\
disk_array.get_disk_array_info(req, cluster_id)
array_nodes_addr =\
tecs_cmn.get_disk_array_nodes_addr(req, cluster_id)
ha_nodes_ip = array_nodes_addr['ha'].keys()
all_nodes_ip = list(array_nodes_addr['computer']) + ha_nodes_ip
if all_nodes_ip:
compute_error_msg =\
disk_array.config_compute_multipath(all_nodes_ip)
if compute_error_msg:
return compute_error_msg
else:
LOG.info(_("Config Disk Array multipath successfully"))
if share_disk_info:
ha_error_msg =\
disk_array.config_ha_share_disk(share_disk_info,
array_nodes_addr['ha'])
if ha_error_msg:
return ha_error_msg
else:
LOG.info(_("Config Disk Array for HA nodes successfully"))
if volume_disk_info:
cinder_error_msg =\
disk_array.config_ha_cinder_volume(volume_disk_info,
ha_nodes_ip)
if cinder_error_msg:
return cinder_error_msg
else:
LOG.info(_("Config cinder volume for HA nodes successfully"))
return 'update successfully'
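# Illustrative sketch, not taken from the commit: the compute-node loop in the
# upgrade flow above slices the remaining hosts into batches of
# CONF.max_parallel_os_upgrade_number and joins each batch of threads before
# starting the next one.  upgrade_one_host below is a hypothetical stand-in
# for upgrd.thread_bin.
import threading


def upgrade_in_batches(host_ips, batch_size, upgrade_one_host):
    remaining = list(host_ips)
    while remaining:
        # Take at most batch_size hosts, run them in parallel, then wait.
        batch, remaining = remaining[:batch_size], remaining[batch_size:]
        threads = [threading.Thread(target=upgrade_one_host, args=(ip,))
                   for ip in batch]
        for t in threads:
            t.setDaemon(True)
            t.start()
        for t in threads:
            t.join()


def _demo_upgrade(ip):
    print("upgrading %s" % ip)

# Three hosts, upgraded two at a time.
upgrade_in_batches(['10.0.0.1', '10.0.0.2', '10.0.0.3'], 2, _demo_upgrade)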

View File

@@ -19,33 +19,21 @@
import os
import copy
import subprocess
import re

from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden

from daisy import i18n
from daisy.common import utils
from daisy.common import exception
import daisy.registry.client.v1.api as registry
import daisy.api.backends.common as daisy_cmn
from daisyclient.v1 import client as daisy_client
import ConfigParser

STR_MASK = '*' * 8

LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
@@ -53,11 +41,12 @@ _LI = i18n._LI
_LW = i18n._LW

daisy_tecs_path = '/var/lib/daisy/tecs/'
tecs_install_path = '/home/tecs_install'

TECS_STATE = {
    'INIT': 'init',
    'INSTALLING': 'installing',
    'ACTIVE': 'active',
    'INSTALL_FAILED': 'install-failed',
    'UNINSTALLING': 'uninstalling',
    'UNINSTALL_FAILED': 'uninstall-failed',
@@ -66,42 +55,65 @@ TECS_STATE = {
}
def get_daisyclient():
    """Get Daisy client instance."""
    config_daisy = ConfigParser.ConfigParser()
    config_daisy.read("/etc/daisy/daisy-api.conf")
    daisy_port = config_daisy.get("DEFAULT", "bind_port")
    args = {'version': 1.0, 'endpoint': 'http://127.0.0.1:' + daisy_port}
    return daisy_client.Client(**args)


def mkdir_tecs_install(host_ips=None):
    if not host_ips:
        cmd = "mkdir -p %s" % tecs_install_path
        daisy_cmn.subprocess_call(cmd)
        return
    for host_ip in host_ips:
        cmd = 'clush -S -w %s "mkdir -p %s"' % (host_ip, tecs_install_path)
        daisy_cmn.subprocess_call(cmd)


def _get_cluster_network(cluster_networks, network_name):
    network = [cn for cn in cluster_networks if cn['name'] == network_name]
    if not network or not network[0]:
        msg = "network %s is not exist" % (network_name)
        raise exception.InvalidNetworkConfig(msg)
    else:
        return network[0]


def get_host_interface_by_network(host_detail, network_name):
    host_detail_info = copy.deepcopy(host_detail)
    interface_list = [hi for hi in host_detail_info['interfaces']
                      for assigned_network in hi['assigned_networks']
                      if assigned_network and
                      network_name == assigned_network['name']]
    interface = {}
    if interface_list:
        interface = interface_list[0]
    if not interface and 'MANAGEMENT' == network_name:
        msg = "network %s of host %s is not exist" % (
            network_name, host_detail_info['id'])
        raise exception.InvalidNetworkConfig(msg)
    return interface


def get_host_network_ip(req, host_detail, cluster_networks, network_name):
    interface_network_ip = ''
    host_interface = get_host_interface_by_network(host_detail, network_name)
    if host_interface:
        network = _get_cluster_network(cluster_networks, network_name)
        assigned_network = daisy_cmn.get_assigned_network(
            req, host_interface['id'], network['id'])
        interface_network_ip = assigned_network['ip']

    if not interface_network_ip and 'MANAGEMENT' == network_name:
        msg = "%s network ip of host %s can't be empty" % (
            network_name, host_detail['id'])
        raise exception.InvalidNetworkConfig(msg)
    return interface_network_ip
@@ -147,26 +159,36 @@ def get_network_netmask(cluster_networks, network_name):
        raise exception.InvalidNetworkConfig(msg)
    return netmask


# every host only have one gateway
def get_network_gateway(cluster_networks, network_name):
    network = _get_cluster_network(cluster_networks, network_name)
    gateway = network['gateway']
    if not gateway and 'MANAGEMENT' == network_name:
        msg = "gateway of network %s can't be empty" % (network_name)
        raise exception.InvalidNetworkConfig(msg)
    return gateway


def get_network_cidr(cluster_networks, network_name):
    network = _get_cluster_network(cluster_networks, network_name)
    cidr = network['cidr']
    if not cidr:
        msg = "cidr of network %s is not exist" % (network_name)
        raise exception.InvalidNetworkConfig(msg)
    return cidr


def get_mngt_network_vlan_id(cluster_networks):
    mgnt_vlan_id = ""
    management_network = [network for network in cluster_networks if network[
        'network_type'] == 'MANAGEMENT']
    if (not management_network or
            not management_network[0] or
            'vlan_id' not in management_network[0]):
        msg = "can't get management network vlan id"
        raise exception.InvalidNetworkConfig(msg)
    else:
        mgnt_vlan_id = management_network[0]['vlan_id']
    return mgnt_vlan_id
def get_network_vlan_id(cluster_networks, network_type): def get_network_vlan_id(cluster_networks, network_type):
@ -174,7 +196,8 @@ def get_network_vlan_id(cluster_networks, network_type):
general_network = [network for network in cluster_networks general_network = [network for network in cluster_networks
if network['network_type'] == network_type] if network['network_type'] == network_type]
if (not general_network or not general_network[0] or if (not general_network or not general_network[0] or
not general_network[0].has_key('vlan_id')): # not general_network[0].has_key('vlan_id')):
'vlan_id' not in general_network[0]):
msg = "can't get %s network vlan id" % network_type msg = "can't get %s network vlan id" % network_type
raise exception.InvalidNetworkConfig(msg) raise exception.InvalidNetworkConfig(msg)
else: else:
@ -182,7 +205,7 @@ def get_network_vlan_id(cluster_networks, network_type):
return vlan_id return vlan_id
def sort_interfaces_by_pci(host_detail): def sort_interfaces_by_pci(networks, host_detail):
""" """
Sort interfaces by pci segment, if interface type is bond, Sort interfaces by pci segment, if interface type is bond,
user the pci of first memeber nic.This function is fix bug for user the pci of first memeber nic.This function is fix bug for
@ -192,61 +215,45 @@ def sort_interfaces_by_pci(host_detail):
:return: :return:
""" """
interfaces = eval(host_detail.get('interfaces', None)) \ interfaces = eval(host_detail.get('interfaces', None)) \
if isinstance(host_detail, unicode) else host_detail.get('interfaces', None) if isinstance(host_detail, unicode) else \
host_detail.get('interfaces', None)
if not interfaces: if not interfaces:
LOG.info("This host don't have /interfaces info.") LOG.info("This host has no interfaces info.")
return host_detail return host_detail
tmp_interfaces = copy.deepcopy(interfaces) tmp_interfaces = copy.deepcopy(interfaces)
if not [interface for interface in tmp_interfaces
if interface.get('name', None) and len(interface['name']) > 8]:
LOG.info("The interfaces name of host is all less than 9 character, no need sort.")
return host_detail
# add pci segment for the bond nic, the pci is equal to the first member nic pci
slaves_name_list = [] slaves_name_list = []
for interface in tmp_interfaces: for interface in tmp_interfaces:
if interface.get('type', None) == "bond" and \ if interface.get('type', None) == "bond" and\
interface.get('slave1', None) and interface.get('slave2', None): interface.get('slave1', None) and\
interface.get('slave2', None):
slaves_name_list.append(interface['slave1']) slaves_name_list.append(interface['slave1'])
slaves_name_list.append(interface['slave2']) slaves_name_list.append(interface['slave2'])
first_member_nic_name = interface['slave1']
tmp_pci = [interface_tmp['pci'] for interface in interfaces:
for interface_tmp in tmp_interfaces if interface.get('name') not in slaves_name_list:
if interface_tmp.get('name', None) and vlan_id_len_list = [len(network['vlan_id'])
interface_tmp.get('pci', None) and for assigned_network in interface.get(
interface_tmp['name'] == first_member_nic_name] 'assigned_networks', [])
for network in networks
if assigned_network.get('name') ==
network.get('name') and network.get('vlan_id')]
max_vlan_id_len = max(vlan_id_len_list) if vlan_id_len_list else 0
interface_name_len = len(interface['name'])
redundant_bit = interface_name_len + max_vlan_id_len - 14
interface['name'] = interface['name'][
redundant_bit:] if redundant_bit > 0 else interface['name']
return host_detail
if len(tmp_pci) != 1:
LOG.error("This host have two nics with same pci.")
continue
interface['pci'] = tmp_pci[0]
tmp_interfaces = [interface for interface in tmp_interfaces
if interface.get('name', None) and
interface['name'] not in slaves_name_list]
tmp_interfaces = sorted(tmp_interfaces, key = lambda interface: interface['pci'])
for index in range(0, len(tmp_interfaces)):
for interface in interfaces:
if interface['name'] != tmp_interfaces[index]['name']:
continue
interface['name'] = "b" + str(index) if interface['type'] == "bond" else "e" + str(index)
tmp_host_detail = copy.deepcopy(host_detail)
tmp_host_detail.update({'interfaces': interfaces})
return tmp_host_detail
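# Illustrative sketch, not taken from the commit: sort_interfaces_by_pci now
# trims an interface name so that the name plus its longest VLAN id fits the
# 15-character OVS port-name limit mentioned in the docstring (the "- 14"
# presumably leaves one character for a separator such as '.').  Standalone
# version of that arithmetic with made-up values:
def _trim_nic_name(name, vlan_id_len, limit=15):
    # Keep len(trimmed) + 1 + vlan_id_len <= limit by cutting leading chars.
    redundant_bit = len(name) + vlan_id_len - (limit - 1)
    return name[redundant_bit:] if redundant_bit > 0 else name

print(_trim_nic_name("enp129s0f10", 4))  # -> "np129s0f10" (10 + 1 + 4 = 15)
print(_trim_nic_name("eth0", 4))         # -> "eth0" (already short enough)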
def check_and_get_tecs_version(daisy_tecs_pkg_path):
    tecs_version_pkg_file = ""
    get_tecs_version_pkg = "ls %s| grep ^ZXTECS.*\.bin$" % daisy_tecs_pkg_path
    obj = subprocess.Popen(get_tecs_version_pkg,
                           shell=True,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE)
    (stdoutput, erroutput) = obj.communicate()
    if stdoutput:
        tecs_version_pkg_name = stdoutput.split('\n')[0]
@@ -255,33 +262,60 @@ def check_and_get_tecs_version(daisy_tecs_pkg_path):
    daisy_cmn.subprocess_call(chmod_for_tecs_version)
    return tecs_version_pkg_file


def get_service_disk_list(req, params):
    try:
        service_disks = registry.list_service_disk_metadata(
            req.context, **params)
    except exception.Invalid as e:
        raise HTTPBadRequest(explanation=e.msg, request=req)
    return service_disks


def get_cinder_volume_list(req, params):
    try:
        cinder_volumes = registry.list_cinder_volume_metadata(
            req.context, **params)
    except exception.Invalid as e:
        raise HTTPBadRequest(explanation=e.msg, request=req)
    return cinder_volumes


def mask_string(unmasked, mask_list=None, replace_list=None):
    """
    Replaces words from mask_list with MASK in unmasked string.
    If words are needed to be transformed before masking, transformation
    could be describe in replace list. For example [("'","'\\''")]
    replaces all ' characters with '\\''.
    """
    mask_list = mask_list or []
    replace_list = replace_list or []

    masked = unmasked
    for word in sorted(mask_list, lambda x, y: len(y) - len(x)):
        if not word:
            continue
        for before, after in replace_list:
            word = word.replace(before, after)
        masked = masked.replace(word, STR_MASK)
    return masked
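# Usage sketch for mask_string (values are made up): words collected in
# mask_list are replaced by STR_MASK before output is logged; replace_list
# pre-transforms a word the same way the shell command line did.
print(mask_string("login with ossdbg1 failed: ossdbg1 rejected", ['ossdbg1']))
# -> "login with ******** failed: ******** rejected"

print(mask_string("pass is 'a'\\''b'", ["a'b"], [("'", "'\\''")]))
# -> "pass is '********'"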

def run_scrip(script, ip=None, password=None, msg=None):
    try:
        _run_scrip(script, ip, password)
    except:
        msg1 = 'Error occurred during running scripts.'
        message = msg1 + msg if msg else msg1
        LOG.error(message)
        raise HTTPForbidden(explanation=message)
    else:
        LOG.info('Running scripts successfully!')


def _run_scrip(script, ip=None, password=None):
    mask_list = []
    repl_list = [("'", "'\\''")]
    script = "\n".join(script)
    _PIPE = subprocess.PIPE
    if ip:
@@ -297,31 +331,117 @@ def run_scrip(script, ip=None, password=None):
    script = "function t(){ exit $? ; } \n trap t ERR \n" + script
    out, err = obj.communicate(script)
    masked_out = mask_string(out, mask_list, repl_list)
    masked_err = mask_string(err, mask_list, repl_list)
    if obj.returncode:
        pattern = (r'^ssh\:')
        if re.search(pattern, err):
            LOG.error(_("Network error occured when run script."))
            raise exception.NetworkError(masked_err, stdout=out, stderr=err)
        else:
            msg = ('Failed to run remote script, stdout: %s\nstderr: %s' %
                   (masked_out, masked_err))
            LOG.error(msg)
            raise exception.ScriptRuntimeError(msg, stdout=out, stderr=err)
    return obj.returncode, out

def inform_provider_cloud_state(context, cluster_id, **kwargs):
    params = dict()
    daisyclient = get_daisyclient()
    cluster = registry.get_cluster_metadata(context, cluster_id)
    params['operation'] = kwargs.get('operation')
    params['name'] = cluster.get('name')
    params['url'] = "http://" + cluster.get('public_vip')
    params['provider_ip'] = cluster.get('hwm_ip')
    daisyclient.node.cloud_state(**params)


def get_disk_array_nodes_addr(req, cluster_id):
    controller_ha_nodes = {}
    computer_ips = set()

    roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id)
    cluster_networks =\
        daisy_cmn.get_cluster_networks_detail(req, cluster_id)
    for role in roles:
        if role['deployment_backend'] != daisy_cmn.tecs_backend_name:
            continue
        role_hosts = daisy_cmn.get_hosts_of_role(req, role['id'])
        for role_host in role_hosts:
            # hosts that already have TECS installed are excluded
            if (role_host['status'] == TECS_STATE['ACTIVE'] or
                    role_host['status'] == TECS_STATE['UPDATING'] or
                    role_host['status'] == TECS_STATE['UPDATE_FAILED']):
                continue
            host_detail = daisy_cmn.get_host_detail(req,
                                                    role_host['host_id'])
            host_ip = get_host_network_ip(req,
                                          host_detail,
                                          cluster_networks,
                                          'MANAGEMENT')
            if role['name'] == "CONTROLLER_HA":
                min_mac = utils.get_host_min_mac(host_detail['interfaces'])
                controller_ha_nodes[host_ip] = min_mac
            if role['name'] == "COMPUTER":
                computer_ips.add(host_ip)
    return {'ha': controller_ha_nodes, 'computer': computer_ips}


def get_ctl_ha_nodes_min_mac(req, cluster_id):
    '''
    ctl_ha_nodes_min_mac = {'host_name1':'min_mac1', ...}
    '''
    ctl_ha_nodes_min_mac = {}
    roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id)
    cluster_networks =\
        daisy_cmn.get_cluster_networks_detail(req, cluster_id)
    for role in roles:
        if role['deployment_backend'] != daisy_cmn.tecs_backend_name:
            continue
        role_hosts = daisy_cmn.get_hosts_of_role(req, role['id'])
        for role_host in role_hosts:
            # hosts that already have TECS installed are excluded
            if (role_host['status'] == TECS_STATE['ACTIVE'] or
                    role_host['status'] == TECS_STATE['UPDATING'] or
                    role_host['status'] == TECS_STATE['UPDATE_FAILED']):
                continue
            host_detail = daisy_cmn.get_host_detail(req,
                                                    role_host['host_id'])
            host_name = host_detail['name']
            if role['name'] == "CONTROLLER_HA":
                min_mac = utils.get_host_min_mac(host_detail['interfaces'])
                ctl_ha_nodes_min_mac[host_name] = min_mac
    return ctl_ha_nodes_min_mac

class TecsShellExector(object):
    """
    Class config task before install tecs bin.
    """

    def __init__(self, mgnt_ip, task_type, params={}):
        self.task_type = task_type
        self.mgnt_ip = mgnt_ip
        self.params = params
        self.clush_cmd = ""
        self.rpm_name =\
            daisy_cmn.get_rpm_package_by_name(daisy_tecs_path,
                                              'network-configuration')
        self.NETCFG_RPM_PATH = daisy_tecs_path + self.rpm_name
        self.oper_type = {
            'install_rpm': self._install_netcfg_rpm,
            'uninstall_rpm': self._uninstall_netcfg_rpm,
            'update_rpm': self._update_netcfg_rpm,
        }
        self.oper_shell = {
            'CMD_SSHPASS_PRE': "sshpass -p ossdbg1 %(ssh_ip)s %(cmd)s",
            'CMD_RPM_UNINSTALL': "rpm -e network-configuration",
            'CMD_RPM_INSTALL': "rpm -i /home/%(rpm)s" % {'rpm': self.rpm_name},
            'CMD_RPM_UPDATE': "rpm -U /home/%(rpm)s" % {'rpm': self.rpm_name},
            'CMD_RPM_SCP': "scp -o StrictHostKeyChecking=no "
                           "%(path)s root@%(ssh_ip)s:/home" %
                           {'path': self.NETCFG_RPM_PATH, 'ssh_ip': mgnt_ip}
        }
        LOG.info(_("<<<Network configuration rpm is %s>>>" % self.rpm_name))

@@ -329,13 +449,17 @@ class TecsShellExector(object):
    def _uninstall_netcfg_rpm(self):
        self.clush_cmd = self.oper_shell['CMD_SSHPASS_PRE'] % \
            {"ssh_ip": "ssh -o StrictHostKeyChecking=no " + self.mgnt_ip,
             "cmd": self.oper_shell['CMD_RPM_UNINSTALL']}
        subprocess.check_output(
            self.clush_cmd, shell=True, stderr=subprocess.STDOUT)

    def _update_netcfg_rpm(self):
        self.clush_cmd = self.oper_shell['CMD_SSHPASS_PRE'] % \
            {"ssh_ip": "ssh -o StrictHostKeyChecking=no " + self.mgnt_ip,
             "cmd": self.oper_shell['CMD_RPM_UPDATE']}
        subprocess.check_output(
            self.clush_cmd, shell=True, stderr=subprocess.STDOUT)

    def _install_netcfg_rpm(self):
        if not os.path.exists(self.NETCFG_RPM_PATH):
@@ -343,22 +467,30 @@ class TecsShellExector(object):
            return
        self.clush_cmd = "%s;%s" % \
            (self.oper_shell['CMD_SSHPASS_PRE'] %
             {"ssh_ip": "", "cmd": self.oper_shell['CMD_RPM_SCP']},
             self.oper_shell['CMD_SSHPASS_PRE'] %
             {"ssh_ip": "ssh -o StrictHostKeyChecking=no " + self.mgnt_ip,
              "cmd": self.oper_shell['CMD_RPM_INSTALL']})
        subprocess.check_output(
            self.clush_cmd, shell=True, stderr=subprocess.STDOUT)

    def _execute(self):
        try:
            if not self.task_type or not self.mgnt_ip:
                LOG.error(
                    _("<<<TecsShellExector::execute, input params invalid "
                      "on %s!>>>" % self.mgnt_ip))
                return

            self.oper_type[self.task_type]()
        except subprocess.CalledProcessError as e:
            LOG.warn(_("<<<TecsShellExector::execute:Execute command failed "
                       "on %s! Reason:%s>>>" % (
                           self.mgnt_ip, e.output.strip())))
        except Exception as e:
            LOG.exception(_(e.message))
        else:
            LOG.info(_("<<<TecsShellExector::execute:Execute command:"
                       "%s,successful on %s!>>>" % (
                           self.clush_cmd, self.mgnt_ip)))
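# Illustrative sketch, not taken from the commit: how the CMD_SSHPASS_PRE
# template above is filled twice for 'install_rpm' -- once with an empty
# ssh_ip for the local scp, once with an ssh prefix for the remote install.
# The IP address and rpm file name are made-up placeholders.
_oper_shell = {
    'CMD_SSHPASS_PRE': "sshpass -p ossdbg1 %(ssh_ip)s %(cmd)s",
    'CMD_RPM_INSTALL': "rpm -i /home/network-configuration-1.1-1.x86_64.rpm",
    'CMD_RPM_SCP': "scp -o StrictHostKeyChecking=no "
                   "/var/lib/daisy/tecs/network-configuration-1.1-1.x86_64.rpm "
                   "root@192.160.0.10:/home",
}
_clush_cmd = "%s;%s" % (
    _oper_shell['CMD_SSHPASS_PRE'] % {"ssh_ip": "",
                                      "cmd": _oper_shell['CMD_RPM_SCP']},
    _oper_shell['CMD_SSHPASS_PRE'] % {
        "ssh_ip": "ssh -o StrictHostKeyChecking=no 192.160.0.10",
        "cmd": _oper_shell['CMD_RPM_INSTALL']})
print(_clush_cmd)
# -> "sshpass -p ossdbg1  scp ... root@192.160.0.10:/home;sshpass -p ossdbg1
#     ssh -o StrictHostKeyChecking=no 192.160.0.10 rpm -i /home/..."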

View File

@@ -4,6 +4,8 @@ import re
import commands
import types
import subprocess
import socket
import netaddr
from oslo_log import log as logging
from ConfigParser import ConfigParser
from daisy.common import exception
@@ -21,16 +23,18 @@ service_map = {
    'ha': '',
    'mariadb': 'mariadb',
    'amqp': 'rabbitmq-server',
    'ceilometer-api': 'openstack-ceilometer-api',
    'ceilometer-collector': 'openstack-ceilometer-collector,\
openstack-ceilometer-mend',
    'ceilometer-central': 'openstack-ceilometer-central',
    'ceilometer-notification': 'openstack-ceilometer-notification',
    'ceilometer-alarm': 'openstack-ceilometer-alarm-evaluator,\
openstack-ceilometer-alarm-notifier',
    'heat-api': 'openstack-heat-api',
    'heat-api-cfn': 'openstack-heat-api-cfn',
    'heat-engine': 'openstack-heat-engine',
    'ironic': 'openstack-ironic-api,openstack-ironic-conductor',
    'horizon': 'httpd,opencos-alarmmanager',
    'keystone': 'openstack-keystone',
    'glance': 'openstack-glance-api,openstack-glance-registry',
    'cinder-volume': 'openstack-cinder-volume',
@@ -47,8 +51,9 @@ service_map = {
    'nova-vncproxy': 'openstack-nova-novncproxy,openstack-nova-consoleauth',
    'nova-conductor': 'openstack-nova-conductor',
    'nova-api': 'openstack-nova-api',
    'nova-cells': 'openstack-nova-cells',
    'camellia-api': 'camellia-api'
}

def add_service_with_host(services, name, host):
@@ -63,36 +68,33 @@ def add_service_with_hosts(services, name, hosts):
    for h in hosts:
        services[name].append(h['management']['ip'])


def test_ping(ping_src_nic, ping_desc_ips):
    ping_cmd = 'fping'
    for ip in set(ping_desc_ips):
        ping_cmd = ping_cmd + ' -I ' + ping_src_nic + ' ' + ip
    obj = subprocess.Popen(
        ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdoutput, erroutput) = obj.communicate()
    _returncode = obj.returncode
    if _returncode == 0 or _returncode == 1:
        ping_result = stdoutput.split('\n')
        if "No such device" in erroutput:
            return []
        reachable_hosts = [result.split()[0]
                           for result in ping_result
                           if result and result.split()[2] == 'alive']
    else:
        msg = "ping failed beaceuse there is invlid ip in %s" % ping_desc_ips
        raise exception.InvalidIP(msg)
    return reachable_hosts


def get_local_deployment_ip(tecs_deployment_ips):
    (status, output) = commands.getstatusoutput('ifconfig')
    netcard_pattern = re.compile('\S*: ')
    ip_str = '([0-9]{1,3}\.){3}[0-9]{1,3}'
    pattern = re.compile(ip_str)
    nic_ip = {}
    for netcard in re.finditer(netcard_pattern, str(output)):
@@ -108,20 +110,20 @@ def get_local_deployment_ip(tecs_deployment_ip):
            nic_ip[nic_name] = ip.group()

    deployment_ip = ''
    for nic in nic_ip.keys():
        if nic_ip[nic] in tecs_deployment_ips:
            deployment_ip = nic_ip[nic]
            break

    if not deployment_ip:
        for nic, ip in nic_ip.items():
            if test_ping(nic, tecs_deployment_ips):
                deployment_ip = nic_ip[nic]
                break
    return deployment_ip
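# Illustrative sketch, not taken from the commit: test_ping keeps the first
# field of every fping line whose third field is 'alive'.  The sample output
# below is made up but follows fping's usual "<ip> is alive/unreachable"
# format.
_sample = "192.160.0.225 is alive\n192.160.0.226 is unreachable\n"
_reachable = [line.split()[0] for line in _sample.split('\n')
              if line and line.split()[2] == 'alive']
print(_reachable)  # ['192.160.0.225']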

class AnalsyConfig(object):
    def __init__(self, all_configs):
        self.all_configs = all_configs
@@ -139,24 +141,39 @@ class AnalsyConfig(object):
        self.glance_vip = ''
        self.public_vip = ''

        self.share_disk_services = []
        self.share_cluster_disk_services = []

        self.ha_conf = {}
        self.child_cell_dict = {}
        self.ha_master_host = {}

    def get_heartbeats(self, host_interfaces):
        for network in host_interfaces:
            self.heartbeats[0].append(network["management"]["ip"])

            if "heartbeat1" in network and network["heartbeat1"]["ip"]:
                self.heartbeats[1].append(network["heartbeat1"]["ip"])
            if "heartbeat2" in network and network["heartbeat2"]["ip"]:
                self.heartbeats[2].append(network["heartbeat2"]["ip"])

            if "storage" in network and network["storage"]["ip"]:
                if "heartbeat1" not in network:
                    self.heartbeats[1].append(network["storage"]["ip"])
                if "heartbeat1" in network and \
                        "heartbeat2" not in network:
                    self.heartbeats[2].append(network["storage"]["ip"])

        # delete empty heartbeat line
        if not self.heartbeats[0]:
            self.heartbeats[0] = self.heartbeats[1]
            self.heartbeats[1] = self.heartbeats[2]
        if not self.heartbeats[1]:
            self.heartbeats[1] = self.heartbeats[2]

        # remove repeated ip
        if set(self.heartbeats[1]) == set(self.heartbeats[0]):
@@ -164,7 +181,8 @@ class AnalsyConfig(object):
            if set(self.heartbeats[2]) != set(self.heartbeats[0]):
                self.heartbeats[1] = self.heartbeats[2]
                self.heartbeats[2] = []
        if set(self.heartbeats[2]) == set(self.heartbeats[0]) or \
                set(self.heartbeats[2]) == set(self.heartbeats[1]):
            self.heartbeats[2] = []

    def prepare_child_cell(self, child_cell_name, configs):
@@ -181,69 +199,105 @@ class AnalsyConfig(object):
        child_cell_host = configs['host_interfaces'][0]['management']['ip']
        self.child_cell_dict[repr(child_cell_host).strip("u'")] \
            = repr(cell_compute_hosts).strip("u'")

    def prepare_ha_lb(self, role_configs, is_ha, is_lb):
        if is_lb:
            self.ha_master_host['ip'] = role_configs[
                'host_interfaces'][0]['management']['ip']
            self.ha_master_host['hostname'] = role_configs[
                'host_interfaces'][0]['name']
            self.components.append('CONFIG_LB_INSTALL')
            add_service_with_hosts(self.services,
                                   'CONFIG_LB_BACKEND_HOSTS',
                                   role_configs['host_interfaces'])
            self.lb_vip = role_configs['vip']
        if is_ha:
            # convert dns to ip
            manage_ips = []
            for host_interface in role_configs['host_interfaces']:
                manage_ip = ''
                management_addr =\
                    host_interface['management']['ip']
                try:
                    ip_lists = socket.gethostbyname_ex(management_addr)
                    manage_ip = ip_lists[2][0]
                except Exception:
                    if netaddr.IPAddress(management_addr).version == 6:
                        manage_ip = management_addr
                    else:
                        raise exception.InvalidNetworkConfig(
                            "manage ip is not valid %s" % management_addr)
                finally:
                    manage_ips.append(manage_ip)

            self.ha_vip = role_configs['vip']
            self.share_disk_services += role_configs['share_disk_services']
            self.share_cluster_disk_services += \
                role_configs['share_cluster_disk_services']

            local_deployment_ip = get_local_deployment_ip(manage_ips)
            filename = r'/etc/zte-docker'
            if local_deployment_ip:
                if os.path.exists(filename):
                    add_service_with_host(
                        self.services, 'CONFIG_REPO',
                        'http://' + local_deployment_ip +
                        ':18080' + '/tecs_install/')
                else:
                    add_service_with_host(
                        self.services, 'CONFIG_REPO',
                        'http://' + local_deployment_ip + '/tecs_install/')
            else:
                msg = "can't find ip for yum repo"
                raise exception.InvalidNetworkConfig(msg)
            self.components.append('CONFIG_HA_INSTALL')

            add_service_with_host(
                self.services, 'CONFIG_HA_HOST',
                role_configs['host_interfaces'][0]['management']['ip'])
            add_service_with_hosts(self.services, 'CONFIG_HA_HOSTS',
                                   role_configs['host_interfaces'])

            ntp_host = role_configs['ntp_server'] \
                if role_configs['ntp_server'] else role_configs['vip']
            add_service_with_host(self.services, 'CONFIG_NTP_SERVERS',
                                  ntp_host)

            if role_configs['db_vip']:
                self.db_vip = role_configs['db_vip']
                add_service_with_host(
                    self.services, 'CONFIG_MARIADB_HOST',
                    role_configs['db_vip'])
            else:
                self.db_vip = role_configs['vip']
                add_service_with_host(
                    self.services, 'CONFIG_MARIADB_HOST', role_configs['vip'])

            if role_configs['glance_vip']:
                self.glance_vip = role_configs['glance_vip']
                add_service_with_host(
                    self.services, 'CONFIG_GLANCE_HOST',
                    role_configs['glance_vip'])
            else:
                self.glance_vip = role_configs['vip']
                add_service_with_host(
                    self.services, 'CONFIG_GLANCE_HOST', role_configs['vip'])

            if role_configs['public_vip']:
                self.public_vip = role_configs['public_vip']
            else:
                self.public_vip = role_configs['vip']

            add_service_with_host(self.services,
                                  'CONFIG_NOVA_VNCPROXY_HOST',
                                  self.public_vip)
            add_service_with_host(self.services, 'CONFIG_PUBLIC_IP',
                                  self.public_vip)
            add_service_with_host(self.services, 'CONFIG_HORIZON_HOST',
                                  self.public_vip)
            '''
            add_service_with_host(self.services, 'CONFIG_ADMIN_IP',
                                  role_configs['vip'])
            add_service_with_host(self.services, 'CONFIG_INTERNAL_IP',
                                  role_configs['vip'])
            '''

    def prepare_role_service(self, is_ha, service, role_configs):
        host_key_name = "CONFIG_%s_HOST" % service
@@ -251,7 +305,8 @@ class AnalsyConfig(object):
        add_service_with_hosts(self.services, hosts_key_name,
                               role_configs['host_interfaces'])

        if service != 'LB' and service not in ['NOVA_VNCPROXY', 'MARIADB',
                                               'GLANCE', 'HORIZON']:
            add_service_with_host(self.services, host_key_name,
                                  role_configs['vip'])
@@ -272,11 +327,12 @@ class AnalsyConfig(object):
                {'CONFIG_GLANCE_API_INSTALL_MODE': 'LB'})
            self.modes.update(
                {'CONFIG_GLANCE_REGISTRY_INSTALL_MODE': 'LB'})
            # if s == 'HEAT':
            #     self.modes.update({'CONFIG_HEAT_API_INSTALL_MODE': 'LB'})
            #     self.modes.update({'CONFIG_HEAT_API_CFN_INSTALL_MODE': 'LB'})
            # if s == 'CEILOMETER':
            #     self.modes.update({
            #         'CONFIG_CEILOMETER_API_INSTALL_MODE': 'LB'})
            if service == 'IRONIC':
                self.modes.update(
                    {'CONFIG_IRONIC_API_INSTALL_MODE': 'LB'})
@@ -287,8 +343,8 @@ class AnalsyConfig(object):
        if component not in self.services_in_component.keys():
            self.services_in_component[component] = {}
            self.services_in_component[component]["service"] = []
        self.services_in_component[component][
            "service"].append(service_map[service])

        if component == "horizon":
            self.services_in_component[component]["fip"] = self.public_vip
@@ -296,13 +352,13 @@ class AnalsyConfig(object):
            self.services_in_component[component]["fip"] = self.db_vip
        elif component == "glance":
            self.services_in_component[component]["fip"] = self.glance_vip
        else:
            self.services_in_component[component]["fip"] = role_configs["vip"]

        network_name = ''
        if component in ['horizon'] and\
                'publicapi' in role_configs["host_interfaces"][0]:
            network_name = 'publicapi'
        else:
            network_name = 'management'
@@ -311,10 +367,10 @@ class AnalsyConfig(object):
        self.services_in_component[component]["nic_name"] = \
            role_configs["host_interfaces"][0][network_name]["name"]
        if component == 'loadbalance' and \
                'CONTROLLER_LB' in self.all_configs and \
                self.all_configs['CONTROLLER_LB']['vip']:
            self.services_in_component[component]["fip"] = \
                self.all_configs['CONTROLLER_LB']['vip']

    def prepare_amqp_mariadb(self):
        if self.lb_vip:
@@ -331,15 +387,20 @@ class AnalsyConfig(object):
            else:
                amqp_vip = self.ha_vip
            amqp_dict = "{'%s':'%s,%s,%s,%s'}" % (amqp_vip, self.ha_vip,
                                                  self.lb_vip, self.glance_vip,
                                                  self.public_vip)
            mariadb_dict = "{'%s':'%s,%s,%s,%s'}" % (self.db_vip, self.ha_vip,
                                                     self.lb_vip,
                                                     self.glance_vip,
                                                     self.public_vip)
            add_service_with_host(self.services, 'CONFIG_LB_HOST', self.lb_vip)
        elif self.ha_vip:
            amqp_dict = "{'%s':'%s,%s,%s'}" % (self.ha_vip, self.ha_vip,
                                               self.glance_vip,
                                               self.public_vip)
            mariadb_dict = "{'%s':'%s,%s,%s'}" % (self.db_vip, self.ha_vip,
                                                  self.glance_vip,
                                                  self.public_vip)
        else:
            amqp_dict = "{}"
            mariadb_dict = "{}"
@@ -382,50 +443,51 @@ class AnalsyConfig(object):
            self.prepare_amqp_mariadb()

        if self.child_cell_dict:
            add_service_with_host(self.services, 'CONFIG_CHILD_CELL_DICT',
                                  str(self.child_cell_dict))

    def update_conf_with_services(self, tecs):
        for s in self.services:
            if tecs.has_option("general", s):
                if isinstance(self.services[s], types.ListType):
                    if self.services[s] and not self.services[s][0]:
                        return
                    tecs.set("general", s, ','.join(self.services[s]))
            else:
                msg = "service %s is not exit in conf file" % s
                LOG.info(msg)

    def update_conf_with_components(self, tecs):
        for s in self.components:
            if tecs.has_option("general", s):
                tecs.set("general", s, 'y')
            else:
                msg = "component %s is not exit in conf file" % s
                LOG.info(msg)

    def update_conf_with_modes(self, tecs):
        for k, v in self.modes.items():
            if tecs.has_option("general", k):
                tecs.set("general", k, v)
            else:
                msg = "mode %s is not exit in conf file" % k
                LOG.info(msg)

    def update_tecs_conf(self, tecs):
        self.update_conf_with_services(tecs)
        self.update_conf_with_components(tecs)
        self.update_conf_with_modes(tecs)

    def update_ha_conf(self, ha, ha_nic_name, tecs=None):
        if self.all_configs['OTHER'].get('dns_config'):
            for heartbeat in self.heartbeats:
                for name_ip in self.all_configs['OTHER']['dns_config']:
                    for tmp in heartbeat:
                        if tmp == name_ip.keys()[0]:
                            heartbeat.remove(tmp)
                            heartbeat.append(name_ip.values()[0])

            for k, v in self.services_in_component.items():
                for name_ip in self.all_configs['OTHER']['dns_config']:
@@ -435,65 +497,110 @@ class AnalsyConfig(object):
        ha.set('DEFAULT', 'heartbeat_link2', ','.join(self.heartbeats[1]))
        ha.set('DEFAULT', 'heartbeat_link3', ','.join(self.heartbeats[2]))

        ha.set('DEFAULT', 'components', ','.join(
            self.services_in_component.keys()))

        for k, v in self.services_in_component.items():
            ha.set('DEFAULT', k, ','.join(v['service']))
            if k == 'glance':
                if 'glance' in self.share_disk_services:
                    ha.set('DEFAULT', 'glance_device_type', 'iscsi')
                    ha.set(
                        'DEFAULT', 'glance_device',
                        '/dev/mapper/vg_glance-lv_glance')
                    ha.set('DEFAULT', 'glance_fs_type', 'ext4')
                else:
                    ha.set('DEFAULT', 'glance_device_type', 'drbd')
                    ha.set(
                        'DEFAULT', 'glance_device', '/dev/vg_data/lv_glance')
                    ha.set('DEFAULT', 'glance_fs_type', 'ext4')
            # mariadb now not support db cluster, don't support share disk.
            if k == "database":
                if 'db' in self.share_disk_services:
                    ha.set(
                        'DEFAULT', 'database_device',
                        '/dev/mapper/vg_db-lv_db')
                    ha.set('DEFAULT', 'database_fs_type', 'ext4')
                    ha.set('DEFAULT', 'database_device_type', 'share')
                    if tecs:
                        tecs.set(
                            "general",
                            'CONFIG_HA_INSTALL_MARIADB_LOCAL',
                            'n')
                elif 'db' in self.share_cluster_disk_services:
                    ha.set(
                        'DEFAULT', 'database_device',
                        '/dev/mapper/vg_db-lv_db')
                    ha.set('DEFAULT', 'database_fs_type', 'ext4')
                    ha.set('DEFAULT', 'database_device_type', 'share_cluster')
                    if tecs:
                        tecs.set(
                            "general",
                            'CONFIG_HA_INSTALL_MARIADB_LOCAL',
                            'y')
                else:
                    ha.set('DEFAULT', 'database_device_type', 'local_cluster')
                    if tecs:
                        tecs.set(
                            "general",
                            'CONFIG_HA_INSTALL_MARIADB_LOCAL',
                            'y')
                if 'db_backup' in self.share_disk_services:
                    ha.set(
                        'DEFAULT',
                        'backup_database_device',
                        '/dev/mapper/vg_db_backup-lv_db_backup')
                    ha.set('DEFAULT', 'backup_database_fs_type', 'ext4')

            if "mongod" in v['service']:
                if 'mongodb' in self.share_disk_services:
                    ha.set(
                        'DEFAULT', 'mongod_device',
                        '/dev/mapper/vg_mongodb-lv_mongodb')
                    ha.set('DEFAULT', 'mongod_fs_type', 'ext4')
                    ha.set('DEFAULT', 'mongod_local', '')
                    if tecs:
                        tecs.set(
                            "general",
                            'CONFIG_HA_INSTALL_MONGODB_LOCAL', 'n')
                else:
                    ha.set('DEFAULT', 'mongod_fs_type', 'ext4')
                    ha.set('DEFAULT', 'mongod_local', 'yes')
                    if tecs:
                        tecs.set(
                            "general",
                            'CONFIG_HA_INSTALL_MONGODB_LOCAL', 'y')

            if k not in self.lb_components:
                # if "bond" in v['nic_name']:
                #     v['nic_name'] = "vport"
                ha.set('DEFAULT', k + '_fip', v['fip'])
                if ha_nic_name and k not in ['horizon']:
                    nic_name = ha_nic_name
                else:
                    nic_name = v['nic_name']
                ha.set('DEFAULT', k + '_nic', nic_name)
                cidr_netmask = reduce(lambda x, y: x + y,
                                      [bin(int(i)).count('1')
                                       for i in v['netmask'].split('.')])
                ha.set('DEFAULT', k + '_netmask', cidr_netmask)

def update_conf(tecs, key, value):
    tecs.set("general", key, value)


def get_conf(tecs_conf_file, **kwargs):
    result = {}
    if not kwargs:
        return result

    tecs = ConfigParser()
    tecs.optionxform = str
    tecs.read(tecs_conf_file)
    result = {key: tecs.get("general", kwargs.get(key, None))
              for key in kwargs.keys()
              if tecs.has_option("general", kwargs.get(key, None))}
    return result
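# Usage sketch for get_conf (file path and option names are made up): each
# keyword maps a caller-chosen key to an option in [general]; options missing
# from the file are silently dropped from the result.
with open('/tmp/tecs_demo.conf', 'w') as f:
    f.write("[general]\nCONFIG_LB_HOST = 192.160.0.226\n")

print(get_conf('/tmp/tecs_demo.conf',
               lb_host='CONFIG_LB_HOST',
               missing='CONFIG_NO_SUCH_OPTION'))
# -> {'lb_host': '192.160.0.226'}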
@ -563,6 +670,7 @@ class DvsDaisyConfig(object):
# common # common
self.dvs_network_type = [] self.dvs_network_type = []
self.dvs_vswitch_type = {} self.dvs_vswitch_type = {}
self.dvs_cpu_sets = []
self.dvs_physnics = [] self.dvs_physnics = []
self.enable_sdn = False self.enable_sdn = False
@ -586,6 +694,9 @@ class DvsDaisyConfig(object):
return
self.dvs_vswitch_type.update(vswitch_type)
dvs_cpu_sets = network.get('dvs_cpu_sets')
self.dvs_cpu_sets.extend(dvs_cpu_sets)
network_type = network['network_config'].get('network_type')
if network_type in ['vlan']:
@ -601,13 +712,16 @@ class DvsDaisyConfig(object):
self.dvs_vswitch_type.get('ovs_agent_patch')) and (
len(self.dvs_vswitch_type.get('ovs_agent_patch')) > 0):
return
if not self.dvs_vswitch_type.get('ovs_agent_patch') and not\
self.dvs_vswitch_type.get('ovdk'):
return
update_conf(self.tecs, 'CONFIG_DVS_TYPE', self.dvs_vswitch_type)
update_conf(self.tecs, 'CONFIG_DVS_PHYSICAL_NICS',
",".join(set(self.dvs_physnics)))
# cpu sets for dvs, add CONFIG_DVS_CPU_SETS to tecs.conf firstly
update_conf(self.tecs, 'CONFIG_DVS_CPU_SETS', self.dvs_cpu_sets)
if 'vlan' in self.dvs_network_type:
update_conf(self.tecs, 'CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS',
@ -693,12 +807,13 @@ class DvsDaisyConfig(object):
default_tecs_conf_template_path = "/var/lib/daisy/tecs/"
tecs_conf_template_path = default_tecs_conf_template_path
def private_network_conf(tecs, private_networks_config):
if private_networks_config:
mode_str = {
'0': '(active-backup;off;"%s-%s")',
'1': '(balance-slb;off;"%s-%s")',
'2': '(balance-tcp;active;"%s-%s")'
}
config_neutron_sriov_bridge_mappings = []
@ -709,10 +824,11 @@ def private_network_conf(tecs, private_networks_config):
type = private_network.get('type', None)
name = private_network.get('name', None)
assign_networks = private_network.get('assigned_networks', None)
slave1 = private_network.get('slave1', None)
slave2 = private_network.get('slave2', None)
mode = private_network.get('mode', None)
if not type or not name or not assign_networks or not\
slave1 or not slave2 or not mode:
break
for assign_network in assign_networks:
@ -724,23 +840,33 @@ def private_network_conf(tecs, private_networks_config):
break
# ether
if 0 == cmp(type, 'ether') and\
0 == cmp(network_type, 'DATAPLANE'):
if 0 == cmp(ml2_type, 'sriov'):
config_neutron_sriov_bridge_mappings.append(
"%s:%s" % (physnet_name, "br-" + name))
config_neutron_sriov_physnet_ifaces.append(
"%s:%s" % (physnet_name, name))
elif 0 == cmp(ml2_type, 'ovs'):
config_neutron_ovs_bridge_mappings.append(
"%s:%s" % (physnet_name, "br-" + name))
config_neutron_ovs_physnet_ifaces.append(
"%s:%s" % (physnet_name, name))
# bond
elif 0 == cmp(type, 'bond') and\
0 == cmp(network_type, 'DATAPLANE'):
if 0 == cmp(ml2_type, 'sriov'):
config_neutron_sriov_bridge_mappings.append(
"%s:%s" % (physnet_name, "br-" + name))
config_neutron_sriov_physnet_ifaces.append(
"%s:%s" % (physnet_name, name + mode_str[mode]
% (slave1, slave2)))
elif 0 == cmp(ml2_type, 'ovs'):
config_neutron_ovs_bridge_mappings.append(
"%s:%s" % (physnet_name, "br-" + name))
config_neutron_ovs_physnet_ifaces.append(
"%s:%s" % (physnet_name, name + mode_str[mode]
% (slave1, slave2)))
if config_neutron_sriov_bridge_mappings:
update_conf(tecs,
@ -750,18 +876,18 @@ def private_network_conf(tecs, private_networks_config):
update_conf(tecs,
'CONFIG_NEUTRON_SRIOV_PHYSNET_IFACES',
",".join(config_neutron_sriov_physnet_ifaces))
if config_neutron_ovs_bridge_mappings:
update_conf(tecs, 'CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS',
",".join(config_neutron_ovs_bridge_mappings))
if config_neutron_ovs_physnet_ifaces:
update_conf(tecs, 'CONFIG_NEUTRON_OVS_PHYSNET_IFACES',
",".join(config_neutron_ovs_physnet_ifaces))
def update_tecs_config(config_data, cluster_conf_path):
msg = "tecs config data is: %s" % config_data
LOG.info(msg)
daisy_tecs_path = tecs_conf_template_path
tecs_conf_template_file = os.path.join(daisy_tecs_path, "tecs.conf")
ha_conf_template_file = os.path.join(daisy_tecs_path, "HA.conf")
@ -773,49 +899,67 @@ def update_tecs_config(config_data, cluster_conf_path):
tecs = ConfigParser()
tecs.optionxform = str
tecs.read(tecs_conf_template_file)
cluster_data = config_data['OTHER']['cluster_data']
update_conf(tecs, 'CLUSTER_ID', cluster_data['id'])
# if cluster_data.has_key('networking_parameters'):
if 'networking_parameters' in cluster_data:
networking_parameters = cluster_data['networking_parameters']
# if networking_parameters.has_key('base_mac') and\
if 'base_mac' in networking_parameters and\
networking_parameters['base_mac']:
update_conf(
tecs, 'CONFIG_NEUTRON_BASE_MAC',
networking_parameters['base_mac'])
# if networking_parameters.has_key('gre_id_range') and\
if 'gre_id_range' in networking_parameters and\
len(networking_parameters['gre_id_range']) > 1 \
and networking_parameters['gre_id_range'][0] and\
networking_parameters['gre_id_range'][1]:
update_conf(tecs, 'CONFIG_NEUTRON_ML2_TUNNEL_ID_RANGES',
("%s:%s" % (networking_parameters['gre_id_range'][0],
networking_parameters['gre_id_range'][1])))
if 'vxlan' in config_data['OTHER'].get('segmentation_type', {}):
update_conf(
tecs, 'CONFIG_NEUTRON_ML2_VNI_RANGES',
config_data['OTHER']['segmentation_type']['vxlan']['vni_range'])
update_conf(tecs, 'CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES', 'vxlan')
update_conf(tecs, 'CONFIG_NEUTRON_ML2_TYPE_DRIVERS', 'vxlan')
else:
update_conf(tecs, 'CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES', 'vlan')
update_conf(tecs, 'CONFIG_NEUTRON_ML2_TYPE_DRIVERS', 'vlan')
physic_network_cfg = config_data['OTHER']['physic_network_config']
if physic_network_cfg.get('json_path', None):
update_conf(
tecs, 'CONFIG_NEUTRON_ML2_JSON_PATH',
physic_network_cfg['json_path'])
if physic_network_cfg.get('vlan_ranges', None):
update_conf(tecs, 'CONFIG_NEUTRON_ML2_VLAN_RANGES',
physic_network_cfg['vlan_ranges'])
if config_data['OTHER']['tecs_installed_hosts']:
update_conf(tecs, 'EXCLUDE_SERVERS', ",".join(
config_data['OTHER']['tecs_installed_hosts']))
ha = ConfigParser()
ha.optionxform = str
ha.read(ha_conf_template_file)
config = AnalsyConfig(config_data)
# if config_data['OTHER'].has_key('ha_nic_name'):
if 'ha_nic_name' in config_data['OTHER']:
ha_nic_name = config_data['OTHER']['ha_nic_name']
else:
ha_nic_name = ""
config.prepare()
config.update_tecs_conf(tecs)
config.update_ha_conf(ha, ha_nic_name, tecs)
update_conf_with_zenic(tecs, config_data['OTHER']['zenic_config'])
# if config_data['OTHER']['dvs_config'].has_key('network_config'):
if 'network_config' in config_data['OTHER']['dvs_config']:
config_data['OTHER']['dvs_config']['network_config']['enable_sdn'] = \
config_data['OTHER']['zenic_config'].get('vip', False)
dvs_config = DvsDaisyConfig(tecs, config_data['OTHER']['dvs_config'])
@ -824,7 +968,7 @@ def update_tecs_config(config_data, cluster_conf_path):
tecs.write(open(tecs_conf_out, "w+"))
ha.write(open(ha_config_out, "w+"))
return
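A worked illustration of the vxlan/vlan branch above; the dictionary mirrors the keys read from config_data['OTHER'], with invented values:

other = {'segmentation_type': {'vxlan': {'vni_range': '1000:3000'}}}
if 'vxlan' in other.get('segmentation_type', {}):
    tenant_network_types = 'vxlan'
    vni_ranges = other['segmentation_type']['vxlan']['vni_range']   # '1000:3000'
else:
    tenant_network_types = 'vlan'
    vni_ranges = None
print(tenant_network_types)   # vxlan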

View File

@ -16,25 +16,12 @@
""" """
/install endpoint for tecs API /install endpoint for tecs API
""" """
import os
import copy
import subprocess import subprocess
import time
import traceback
import webob.exc
from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden
from threading import Thread
from daisy import i18n from daisy import i18n
from daisy import notifier
from daisy.common import exception from daisy.common import exception
import daisy.registry.client.v1.api as registry
import daisy.api.backends.common as daisy_cmn import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.tecs.common as tecs_cmn import daisy.api.backends.tecs.common as tecs_cmn
@ -48,183 +35,281 @@ _ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
tecs_state = tecs_cmn.TECS_STATE
def _get_service_disk_for_disk_array(req, role_id):
disk_info = []
service_disks = tecs_cmn.get_service_disk_list(req,
{'filters': {
'role_id': role_id}})
for service_disk in service_disks:
share_disk = {}
if service_disk['disk_location'] == 'share':
share_disk['service'] = service_disk['service']
share_disk['protocol_type'] = service_disk['protocol_type']
share_disk['lun'] = service_disk['lun']
if service_disk['protocol_type'] == 'FIBER':
share_disk['fc_hba_wwpn'] = \
service_disk['data_ips'].split(',')
else:
share_disk['data_ips'] = service_disk['data_ips'].split(',')
share_disk['lvm_config'] = {}
share_disk['lvm_config']['size'] = service_disk['size']
share_disk['lvm_config']['vg_name'] =\
'vg_%s' % service_disk['service']
share_disk['lvm_config']['lv_name'] =\
'lv_%s' % service_disk['service']
share_disk['lvm_config']['fs_type'] = 'ext4'
disk_info.append(share_disk)
return disk_info
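The dictionaries appended to disk_info above have the following shape; this is a hypothetical FIBER-attached db disk, and every value is purely illustrative:

share_disk_example = {
    'service': 'db',
    'protocol_type': 'FIBER',
    'lun': '1',
    'fc_hba_wwpn': ['21000024ff45a1b2', '21000024ff45a1b3'],
    'lvm_config': {'size': 100,
                   'vg_name': 'vg_db',
                   'lv_name': 'lv_db',
                   'fs_type': 'ext4'},
}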
def _get_share_cluster_disk_for_disk_array(req, role_id):
'''
disk_info = [{'service': 'db', 'lun': 'lun1', 'data_ips':'data_ip1'},
{'service': 'db', 'lun': 'lun2', 'data_ips':'data_ip2'},
{'service': 'glance', 'lun': 'lun3', 'data_ips':'data_ip3'},
{'service': 'glance', 'lun': 'lun4', 'data_ips':'data_ip4'},]
'''
disk_info = []
service_disks = \
tecs_cmn.get_service_disk_list(req, {'filters': {'role_id': role_id}})
service_name = 'db'
for service_disk in service_disks:
share_cluster_disk = {}
if service_disk['disk_location'] == 'share_cluster':
share_cluster_disk['service'] = service_disk['service']
share_cluster_disk['protocol_type'] = service_disk['protocol_type']
share_cluster_disk['lun'] = service_disk['lun']
if service_disk['protocol_type'] == 'FIBER':
share_cluster_disk['fc_hba_wwpn'] = \
service_disk['data_ips'].split(',')
else:
share_cluster_disk['data_ips'] = \
service_disk['data_ips'].split(',')
share_cluster_disk['lvm_config'] = {}
share_cluster_disk['lvm_config']['size'] = service_disk['size']
share_cluster_disk['lvm_config']['vg_name'] =\
'vg_%s' % service_disk['service']
share_cluster_disk['lvm_config']['lv_name'] =\
'lv_%s' % service_disk['service']
share_cluster_disk['lvm_config']['fs_type'] = 'ext4'
disk_info.append(share_cluster_disk)
return disk_info
def _get_cinder_volume_for_disk_array(req, role_id):
cinder_volume_info = []
cinder_volumes = tecs_cmn.get_cinder_volume_list(req,
{'filters': {
'role_id': role_id}})
for cinder_volume in cinder_volumes:
cv_info = {}
cv_info['management_ips'] =\
cinder_volume['management_ips'].split(',')
cv_info['data_ips'] = cinder_volume['data_ips'].split(',')
cv_info['user_name'] = cinder_volume['user_name']
cv_info['user_pwd'] = cinder_volume['user_pwd']
index = cinder_volume['backend_index']
cv_info['backend'] = {index: {}}
cv_info['backend'][index]['volume_driver'] =\
cinder_volume['volume_driver']
cv_info['backend'][index]['volume_type'] =\
cinder_volume['volume_type']
cv_info['backend'][index]['pools'] =\
cinder_volume['pools'].split(',')
cinder_volume_info.append(cv_info)
return cinder_volume_info
def get_disk_array_info(req, cluster_id):
share_disk_info = []
share_cluster_disk_info = []
volume_disk_info = {}
cinder_volume_disk_list = []
roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id)
for role in roles:
if role['deployment_backend'] != daisy_cmn.tecs_backend_name:
continue
if role['name'] == 'CONTROLLER_HA':
share_disks = _get_service_disk_for_disk_array(req, role['id'])
share_cluster_disks = \
_get_share_cluster_disk_for_disk_array(req, role['id'])
share_disk_info += share_disks
share_cluster_disk_info += share_cluster_disks
cinder_volumes =\
_get_cinder_volume_for_disk_array(req, role['id'])
cinder_volume_disk_list += cinder_volumes
if cinder_volume_disk_list:
volume_disk_info['disk_array'] = cinder_volume_disk_list
return (share_disk_info, volume_disk_info, share_cluster_disk_info)
def get_host_min_mac(host_interfaces):
macs = [interface['mac'] for interface in host_interfaces
if interface['type'] == 'ether' and interface['mac']]
macs.sort()
return macs[0]
def config_ha_share_disk(share_disk_info,
controller_ha_nodes,
share_cluster_disk_info=None):
'''
share_disk_info = \
[{'service': 'db', 'lun': 'lun1', 'data_ips':'data_ip1'},
{'service': 'glance', 'lun': 'lun3', 'data_ips':'data_ip3'},]
share_cluster_disk_info = \
[{'service': 'db', 'lun': 'lun1', 'data_ips':'data_ip1', ...},
{'service': 'db', 'lun': 'lun2', 'data_ips':'data_ip2', ...},
{'service': 'glance', 'lun': 'lun3', 'data_ips':'data_ip3'},
{'service': 'glance', 'lun': 'lun4', 'data_ips':'data_ip4'},]
controller_ha_nodes[host_ip] = min_mac
'''
sorted_db_share_cluster = []
if share_cluster_disk_info:
db_share_cluster_disk = \
[disk for disk in share_cluster_disk_info
if disk['service'] == 'db']
if len(db_share_cluster_disk) != 2:
error_msg = 'share cluster disk: %s must be existed in pair.' % \
db_share_cluster_disk
LOG.error(error_msg)
raise exception.InstallException(error_msg)
sorted_db_share_cluster = \
sorted(db_share_cluster_disk, key=lambda s: s['lun'])
sorted_ha_nodes = \
sorted(controller_ha_nodes.iteritems(), key=lambda d: d[1])
sorted_ha_nodes_ip = [node[0] for node in sorted_ha_nodes]
all_share_disk_info = []
if sorted_db_share_cluster:
all_share_disk_info = \
[[disk] + share_disk_info for disk in sorted_db_share_cluster]
# all_share_disk_info = \
# [[{'lun': 'lun1', 'service': 'db', 'data_ips': 'data_ip1'},
# {'lun': 'lun3', 'service': 'glance', 'data_ips': 'data_ip3'}],
# [{'lun': 'lun2', 'service': 'db', 'data_ips': 'data_ip2'},
# {'lun': 'lun3', 'service': 'glance', 'data_ips': 'data_ip3'}]]
else:
for index in range(len(sorted_ha_nodes)):
all_share_disk_info.append(share_disk_info)
# all_share_disk_info = \
# [{'lun': 'lun3', 'service': 'glance', 'data_ips': 'data_ip3'},
# {'lun': 'lun3', 'service': 'glance', 'data_ips': 'data_ip3'}]
host_detail,
cluster_networks, '''
'MANAGEMENT')
if role['name'] == "CONTROLLER_HA":
pxe_mac = [interface['mac'] for interface in host_detail['interfaces']
if interface['is_deployment'] == True]
if pxe_mac and pxe_mac[0]:
controller_ha_nodes[host_ip] = pxe_mac[0]
else:
min_mac = get_host_min_mac(host_detail['interfaces'])
controller_ha_nodes[host_ip] = min_mac
if role['name'] == "COMPUTER":
computer_ips.append(host_ip)
return (controller_ha_nodes, computer_ips)
def config_ha_share_disk(share_disk_info, controller_ha_nodes):
error_msg = ""
cmd = 'rm -rf /var/lib/daisy/tecs/storage_auto_config/base/*.json'
daisy_cmn.subprocess_call(cmd)
with open("/var/lib/daisy/tecs/storage_auto_config/base/control.json",\
"w") as fp:
json.dump(share_disk_info, fp, indent=2)
for host_ip in controller_ha_nodes.keys():
password = "ossdbg1"
cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password)
daisy_cmn.subprocess_call(cmd)
cmd = 'clush -S -w %s "mkdir -p /home/tecs_install"' % (host_ip,)
daisy_cmn.subprocess_call(cmd)
try:
scp_bin_result = subprocess.check_output(
'scp -o StrictHostKeyChecking=no -r\
/var/lib/daisy/tecs/storage_auto_config\
%s:/home/tecs_install' % (host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
error_msg = "scp /var/lib/daisy/tecs/storage_auto_config\
to %s failed!" % host_ip
raise exception.InstallException(error_msg)
try:
LOG.info(_("Config share disk for host %s" % host_ip))
cmd = "cd /home/tecs_install/storage_auto_config/;\
python storage_auto_config.py share_disk %s"\
% controller_ha_nodes[host_ip]
exc_result = subprocess.check_output(
'clush -S -w %s "%s"' % (host_ip,cmd),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
LOG.info(_("Storage script error message: %s" % e.output))
error_msg = "config Disk Array share disks\
on %s failed!" % host_ip
raise exception.InstallException(error_msg)
'''
def config_ha_cinder_volume(volume_disk_info, controller_ha_ips):
error_msg = ""
cmd = 'rm -rf /var/lib/daisy/tecs/storage_auto_config/base/*.json'
daisy_cmn.subprocess_call(cmd)
with open("/var/lib/daisy/tecs/storage_auto_config/base/cinder.json", "w") as fp:
for (host_ip, share_disk) in zip(sorted_ha_nodes_ip, all_share_disk_info):
with open("/var/lib/daisy/tecs/storage_auto_config/base/control.json",
"w") as fp:
json.dump(share_disk, fp, indent=2)
try:
subprocess.check_output(
'scp -o StrictHostKeyChecking=no -r\
/var/lib/daisy/tecs/storage_auto_config\
%s:/home/tecs_install' % (host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
error_msg = "scp /var/lib/daisy/tecs/storage_auto_config\
to %s failed!" % host_ip
raise exception.InstallException(error_msg)
try:
LOG.info(_("Config share disk for host %s" % host_ip))
cmd = "cd /home/tecs_install/storage_auto_config/;\
python storage_auto_config.py share_disk %s"\
% controller_ha_nodes[host_ip]
subprocess.check_output(
'clush -S -w %s "%s"' % (host_ip, cmd),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
LOG.info(_("Storage script error message: %s" % e.output))
error_msg = "config Disk Array share disks\
on %s failed!" % host_ip
raise exception.InstallException(error_msg)
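A toy run of the pairing logic above: each CONTROLLER_HA node (sorted by MAC) gets its own control.json holding one of the paired db LUNs plus the common share disks. All values below are invented:

sorted_ha_nodes_ip = ['192.168.1.10', '192.168.1.11']      # already sorted by MAC
db_luns = [{'service': 'db', 'lun': 'lun1'},
           {'service': 'db', 'lun': 'lun2'}]                # sorted by lun
common_disks = [{'service': 'glance', 'lun': 'lun3'}]
all_share_disk_info = [[disk] + common_disks for disk in db_luns]
for host_ip, share_disk in zip(sorted_ha_nodes_ip, all_share_disk_info):
    print("%s -> %s" % (host_ip, [d['lun'] for d in share_disk]))
# 192.168.1.10 -> ['lun1', 'lun3']
# 192.168.1.11 -> ['lun2', 'lun3']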
def config_ha_cinder_volume(volume_disk_info, controller_ha_ips):
cmd = 'rm -rf /var/lib/daisy/tecs/storage_auto_config/base/*.json'
daisy_cmn.subprocess_call(cmd)
with open("/var/lib/daisy/tecs/storage_auto_config/base/cinder.json",
"w") as fp:
json.dump(volume_disk_info, fp, indent=2)
for host_ip in controller_ha_ips:
password = "ossdbg1"
cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password)
daisy_cmn.subprocess_call(cmd)
cmd = 'clush -S -w %s "mkdir -p /home/tecs_install"' % (host_ip,)
daisy_cmn.subprocess_call(cmd)
try:
subprocess.check_output(
'scp -o StrictHostKeyChecking=no -r\
/var/lib/daisy/tecs/storage_auto_config\
%s:/home/tecs_install' % (host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
error_msg = "scp /var/lib/daisy/tecs/storage_auto_config\
to %s failed!" % host_ip
raise exception.InstallException(error_msg)
try:
LOG.info(_("Config cinder volume for host %s" % host_ip))
cmd = 'cd /home/tecs_install/storage_auto_config/;\
python storage_auto_config.py cinder_conf %s' % host_ip
subprocess.check_output(
'clush -S -w %s "%s"' % (host_ip, cmd),
LOG.info(_("Storage script error message: %s" % e.output))
error_msg = "config Disk Array cinder volumes on %s failed!" % host_ip
return error_msg
return error_msg
def config_compute_multipath(all_nodes_ip):
error_msg = ""
for host_ip in all_nodes_ip:
password = "ossdbg1"
cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password)
daisy_cmn.subprocess_call(cmd)
cmd = 'clush -S -w %s "mkdir -p /home/tecs_install"' % (host_ip,)
daisy_cmn.subprocess_call(cmd)
try:
scp_bin_result = subprocess.check_output(
'scp -o StrictHostKeyChecking=no -r /var/lib/daisy/tecs/storage_auto_config %s:/home/tecs_install' % (host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
LOG.info(_("Storage script error message: %s" % e.output))
error_msg = "config Disk Array cinder volumes\
on %s failed!" % host_ip
raise exception.InstallException(error_msg)
def config_compute_multipath(hosts_ip):
for host_ip in hosts_ip:
try:
subprocess.check_output(
'scp -o StrictHostKeyChecking=no -r\
/var/lib/daisy/tecs/storage_auto_config\
%s:/home/tecs_install' % (host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
error_msg = "scp /var/lib/daisy/tecs/storage_auto_config\
to %s failed!" % host_ip
raise exception.InstallException(error_msg)
try:
LOG.info(_("Config multipath for host %s" % host_ip))
cmd = 'cd /home/tecs_install/storage_auto_config/;\
python storage_auto_config.py check_multipath'
subprocess.check_output(
'clush -S -w %s "%s"' % (host_ip, cmd),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
LOG.info(_("Storage script error message: %s" % e.output))
error_msg = "config Disk Array multipath\
on %s failed!" % host_ip
raise exception.InstallException(error_msg)
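All of the remote steps above share one pattern: run a command on the target node through clush and capture stderr together with stdout. A stripped-down sketch (the host address is an example; clush is assumed to be installed on the Daisy node):

import subprocess

host_ip = '192.168.1.10'
cmd = ('cd /home/tecs_install/storage_auto_config/; '
       'python storage_auto_config.py check_multipath')
try:
    out = subprocess.check_output('clush -S -w %s "%s"' % (host_ip, cmd),
                                  shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
    print(e.output)   # the script's error output is logged by the caller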

File diff suppressed because it is too large

View File

@ -17,31 +17,12 @@
/hosts endpoint for Daisy v1 API
"""
import webob.exc
import subprocess
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden
from threading import Thread, Lock
import threading
from daisy import i18n
from daisy import notifier
from daisy.api import policy
import daisy.api.v1
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
from daisy.api.v1 import controller
from daisy.api.v1 import filters
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.tecs.common as tecs_cmn
import daisy.registry.client.v1.api as registry
LOG = logging.getLogger(__name__)
_ = i18n._
@ -51,9 +32,11 @@ _LW = i18n._LW
tecs_state = tecs_cmn.TECS_STATE
def update_progress_to_db(req, role_id_list, status, hosts_list, host_ip=None):
"""
Write uninstall progress and status to db,
we use global lock object 'uninstall_mutex'
to make sure this function is thread safety.
:param req: http req.
:param role_id_list: Column neeb be update in role table.
@ -63,26 +46,27 @@ def update_progress_to_db(req, role_id_list, status, hosts_list,host_ip=None):
for role_id in role_id_list:
role_hosts = daisy_cmn.get_hosts_of_role(req, role_id)
for host_id_ip in hosts_list:
host_ip_tmp = host_id_ip.values()[0]
host_id_tmp = host_id_ip.keys()[0]
if host_ip:
for role_host in role_hosts:
if (host_ip_tmp == host_ip and
role_host['host_id'] == host_id_tmp):
role_host_meta = {}
if 0 == cmp(status, tecs_state['UNINSTALLING']):
role_host_meta['progress'] = 10
role_host_meta['messages'] = 'TECS uninstalling'
if 0 == cmp(status, tecs_state['UNINSTALL_FAILED']):
role_host_meta[
'messages'] = 'TECS uninstalled failed'
elif 0 == cmp(status, tecs_state['INIT']):
role_host_meta['progress'] = 100
role_host_meta[
'messages'] = 'TECS uninstalled successfully'
if role_host_meta:
role_host_meta['status'] = status
daisy_cmn.update_role_host(req, role_host['id'],
role_host_meta)
else:
role = {}
if 0 == cmp(status, tecs_state['UNINSTALLING']):
@ -91,11 +75,11 @@ def update_progress_to_db(req, role_id_list, status, hosts_list,host_ip=None):
role_host_meta['status'] = status
role_host_meta['progress'] = 0
daisy_cmn.update_role_host(req,
role_host['id'],
role_host_meta)
role['progress'] = 0
role['messages'] = 'TECS uninstalling'
if 0 == cmp(status, tecs_state['UNINSTALL_FAILED']):
role['messages'] = 'TECS uninstalled failed'
elif 0 == cmp(status, tecs_state['INIT']):
role['progress'] = 100
@ -103,53 +87,64 @@ def update_progress_to_db(req, role_id_list, status, hosts_list,host_ip=None):
if role:
role['status'] = status
daisy_cmn.update_role(req, role_id, role)
if 0 == cmp(status, tecs_state['INIT']):
daisy_cmn.delete_role_hosts(req, role_id)
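The 0 == cmp(status, ...) tests used throughout these modules are a Python 2 idiom: cmp() returns -1/0/+1 and 0 means the two values are equal, so each check is simply an equality test (cmp() no longer exists in Python 3). For example:

status = 'uninstalling'
if 0 == cmp(status, 'uninstalling'):     # Python 2 idiom used in this file
    print('equal')
if status == 'uninstalling':             # equivalent spelling
    print('equal')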
def _thread_bin(req, host_ip, role_id_list, hosts_list):
# uninstall network-configuration-1.1.1-15.x86_64.rpm
update_progress_to_db(
req, role_id_list, tecs_state['UNINSTALLING'], hosts_list, host_ip)
tecs_cmn.TecsShellExector(host_ip, 'uninstall_rpm')
cmd = 'mkdir -p /var/log/daisy/daisy_uninstall/'
daisy_cmn.subprocess_call(cmd)
password = "ossdbg1"
var_log_path = "/var/log/daisy/daisy_uninstall/\
%s_uninstall_tecs.log" % host_ip
with open(var_log_path, "w+") as fp:
cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password)
daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -b -w %s "rm -rf /home/daisy_uninstall"' % (host_ip,)
daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -w %s "mkdir -p /home/daisy_uninstall"' % (host_ip,)
daisy_cmn.subprocess_call(cmd, fp)
try:
subprocess.check_output(
'clush -S -w %s -c /var/lib/daisy/tecs/ZXTECS*.bin \
--dest=/home/daisy_uninstall' % (
host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
update_progress_to_db(
req, role_id_list, tecs_state[
'UNINSTALL_FAILED'], hosts_list, host_ip)
LOG.error(_("scp TECS bin for %s failed!" % host_ip))
fp.write(e.output.strip())
cmd = 'clush -S -w %s "chmod 777 /home/daisy_uninstall/*"' % (host_ip,)
daisy_cmn.subprocess_call(cmd, fp)
try:
exc_result = subprocess.check_output(
'clush -S -w %s /home/daisy_uninstall/ZXTECS*.bin clean' % (
host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
update_progress_to_db(
req, role_id_list, tecs_state[
'UNINSTALL_FAILED'], hosts_list, host_ip)
LOG.error(_("Uninstall TECS for %s failed!" % host_ip))
fp.write(e.output.strip())
else:
update_progress_to_db(req, role_id_list, tecs_state['INIT'],
hosts_list, host_ip)
LOG.info(_("Uninstall TECS for %s successfully!" % host_ip))
fp.write(exc_result)
# this will be raise raise all the exceptions of the thread to log file # this will be raise raise all the exceptions of the thread to log file
def thread_bin(req, host_ip, role_id_list, hosts_list):
try:
_thread_bin(req, host_ip, role_id_list, hosts_list)
except Exception as e:
LOG.exception(e.message)

View File

@ -17,30 +17,10 @@
/update endpoint for Daisy v1 API
"""
import webob.exc
import subprocess
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden
from threading import Thread, Lock
import threading
import time
from daisy import i18n
from daisy import notifier
from daisy.api import policy
import daisy.api.v1
import daisy.registry.client.v1.api as registry
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
from daisy.api.v1 import controller
from daisy.api.v1 import filters
from daisy.api.backends import os as os_handle
import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.tecs.common as tecs_cmn
@ -53,7 +33,8 @@ _LW = i18n._LW
tecs_state = tecs_cmn.TECS_STATE
def update_progress_to_db(req, role_id_list, status, hosts_list, host_ip=None):
"""
Write update progress and status to db,
to make sure this function is thread safety.
@ -65,40 +46,43 @@ def update_progress_to_db(req,role_id_list,status,hosts_list,host_ip=None):
for role_id in role_id_list:
role_hosts = daisy_cmn.get_hosts_of_role(req, role_id)
for host_id_ip in hosts_list:
host_ip_tmp = host_id_ip.values()[0]
host_id_tmp = host_id_ip.keys()[0]
if host_ip:
for role_host in role_hosts:
if (host_ip_tmp == host_ip and
role_host['host_id'] == host_id_tmp):
role_host_meta = {}
if 0 == cmp(status, tecs_state['UPDATING']):
role_host_meta['progress'] = 10
role_host_meta['messages'] = 'TECS upgrading'
if 0 == cmp(status, tecs_state['UPDATE_FAILED']):
role_host_meta['messages'] = 'TECS upgraded failed'
elif 0 == cmp(status, tecs_state['ACTIVE']):
role_host_meta['progress'] = 100
role_host_meta[
'messages'] = 'TECS upgraded successfully'
if role_host_meta:
role_host_meta['status'] = status
daisy_cmn.update_role_host(req,
role_host['id'],
role_host_meta)
else:
role = {}
if 0 == cmp(status, tecs_state['UPDATING']):
for role_host in role_hosts:
if role_host['status'] == tecs_state['INSTALL_FAILED']:
continue
role_host_meta = {}
role_host_meta['status'] = status
role_host_meta['progress'] = 0
role_host_meta['messages'] = 'TECS upgrading'
daisy_cmn.update_role_host(req,
role_host['id'],
role_host_meta)
role['progress'] = 0
role['messages'] = 'TECS upgrading'
if 0 == cmp(status, tecs_state['UPDATE_FAILED']):
role['messages'] = 'TECS upgraded failed'
elif 0 == cmp(status, tecs_state['ACTIVE']):
role['progress'] = 100
@ -106,46 +90,56 @@ def update_progress_to_db(req,role_id_list,status,hosts_list,host_ip=None):
if role:
role['status'] = status
daisy_cmn.update_role(req, role_id, role)
def thread_bin(req, role_id_list, host_ip, hosts_list):
# update network-configuration-1.1.1-15.x86_64.rpm
update_progress_to_db(
req, role_id_list, tecs_state['UPDATING'], hosts_list, host_ip)
cmd = 'mkdir -p /var/log/daisy/daisy_update/'
daisy_cmn.subprocess_call(cmd)
password = "ossdbg1"
var_log_path = "/var/log/daisy/daisy_update/%s_update_tecs.log" % host_ip
with open(var_log_path, "w+") as fp:
cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password)
daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -w %s "mkdir -p /home/tecs_update/"' % (host_ip,)
daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -b -w %s "rm -rf /home/tecs_update/ZXTECS*.bin"' % (
host_ip,)
daisy_cmn.subprocess_call(cmd, fp)
tecs_cmn.TecsShellExector(host_ip, 'update_rpm')
try:
subprocess.check_output(
'clush -S -w %s -c /var/lib/daisy/tecs/ZXTECS*.bin \
--dest=/home/tecs_update' % (
host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
update_progress_to_db(
req, role_id_list, tecs_state[
'UPDATE_FAILED'], hosts_list, host_ip)
LOG.error(_("scp TECS bin for %s failed!" % host_ip))
fp.write(e.output.strip())
return 1
cmd = 'clush -S -w %s "chmod 777 /home/tecs_update/*"' % (host_ip,)
daisy_cmn.subprocess_call(cmd, fp)
try:
exc_result = subprocess.check_output(
'clush -S -w %s "/home/tecs_update/ZXTECS*.bin upgrade"' % (
host_ip,),
shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
update_progress_to_db(
req, role_id_list, tecs_state[
'UPDATE_FAILED'], hosts_list, host_ip)
LOG.error(_("Update TECS for %s failed!" % host_ip))
fp.write(e.output.strip())
return 2
else:
update_progress_to_db(
req, role_id_list, tecs_state['ACTIVE'], hosts_list, host_ip)
fp.write(exc_result)
return 0

View File

@ -0,0 +1,142 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/install endpoint for tecs API
"""
import daisy.registry.client.v1.api as registry
import daisy.api.backends.common as daisy_cmn
from daisy.common import utils
def _write_role_configs_to_db(req, cluster_id, role_name, configs):
config_meta = {'cluster': cluster_id,
'role': role_name,
'config': configs}
registry.config_interface_metadata(req.context,
config_meta)
def _write_host_configs_to_db(req, host_id, configs):
config_meta = {'host_id': host_id,
'config': configs}
registry.config_interface_metadata(req.context,
config_meta)
def _get_config_item(file, section, key, value, description):
return {'file-name': file,
'section': section,
'key': key,
'value': value,
'description': description}
def _add_configs_for_nova(req, host_detail):
config_file = '/etc/nova/nova.conf'
default_section = 'DEFAULT'
key_name = 'vcpu_pin_set'
key_value = host_detail.get(key_name)
config_items = []
if not key_value:
key_value = host_detail.get('isolcpus')
nova_key_name = key_name
description = 'vcpu pin set for all vm'
item = _get_config_item(config_file,
default_section,
nova_key_name,
key_value,
description)
config_items.append(item)
key_name = 'dvs_high_cpuset'
key_value = host_detail.get(key_name)
nova_key_name = 'dvs_high_cpu_set'
description = 'vcpu pin set for high-performance dvs vm'
item = _get_config_item(config_file,
default_section,
nova_key_name,
key_value,
description)
config_items.append(item)
numa_cpus = utils.get_numa_node_cpus(host_detail.get('cpu', {}))
numa_nodes = utils.get_numa_node_from_cpus(numa_cpus, key_value)
if numa_nodes:
libvirt_section = 'libvirt'
nova_key_name = 'reserved_huge_pages'
# only support one NUMA node for DVS now
key_value = 'node:%s,size:1048576,count:4' % numa_nodes[0]
description = 'reserved huges for DVS service '\
'on high NUMA node'
config_items.append({'file-name': config_file,
'key': nova_key_name,
'section': libvirt_section,
'value': key_value,
'description': description})
key_name = 'pci_high_cpuset'
pci_key_value = host_detail.get(key_name)
nova_key_name = 'vsg_card_cpu_set'
description = 'vcpu pin set for high-performance CLC card vm'
item = _get_config_item(config_file,
default_section,
nova_key_name,
pci_key_value,
description)
config_items.append(item)
if pci_key_value:
nova_key_name = 'default_ephemeral_format'
description = 'config for CLC card'
key_value = 'ext3'
item = _get_config_item(config_file,
default_section,
nova_key_name,
key_value,
description)
config_items.append(item)
nova_key_name = 'pci_passthrough_whitelist'
description = 'config for CLC card'
key_value = '[{"vendor_id": "8086","product_id": "0435"}]'
item = _get_config_item(config_file,
default_section,
nova_key_name,
key_value,
description)
config_items.append(item)
_write_host_configs_to_db(req,
host_detail['id'],
config_items)
def update_configset(req, cluster_id):
roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id)
for role in roles:
# now only computer has configs
if role['name'] != 'COMPUTER':
continue
role_meta = {'config_set_update_progress': 0}
daisy_cmn.update_role(req, role['id'], role_meta)
role_hosts = daisy_cmn.get_hosts_of_role(req, role['id'])
for host in role_hosts:
host_detail = daisy_cmn.get_host_detail(req, host['host_id'])
_add_configs_for_nova(req, host_detail)
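Each helper above ultimately produces config items of the following shape before they are written to the DB through registry.config_interface_metadata(); the values shown are purely illustrative:

config_item_example = {
    'file-name': '/etc/nova/nova.conf',
    'section': 'DEFAULT',
    'key': 'vcpu_pin_set',
    'value': '4-15',
    'description': 'vcpu pin set for all vm',
}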

View File

@ -16,46 +16,21 @@
""" """
/install endpoint for zenic API /install endpoint for zenic API
""" """
import os
import copy
import subprocess
import time import time
import commands
import traceback
import webob.exc
from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden
from webob.exc import HTTPServerError
import threading import threading
from threading import Thread
from daisy import i18n from daisy import i18n
from daisy import notifier
from daisy.api import policy
import daisy.api.v1
from daisy.common import exception from daisy.common import exception
import daisy.registry.client.v1.api as registry
from daisy.api.backends.zenic import config
from daisy.api.backends import driver from daisy.api.backends import driver
from daisy.api.network_api import network as neutron
from ironicclient import client as ironic_client
import daisy.api.backends.os as os_handle
import daisy.api.backends.common as daisy_cmn import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.zenic.common as zenic_cmn import daisy.api.backends.zenic.common as zenic_cmn
import daisy.api.backends.zenic.install as instl import daisy.api.backends.zenic.install as instl
import daisy.api.backends.zenic.uninstall as unstl import daisy.api.backends.zenic.uninstall as unstl
import daisy.api.backends.zenic.upgrade as upgrd import daisy.api.backends.zenic.upgrade as upgrd
try:
import simplejson as json
except ImportError:
import json
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
_ = i18n._ _ = i18n._
@ -65,12 +40,13 @@ _LW = i18n._LW
zenic_state = zenic_cmn.ZENIC_STATE
class API(driver.DeploymentDriver):
def __init__(self):
super(API, self).__init__()
return
def install(self, req, cluster_id):
"""
Install zenic to a cluster.
@ -79,23 +55,26 @@ class API(driver.DeploymentDriver):
cluster_id:cluster id
"""
# instl.pxe_server_build(req, install_meta)
# get hosts config which need to install OS
# hosts_need_os = instl.get_cluster_hosts_config(req, cluster_id)
# if have hosts need to install os, ZENIC installataion executed
# in OSInstallTask
# if hosts_need_os:
# os_install_obj = instl.OSInstallTask(req, cluster_id, hosts_need_os)
# os_install_thread = Thread(target=os_install_obj.run)
# os_install_thread.start()
# else:
LOG.info(
_("No host need to install os, begin install ZENIC for cluster %s."
% cluster_id))
zenic_install_task = instl.ZENICInstallTask(req, cluster_id)
zenic_install_task.start()
LOG.info((_("begin install zenic, please waiting....")))
time.sleep(5)
LOG.info((_("install zenic successfully")))
def uninstall(self, req, cluster_id):
"""
Uninstall ZENIC to a cluster.
@ -105,18 +84,22 @@ class API(driver.DeploymentDriver):
:raises HTTPBadRequest if x-install-cluster is missing
"""
(role_id_list, hosts_list) = zenic_cmn.get_roles_and_hosts_list(
req, cluster_id)
if role_id_list:
if not hosts_list:
msg = _("there is no host in cluster %s") % cluster_id
raise exception.ThreadBinException(msg)
unstl.update_progress_to_db(
req, role_id_list, zenic_state['UNINSTALLING'], 0.0)
uninstall_progress_percentage =\
round(1 * 1.0 / len(hosts_list), 2) * 100
threads = []
for host in hosts_list:
t = threading.Thread(target=unstl.thread_bin, args=(
req, host, role_id_list, uninstall_progress_percentage))
t.setDaemon(True)
t.start()
threads.append(t)
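The per-host progress step used above is simply an equal share of 100% per host; for example:

hosts_list = ['host-1', 'host-2', 'host-3']          # example inventory
uninstall_progress_percentage = round(1 * 1.0 / len(hosts_list), 2) * 100
print(uninstall_progress_percentage)                  # 33.0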
@ -132,16 +115,20 @@ class API(driver.DeploymentDriver):
for role_id in role_id_list:
role = daisy_cmn.get_role_detail(req, role_id)
if role['progress'] == 100:
unstl.update_progress_to_db(
req, role_id_list, zenic_state['UNINSTALL_FAILED'])
uninstall_failed_flag = True
break
if role['status'] == zenic_state['UNINSTALL_FAILED']:
uninstall_failed_flag = True
break
if not uninstall_failed_flag:
LOG.info(
_("all uninstall threads have done,\
set all roles status to 'init'!"))
unstl.update_progress_to_db(
req, role_id_list, zenic_state['INIT'])
LOG.info((_("begin uninstall zenic, please waiting....")))
time.sleep(5)
LOG.info((_("uninstall zenic successfully")))
@ -153,19 +140,22 @@ class API(driver.DeploymentDriver):
:param req: The WSGI/Webob Request object :param req: The WSGI/Webob Request object
:raises HTTPBadRequest if x-install-cluster is missing :raises HTTPBadRequest if x-install-cluster is missing
""" """
(role_id_list, hosts_list) = zenic_cmn.get_roles_and_hosts_list(req, cluster_id) (role_id_list, hosts_list) = zenic_cmn.get_roles_and_hosts_list(
req, cluster_id)
if not hosts_list: if not hosts_list:
msg = _("there is no host in cluster %s") % cluster_id msg = _("there is no host in cluster %s") % cluster_id
raise exception.ThreadBinException(msg) raise exception.ThreadBinException(msg)
upgrd.update_progress_to_db(
req, role_id_list, zenic_state['UPDATING'], 0.0)
update_progress_percentage = round(1 * 1.0 / len(hosts_list), 2) * 100
upgrd.update_progress_to_db(req, role_id_list, zenic_state['UPDATING'], 0.0)
update_progress_percentage = round(1*1.0/len(hosts_list), 2)*100
threads = [] threads = []
for host in hosts_list: for host in hosts_list:
t = threading.Thread(target=upgrd.thread_bin,args=(req,host,role_id_list,update_progress_percentage)) t = threading.Thread(target=upgrd.thread_bin, args=(
req, host, role_id_list, update_progress_percentage))
t.setDaemon(True) t.setDaemon(True)
t.start() t.start()
threads.append(t) threads.append(t)
@ -181,14 +171,16 @@ class API(driver.DeploymentDriver):
for role_id in role_id_list: for role_id in role_id_list:
role = daisy_cmn.get_role_detail(req, role_id) role = daisy_cmn.get_role_detail(req, role_id)
if role['progress'] == 0: if role['progress'] == 0:
upgrd.update_progress_to_db(req, role_id_list, zenic_state['UPDATE_FAILED']) upgrd.update_progress_to_db(
req, role_id_list, zenic_state['UPDATE_FAILED'])
update_failed_flag = True update_failed_flag = True
break break
if role['status'] == zenic_state['UPDATE_FAILED']: if role['status'] == zenic_state['UPDATE_FAILED']:
update_failed_flag = True update_failed_flag = True
break break
if not update_failed_flag: if not update_failed_flag:
LOG.info(_("all update threads have done, set all roles status to 'active'!")) LOG.info(
upgrd.update_progress_to_db(req, role_id_list, zenic_state['ACTIVE']) _("all update threads have done, \
set all roles status to 'active'!"))
upgrd.update_progress_to_db(
req, role_id_list, zenic_state['ACTIVE'])
@ -19,33 +19,16 @@
import os import os
import copy import copy
import subprocess import subprocess
import time
import traceback
import webob.exc
from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
from webob.exc import HTTPBadRequest from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden
from threading import Thread
from daisy import i18n from daisy import i18n
from daisy import notifier
from daisy.api import policy
import daisy.api.v1
from daisy.common import exception from daisy.common import exception
import daisy.registry.client.v1.api as registry import daisy.registry.client.v1.api as registry
import daisy.api.backends.common as daisy_cmn import daisy.api.backends.common as daisy_cmn
try:
import simplejson as json
except ImportError:
import json
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
_ = i18n._ _ = i18n._
_LE = i18n._LE _LE = i18n._LE
@ -54,9 +37,9 @@ _LW = i18n._LW
daisy_zenic_path = '/var/lib/daisy/zenic/' daisy_zenic_path = '/var/lib/daisy/zenic/'
ZENIC_STATE = { ZENIC_STATE = {
'INIT' : 'init', 'INIT': 'init',
'INSTALLING' : 'installing', 'INSTALLING': 'installing',
'ACTIVE' : 'active', 'ACTIVE': 'active',
'INSTALL_FAILED': 'install-failed', 'INSTALL_FAILED': 'install-failed',
'UNINSTALLING': 'uninstalling', 'UNINSTALLING': 'uninstalling',
'UNINSTALL_FAILED': 'uninstall-failed', 'UNINSTALL_FAILED': 'uninstall-failed',
@ -64,6 +47,7 @@ ZENIC_STATE = {
'UPDATE_FAILED': 'update-failed', 'UPDATE_FAILED': 'update-failed',
} }
def get_cluster_hosts(req, cluster_id): def get_cluster_hosts(req, cluster_id):
try: try:
cluster_hosts = registry.get_cluster_hosts(req.context, cluster_id) cluster_hosts = registry.get_cluster_hosts(req.context, cluster_id)
@ -71,13 +55,15 @@ def get_cluster_hosts(req, cluster_id):
raise HTTPBadRequest(explanation=e.msg, request=req) raise HTTPBadRequest(explanation=e.msg, request=req)
return cluster_hosts return cluster_hosts
def get_host_detail(req, host_id): def get_host_detail(req, host_id):
try: try:
host_detail = registry.get_host_metadata(req.context, host_id) host_detail = registry.get_host_metadata(req.context, host_id)
except exception.Invalid as e: except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req) raise HTTPBadRequest(explanation=e.msg, request=req)
return host_detail return host_detail
def get_roles_detail(req): def get_roles_detail(req):
try: try:
roles = registry.get_roles_detail(req.context) roles = registry.get_roles_detail(req.context)
@ -85,13 +71,15 @@ def get_roles_detail(req):
raise HTTPBadRequest(explanation=e.msg, request=req) raise HTTPBadRequest(explanation=e.msg, request=req)
return roles return roles
def get_hosts_of_role(req, role_id): def get_hosts_of_role(req, role_id):
try: try:
hosts = registry.get_role_host_metadata(req.context, role_id) hosts = registry.get_role_host_metadata(req.context, role_id)
except exception.Invalid as e: except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req) raise HTTPBadRequest(explanation=e.msg, request=req)
return hosts return hosts
def get_role_detail(req, role_id): def get_role_detail(req, role_id):
try: try:
role = registry.get_role_metadata(req.context, role_id) role = registry.get_role_metadata(req.context, role_id)
@ -99,17 +87,20 @@ def get_role_detail(req, role_id):
raise HTTPBadRequest(explanation=e.msg, request=req) raise HTTPBadRequest(explanation=e.msg, request=req)
return role return role
def update_role(req, role_id,role_meta):
def update_role(req, role_id, role_meta):
try: try:
registry.update_role_metadata(req.context, role_id, role_meta) registry.update_role_metadata(req.context, role_id, role_meta)
except exception.Invalid as e: except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req) raise HTTPBadRequest(explanation=e.msg, request=req)
def update_role_host(req, role_id, role_host): def update_role_host(req, role_id, role_host):
try: try:
registry.update_role_host_metadata(req.context, role_id, role_host) registry.update_role_host_metadata(req.context, role_id, role_host)
except exception.Invalid as e: except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req) raise HTTPBadRequest(explanation=e.msg, request=req)
def delete_role_hosts(req, role_id): def delete_role_hosts(req, role_id):
try: try:
@ -117,67 +108,81 @@ def delete_role_hosts(req, role_id):
except exception.Invalid as e: except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req) raise HTTPBadRequest(explanation=e.msg, request=req)
def _get_cluster_network(cluster_networks, network_type): def _get_cluster_network(cluster_networks, network_type):
network = [cn for cn in cluster_networks network = [cn for cn in cluster_networks
if cn['name'] in network_type] if cn['name'] in network_type]
if not network or not network[0]: if not network or not network[0]:
msg = "network %s is not exist" % (network_type) msg = "network %s is not exist" % (network_type)
raise exception.InvalidNetworkConfig(msg) raise exception.InvalidNetworkConfig(msg)
else: else:
return network[0] return network[0]
def get_host_interface_by_network(host_detail, network_type): def get_host_interface_by_network(host_detail, network_type):
host_detail_info = copy.deepcopy(host_detail) host_detail_info = copy.deepcopy(host_detail)
interface_list = [hi for hi in host_detail_info['interfaces'] interface_list = [hi for hi in host_detail_info['interfaces']
for assigned_network in hi['assigned_networks'] for assigned_network in hi['assigned_networks']
if assigned_network and network_type == assigned_network['name']] if assigned_network and
network_type == assigned_network['name']]
interface = {} interface = {}
if interface_list: if interface_list:
interface = interface_list[0] interface = interface_list[0]
if not interface: if not interface:
msg = "network %s of host %s is not exist" % (network_type, host_detail_info['id']) msg = "network %s of host %s is not exist" % (
network_type, host_detail_info['id'])
raise exception.InvalidNetworkConfig(msg) raise exception.InvalidNetworkConfig(msg)
return interface return interface
def get_host_network_ip(req, host_detail, cluster_networks, network_type): def get_host_network_ip(req, host_detail, cluster_networks, network_type):
interface_network_ip = '' interface_network_ip = ''
host_interface = get_host_interface_by_network(host_detail, network_type) host_interface = get_host_interface_by_network(host_detail, network_type)
if host_interface: if host_interface:
network = _get_cluster_network(cluster_networks, network_type) network = _get_cluster_network(cluster_networks, network_type)
assigned_network = daisy_cmn.get_assigned_network(req, assigned_network = daisy_cmn.get_assigned_network(req,
host_interface['id'], host_interface['id'],
network['id']) network['id'])
interface_network_ip = assigned_network['ip'] interface_network_ip = assigned_network['ip']
if not interface_network_ip: if not interface_network_ip:
msg = "%s network ip of host %s can't be empty" % (network_type, host_detail['id']) msg = "%s network ip of host %s can't be empty" % (
network_type, host_detail['id'])
raise exception.InvalidNetworkConfig(msg) raise exception.InvalidNetworkConfig(msg)
return interface_network_ip return interface_network_ip
def get_deploy_node_cfg(req, host_detail, cluster_networks):
host_deploy_network = get_host_interface_by_network(host_detail, 'DEPLOYMENT') def get_deploy_node_cfg(req, host_detail, cluster_networks):
host_deploy_ip = get_host_network_ip(req, host_detail, cluster_networks, 'DEPLOYMENT') host_deploy_network = get_host_interface_by_network(
host_detail, 'DEPLOYMENT')
host_deploy_ip = get_host_network_ip(
req, host_detail, cluster_networks, 'DEPLOYMENT')
if not host_deploy_ip: if not host_deploy_ip:
msg = "deployment ip of host %s can't be empty" % host_detail['id'] msg = "deployment ip of host %s can't be empty" % host_detail['id']
raise exception.InvalidNetworkConfig(msg) raise exception.InvalidNetworkConfig(msg)
host_deploy_macname = host_deploy_network['name'] host_deploy_macname = host_deploy_network['name']
if not host_deploy_macname: if not host_deploy_macname:
msg = "deployment macname of host %s can't be empty" % host_detail['id'] msg = "deployment macname of host %s can't be empty" % host_detail[
'id']
raise exception.InvalidNetworkConfig(msg) raise exception.InvalidNetworkConfig(msg)
host_mgt_ip = get_host_network_ip(req, host_detail, cluster_networks, 'MANAGEMENT') host_mgt_ip = get_host_network_ip(
req, host_detail, cluster_networks, 'MANAGEMENT')
if not host_mgt_ip: if not host_mgt_ip:
msg = "management ip of host %s can't be empty" % host_detail['id'] msg = "management ip of host %s can't be empty" % host_detail['id']
raise exception.InvalidNetworkConfig(msg) raise exception.InvalidNetworkConfig(msg)
memmode = 'tiny' memmode = 'tiny'
host_memory = 0 host_memory = 0
if host_detail.has_key('memory'): # if host_detail.has_key('memory'):
host_memory = (int(host_detail['memory']['total'].strip().split()[0]))/(1024*1024) if 'memory' in host_detail:
host_memory = (
int(host_detail['memory'][
'total'].strip().split()[0])) / (1024 * 1024)
if host_memory < 8: if host_memory < 8:
memmode = 'tiny' memmode = 'tiny'
elif host_memory < 16: elif host_memory < 16:
@ -186,24 +191,24 @@ def get_deploy_node_cfg(req, host_detail, cluster_networks):
memmode = 'medium' memmode = 'medium'
else: else:
memmode = 'large' memmode = 'large'
deploy_node_cfg = {} deploy_node_cfg = {}
deploy_node_cfg.update({'hostid':host_detail['id']}) deploy_node_cfg.update({'hostid': host_detail['id']})
deploy_node_cfg.update({'hostname':host_detail['name']}) deploy_node_cfg.update({'hostname': host_detail['name']})
deploy_node_cfg.update({'nodeip':host_deploy_ip}) deploy_node_cfg.update({'nodeip': host_deploy_ip})
deploy_node_cfg.update({'MacName':host_deploy_macname}) deploy_node_cfg.update({'MacName': host_deploy_macname})
deploy_node_cfg.update({'memmode':memmode}) deploy_node_cfg.update({'memmode': memmode})
deploy_node_cfg.update({'mgtip':host_mgt_ip}) deploy_node_cfg.update({'mgtip': host_mgt_ip})
return deploy_node_cfg return deploy_node_cfg
def get_roles_and_hosts_list(req, cluster_id):
def get_roles_and_hosts_list(req, cluster_id):
roles_id_list = set() roles_id_list = set()
hosts_id_list = set() hosts_id_list = set()
hosts_list = [] hosts_list = []
cluster_networks = daisy_cmn.get_cluster_networks_detail(req, cluster_id) cluster_networks = daisy_cmn.get_cluster_networks_detail(req, cluster_id)
roles = daisy_cmn.get_cluster_roles_detail(req,cluster_id) roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id)
for role in roles: for role in roles:
if role['deployment_backend'] != daisy_cmn.zenic_backend_name: if role['deployment_backend'] != daisy_cmn.zenic_backend_name:
continue continue
@ -212,56 +217,62 @@ def get_roles_and_hosts_list(req, cluster_id):
for role_host in role_hosts: for role_host in role_hosts:
if role_host['host_id'] not in hosts_id_list: if role_host['host_id'] not in hosts_id_list:
host = daisy_cmn.get_host_detail(req, role_host['host_id']) host = daisy_cmn.get_host_detail(req, role_host['host_id'])
host_ip = get_host_network_ip(req, host, cluster_networks, 'MANAGEMENT') host_ip = get_host_network_ip(
req, host, cluster_networks, 'MANAGEMENT')
hosts_id_list.add(host['id']) hosts_id_list.add(host['id'])
host_cfg = {} host_cfg = {}
host_cfg['mgtip'] = host_ip host_cfg['mgtip'] = host_ip
host_cfg['rootpwd'] = host['root_pwd'] host_cfg['rootpwd'] = host['root_pwd']
hosts_list.append(host_cfg) hosts_list.append(host_cfg)
roles_id_list.add(role['id']) roles_id_list.add(role['id'])
return (roles_id_list, hosts_list) return (roles_id_list, hosts_list)
def check_and_get_zenic_version(daisy_zenic_pkg_path): def check_and_get_zenic_version(daisy_zenic_pkg_path):
zenic_version_pkg_file = "" zenic_version_pkg_file = ""
zenic_version_pkg_name = "" zenic_version_pkg_name = ""
get_zenic_version_pkg = "ls %s| grep ^ZENIC.*\.zip$" % daisy_zenic_pkg_path get_zenic_version_pkg = "ls %s| grep ^ZENIC.*\.zip$" % daisy_zenic_pkg_path
obj = subprocess.Popen(get_zenic_version_pkg, obj = subprocess.Popen(get_zenic_version_pkg,
shell=True, shell=True,
stdout=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE) stderr=subprocess.PIPE)
(stdoutput, erroutput) = obj.communicate() (stdoutput, erroutput) = obj.communicate()
if stdoutput: if stdoutput:
zenic_version_pkg_name = stdoutput.split('\n')[0] zenic_version_pkg_name = stdoutput.split('\n')[0]
zenic_version_pkg_file = daisy_zenic_pkg_path + zenic_version_pkg_name zenic_version_pkg_file = daisy_zenic_pkg_path + zenic_version_pkg_name
chmod_for_zenic_version = 'chmod +x %s' % zenic_version_pkg_file chmod_for_zenic_version = 'chmod +x %s' % zenic_version_pkg_file
daisy_cmn.subprocess_call(chmod_for_zenic_version) daisy_cmn.subprocess_call(chmod_for_zenic_version)
return (zenic_version_pkg_file,zenic_version_pkg_name) return (zenic_version_pkg_file, zenic_version_pkg_name)
class ZenicShellExector(): class ZenicShellExector():
""" """
Class for config tasks run before installing the zenic bin. Class for config tasks run before installing the zenic bin.
""" """
def __init__(self, mgt_ip, task_type, params={}):
def __init__(self, mgt_ip, task_type, params={}):
self.task_type = task_type self.task_type = task_type
self.mgt_ip = mgt_ip self.mgt_ip = mgt_ip
self.params = params self.params = params
self.clush_cmd = "" self.clush_cmd = ""
self.PKG_NAME = self.params['pkg_name'] self.PKG_NAME = self.params['pkg_name']
self.PKG_PATH = daisy_zenic_path + self.PKG_NAME self.PKG_PATH = daisy_zenic_path + self.PKG_NAME
self.CFG_PATH =daisy_zenic_path + mgt_ip + "_zenic.conf" self.CFG_PATH = daisy_zenic_path + mgt_ip + "_zenic.conf"
self.oper_type = { self.oper_type = {
'install' : self._install_pkg 'install': self._install_pkg
} }
self.oper_shell = { self.oper_shell = {
'CMD_SSHPASS_PRE' : "sshpass -p ossdbg1 %(ssh_ip)s %(cmd)s", 'CMD_SSHPASS_PRE': "sshpass -p ossdbg1 %(ssh_ip)s %(cmd)s",
'CMD_CFG_SCP' : "scp %(path)s root@%(ssh_ip)s:/etc/zenic/config" % 'CMD_CFG_SCP': "scp %(path)s root@%(ssh_ip)s:/etc/zenic/config" %
{'path': self.CFG_PATH, 'ssh_ip':mgt_ip}, {'path': self.CFG_PATH, 'ssh_ip': mgt_ip},
'CMD_PKG_UNZIP' : "unzip /home/workspace/%(pkg_name)s -d /home/workspace/PKG" % {'pkg_name':self.PKG_NAME}, 'CMD_PKG_UNZIP': "unzip /home/workspace/%(pkg_name)s \
'CMD_PKG_SCP' : "scp %(path)s root@%(ssh_ip)s:/home/workspace/" % -d /home/workspace/PKG" % {'pkg_name': self.PKG_NAME},
{'path': self.PKG_PATH, 'ssh_ip':mgt_ip} 'CMD_PKG_SCP': "scp %(path)s root@%(ssh_ip)s:/home/workspace/" %
{'path': self.PKG_PATH, 'ssh_ip': mgt_ip}
} }
self._execute() self._execute()
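The command templates above are chained into a single shell invocation (see the hunk just below): copy the package, copy the generated config, then unzip remotely. A rough standalone sketch of that chain, not taken from this commit; the address, password and paths are placeholders, and sshpass/scp are assumed to be available on the daisy node:

import subprocess

def push_and_unpack(mgt_ip, pkg_path, cfg_path, password="ossdbg1"):
    # copy the package and the generated config, then unzip on the target
    cmds = [
        "sshpass -p %s scp %s root@%s:/home/workspace/" % (password, pkg_path, mgt_ip),
        "sshpass -p %s scp %s root@%s:/etc/zenic/config" % (password, cfg_path, mgt_ip),
        "sshpass -p %s ssh root@%s 'unzip /home/workspace/%s -d /home/workspace/PKG'"
        % (password, mgt_ip, pkg_path.split('/')[-1]),
    ]
    clush_cmd = ";".join(cmds)   # the three steps run as one shell command
    return subprocess.check_output(clush_cmd, shell=True,
                                   stderr=subprocess.STDOUT)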
@ -270,31 +281,39 @@ class ZenicShellExector():
if not os.path.exists(self.CFG_PATH): if not os.path.exists(self.CFG_PATH):
LOG.error(_("<<<CFG %s not exist>>>" % self.CFG_PATH)) LOG.error(_("<<<CFG %s not exist>>>" % self.CFG_PATH))
return return
if not os.path.exists(self.PKG_PATH): if not os.path.exists(self.PKG_PATH):
LOG.error(_("<<<PKG %s not exist>>>" % self.PKG_PATH)) LOG.error(_("<<<PKG %s not exist>>>" % self.PKG_PATH))
return return
self.clush_cmd = "%s;%s;%s" % \
(self.oper_shell['CMD_SSHPASS_PRE'] %
{"ssh_ip":"", "cmd":self.oper_shell['CMD_PKG_SCP']}, \
self.oper_shell['CMD_SSHPASS_PRE'] %
{"ssh_ip":"", "cmd":self.oper_shell['CMD_CFG_SCP']}, \
self.oper_shell['CMD_SSHPASS_PRE'] %
{"ssh_ip":"ssh " + self.mgt_ip, "cmd":self.oper_shell['CMD_PKG_UNZIP']})
subprocess.check_output(self.clush_cmd, shell = True, stderr=subprocess.STDOUT) self.clush_cmd = "%s;%s;%s" % \
(self.oper_shell['CMD_SSHPASS_PRE'] %
{"ssh_ip": "", "cmd": self.oper_shell['CMD_PKG_SCP']},
self.oper_shell['CMD_SSHPASS_PRE'] %
{"ssh_ip": "", "cmd": self.oper_shell['CMD_CFG_SCP']},
self.oper_shell['CMD_SSHPASS_PRE'] %
{"ssh_ip": "ssh " + self.mgt_ip, "cmd": self.oper_shell[
'CMD_PKG_UNZIP']})
subprocess.check_output(
self.clush_cmd, shell=True, stderr=subprocess.STDOUT)
def _execute(self): def _execute(self):
try: try:
if not self.task_type or not self.mgt_ip : if not self.task_type or not self.mgt_ip:
LOG.error(_("<<<ZenicShellExector::execute, input params invalid!>>>")) LOG.error(
_("<<<ZenicShellExector::execute, \
input params invalid!>>>"))
return return
self.oper_type[self.task_type]() self.oper_type[self.task_type]()
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
LOG.warn(_("<<<ZenicShellExector::execute:Execute command failed! Reason:%s>>>" % e.output.strip())) LOG.warn(
_("<<<ZenicShellExector::execute:Execute command failed! Reason\
:%s>>>" % e.output.strip()))
except Exception as e: except Exception as e:
LOG.exception(_(e.message)) LOG.exception(_(e.message))
else: else:
LOG.info(_("<<<ZenicShellExector::execute:Execute command:%s,successful!>>>" % self.clush_cmd)) LOG.info(
_("<<<ZenicShellExector::execute:Execute command:\
%s,successful!>>>" % self.clush_cmd))
@ -1,62 +1,59 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
import os import os
import re
import commands
import types
import subprocess
from ConfigParser import ConfigParser from ConfigParser import ConfigParser
from daisy.common import exception
default_zenic_conf_template_path = "/var/lib/daisy/zenic/" default_zenic_conf_template_path = "/var/lib/daisy/zenic/"
zenic_conf_template_path = default_zenic_conf_template_path zenic_conf_template_path = default_zenic_conf_template_path
def update_conf(zenic, key, value): def update_conf(zenic, key, value):
zenic.set("general", key, value) zenic.set("general", key, value)
def get_conf(zenic_conf_file, **kwargs): def get_conf(zenic_conf_file, **kwargs):
result = {} result = {}
if not kwargs: if not kwargs:
return result return result
zenic = ConfigParser() zenic = ConfigParser()
zenic.optionxform = str zenic.optionxform = str
zenic.read(zenic_conf_file) zenic.read(zenic_conf_file)
result = {key : zenic.get("general", kwargs.get(key, None)) result = {key: zenic.get("general", kwargs.get(key, None))
for key in kwargs.keys() for key in kwargs.keys()
if zenic.has_option("general", kwargs.get(key, None))} if zenic.has_option("general", kwargs.get(key, None))}
return result return result
def get_nodeid(deploy_ip,zbp_ips):
def get_nodeid(deploy_ip, zbp_ips):
nodeid = 0 nodeid = 0
i = 0 i = 0
for ip in zbp_ips: for ip in zbp_ips:
if deploy_ip == ip: if deploy_ip == ip:
break break
else: else:
i=i+1 i = i + 1
if i == 0: if i == 0:
nodeid = 1 nodeid = 1
elif i == 1: elif i == 1:
nodeid = 256 nodeid = 256
else: else:
nodeid = i nodeid = i
return nodeid return nodeid
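To make the convention concrete: the first ZBP address maps to node 1, the second to node 256, and any later address keeps its positional index. A small usage example run against the get_nodeid above (addresses invented):

zbp_ips = ['192.168.0.11', '192.168.0.12', '192.168.0.13']
assert get_nodeid('192.168.0.11', zbp_ips) == 1
assert get_nodeid('192.168.0.12', zbp_ips) == 256
assert get_nodeid('192.168.0.13', zbp_ips) == 2   # third and later keep their index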
def update_zenic_conf(config_data, cluster_conf_path): def update_zenic_conf(config_data, cluster_conf_path):
print "zenic config data is:" print "zenic config data is:"
import pprint import pprint
pprint.pprint(config_data) pprint.pprint(config_data)
daisy_zenic_path = zenic_conf_template_path daisy_zenic_path = zenic_conf_template_path
zenic_conf_template_file = os.path.join(daisy_zenic_path, "zenic.conf") zenic_conf_template_file = os.path.join(daisy_zenic_path, "zenic.conf")
if not os.path.exists(cluster_conf_path): if not os.path.exists(cluster_conf_path):
os.makedirs(cluster_conf_path) os.makedirs(cluster_conf_path)
zenic = ConfigParser() zenic = ConfigParser()
zenic.optionxform = str zenic.optionxform = str
@ -67,15 +64,15 @@ def update_zenic_conf(config_data, cluster_conf_path):
if not zbpips: if not zbpips:
zbpips = ip zbpips = ip
else: else:
zbpips = zbpips + ',' + ip zbpips = zbpips + ',' + ip
update_conf(zenic, 'zbpips', zbpips) update_conf(zenic, 'zbpips', zbpips)
update_conf(zenic, 'zbp_node_num', config_data['zbp_node_num']) update_conf(zenic, 'zbp_node_num', config_data['zbp_node_num'])
nodelist = '1,256' nodelist = '1,256'
if len(config_data['zbp_ips']) > 2: if len(config_data['zbp_ips']) > 2:
for i in range(2,len(config_data['zbp_ips'])): for i in range(2, len(config_data['zbp_ips'])):
nodelist = nodelist + ',' + str(i) nodelist = nodelist + ',' + str(i)
update_conf(zenic, 'zbpnodelist',nodelist) update_conf(zenic, 'zbpnodelist', nodelist)
zampips = '' zampips = ''
for ip in config_data['zamp_ips']: for ip in config_data['zamp_ips']:
if not zampips: if not zampips:
@ -84,52 +81,50 @@ def update_zenic_conf(config_data, cluster_conf_path):
zampips = zampips + ',' + ip zampips = zampips + ',' + ip
update_conf(zenic, 'zampips', zampips) update_conf(zenic, 'zampips', zampips)
update_conf(zenic, 'zamp_node_num', config_data['zamp_node_num']) update_conf(zenic, 'zamp_node_num', config_data['zamp_node_num'])
mongodbips = '' mongodbips = ''
for ip in config_data['mongodb_ips']: for ip in config_data['mongodb_ips']:
if not mongodbips: if not mongodbips:
mongodbips = ip mongodbips = ip
else: else:
mongodbips = mongodbips + ',' + ip mongodbips = mongodbips + ',' + ip
update_conf(zenic, 'mongodbips', mongodbips) update_conf(zenic, 'mongodbips', mongodbips)
update_conf(zenic, 'mongodb_node_num', config_data['mongodb_node_num']) update_conf(zenic, 'mongodb_node_num', config_data['mongodb_node_num'])
update_conf(zenic, 'zamp_vip', config_data['zamp_vip']) update_conf(zenic, 'zamp_vip', config_data['zamp_vip'])
update_conf(zenic, 'mongodb_vip', config_data['mongodb_vip']) update_conf(zenic, 'mongodb_vip', config_data['mongodb_vip'])
deploy_hosts = config_data['deploy_hosts'] deploy_hosts = config_data['deploy_hosts']
for deploy_host in deploy_hosts: for deploy_host in deploy_hosts:
nodeip = deploy_host['nodeip'] nodeip = deploy_host['nodeip']
hostname = deploy_host['hostname'] hostname = deploy_host['hostname']
MacName = deploy_host['MacName'] MacName = deploy_host['MacName']
memmode = deploy_host['memmode'] memmode = deploy_host['memmode']
update_conf(zenic,'nodeip',nodeip) update_conf(zenic, 'nodeip', nodeip)
update_conf(zenic,'hostname',hostname) update_conf(zenic, 'hostname', hostname)
update_conf(zenic,'MacName',MacName) update_conf(zenic, 'MacName', MacName)
update_conf(zenic,'memmode',memmode) update_conf(zenic, 'memmode', memmode)
nodeid = get_nodeid(nodeip,config_data['zbp_ips']) nodeid = get_nodeid(nodeip, config_data['zbp_ips'])
update_conf(zenic,'nodeid',nodeid) update_conf(zenic, 'nodeid', nodeid)
if nodeip in config_data['zamp_ips']: if nodeip in config_data['zamp_ips']:
update_conf(zenic,'needzamp','y') update_conf(zenic, 'needzamp', 'y')
else: else:
update_conf(zenic,'needzamp','n') update_conf(zenic, 'needzamp', 'n')
zenic_conf = "%s_zenic.conf" % deploy_host['mgtip'] zenic_conf = "%s_zenic.conf" % deploy_host['mgtip']
zenic_conf_cluster_out = os.path.join(cluster_conf_path, zenic_conf) zenic_conf_cluster_out = os.path.join(cluster_conf_path, zenic_conf)
zenic_conf_out = os.path.join(daisy_zenic_path, zenic_conf) zenic_conf_out = os.path.join(daisy_zenic_path, zenic_conf)
zenic.write(open(zenic_conf_cluster_out, "w+")) zenic.write(open(zenic_conf_cluster_out, "w+"))
with open(zenic_conf_cluster_out,'r') as fr,open(zenic_conf_out,'w') as fw: with open(zenic_conf_cluster_out, 'r') as fr,\
open(zenic_conf_out, 'w') as fw:
for line in fr.readlines(): for line in fr.readlines():
fw.write(line.replace(' ', '')) fw.write(line.replace(' ', ''))
return return
def test(): def test():
print("Hello, world!") print("Hello, world!")

View File

@ -16,43 +16,23 @@
""" """
/install endpoint for zenic API /install endpoint for zenic API
""" """
import os
import copy
import subprocess import subprocess
import time import time
import traceback
import webob.exc
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden
from webob.exc import HTTPServerError
from threading import Thread, Lock
import threading import threading
from daisy import i18n from daisy import i18n
from daisy import notifier
from daisy.api import policy
import daisy.api.v1 import daisy.api.v1
from daisy.common import exception from daisy.common import exception
import daisy.registry.client.v1.api as registry
from daisy.api.backends.zenic import config from daisy.api.backends.zenic import config
from daisy.api.backends import driver
from daisy.api.network_api import network as neutron
from ironicclient import client as ironic_client
import daisy.api.backends.common as daisy_cmn import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.zenic.common as zenic_cmn import daisy.api.backends.zenic.common as zenic_cmn
try:
import simplejson as json
except ImportError:
import json
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
_ = i18n._ _ = i18n._
_LE = i18n._LE _LE = i18n._LE
@ -76,21 +56,24 @@ CONF.import_opt('image_property_quota', 'daisy.common.config')
host_os_status = { host_os_status = {
'INIT' : 'init', 'INIT': 'init',
'INSTALLING' : 'installing', 'INSTALLING': 'installing',
'ACTIVE' : 'active', 'ACTIVE': 'active',
'FAILED': 'install-failed' 'FAILED': 'install-failed'
} }
zenic_state = zenic_cmn.ZENIC_STATE zenic_state = zenic_cmn.ZENIC_STATE
daisy_zenic_path = zenic_cmn.daisy_zenic_path daisy_zenic_path = zenic_cmn.daisy_zenic_path
install_zenic_progress=0.0 install_zenic_progress = 0.0
install_mutex = threading.Lock() install_mutex = threading.Lock()
def update_progress_to_db(req, role_id_list, status, progress_percentage_step=0.0):
def update_progress_to_db(req, role_id_list,
status, progress_percentage_step=0.0):
""" """
Write install progress and status to db, we use global lock object 'install_mutex' Write install progress and status to db,
we use global lock object 'install_mutex'
to make sure this function is thread safe. to make sure this function is thread safe.
:param req: http req. :param req: http req.
:param role_id_list: Column that needs to be updated in role table. :param role_id_list: Column that needs to be updated in role table.
@ -107,7 +90,7 @@ def update_progress_to_db(req, role_id_list, status, progress_percentage_step=0.
if 0 == cmp(status, zenic_state['INSTALLING']): if 0 == cmp(status, zenic_state['INSTALLING']):
role['status'] = status role['status'] = status
role['progress'] = install_zenic_progress role['progress'] = install_zenic_progress
if 0 == cmp(status, zenic_state['INSTALL_FAILED']): if 0 == cmp(status, zenic_state['INSTALL_FAILED']):
role['status'] = status role['status'] = status
elif 0 == cmp(status, zenic_state['ACTIVE']): elif 0 == cmp(status, zenic_state['ACTIVE']):
role['status'] = status role['status'] = status
@ -115,21 +98,26 @@ def update_progress_to_db(req, role_id_list, status, progress_percentage_step=0.
daisy_cmn.update_role(req, role_id, role) daisy_cmn.update_role(req, role_id, role)
install_mutex.release() install_mutex.release()
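The acquire/update/release sequence guarding install_zenic_progress can also be written with the lock as a context manager, which releases it even when an exception escapes. A minimal sketch, not part of this commit, with placeholder names:

import threading

progress_mutex = threading.Lock()
shared_progress = 0.0

def add_progress(step):
    global shared_progress
    with progress_mutex:        # equivalent to acquire() ... release()
        shared_progress += step
        return shared_progress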
def _ping_hosts_test(ips): def _ping_hosts_test(ips):
ping_cmd = 'fping' ping_cmd = 'fping'
for ip in set(ips): for ip in set(ips):
ping_cmd = ping_cmd + ' ' + ip ping_cmd = ping_cmd + ' ' + ip
obj = subprocess.Popen(ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) obj = subprocess.Popen(
ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdoutput, erroutput) = obj.communicate() (stdoutput, erroutput) = obj.communicate()
_returncode = obj.returncode _returncode = obj.returncode
if _returncode == 0 or _returncode == 1: if _returncode == 0 or _returncode == 1:
ping_result = stdoutput.split('\n') ping_result = stdoutput.split('\n')
unreachable_hosts = [result.split()[0] for result in ping_result if result and result.split()[2] != 'alive'] unreachable_hosts = [result.split(
)[0] for result in ping_result if result and
result.split()[2] != 'alive']
else: else:
msg = "ping failed beaceuse there is invlid ip in %s" % ips msg = "ping failed beaceuse there is invlid ip in %s" % ips
raise exception.InvalidIP(msg) raise exception.InvalidIP(msg)
return unreachable_hosts return unreachable_hosts
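The parse above assumes fping output of the form "<ip> is alive" / "<ip> is unreachable", one line per host, which is what the split()[2] != 'alive' test relies on. A self-contained example of that parse on invented sample output:

sample_output = (
    "192.168.1.10 is alive\n"
    "192.168.1.11 is unreachable\n"
    "192.168.1.12 is alive\n"
)
unreachable = [line.split()[0]
               for line in sample_output.split('\n')
               if line and line.split()[2] != 'alive']
assert unreachable == ['192.168.1.11']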
def _check_ping_hosts(ping_ips, max_ping_times): def _check_ping_hosts(ping_ips, max_ping_times):
if not ping_ips: if not ping_ips:
LOG.info(_("no ip got for ping test")) LOG.info(_("no ip got for ping test"))
@ -145,9 +133,11 @@ def _check_ping_hosts(ping_ips, max_ping_times):
ping_count += 1 ping_count += 1
if ips: if ips:
LOG.debug(_("ping host %s for %s times" % (','.join(ips), ping_count))) LOG.debug(
_("ping host %s for %s times" % (','.join(ips), ping_count)))
if ping_count >= max_ping_times: if ping_count >= max_ping_times:
LOG.info(_("ping host %s timeout for %ss" % (','.join(ips), ping_count*time_step))) LOG.info(_("ping host %s timeout for %ss" %
(','.join(ips), ping_count * time_step)))
return ips return ips
time.sleep(time_step) time.sleep(time_step)
else: else:
@ -155,13 +145,15 @@ def _check_ping_hosts(ping_ips, max_ping_times):
time.sleep(120) time.sleep(120)
LOG.info(_("120s after ping host %s success" % ','.join(ping_ips))) LOG.info(_("120s after ping host %s success" % ','.join(ping_ips)))
return ips return ips
def _get_host_private_networks(host_detail, cluster_private_networks_name): def _get_host_private_networks(host_detail, cluster_private_networks_name):
host_private_networks = [hi for pn in cluster_private_networks_name host_private_networks = [hi for pn in cluster_private_networks_name
for hi in host_detail['interfaces'] if pn in hi['assigned_networks']] for hi in
host_detail['interfaces'] if pn in
# If port type is bond,use pci segment of member port replace pci1 & pci2 segments of bond port hi['assigned_networks']]
# If port type is bond,use pci segment of member port replace pci1 & pci2
# segments of bond port
for interface_outer in host_private_networks: for interface_outer in host_private_networks:
if 0 != cmp(interface_outer.get('type', None), "bond"): if 0 != cmp(interface_outer.get('type', None), "bond"):
continue continue
@ -180,38 +172,41 @@ def _get_host_private_networks(host_detail, cluster_private_networks_name):
def get_cluster_zenic_config(req, cluster_id): def get_cluster_zenic_config(req, cluster_id):
LOG.info(_("get zenic config from database...")) LOG.info(_("get zenic config from database..."))
params = dict(limit=1000000) # params = dict(limit=1000000)
zenic_config = {} zenic_config = {}
deploy_hosts = [] deploy_hosts = []
deploy_host_cfg = {} deploy_host_cfg = {}
mgt_ip = '' mgt_ip = ''
zbp_ip_list = set() zbp_ip_list = set()
mgt_ip_list = set() mgt_ip_list = set()
zamp_ip_list = set() zamp_ip_list = set()
zamp_vip = '' zamp_vip = ''
mongodb_ip_list = set() mongodb_ip_list = set()
mongodb_vip= '' mongodb_vip = ''
cluster_networks = daisy_cmn.get_cluster_networks_detail(req, cluster_id) cluster_networks = daisy_cmn.get_cluster_networks_detail(req, cluster_id)
all_roles = zenic_cmn.get_roles_detail(req) all_roles = zenic_cmn.get_roles_detail(req)
roles = [role for role in all_roles if (role['cluster_id'] == cluster_id and role['deployment_backend'] == daisy_cmn.zenic_backend_name)] roles = [role for role in all_roles if (role['cluster_id'] ==
for role in roles: cluster_id and role[
'deployment_backend'] ==
daisy_cmn.zenic_backend_name)]
for role in roles:
if not (role['name'] == 'ZENIC_CTL' or role['name'] == 'ZENIC_NFM'): if not (role['name'] == 'ZENIC_CTL' or role['name'] == 'ZENIC_NFM'):
continue continue
if role['name'] == 'ZENIC_NFM': if role['name'] == 'ZENIC_NFM':
if not zamp_vip: if not zamp_vip:
zamp_vip = role['vip'] zamp_vip = role['vip']
if not mongodb_vip: if not mongodb_vip:
mongodb_vip = role['mongodb_vip'] mongodb_vip = role['mongodb_vip']
role_hosts = zenic_cmn.get_hosts_of_role(req, role['id']) role_hosts = zenic_cmn.get_hosts_of_role(req, role['id'])
for role_host in role_hosts: for role_host in role_hosts:
mgt_ip = '' mgt_ip = ''
for deploy_host in deploy_hosts: for deploy_host in deploy_hosts:
@ -220,139 +215,157 @@ def get_cluster_zenic_config(req, cluster_id):
deploy_ip = deploy_host['nodeip'] deploy_ip = deploy_host['nodeip']
break break
if not mgt_ip: if not mgt_ip:
host_detail = zenic_cmn.get_host_detail(req, role_host['host_id']) host_detail = zenic_cmn.get_host_detail(
deploy_host_cfg = zenic_cmn.get_deploy_node_cfg(req, host_detail, cluster_networks) req, role_host['host_id'])
deploy_host_cfg = zenic_cmn.get_deploy_node_cfg(
req, host_detail, cluster_networks)
deploy_hosts.append(deploy_host_cfg) deploy_hosts.append(deploy_host_cfg)
mgt_ip = deploy_host_cfg['mgtip'] mgt_ip = deploy_host_cfg['mgtip']
deploy_ip = deploy_host_cfg['nodeip'] deploy_ip = deploy_host_cfg['nodeip']
mgt_ip_list.add(mgt_ip) mgt_ip_list.add(mgt_ip)
if role['name'] == 'ZENIC_CTL': if role['name'] == 'ZENIC_CTL':
zbp_ip_list.add(deploy_ip) zbp_ip_list.add(deploy_ip)
elif role['name'] == 'ZENIC_NFM': elif role['name'] == 'ZENIC_NFM':
zamp_ip_list.add(deploy_ip) zamp_ip_list.add(deploy_ip)
mongodb_ip_list.add(deploy_ip) mongodb_ip_list.add(deploy_ip)
else: else:
LOG.warn(_("<<<Zenic Install role %s is invalid >>>" % role['name'])) LOG.warn(
_("<<<Zenic Install role %s is invalid >>>"
% role['name']))
zenic_config.update({'deploy_hosts':deploy_hosts}) zenic_config.update({'deploy_hosts': deploy_hosts})
zenic_config.update({'zbp_ips':zbp_ip_list}) zenic_config.update({'zbp_ips': zbp_ip_list})
zenic_config.update({'zbp_node_num':len(zbp_ip_list)}) zenic_config.update({'zbp_node_num': len(zbp_ip_list)})
zenic_config.update({'zamp_ips':zamp_ip_list}) zenic_config.update({'zamp_ips': zamp_ip_list})
zenic_config.update({'zamp_node_num':len(zamp_ip_list)}) zenic_config.update({'zamp_node_num': len(zamp_ip_list)})
zenic_config.update({'mongodb_ips':mongodb_ip_list}) zenic_config.update({'mongodb_ips': mongodb_ip_list})
zenic_config.update({'mongodb_node_num':len(mongodb_ip_list)}) zenic_config.update({'mongodb_node_num': len(mongodb_ip_list)})
zenic_config.update({'zamp_vip':zamp_vip}) zenic_config.update({'zamp_vip': zamp_vip})
zenic_config.update({'mongodb_vip':mongodb_vip}) zenic_config.update({'mongodb_vip': mongodb_vip})
return (zenic_config, mgt_ip_list) return (zenic_config, mgt_ip_list)
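For reference, the dict assembled above ends up with the shape sketched below before it is handed to config.update_zenic_conf(); only the keys come from the code above, every value here is invented:

example_zenic_config = {
    'deploy_hosts': [{'hostid': 'uuid-1', 'hostname': 'host-1',
                      'nodeip': '10.0.1.11', 'MacName': 'eth0',
                      'memmode': 'medium', 'mgtip': '10.0.0.11'}],
    'zbp_ips': set(['10.0.1.11']),
    'zbp_node_num': 1,
    'zamp_ips': set(['10.0.1.11']),
    'zamp_node_num': 1,
    'mongodb_ips': set(['10.0.1.11']),
    'mongodb_node_num': 1,
    'zamp_vip': '10.0.1.100',
    'mongodb_vip': '10.0.1.101',
}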
def generate_zenic_config_file(cluster_id, zenic_config): def generate_zenic_config_file(cluster_id, zenic_config):
LOG.info(_("generate zenic config...")) LOG.info(_("generate zenic config..."))
if zenic_config: if zenic_config:
cluster_conf_path = daisy_zenic_path + cluster_id cluster_conf_path = daisy_zenic_path + cluster_id
config.update_zenic_conf(zenic_config, cluster_conf_path) config.update_zenic_conf(zenic_config, cluster_conf_path)
def thread_bin(req,host, role_id_list, pkg_name, install_progress_percentage):
def thread_bin(req, host, role_id_list, pkg_name, install_progress_percentage):
host_ip = host['mgtip'] host_ip = host['mgtip']
password = host['rootpwd'] password = host['rootpwd']
cmd = 'mkdir -p /var/log/daisy/daisy_install/' cmd = 'mkdir -p /var/log/daisy/daisy_install/'
daisy_cmn.subprocess_call(cmd) daisy_cmn.subprocess_call(cmd)
var_log_path = "/var/log/daisy/daisy_install/%s_install_zenic.log" % host_ip var_log_path =\
"/var/log/daisy/daisy_install/%s_install_zenic.log" % host_ip
with open(var_log_path, "w+") as fp: with open(var_log_path, "w+") as fp:
cmd = '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password) cmd = '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password)
daisy_cmn.subprocess_call(cmd,fp) daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -b -w %s mkdir -p /home/workspace' % (host_ip,) cmd = 'clush -S -b -w %s mkdir -p /home/workspace' % (host_ip,)
daisy_cmn.subprocess_call(cmd,fp) daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -b -w %s mkdir -p /etc/zenic' % (host_ip,) cmd = 'clush -S -b -w %s mkdir -p /etc/zenic' % (host_ip,)
daisy_cmn.subprocess_call(cmd,fp) daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -b -w %s rm -rf /etc/zenic/config' % (host_ip,) cmd = 'clush -S -b -w %s rm -rf /etc/zenic/config' % (host_ip,)
daisy_cmn.subprocess_call(cmd,fp) daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -b -w %s rm -rf /home/zenic' % (host_ip,) cmd = 'clush -S -b -w %s rm -rf /home/zenic' % (host_ip,)
daisy_cmn.subprocess_call(cmd,fp) daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -b -w %s rm -rf /home/workspace/unipack' % (host_ip,) cmd = 'clush -S -b -w %s rm -rf /home/workspace/unipack' % (host_ip,)
daisy_cmn.subprocess_call(cmd,fp) daisy_cmn.subprocess_call(cmd, fp)
pkg_file = daisy_zenic_path + pkg_name pkg_file = daisy_zenic_path + pkg_name
cmd = 'clush -S -b -w %s rm -rf /home/workspace/%s' % (host_ip,pkg_name) cmd = 'clush -S -b -w %s rm -rf /home/workspace/%s' % (
daisy_cmn.subprocess_call(cmd,fp) host_ip, pkg_name)
daisy_cmn.subprocess_call(cmd, fp)
cfg_file = daisy_zenic_path + host_ip + "_zenic.conf"
cfg_file = daisy_zenic_path + host_ip + "_zenic.conf"
try: try:
exc_result = subprocess.check_output( exc_result = subprocess.check_output(
'sshpass -p ossdbg1 scp %s root@%s:/etc/zenic/config' % (cfg_file,host_ip,), 'sshpass -p ossdbg1 scp %s root@%s:/etc/zenic/config' % (
cfg_file, host_ip,),
shell=True, stderr=fp) shell=True, stderr=fp)
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
update_progress_to_db(req, role_id_list, zenic_state['INSTALL_FAILED']) update_progress_to_db(
req, role_id_list, zenic_state['INSTALL_FAILED'])
LOG.info(_("scp zenic pkg for %s failed!" % host_ip)) LOG.info(_("scp zenic pkg for %s failed!" % host_ip))
fp.write(e.output.strip()) fp.write(e.output.strip())
exit() exit()
else: else:
LOG.info(_("scp zenic config for %s successfully!" % host_ip)) LOG.info(_("scp zenic config for %s successfully!" % host_ip))
fp.write(exc_result) fp.write(exc_result)
try: try:
exc_result = subprocess.check_output( exc_result = subprocess.check_output(
'sshpass -p ossdbg1 scp %s root@%s:/home/workspace/' % (pkg_file,host_ip,), 'sshpass -p ossdbg1 scp %s root@%s:/home/workspace/' % (
pkg_file, host_ip,),
shell=True, stderr=fp) shell=True, stderr=fp)
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
update_progress_to_db(req, role_id_list, zenic_state['INSTALL_FAILED']) update_progress_to_db(
req, role_id_list, zenic_state['INSTALL_FAILED'])
LOG.info(_("scp zenic pkg for %s failed!" % host_ip)) LOG.info(_("scp zenic pkg for %s failed!" % host_ip))
fp.write(e.output.strip()) fp.write(e.output.strip())
exit() exit()
else: else:
LOG.info(_("scp zenic pkg for %s successfully!" % host_ip)) LOG.info(_("scp zenic pkg for %s successfully!" % host_ip))
fp.write(exc_result) fp.write(exc_result)
cmd = 'clush -S -b -w %s unzip /home/workspace/%s -d /home/workspace/unipack' % (host_ip,pkg_name,) cmd = 'clush -S -b -w %s unzip /home/workspace/%s \
-d /home/workspace/unipack' % (
host_ip, pkg_name,)
daisy_cmn.subprocess_call(cmd) daisy_cmn.subprocess_call(cmd)
try: try:
exc_result = subprocess.check_output( exc_result = subprocess.check_output(
'clush -S -b -w %s /home/workspace/unipack/node_install.sh' % (host_ip,), 'clush -S -b -w %s /home/workspace/unipack/node_install.sh'
% (host_ip,),
shell=True, stderr=subprocess.STDOUT) shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
update_progress_to_db(req, role_id_list, zenic_state['INSTALL_FAILED']) update_progress_to_db(
req, role_id_list, zenic_state['INSTALL_FAILED'])
LOG.info(_("install zenic for %s failed!" % host_ip)) LOG.info(_("install zenic for %s failed!" % host_ip))
fp.write(e.output.strip()) fp.write(e.output.strip())
exit() exit()
else: else:
LOG.info(_("install zenic for %s successfully!" % host_ip)) LOG.info(_("install zenic for %s successfully!" % host_ip))
fp.write(exc_result) fp.write(exc_result)
try: try:
exc_result = subprocess.check_output( exc_result = subprocess.check_output(
'clush -S -b -w %s /home/zenic/node_start.sh' % (host_ip,), 'clush -S -b -w %s /home/zenic/node_start.sh' % (host_ip,),
shell=True, stderr=subprocess.STDOUT) shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
update_progress_to_db(req, role_id_list, zenic_state['INSTALL_FAILED']) update_progress_to_db(
req, role_id_list, zenic_state['INSTALL_FAILED'])
LOG.info(_("start zenic for %s failed!" % host_ip)) LOG.info(_("start zenic for %s failed!" % host_ip))
fp.write(e.output.strip()) fp.write(e.output.strip())
exit() exit()
else: else:
update_progress_to_db(req, role_id_list, zenic_state['INSTALLING'], install_progress_percentage) update_progress_to_db(
req, role_id_list, zenic_state['INSTALLING'],
install_progress_percentage)
LOG.info(_("start zenic for %s successfully!" % host_ip)) LOG.info(_("start zenic for %s successfully!" % host_ip))
fp.write(exc_result) fp.write(exc_result)
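Each remote step above follows the same per-host logging idiom: stderr is streamed into the host's log file, stdout is captured and appended on success, and a failure marks the roles failed before the worker exits. A compact Python 2 sketch of that idiom (matching the code above), not part of this commit, with placeholder names:

import subprocess

def run_step(cmd, log_path):
    # returns True on success; stderr goes straight into the per-host log
    with open(log_path, "a+") as fp:
        try:
            out = subprocess.check_output(cmd, shell=True, stderr=fp)
        except subprocess.CalledProcessError as e:
            fp.write(e.output.strip())   # stdout captured by check_output
            return False
        fp.write(out)
        return True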
class ZENICInstallTask(Thread): class ZENICInstallTask(Thread):
""" """
Class for installing the zenic bin. Class for installing the zenic bin.
""" """
""" Definition for install states.""" """ Definition for install states."""
INSTALL_STATES = { INSTALL_STATES = {
'INIT' : 'init', 'INIT': 'init',
'INSTALLING' : 'installing', 'INSTALLING': 'installing',
'ACTIVE' : 'active', 'ACTIVE': 'active',
'FAILED': 'install-failed' 'FAILED': 'install-failed'
} }
@ -371,9 +384,6 @@ class ZENICInstallTask(Thread):
self.ping_times = 36 self.ping_times = 36
self.log_file = "/var/log/daisy/zenic_%s_install.log" % self.cluster_id self.log_file = "/var/log/daisy/zenic_%s_install.log" % self.cluster_id
def run(self): def run(self):
try: try:
self._run() self._run()
@ -388,40 +398,47 @@ class ZENICInstallTask(Thread):
self.state = zenic_state['ACTIVE'] self.state = zenic_state['ACTIVE']
self.message = "Zenic install successfully" self.message = "Zenic install successfully"
LOG.info(_("install Zenic for cluster %s successfully." LOG.info(_("install Zenic for cluster %s successfully."
% self.cluster_id)) % self.cluster_id))
def _run(self): def _run(self):
(zenic_config, self.mgt_ip_list) = get_cluster_zenic_config(self.req, self.cluster_id) (zenic_config, self.mgt_ip_list) = get_cluster_zenic_config(
self.req, self.cluster_id)
if not self.mgt_ip_list: if not self.mgt_ip_list:
msg = _("there is no host in cluster %s") % self.cluster_id msg = _("there is no host in cluster %s") % self.cluster_id
raise exception.ThreadBinException(msg) raise exception.ThreadBinException(msg)
unreached_hosts = _check_ping_hosts(self.mgt_ip_list, self.ping_times) unreached_hosts = _check_ping_hosts(self.mgt_ip_list, self.ping_times)
if unreached_hosts: if unreached_hosts:
self.state = zenic_state['INSTALL_FAILED'] self.state = zenic_state['INSTALL_FAILED']
self.message = "hosts %s ping failed" % unreached_hosts self.message = "hosts %s ping failed" % unreached_hosts
raise exception.NotFound(message=self.message) raise exception.NotFound(message=self.message)
generate_zenic_config_file(self.cluster_id, zenic_config) generate_zenic_config_file(self.cluster_id, zenic_config)
# check and get ZENIC version # check and get ZENIC version
(zenic_version_pkg_file,zenic_version_pkg_name) = zenic_cmn.check_and_get_zenic_version(daisy_zenic_path) (zenic_version_pkg_file, zenic_version_pkg_name) =\
if not zenic_version_pkg_file: zenic_cmn.check_and_get_zenic_version(
daisy_zenic_path)
if not zenic_version_pkg_file:
self.state = zenic_state['INSTALL_FAILED'] self.state = zenic_state['INSTALL_FAILED']
self.message = "ZENIC version file not found in %s" % daisy_zenic_path self.message = \
"ZENIC version file not found in %s" % daisy_zenic_path
raise exception.NotFound(message=self.message) raise exception.NotFound(message=self.message)
(role_id_list, hosts_list) = zenic_cmn.get_roles_and_hosts_list(self.req, self.cluster_id) (role_id_list, hosts_list) = zenic_cmn.get_roles_and_hosts_list(
self.req, self.cluster_id)
update_progress_to_db(self.req, role_id_list, zenic_state['INSTALLING'], 0.0)
install_progress_percentage = round(1*1.0/len(hosts_list), 2)*100 update_progress_to_db(
self.req, role_id_list, zenic_state['INSTALLING'], 0.0)
install_progress_percentage = round(1 * 1.0 / len(hosts_list), 2) * 100
threads = [] threads = []
for host in hosts_list: for host in hosts_list:
t = threading.Thread(target=thread_bin,args=(self.req,host,role_id_list,zenic_version_pkg_name,install_progress_percentage)) t = threading.Thread(target=thread_bin, args=(
self.req, host, role_id_list,
zenic_version_pkg_name, install_progress_percentage))
t.setDaemon(True) t.setDaemon(True)
t.start() t.start()
threads.append(t) threads.append(t)
@ -437,14 +454,16 @@ class ZENICInstallTask(Thread):
for role_id in role_id_list: for role_id in role_id_list:
role = daisy_cmn.get_role_detail(self.req, role_id) role = daisy_cmn.get_role_detail(self.req, role_id)
if role['progress'] == 0: if role['progress'] == 0:
update_progress_to_db(self.req, role_id_list, zenic_state['INSTALL_FAILED']) update_progress_to_db(
self.req, role_id_list, zenic_state['INSTALL_FAILED'])
install_failed_flag = True install_failed_flag = True
break break
if role['status'] == zenic_state['INSTALL_FAILED']: if role['status'] == zenic_state['INSTALL_FAILED']:
install_failed_flag = True install_failed_flag = True
break break
if not install_failed_flag: if not install_failed_flag:
LOG.info(_("all install threads have done, set all roles status to 'active'!")) LOG.info(
update_progress_to_db(self.req, role_id_list, zenic_state['ACTIVE']) _("all install threads have done, \
set all roles status to 'active'!"))
update_progress_to_db(
self.req, role_id_list, zenic_state['ACTIVE'])
@ -17,30 +17,12 @@
/hosts endpoint for Daisy v1 API /hosts endpoint for Daisy v1 API
""" """
import os
import webob.exc
import subprocess import subprocess
from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden
from threading import Thread, Lock
import threading import threading
from daisy import i18n from daisy import i18n
from daisy import notifier
from daisy.api import policy
import daisy.api.v1
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
from daisy.api.v1 import controller
from daisy.api.v1 import filters
from daisy.api.backends.zenic.common import ZenicShellExector
import daisy.api.backends.common as daisy_cmn import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.zenic.common as zenic_cmn import daisy.api.backends.zenic.common as zenic_cmn
@ -52,12 +34,15 @@ _LW = i18n._LW
zenic_state = zenic_cmn.ZENIC_STATE zenic_state = zenic_cmn.ZENIC_STATE
uninstall_zenic_progress=100.0 uninstall_zenic_progress = 100.0
uninstall_mutex = threading.Lock() uninstall_mutex = threading.Lock()
def update_progress_to_db(req, role_id_list, status, progress_percentage_step=0.0):
def update_progress_to_db(req, role_id_list, status,
progress_percentage_step=0.0):
""" """
Write uninstall progress and status to db, we use global lock object 'uninstall_mutex' Write uninstall progress and status to db,
we use global lock object 'uninstall_mutex'
to make sure this function is thread safe. to make sure this function is thread safe.
:param req: http req. :param req: http req.
:param role_id_list: Column that needs to be updated in role table. :param role_id_list: Column that needs to be updated in role table.
@ -74,33 +59,36 @@ def update_progress_to_db(req, role_id_list, status, progress_percentage_step=0.
if 0 == cmp(status, zenic_state['UNINSTALLING']): if 0 == cmp(status, zenic_state['UNINSTALLING']):
role['status'] = status role['status'] = status
role['progress'] = uninstall_zenic_progress role['progress'] = uninstall_zenic_progress
if 0 == cmp(status, zenic_state['UNINSTALL_FAILED']): if 0 == cmp(status, zenic_state['UNINSTALL_FAILED']):
role['status'] = status role['status'] = status
elif 0 == cmp(status, zenic_state['INIT']): elif 0 == cmp(status, zenic_state['INIT']):
role['status'] = status role['status'] = status
role['progress'] = 0 role['progress'] = 0
daisy_cmn.update_role(req, role_id, role) daisy_cmn.update_role(req, role_id, role)
uninstall_mutex.release() uninstall_mutex.release()
def thread_bin(req, host, role_id_list,uninstall_progress_percentage):
def thread_bin(req, host, role_id_list, uninstall_progress_percentage):
host_ip = host['mgtip'] host_ip = host['mgtip']
password = host['rootpwd'] password = host['rootpwd']
cmd = 'mkdir -p /var/log/daisy/daisy_uninstall/' cmd = 'mkdir -p /var/log/daisy/daisy_uninstall/'
daisy_cmn.subprocess_call(cmd) daisy_cmn.subprocess_call(cmd)
var_log_path = "/var/log/daisy/daisy_uninstall/%s_uninstall_zenic.log" % host_ip var_log_path =\
"/var/log/daisy/daisy_uninstall/%s_uninstall_zenic.log" % host_ip
with open(var_log_path, "w+") as fp: with open(var_log_path, "w+") as fp:
cmd = '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password) cmd = '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password)
daisy_cmn.subprocess_call(cmd,fp) daisy_cmn.subprocess_call(cmd, fp)
try: try:
exc_result = subprocess.check_output( exc_result = subprocess.check_output(
'clush -S -b -w %s /home/zenic/node_stop.sh' % (host_ip,), 'clush -S -b -w %s /home/zenic/node_stop.sh' % (host_ip,),
shell=True, stderr=subprocess.STDOUT) shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
update_progress_to_db(req, role_id_list, zenic_state['UNINSTALL_FAILED']) update_progress_to_db(
fp.write(e.output.strip()) req, role_id_list, zenic_state['UNINSTALL_FAILED'])
fp.write(e.output.strip())
else: else:
update_progress_to_db(req, role_id_list, zenic_state['UNINSTALLING'], uninstall_progress_percentage) update_progress_to_db(
req, role_id_list, zenic_state['UNINSTALLING'],
uninstall_progress_percentage)
fp.write(exc_result) fp.write(exc_result)
@ -17,30 +17,13 @@
/update endpoint for Daisy v1 API /update endpoint for Daisy v1 API
""" """
import os
import webob.exc
import subprocess import subprocess
from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden
from threading import Thread, Lock
import threading import threading
from daisy import i18n from daisy import i18n
from daisy import notifier
from daisy.api import policy
import daisy.api.v1
from daisy.common import exception from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
from daisy.api.v1 import controller
from daisy.api.v1 import filters
from daisy.api.backends.zenic.common import ZenicShellExector
import daisy.api.backends.common as daisy_cmn import daisy.api.backends.common as daisy_cmn
import daisy.api.backends.zenic.common as zenic_cmn import daisy.api.backends.zenic.common as zenic_cmn
@ -54,12 +37,15 @@ zenic_state = zenic_cmn.ZENIC_STATE
daisy_zenic_path = zenic_cmn.daisy_zenic_path daisy_zenic_path = zenic_cmn.daisy_zenic_path
update_zenic_progress=0.0 update_zenic_progress = 0.0
update_mutex = threading.Lock() update_mutex = threading.Lock()
def update_progress_to_db(req, role_id_list, status, progress_percentage_step=0.0):
def update_progress_to_db(req, role_id_list, status,
progress_percentage_step=0.0):
""" """
Write update progress and status to db, we use global lock object 'update_mutex' Write update progress and status to db,
we use global lock object 'update_mutex'
to make sure this function is thread safe. to make sure this function is thread safe.
:param req: http req. :param req: http req.
:param role_id_list: Column that needs to be updated in role table. :param role_id_list: Column that needs to be updated in role table.
@ -76,7 +62,7 @@ def update_progress_to_db(req, role_id_list, status, progress_percentage_step=0.
if 0 == cmp(status, zenic_state['UPDATING']): if 0 == cmp(status, zenic_state['UPDATING']):
role['status'] = status role['status'] = status
role['progress'] = update_zenic_progress role['progress'] = update_zenic_progress
if 0 == cmp(status, zenic_state['UPDATE_FAILED']): if 0 == cmp(status, zenic_state['UPDATE_FAILED']):
role['status'] = status role['status'] = status
elif 0 == cmp(status, zenic_state['ACTIVE']): elif 0 == cmp(status, zenic_state['ACTIVE']):
role['status'] = status role['status'] = status
@ -85,60 +71,70 @@ def update_progress_to_db(req, role_id_list, status, progress_percentage_step=0.
update_mutex.release() update_mutex.release()
def thread_bin(req, host,role_id_list,update_progress_percentage): def thread_bin(req, host, role_id_list, update_progress_percentage):
(zenic_version_pkg_file,zenic_version_pkg_name) = zenic_cmn.check_and_get_zenic_version(daisy_zenic_path) (zenic_version_pkg_file, zenic_version_pkg_name) = \
zenic_cmn.check_and_get_zenic_version(
daisy_zenic_path)
if not zenic_version_pkg_file: if not zenic_version_pkg_file:
self.state = zenic_state['INSTALL_FAILED'] # selfstate = zenic_state['INSTALL_FAILED']
self.message = "ZENIC version file not found in %s" % daisy_zenic_path selfmessage = "ZENIC version file not found in %s" % daisy_zenic_path
raise exception.NotFound(message=self.message) raise exception.NotFound(message=selfmessage)
host_ip = host['mgtip'] host_ip = host['mgtip']
password = host['rootpwd'] password = host['rootpwd']
cmd = 'mkdir -p /var/log/daisy/daisy_upgrade/' cmd = 'mkdir -p /var/log/daisy/daisy_upgrade/'
daisy_cmn.subprocess_call(cmd) daisy_cmn.subprocess_call(cmd)
var_log_path = "/var/log/daisy/daisy_upgrade/%s_upgrade_zenic.log" % host_ip var_log_path = \
"/var/log/daisy/daisy_upgrade/%s_upgrade_zenic.log" % host_ip
with open(var_log_path, "w+") as fp: with open(var_log_path, "w+") as fp:
cmd = '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password) cmd = '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password)
daisy_cmn.subprocess_call(cmd,fp) daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -b -w %s /home/zenic/node_stop.sh' % (host_ip,) cmd = 'clush -S -b -w %s /home/zenic/node_stop.sh' % (host_ip,)
daisy_cmn.subprocess_call(cmd,fp) daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -b -w %s rm -rf /home/workspace/%s' % (
cmd = 'clush -S -b -w %s rm -rf /home/workspace/%s' % (host_ip,zenic_version_pkg_name) host_ip, zenic_version_pkg_name)
daisy_cmn.subprocess_call(cmd,fp) daisy_cmn.subprocess_call(cmd, fp)
cmd = 'clush -S -b -w %s rm -rf /home/workspace/unipack' % (host_ip,) cmd = 'clush -S -b -w %s rm -rf /home/workspace/unipack' % (host_ip,)
daisy_cmn.subprocess_call(cmd,fp) daisy_cmn.subprocess_call(cmd, fp)
try: try:
exc_result = subprocess.check_output( exc_result = subprocess.check_output(
'sshpass -p ossdbg1 scp %s root@%s:/home/workspace/' % (zenic_version_pkg_file,host_ip,), 'sshpass -p ossdbg1 scp %s root@%s:/home/workspace/' % (
zenic_version_pkg_file, host_ip,),
shell=True, stderr=fp) shell=True, stderr=fp)
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
update_progress_to_db(req, role_id_list, zenic_state['INSTALL_FAILED']) update_progress_to_db(
req, role_id_list, zenic_state['INSTALL_FAILED'])
LOG.info(_("scp zenic pkg for %s failed!" % host_ip)) LOG.info(_("scp zenic pkg for %s failed!" % host_ip))
fp.write(e.output.strip()) fp.write(e.output.strip())
exit() exit()
else: else:
LOG.info(_("scp zenic pkg for %s successfully!" % host_ip)) LOG.info(_("scp zenic pkg for %s successfully!" % host_ip))
fp.write(exc_result) fp.write(exc_result)
cmd = 'clush -S -b -w %s unzip /home/workspace/%s -d /home/workspace/unipack' % (host_ip,zenic_version_pkg_name,) cmd = 'clush -S -b -w %s unzip /home/workspace/%s \
-d /home/workspace/unipack' % (host_ip, zenic_version_pkg_name,)
daisy_cmn.subprocess_call(cmd) daisy_cmn.subprocess_call(cmd)
try: try:
exc_result = subprocess.check_output( exc_result = subprocess.check_output(
'clush -S -b -w %s /home/workspace/unipack/node_upgrade.sh' % (host_ip,), 'clush -S -b -w %s /home/workspace/unipack/node_upgrade.sh'
% (host_ip,),
shell=True, stderr=subprocess.STDOUT) shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
update_progress_to_db(req, role_id_list, zenic_state['UPDATE_FAILED']) update_progress_to_db(
req, role_id_list, zenic_state['UPDATE_FAILED'])
LOG.info(_("Upgrade zenic for %s failed!" % host_ip)) LOG.info(_("Upgrade zenic for %s failed!" % host_ip))
fp.write(e.output.strip()) fp.write(e.output.strip())
else: else:
update_progress_to_db(req, role_id_list, zenic_state['UPDATING'], update_progress_percentage) update_progress_to_db(
req, role_id_list, zenic_state['UPDATING'],
update_progress_percentage)
LOG.info(_("Upgrade zenic for %s successfully!" % host_ip)) LOG.info(_("Upgrade zenic for %s successfully!" % host_ip))
fp.write(exc_result) fp.write(exc_result)
@ -147,12 +143,13 @@ def thread_bin(req, host,role_id_list,update_progress_percentage):
'clush -S -b -w %s /home/zenic/node_start.sh' % (host_ip,), 'clush -S -b -w %s /home/zenic/node_start.sh' % (host_ip,),
shell=True, stderr=subprocess.STDOUT) shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
update_progress_to_db(req, role_id_list, zenic_state['UPDATE_FAILED']) update_progress_to_db(
req, role_id_list, zenic_state['UPDATE_FAILED'])
LOG.info(_("Start zenic for %s failed!" % host_ip)) LOG.info(_("Start zenic for %s failed!" % host_ip))
fp.write(e.output.strip()) fp.write(e.output.strip())
else: else:
update_progress_to_db(req, role_id_list, zenic_state['UPDATING'], update_progress_percentage) update_progress_to_db(
req, role_id_list, zenic_state['UPDATING'],
update_progress_percentage)
LOG.info(_("Start zenic for %s successfully!" % host_ip)) LOG.info(_("Start zenic for %s successfully!" % host_ip))
fp.write(exc_result) fp.write(exc_result)

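The upgrade path above follows a simple pattern: one worker thread per host does the slow SSH/clush work, and every worker reports progress through a single lock-protected global before it is written to the database. The following is a minimal standalone sketch of that locking pattern; it is not part of the commit, and report_progress() and upgrade_one_host() are illustrative stand-ins for update_progress_to_db() and thread_bin().

import threading

update_progress = 0.0
update_mutex = threading.Lock()


def report_progress(step):
    # Same role as update_progress_to_db(): serialize the
    # read-modify-write on the shared progress value.
    global update_progress
    update_mutex.acquire()
    try:
        update_progress += step
        print("progress now %.1f%%" % update_progress)
    finally:
        update_mutex.release()


def upgrade_one_host(host_ip, step):
    # Stand-in for thread_bin(): do the per-host work, then report.
    print("upgrading %s ..." % host_ip)
    report_progress(step)


if __name__ == '__main__':
    hosts = ['10.0.0.1', '10.0.0.2', '10.0.0.3']
    step = 100.0 / len(hosts)
    threads = [threading.Thread(target=upgrade_one_host, args=(h, step))
               for h in hosts]
    for t in threads:
        t.start()
    for t in threads:
        t.join()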
@@ -67,8 +67,8 @@ def size_checked_iter(response, image_meta, expected_size, image_iter,
                   'bytes_written': bytes_written})
        LOG.error(msg)
        raise exception.DaisyException(_("Corrupt image download for "
                                         "image %(image_id)s") %
                                       {'image_id': image_id})


def image_send_notification(bytes_written, expected_size, image_meta, request,
@@ -218,3 +218,9 @@ def get_thread_pool(lock_name, size=1024):
        return wsgi.get_asynchronous_eventlet_pool(size=size)
    return _get_thread_pool


def get_pxe_mac(host_detail):
    pxe_macs = [interface['mac'] for interface in host_detail['interfaces']
                if interface['is_deployment']]
    return pxe_macs
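A quick usage illustration for the new get_pxe_mac() helper: given a hand-built host record (the field names mirror the ones the function reads, the MAC values are invented), only the deployment interface's MAC is returned.

sample_host = {'interfaces': [
    {'name': 'eth0', 'mac': 'fa:16:3e:aa:bb:01', 'is_deployment': True},
    {'name': 'eth1', 'mac': 'fa:16:3e:aa:bb:02', 'is_deployment': False}]}
print(get_pxe_mac(sample_host))  # -> ['fa:16:3e:aa:bb:01']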
@@ -1,30 +1,29 @@
import subprocess
import daisy.registry.client.v1.api as registry
from daisy.api.backends.tecs import config as role_service
from oslo_log import log as logging
import webob.exc
from webob.exc import HTTPBadRequest

from daisy.common import exception
from daisy.common import utils
import daisy.api.backends.common as daisy_cmn

LOG = logging.getLogger(__name__)


class config_clushshell():
    """ Class for clush backend."""

    def __init__(self, req):
        self.context = req.context
        self.CLUSH_CMD = 'clush -S -w %(management_ip)s "%(sub_command)s"'
        self.SUB_COMMAND_SET = "openstack-config --set %(config_file)s"\
                               " %(section)s %(key)s '%(value)s'"
        self.SUB_COMMAND_DEL = "openstack-config --del %(config_file)s"\
                               " %(section)s %(key)s"

    def _openstack_set_config(self, host_ip, config_set):
        """
@@ -37,107 +36,259 @@ class config_clushshell():
            LOG.debug('<<<FUN:_openstack_set_config input params invalid.>>>')
            return

        config_cmd = []
        for config in config_set['config']:
            if config['config_version'] == config['running_version']:
                continue
            config_file = registry.get_config_file_metadata(
                self.context, config['config_file_id'])
            if config['value']:
                value = utils.translate_quotation_marks_for_shell(
                    config['value'])
                config_cmd.append(self.SUB_COMMAND_SET %
                                  {'config_file': config_file['name'],
                                   'section': config['section'],
                                   'key': config['key'],
                                   'value': value})
            else:
                # If the value is empty, delete or comment out the option.
                config_cmd.append(self.SUB_COMMAND_DEL %
                                  {'config_file': config_file['name'],
                                   'section': config['section'],
                                   'key': config['key']})

        try:
            for cmd in config_cmd:
                clush_cmd = self.CLUSH_CMD % {
                    'management_ip': host_ip, 'sub_command': cmd}
                subprocess.check_output(
                    clush_cmd, shell=True, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            msg = ("<<<Host %s execute clush failed:%s.>>>" %
                   (host_ip, e.output.strip()))
            LOG.exception(msg)
            raise webob.exc.HTTPServerError(explanation=msg)
        else:
            msg = ("<<<Complete to push configs for host %s.>>>" % host_ip)
            LOG.info(msg)

    # If push_status is None, push configs to all hosts in the role.
    def push_role_configs(self, role_id, push_status):
        """
        Push config to remote host.
        :param req: http req
        :param role_id: host role id
        :return:
        """
        role_info = registry.get_role_metadata(self.context, role_id)
        if not role_info.get('config_set_id'):
            LOG.info("<<<No config_set configed for role '%s'>>>"
                     % role_info['name'])
            return

        config_set = registry.get_config_set_metadata(
            self.context, role_info['config_set_id'])
        if not config_set:
            LOG.info("<<<Get config_set failed for role '%s'.>>>"
                     % role_info['name'])
            return
        else:
            if 'config' not in config_set:
                LOG.info("<<<No configs get for role '%s'.>>>"
                         % role_info['name'])
                return

        config_set['config'] = [config for config in config_set['config']
                                if config.get('config_version', 0) !=
                                config.get('running_version', 0)]

        if not config_set['config']:
            LOG.info("<<<No config need to push for role '%s'.>>>"
                     % role_info['name'])
            return

        self.role_hosts = registry.get_role_host_metadata(
            self.context, role_id)

        total_host_count = 0
        if push_status:
            for r_host in self.role_hosts:
                if r_host['status'] == push_status:
                    total_host_count += 1
        else:
            total_host_count = len(self.role_hosts)

        if total_host_count > 0:
            LOG.info("Begin to push config for role '%s'"
                     % role_info['name'])
        else:
            return

        current_count = 0
        for role_host in self.role_hosts:
            host = registry.get_host_metadata(
                self.context, role_host['host_id'])
            if push_status and role_host['status'] != push_status:
                LOG.debug("<<<Status of host '%s' is not '%s',"
                          " don't push configs.>>>"
                          % (role_host['host_id'], push_status))
                continue

            host_management_ip = ''
            for interface in host['interfaces']:
                if ('assigned_networks' in interface and
                        interface['assigned_networks']):
                    for assigned_network in interface['assigned_networks']:
                        if (assigned_network['name'] == 'MANAGEMENT' and
                                'ip' in assigned_network):
                            host_management_ip = assigned_network['ip']

            if not host_management_ip:
                msg = "Can't find management ip for host %s"\
                    % role_host['host_id']
                raise HTTPBadRequest(explanation=msg)

            root_passwd = 'ossdbg1'
            daisy_cmn.trust_me([host_management_ip], root_passwd)

            self._openstack_set_config(host_management_ip, config_set)
            self._role_service_restart(role_info, host_management_ip)

            current_count += 1
            role_info['config_set_update_progress'] = \
                round(current_count * 1.0 / total_host_count, 2) * 100
            registry.update_role_metadata(
                self.context, role_id, role_info)

        all_config_sets = []
        for config in config_set['config']:
            config['running_version'] = config['config_version']
        all_config_sets.append(config_set)
        registry.update_configs_metadata_by_role_hosts(
            self.context, all_config_sets)

    def _host_service_restart(self, host_ip, components_name):
        params = {'limit': '200', 'filters': {}}
        try:
            services = registry.get_services_detail(self.context,
                                                    **params)
            components = registry.get_components_detail(self.context,
                                                        **params)
        except exception.Invalid as e:
            raise HTTPBadRequest(explanation=e.msg)

        components_id = [comp['id'] for comp in components
                         for comp_name in components_name
                         if comp['name'] == comp_name]

        for service in services:
            if service['component_id'] not in components_id:
                continue
            services_name = role_service.service_map.get(service['name'])
            if not services_name:
                msg = "Can't find service for '%s'" % service
                raise HTTPBadRequest(explanation=msg)

            for service_name in services_name.split(','):
                active_service = "clush -S -w %s 'systemctl is-active\
                    %s'" % (host_ip, service_name)
                if 0 == utils.simple_subprocess_call(active_service):
                    restart_service = "clush -S -w %s 'systemctl restart\
                        %s'" % (host_ip, service_name)
                    LOG.info("Restart service %s after pushing config"
                             % service_name)
                    if 0 != utils.simple_subprocess_call(restart_service):
                        msg = "Service %s restart failed on host '%s'."\
                            % (service_name, host_ip)
                        LOG.error(msg)

    # NOTE: there is no way yet to look up the component id from the config
    # file, so the caller has to pass components_name explicitly; this
    # parameter can be dropped once such a lookup exists.
    def push_host_configs(self, host_id, components_name):
        """
        Push config to remote host.
        :param req: http req
        :param host_id: host id
        :return:
        """
        host_detail = registry.get_host_metadata(self.context, host_id)
        if not host_detail.get('config_set_id'):
            LOG.info("<<<No config_set configed for host '%s'.>>>"
                     % host_id)
            return

        config_set = \
            registry.get_config_set_metadata(self.context,
                                             host_detail['config_set_id'])
        if not config_set:
            LOG.info("<<<Get config_set failed for host '%s'.>>>"
                     % host_id)
            return
        else:
            if 'config' not in config_set:
                LOG.info("<<<No configs get for host '%s'.>>>" % host_id)
                return

        config_set['config'] = [config for config in config_set['config']
                                if config.get('config_version', 0) !=
                                config.get('running_version', 0)]

        if not config_set['config']:
            LOG.info("<<<No config need to push for host '%s'.>>>"
                     % host_id)
            return

        host_management_ip = ''
        for interface in host_detail['interfaces']:
            if ('assigned_networks' in interface and
                    interface['assigned_networks']):
                for assigned_network in interface['assigned_networks']:
                    if (assigned_network['name'] == 'MANAGEMENT' and
                            'ip' in assigned_network):
                        host_management_ip = assigned_network['ip']

        if not host_management_ip:
            msg = "Can't find management ip for host %s"\
                % host_detail['host_id']
            raise HTTPBadRequest(explanation=msg)

        root_passwd = 'ossdbg1'
        daisy_cmn.trust_me([host_management_ip], root_passwd)

        self._openstack_set_config(host_management_ip, config_set)
        self._host_service_restart(host_management_ip, components_name)

        all_config_sets = []
        for config in config_set['config']:
            config['running_version'] = config['config_version']
        all_config_sets.append(config_set)
        registry.update_configs_metadata_by_role_hosts(self.context,
                                                       all_config_sets)

    def _role_service_restart(self, role_info, host_ip):
        """ """
        for service in role_info['service_name']:
            services_name = role_service.service_map.get(service)
            if not services_name:
                msg = "Can't find service for '%s'" % service
                raise HTTPBadRequest(explanation=msg)

            for service_name in services_name.split(','):
                active_service = "clush -S -w %s 'systemctl is-active\
                    %s'" % (host_ip, service_name)
                if 0 == utils.simple_subprocess_call(active_service):
                    restart_service = "clush -S -w %s 'systemctl restart\
                        %s'" % (host_ip, service_name)
                    LOG.info("Restart service %s after pushing config"
                             % service_name)
                    if 0 != utils.simple_subprocess_call(restart_service):
                        msg = "Service %s restart failed on host '%s'."\
                            % (service_name, host_ip)
                        LOG.error(msg)
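To make the clush backend above easier to follow, here is a small self-contained sketch of how its remote commands are composed: each changed config item becomes an openstack-config --set call (or --del when the value is empty), which is then wrapped in a clush invocation for the target host. The templates mirror CLUSH_CMD, SUB_COMMAND_SET and SUB_COMMAND_DEL above; the sample host and config items are invented for illustration.

CLUSH_CMD = 'clush -S -w %(management_ip)s "%(sub_command)s"'
SET_CMD = ("openstack-config --set %(config_file)s"
           " %(section)s %(key)s '%(value)s'")
DEL_CMD = "openstack-config --del %(config_file)s %(section)s %(key)s"


def build_push_commands(host_ip, configs):
    """Return one clush command per config item: set when a value is
    given, delete when the value is empty, as _openstack_set_config does."""
    commands = []
    for item in configs:
        if item.get('value'):
            sub = SET_CMD % item
        else:
            sub = DEL_CMD % item
        commands.append(CLUSH_CMD % {'management_ip': host_ip,
                                     'sub_command': sub})
    return commands


if __name__ == '__main__':
    sample = [{'config_file': '/etc/nova/nova.conf', 'section': 'DEFAULT',
               'key': 'debug', 'value': 'True'},
              {'config_file': '/etc/nova/nova.conf', 'section': 'DEFAULT',
               'key': 'verbose', 'value': ''}]
    for cmd in build_push_commands('192.168.1.10', sample):
        print(cmd)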
@@ -1,16 +1,24 @@
from daisy.api.configset.clush import config_clushshell


class configBackend():

    def __init__(self, type, req):
        self.type = type
        self._instance = None

        if type == "clushshell":
            self._instance = config_clushshell(req)
        elif type == "puppet":
            pass

    # If push_status is None, push configs to all hosts in the role.
    def push_config_by_roles(self, role_ids, push_status=None):
        for role_id in role_ids:
            self._instance.push_role_configs(role_id, push_status)

    def push_config_by_hosts(self, host_ids, component_names=[]):
        for host_id in host_ids:
            self._instance.push_host_configs(host_id,
                                             component_names)
@@ -24,10 +24,12 @@ from neutronclient.v2_0 import client as clientv20
from daisy.common import exception

LOG = logging.getLogger(__name__)


class network(object):
    """
    network config
    """

    def __init__(self, req, neutron_host, keystone_host, cluster_id):
        registry.configure_registry_client()
        auth_url = 'http://' + keystone_host + ':35357/v2.0'
@@ -49,10 +51,12 @@ class network(object):
        except exception.Invalid as e:
            LOG.exception(e.msg)
            raise HTTPBadRequest(explanation=e.msg, request=req)

        LOG.info("<<<CLUSTER:%s,NEUTRON HOST:%s,KEYSTONE:%s>>>",
                 cluster, neutron_host, keystone_host)

        if 'logic_networks' in cluster and cluster[
                'logic_networks'] is not None:
            self.nets = cluster['logic_networks']
            # self._flat_network_uniqueness_check()
        if 'routers' in cluster and cluster['routers'] is not None:
            self.routers = cluster['routers']
        else:
@@ -83,7 +87,9 @@ class network(object):
        for router in self.routers:
            router_id = self._router_create(router['name'])
            if 'external_logic_network' in router:
                body = {
                    'network_id': self.name_mappings[
                        router['external_logic_network']]}
                self.neutron.add_gateway_router(router_id, body)
            if 'subnets' in router:
                for i in router['subnets']:
@@ -92,7 +98,8 @@ class network(object):
    def _net_subnet_same_router_check(self, ex_network, subnet):
        for router in self.routers:
            if 'external_logic_network' in router and router[
                    'external_logic_network'] == ex_network:
                if 'subnets' in router:
                    for i in router['subnets']:
                        if i == subnet:
@@ -155,18 +162,25 @@ class network(object):
        for net in self.nets:
            body = {}
            if net['type'] == 'external':
                body['network'] = {
                    'name': net['name'],
                    'router:external': True,
                    'provider:network_type': net['segmentation_type']}
                if net['segmentation_type'].strip() == 'flat':
                    body['network']['provider:physical_network'] = net[
                        'physnet_name']
                elif net['segmentation_type'].strip() == 'vxlan':
                    if 'segmentation_id' in net and net[
                            'segmentation_id'] is not None:
                        body['network']['provider:segmentation_id'] = net[
                            'segmentation_id']
                else:
                    if 'segmentation_id' in net and net[
                            'segmentation_id'] is not None:
                        body['network']['provider:segmentation_id'] = net[
                            'segmentation_id']
                    body['network']['provider:physical_network'] = net[
                        'physnet_name']
                if net['shared']:
                    body['network']['shared'] = True
                else:
@@ -175,21 +189,28 @@ class network(object):
                self.name_mappings[net['name']] = external['network']['id']
                last_create_subnet = []
                for subnet in net['subnets']:
                    if self._net_subnet_same_router_check(
                            net['name'], subnet['name']):
                        last_create_subnet.append(subnet)
                    else:
                        subnet_id = self._subnet_check_and_create(
                            external['network']['id'], subnet)
                        self.name_mappings[subnet['name']] = subnet_id
                for subnet in last_create_subnet:
                    subnet_id = self._subnet_check_and_create(
                        external['network']['id'], subnet)
                    self.name_mappings[subnet['name']] = subnet_id
            else:
                body['network'] = {
                    'name': net['name'],
                    'provider:network_type': net['segmentation_type']}
                if net['segmentation_type'].strip() == 'vlan':
                    body['network']['provider:physical_network'] = net[
                        'physnet_name']
                if 'segmentation_id' in net and net[
                        'segmentation_id'] is not None:
                    body['network']['provider:segmentation_id'] = net[
                        'segmentation_id']
                if net['shared']:
                    body['network']['shared'] = True
                else:
@@ -197,6 +218,7 @@ class network(object):
                inner = self.neutron.create_network(body)
                self.name_mappings[net['name']] = inner['network']['id']
                for subnet in net['subnets']:
                    subnet_id = self._subnet_check_and_create(
                        inner['network']['id'], subnet)
                    self.name_mappings[subnet['name']] = subnet_id

        self._router_link()
@@ -13,9 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.

SUPPORTED_FILTERS = ['name', 'status', 'cluster_id', 'id',
                     'host_id', 'role_id',
                     'auto_scale', 'container_format', 'disk_format',
                     'min_ram', 'min_disk', 'size_min', 'size_max',
                     'is_public', 'changes-since', 'protected', 'type']

SUPPORTED_PARAMS = ('limit', 'marker', 'sort_key', 'sort_dir')
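The added 'type' entry extends the whitelist that the API layer consults when it picks filters out of the query string. A minimal sketch of that whitelisting step follows; the names are illustrative, not the daisy code itself.

SUPPORTED_FILTERS = ['name', 'status', 'cluster_id', 'id', 'type']


def extract_filters(query_params):
    # Keep only whitelisted keys, in the spirit of the API's _get_filters().
    return {k: v for k, v in query_params.items() if k in SUPPORTED_FILTERS}


print(extract_filters({'name': 'cluster1', 'bogus': 'x', 'type': 'kvm'}))
# -> {'name': 'cluster1', 'type': 'kvm'}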
@@ -0,0 +1,312 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""
/hosts endpoint for Daisy v1 API
"""
import datetime
import os
import subprocess

from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden

from daisy import i18n
from daisy import notifier

from daisy.api import policy
import daisy.api.v1
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
import daisy.registry.client.v1.api as registry
from daisy.api.v1 import controller
from daisy.api.v1 import filters
import daisy.api.backends.tecs.common as tecs_cmn

LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE

BACK_PATH = '/home/daisy_backup/'


class Controller(controller.BaseController):
    """
    WSGI controller for hosts resource in Daisy v1 API

    The hosts resource API is a RESTful web service for host data. The API
    is as follows::

        GET /hosts -- Returns a set of brief metadata about hosts
        GET /hosts/detail -- Returns a set of detailed metadata about
                             hosts
        HEAD /hosts/<ID> -- Return metadata about a host with id <ID>
        GET /hosts/<ID> -- Return host data for host with id <ID>
        POST /hosts -- Store host data and return metadata about the
                       newly-stored host
        PUT /hosts/<ID> -- Update host metadata and/or upload host
                           data for a previously-reserved host
        DELETE /hosts/<ID> -- Delete the host with id <ID>
    """

    def __init__(self):
        self.notifier = notifier.Notifier()
        registry.configure_registry_client()
        self.policy = policy.Enforcer()
        if property_utils.is_property_protection_enabled():
            self.prop_enforcer = property_utils.PropertyRules(self.policy)
        else:
            self.prop_enforcer = None

    def _enforce(self, req, action, target=None):
        """Authorize an action against our policies"""
        if target is None:
            target = {}
        try:
            self.policy.enforce(req.context, action, target)
        except exception.Forbidden:
            raise HTTPForbidden()

    def _get_filters(self, req):
        """
        Return a dictionary of query param filters from the request

        :param req: the Request object coming from the wsgi layer
        :retval a dict of key/value filters
        """
        query_filters = {}
        for param in req.params:
            if param in SUPPORTED_FILTERS:
                query_filters[param] = req.params.get(param)
                if not filters.validate(param, query_filters[param]):
                    raise HTTPBadRequest(_('Bad value passed to filter '
                                           '%(filter)s got %(val)s')
                                         % {'filter': param,
                                            'val': query_filters[param]})
        return query_filters

    def _get_query_params(self, req):
        """
        Extracts necessary query params from request.

        :param req: the WSGI Request object
        :retval dict of parameters that can be used by registry client
        """
        params = {'filters': self._get_filters(req)}

        for PARAM in SUPPORTED_PARAMS:
            if PARAM in req.params:
                params[PARAM] = req.params.get(PARAM)
        return params

    def hostname(self):
        if os.name == 'posix':
            host = os.popen('echo $HOSTNAME')
            try:
                return host.read()
            finally:
                host.close()
        else:
            return 'Unknown hostname'

    def check_file_format(self, req, file_meta):
        if not os.path.exists(file_meta.get('backup_file_path', '')):
            msg = 'File does not exist!'
            LOG.error(msg)
            raise HTTPForbidden(explanation=msg, request=req,
                                content_type="text/plain")
        if not file_meta['backup_file_path'].endswith('.tar.gz'):
            msg = 'File format not supported! .tar.gz format is required!'
            LOG.error(msg)
            raise HTTPForbidden(explanation=msg, request=req,
                                content_type="text/plain")

    @utils.mutating
    def backup(self, req):
        """
        Backup daisy data.
        :param req: The WSGI/Webob Request object
        :raises HTTPBadRequest if backup failed
        """
        version = self.version(req, {'type': 'internal'})
        date_str = filter(lambda x: x.isdigit(),
                          str(datetime.datetime.now())[:19])
        backup_file_name = '{0}_{1}_{2}.tar.gz'.format(
            self.hostname().strip(), date_str, version['daisy_version'])
        scripts = [
            'test -d {0}daisy_tmp||mkdir -p {0}daisy_tmp'.format(BACK_PATH),
            'echo {0}>{1}daisy_tmp/version.conf'.format(
                version['daisy_version'], BACK_PATH),
            'cp /home/daisy_install/daisy.conf {0}/daisy_tmp'.format(
                BACK_PATH),
            'mysqldump --all-databases > {0}daisy_tmp/database.sql'.format(
                BACK_PATH),
            'tar -zcvf {0}{1} -C {0} daisy_tmp >/dev/null 2>&1'.format(
                BACK_PATH, backup_file_name),
            'chmod 777 {0} {0}{1}'.format(BACK_PATH, backup_file_name),
            'rm -rf {0}daisy_tmp'.format(BACK_PATH)
        ]
        tecs_cmn.run_scrip(scripts, msg='Backup file failed!')
        return {"backup_file": BACK_PATH + backup_file_name}

    @utils.mutating
    def restore(self, req, file_meta):
        """
        Restore daisy data.
        :param req: The WSGI/Webob Request object
        :param file_meta: The daisy backup file path
        :raises HTTPBadRequest if restore failed
        """
        self.check_file_format(req, file_meta)
        restore_scripts = [
            'test -d {0} || mkdir {0}'.format(BACK_PATH),
            'test -d {0} || mkdir {0}'.format('/home/daisy_install/'),
            'tar -zxvf {1} -C {0}>/dev/null 2>&1'.format(
                BACK_PATH, file_meta['backup_file_path']),
            'mysql < {0}daisy_tmp/database.sql'.format(BACK_PATH),
            'cp {0}daisy_tmp/daisy.conf /home/daisy_install/'.format(
                BACK_PATH),
            'rm -rf {0}daisy_tmp'.format(BACK_PATH)
        ]
        tecs_cmn.run_scrip(restore_scripts, msg='Restore failed!')
        LOG.info('Restore successfully')

    @utils.mutating
    def get_backup_file_version(self, req, file_meta):
        """
        Get version of daisy backup file.
        :param req: The WSGI/Webob Request object
        :raises HTTPBadRequest if can't get version of backup file
        """
        self.check_file_format(req, file_meta)
        scripts = [
            'test -d {0} || mkdir {0}'.format(BACK_PATH),
            'tar -zxvf {0} -C {1}>/dev/null 2>&1'.format(
                file_meta['backup_file_path'], BACK_PATH)
        ]
        tecs_cmn.run_scrip(scripts, msg='Decompression file failed!')
        try:
            version = subprocess.check_output(
                'cat {0}daisy_tmp/version.conf'.format(BACK_PATH),
                shell=True, stderr=subprocess.STDOUT).strip()
        except subprocess.CalledProcessError:
            msg = 'Error occurred when running scripts to get version of' \
                  ' backup file!'
            LOG.error(msg)
            raise HTTPForbidden(explanation=msg, request=req,
                                content_type="text/plain")
        tecs_cmn.run_scrip(['rm -rf {0}daisy_tmp'.format(BACK_PATH)])
        return {"backup_file_version": version}

    @utils.mutating
    def version(self, req, version):
        """
        Get version of daisy.
        :param req: The WSGI/Webob Request object
        :raises HTTPBadRequest if can't get version of daisy
        """
        if version.get('type') == 'internal':
            scripts = "rpm -q python-daisy | awk -F'-' '{print $3\"-\"$4}'"
        else:
            # reserve for external version
            return {"daisy_version": '1.0.0-1.1.0'}
        try:
            version = subprocess.check_output(
                scripts, shell=True, stderr=subprocess.STDOUT).strip()
        except subprocess.CalledProcessError:
            msg = 'Error occurred when running scripts to get version of daisy'
            LOG.error(msg)
            raise HTTPForbidden(explanation=msg, request=req,
                                content_type="text/plain")
        daisy_version = filter(lambda x: not x.isalpha(), version)[:-1]
        return {"daisy_version": daisy_version}


class BackupRestoreDeserializer(wsgi.JSONRequestDeserializer):
    """Handles deserialization of specific controller method requests."""

    def _deserialize(self, request):
        result = {}
        result['file_meta'] = utils.get_dict_meta(request)
        return result

    def backup(self, request):
        return {}

    def restore(self, request):
        return self._deserialize(request)

    def get_backup_file_version(self, request):
        return self._deserialize(request)

    def version(self, request):
        result = {}
        result['version'] = utils.get_dict_meta(request)
        return result


class BackupRestoreSerializer(wsgi.JSONResponseSerializer):
    """Handles serialization of specific controller method responses."""

    def __init__(self):
        self.notifier = notifier.Notifier()

    def backup(self, response, result):
        response.status = 201
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(result)
        return response

    def restore(self, response, result):
        response.status = 201
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(result)
        return response

    def get_backup_file_version(self, response, result):
        response.status = 201
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(result)
        return response

    def version(self, response, result):
        response.status = 201
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(result)
        return response


def create_resource():
    """Version resource factory method"""
    deserializer = BackupRestoreDeserializer()
    serializer = BackupRestoreSerializer()
    return wsgi.Resource(Controller(), deserializer, serializer)
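The backup and restore handlers above hand ordered lists of shell commands to tecs_cmn.run_scrip(). As a rough, hedged sketch of that idea (the real helper may differ in signature and behaviour), a local equivalent could run each command in order and stop at the first failure:

import subprocess


def run_scripts(scripts, msg='script failed'):
    # Execute each shell command in sequence; abort on the first error.
    for script in scripts:
        try:
            subprocess.check_output(script, shell=True,
                                    stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            raise RuntimeError('%s: %s' % (msg, e.output.strip()))


if __name__ == '__main__':
    run_scripts(['mkdir -p /tmp/daisy_demo',
                 'echo hello > /tmp/daisy_demo/marker',
                 'cat /tmp/daisy_demo/marker'])
    print('all scripts ran')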
@ -38,6 +38,7 @@ from daisy.common import wsgi
from daisy import i18n from daisy import i18n
from daisy import notifier from daisy import notifier
import daisy.registry.client.v1.api as registry import daisy.registry.client.v1.api as registry
from functools import reduce
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
_ = i18n._ _ = i18n._
@ -53,15 +54,16 @@ CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format')
CONF.import_opt('container_formats', 'daisy.common.config', CONF.import_opt('container_formats', 'daisy.common.config',
group='image_format') group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config') CONF.import_opt('image_property_quota', 'daisy.common.config')
CLUSTER_DEFAULT_NETWORKS = ['PUBLIC', 'DEPLOYMENT', 'PRIVATE', 'EXTERNAL', CLUSTER_DEFAULT_NETWORKS = ['PUBLICAPI', 'DEPLOYMENT', 'DATAPLANE', 'EXTERNAL',
'STORAGE', 'VXLAN', 'MANAGEMENT'] 'STORAGE', 'MANAGEMENT']
class Controller(controller.BaseController): class Controller(controller.BaseController):
""" """
WSGI controller for clusters resource in Daisy v1 API WSGI controller for clusters resource in Daisy v1 API
The clusters resource API is a RESTful web service for cluster data. The API The clusters resource API is a RESTful web service for cluster data.
is as follows:: The API is as follows::
GET /clusters -- Returns a set of brief metadata about clusters GET /clusters -- Returns a set of brief metadata about clusters
GET /clusters -- Returns a set of detailed metadata about GET /clusters -- Returns a set of detailed metadata about
@ -86,57 +88,74 @@ class Controller(controller.BaseController):
cluster_id = kwargs.get('id', None) cluster_id = kwargs.get('id', None)
errmsg = (_("I'm params checker.")) errmsg = (_("I'm params checker."))
LOG.debug(_("Params check for cluster-add or cluster-update begin!")) LOG.debug(
_("Params check for cluster-add or cluster-update begin!"))
def check_params_range(param, type=None): def check_params_range(param, type=None):
''' '''
param : input a list ,such as [start, end] param : input a list ,such as [start, end]
check condition: start must less than end, and existed with pair check condition: start must less than end,
and existed with pair
return True of False return True of False
''' '''
if len(param) != 2: if len(param) != 2:
msg = '%s range must be existed in pairs.' % type msg = '%s range must be existed in pairs.' % type
raise HTTPForbidden(explanation=msg) raise HTTPForbidden(explanation=msg)
if param[0] == None or param[0] == '': if param[0] is None or param[0] == '':
msg = 'The start value of %s range can not be None.' % type msg = 'The start value of %s range can not be None.' % type
raise HTTPForbidden(explanation=msg) raise HTTPForbidden(explanation=msg)
if param[1] == None: if param[1] is None:
msg = 'The end value of %s range can not be None.' % type msg = 'The end value of %s range can not be None.' % type
raise HTTPForbidden(explanation=msg) raise HTTPForbidden(explanation=msg)
if int(param[0]) > int(param[1]): if int(param[0]) > int(param[1]):
msg = 'The start value of the %s range must be less than the end value.' % type msg = 'The start value of the %s range must be less ' \
'than the end value.' % type
raise HTTPForbidden(explanation=msg) raise HTTPForbidden(explanation=msg)
if type not in ['vni']: if type not in ['vni']:
if int(param[0]) < 0 or int(param[0]) > 4096: if int(param[0]) < 0 or int(param[0]) > 4096:
msg = 'Invalid value of the start value(%s) of the %s range .' % (param[0], type) msg = 'Invalid value of the start value(%s) of ' \
'the %s range .' % (param[
0], type)
raise HTTPForbidden(explanation=msg) raise HTTPForbidden(explanation=msg)
if int(param[1]) < 0 or int(param[1]) > 4096: if int(param[1]) < 0 or int(param[1]) > 4096:
msg = 'Invalid value of the end value(%s) of the %s range .' % (param[1], type) msg = 'Invalid value of the end value(%s) of ' \
'the %s range .' % (param[
1], type)
raise HTTPForbidden(explanation=msg) raise HTTPForbidden(explanation=msg)
else: else:
if int(param[0]) < 0 or int(param[0]) > 16777216: if int(param[0]) < 0 or int(param[0]) > 16777216:
msg = 'Invalid value of the start value(%s) of the %s range .' % (param[0], type) msg = 'Invalid value of the start value(%s) of ' \
'the %s range .' % (param[
0], type)
raise HTTPForbidden(explanation=msg) raise HTTPForbidden(explanation=msg)
if int(param[1]) < 0 or int(param[1]) > 16777216: if int(param[1]) < 0 or int(param[1]) > 16777216:
msg = 'Invalid value of the end value(%s) of the %s range .' % (param[1], type) msg = 'Invalid value of the end value(%s) of ' \
'the %s range .' % (param[
1], type)
raise HTTPForbidden(explanation=msg) raise HTTPForbidden(explanation=msg)
return True return True
def _check_auto_scale(req, cluster_meta): def _check_auto_scale(req, cluster_meta):
if cluster_meta.has_key('auto_scale') and cluster_meta['auto_scale'] =='1': if 'auto_scale' in cluster_meta and cluster_meta[
meta = { "auto_scale":'1' } 'auto_scale'] == '1':
params = { 'filters': meta } meta = {"auto_scale": '1'}
clusters = registry.get_clusters_detail(req.context, **params) params = {'filters': meta}
clusters = registry.get_clusters_detail(
req.context, **params)
if clusters: if clusters:
if cluster_id: if cluster_id:
temp_cluster = [cluster for cluster in clusters if cluster['id'] !=cluster_id] temp_cluster = [
cluster for cluster in clusters if
cluster['id'] != cluster_id]
if temp_cluster: if temp_cluster:
errmsg = (_("already exist cluster auto_scale is true")) errmsg = (
raise HTTPBadRequest(explanation=errmsg) _("already exist cluster "
"auto_scale is true"))
raise HTTPBadRequest(explanation=errmsg)
else: else:
errmsg = (_("already exist cluster auto_scale is true")) errmsg = (
_("already exist cluster auto_scale is true"))
raise HTTPBadRequest(explanation=errmsg) raise HTTPBadRequest(explanation=errmsg)
def _ip_into_int(ip): def _ip_into_int(ip):
""" """
@ -144,7 +163,8 @@ class Controller(controller.BaseController):
:param ip: ip string :param ip: ip string
:return: decimalism integer :return: decimalism integer
""" """
return reduce(lambda x, y: (x<<8)+y, map(int, ip.split('.'))) return reduce(lambda x, y: (x << 8) + y,
map(int, ip.split('.')))
def _is_in_network_range(ip, network): def _is_in_network_range(ip, network):
""" """
@ -155,9 +175,13 @@ class Controller(controller.BaseController):
""" """
network = network.split('/') network = network.split('/')
mask = ~(2**(32 - int(network[1])) - 1) mask = ~(2**(32 - int(network[1])) - 1)
return (_ip_into_int(ip) & mask) == (_ip_into_int(network[0]) & mask) return (
_ip_into_int(ip) & mask) == (
_ip_into_int(
network[0]) & mask)
def _check_param_nonull_and_valid(values_set, keys_set, valids_set={}): def _check_param_nonull_and_valid(
values_set, keys_set, valids_set={}):
""" """
Check operation params is not null and valid. Check operation params is not null and valid.
:param values_set: Params set. :param values_set: Params set.
@ -167,10 +191,10 @@ class Controller(controller.BaseController):
""" """
for k in keys_set: for k in keys_set:
v = values_set.get(k, None) v = values_set.get(k, None)
if type(v) == type(True) and v == None: if isinstance(v, type(True)) and v is None:
errmsg = (_("Segment %s can't be None." % k)) errmsg = (_("Segment %s can't be None." % k))
raise HTTPBadRequest(explanation=errmsg) raise HTTPBadRequest(explanation=errmsg)
elif type(v) != type(True) and not v: elif not isinstance(v, type(True)) and not v:
errmsg = (_("Segment %s can't be None." % k)) errmsg = (_("Segment %s can't be None." % k))
raise HTTPBadRequest(explanation=errmsg) raise HTTPBadRequest(explanation=errmsg)
@ -183,15 +207,18 @@ class Controller(controller.BaseController):
def _get_network_detail(req, cluster_id, networks_list): def _get_network_detail(req, cluster_id, networks_list):
all_network_list = [] all_network_list = []
if cluster_id: if cluster_id:
all_network_list = registry.get_networks_detail(req.context, cluster_id) all_network_list = registry.get_networks_detail(
req.context, cluster_id)
if networks_list: if networks_list:
for net_id in networks_list: for net_id in networks_list:
network_detail = registry.get_network_metadata(req.context, net_id) network_detail = registry.get_network_metadata(
req.context, net_id)
all_network_list.append(network_detail) all_network_list.append(network_detail)
all_private_network_list = \ all_private_network_list = [
[network for network in all_network_list if network['network_type'] == "PRIVATE"] network for network in all_network_list if network[
'network_type'] == "DATAPLANE"]
return all_private_network_list return all_private_network_list
def _check_cluster_add_parameters(req, cluster_meta): def _check_cluster_add_parameters(req, cluster_meta):
@ -201,123 +228,92 @@ class Controller(controller.BaseController):
:param cluster_meta: params set :param cluster_meta: params set
:return:error message :return:error message
""" """
if cluster_meta.has_key('nodes'): if 'nodes' in cluster_meta:
orig_keys = list(eval(cluster_meta['nodes'])) orig_keys = list(eval(cluster_meta['nodes']))
for host_id in orig_keys: for host_id in orig_keys:
controller._raise_404_if_host_deleted(req, host_id) controller._raise_404_if_host_deleted(req, host_id)
if cluster_meta.has_key('networks'): if 'networks' in cluster_meta:
orig_keys = list(eval(cluster_meta['networks'])) orig_keys = list(eval(cluster_meta['networks']))
network_with_same_name = [] network_with_same_name = []
for network_id in orig_keys: for network_id in orig_keys:
network_name = controller._raise_404_if_network_deleted(req, network_id) network_name = \
controller._raise_404_if_network_deleted(
req, network_id)
if network_name in CLUSTER_DEFAULT_NETWORKS: if network_name in CLUSTER_DEFAULT_NETWORKS:
return (_("Network name %s of %s already exits" return (_("Network name %s of %s already exits"
" in the cluster, please check." % " in the cluster, please check." %
(network_name, network_id))) (network_name, network_id)))
if network_name in network_with_same_name: if network_name in network_with_same_name:
return (_("Network name can't be same with each other in 'networks[]', " return (_("Network name can't be same with "
"each other in 'networks[]', "
"please check.")) "please check."))
network_with_same_name.append(network_name) network_with_same_name.append(network_name)
# checkout network_params-------------------------------------------------- # checkout network_params
if cluster_meta.get('networking_parameters', None): if cluster_meta.get('networking_parameters', None):
networking_parameters = eval(cluster_meta['networking_parameters']) networking_parameters =\
_check_param_nonull_and_valid(networking_parameters, eval(cluster_meta['networking_parameters'])
['segmentation_type'])
segmentation_type_set = networking_parameters['segmentation_type'].split(",")
for segmentation_type in segmentation_type_set:
if segmentation_type not in ['vlan', 'vxlan', 'flat', 'gre']:
return (_("Segmentation_type of networking_parameters is not valid."))
if segmentation_type =='vxlan':
_check_param_nonull_and_valid(networking_parameters,['vni_range'])
elif segmentation_type =='gre':
_check_param_nonull_and_valid(networking_parameters,['gre_id_range'])
vlan_range = networking_parameters.get("vlan_range", None) # check logic_networks
vni_range = networking_parameters.get("vni_range", None) subnet_name_set = [] # record all subnets's name
gre_id_range = networking_parameters.get("gre_id_range", None) logic_network_name_set = [] # record all logic_network's name
#if (vlan_range and len(vlan_range) != 2) \
# or (vni_range and len(vni_range) != 2) \
# or (gre_id_range and len(gre_id_range) != 2):
# return (_("Range params must be pair."))
if vlan_range:
check_params_range(vlan_range, 'vlan')
if vni_range:
check_params_range(vni_range, 'vni')
if gre_id_range:
check_params_range(gre_id_range, 'gre_id')
# check logic_networks--------------------------------------------------
subnet_name_set = [] # record all subnets's name
logic_network_name_set = [] # record all logic_network's name
subnets_in_logic_network = {} subnets_in_logic_network = {}
external_logic_network_name = [] external_logic_network_name = []
if cluster_meta.get('logic_networks', None): if cluster_meta.get('logic_networks', None):
# get physnet_name list # get physnet_name list
all_private_cluster_networks_list = _get_network_detail( all_private_cluster_networks_list = _get_network_detail(
req, cluster_id, req, cluster_id, cluster_meta.get(
cluster_meta.get('networks', None) 'networks', None) if not isinstance(
if not isinstance(cluster_meta.get('networks', None), unicode) cluster_meta.get(
else eval(cluster_meta.get('networks', None))) 'networks', None), unicode) else eval(
cluster_meta.get(
'networks', None)))
if not all_private_cluster_networks_list: if not all_private_cluster_networks_list:
LOG.info("Private network is empty in db, it lead logical network config invalid.") LOG.info(
physnet_name_set = [net['name'] for net in all_private_cluster_networks_list] "Private network is empty in db, it lead "
"logical network config invalid.")
physnet_name_set = [net['name']
for net in
all_private_cluster_networks_list]
logic_networks = eval(cluster_meta['logic_networks']) logic_networks = eval(cluster_meta['logic_networks'])
for logic_network in logic_networks: for logic_network in logic_networks:
subnets_in_logic_network[logic_network['name']] = [] subnets_in_logic_network[logic_network['name']] = []
# We force setting the physnet_name of flat logical network to 'flat'. # We force setting the physnet_name of flat logical
if logic_network.get('segmentation_type', None) == "flat": # network to 'flat'.
if logic_network['physnet_name'] != "physnet1" or logic_network['type'] != "external": if logic_network.get(
LOG.info("When 'segmentation_type' is flat the 'physnet_name' and 'type' segmentation" 'segmentation_type', None) == "flat":
"must be 'physnet1'' and 'external'', but got '%s' and '%s'.We have changed" if logic_network['physnet_name'] != "physnet1" or \
"it to the valid value.") logic_network[
'type'] != "external":
LOG.info(
"When 'segmentation_type' is flat the "
"'physnet_name' and 'type' segmentation"
"must be 'physnet1'' and 'external'', "
"but got '%s' and '%s'.We have changed"
"it to the valid value.")
logic_network['physnet_name'] = "physnet1" logic_network['physnet_name'] = "physnet1"
logic_network['type'] = "external" logic_network['type'] = "external"
physnet_name_set.append("physnet1") physnet_name_set.append("physnet1")
_check_param_nonull_and_valid( _check_param_nonull_and_valid(
logic_network, logic_network,
['name', 'type', 'physnet_name', 'segmentation_type', 'shared', 'segmentation_id'], ['name', 'type', 'physnet_name',
{'segmentation_type' : networking_parameters['segmentation_type'], 'segmentation_type', 'shared', 'segmentation_id'],
'physnet_name' : ','.join(physnet_name_set), {'segmentation_type': networking_parameters[
'type' : ','.join(["external", "internal"])}) 'segmentation_type'],
'physnet_name': ','.join(physnet_name_set),
'type': ','.join(["external", "internal"])})
if logic_network['type'] == "external": if logic_network['type'] == "external":
external_logic_network_name.append(logic_network['name']) external_logic_network_name.append(
logic_network['name'])
logic_network_name_set.append(logic_network['name']) logic_network_name_set.append(logic_network['name'])
# By segmentation_type check segmentation_id is in range # checkout subnets params------------------------------
segmentation_id = logic_network.get('segmentation_id', None)
if segmentation_id:
err = "Segmentation_id is out of private network %s of %s.Vaild range is [%s, %s]."
segmentation_type = logic_network.get('segmentation_type', None)
if 0 == cmp(segmentation_type, "vlan"):
private_vlan_range = \
[(net['vlan_start'], net['vlan_end'])
for net in all_private_cluster_networks_list
if logic_network['physnet_name'] == net['name']]
if private_vlan_range and \
not private_vlan_range[0][0] or \
not private_vlan_range[0][1]:
return (_("Private network plane %s don't config the 'vlan_start' or "
"'vlan_end' parameter."))
if int(segmentation_id) not in range(private_vlan_range[0][0], private_vlan_range[0][1]):
return (_(err % ("vlan_range", logic_network['physnet_name'],
private_vlan_range[0][0], private_vlan_range[0][1])))
elif 0 == cmp(segmentation_type, "vxlan") and vni_range:
if int(segmentation_id) not in range(vni_range[0], vni_range[1]):
return (_("Segmentation_id is out of vni_range."))
elif 0 == cmp(segmentation_type, "gre") and gre_id_range:
if int(segmentation_id) not in range(gre_id_range[0], gre_id_range[1]):
return (_("Segmentation_id is out of gre_id_range."))
# checkout subnets params--------------------------------------------------
if logic_network.get('subnets', None): if logic_network.get('subnets', None):
subnet_data = logic_network['subnets'] subnet_data = logic_network['subnets']
for subnet in subnet_data: for subnet in subnet_data:
@ -325,49 +321,78 @@ class Controller(controller.BaseController):
subnet, subnet,
['name', 'cidr']) ['name', 'cidr'])
subnet_name_set.append(subnet['name']) subnet_name_set.append(subnet['name'])
# By cidr check floating_ranges is in range and not overlap # By cidr check floating_ranges is in range
#---------------start----- # and not overlap
if subnet['gateway'] and not _is_in_network_range(subnet['gateway'], subnet['cidr']): # ---------------start-----
if subnet['gateway'] and not \
_is_in_network_range(
subnet['gateway'], subnet['cidr']):
return (_("Wrong gateway format.")) return (_("Wrong gateway format."))
if subnet['floating_ranges']: if subnet['floating_ranges']:
inter_ip = lambda x: '.'.join([str(x/(256**i)%256) for i in range(3,-1,-1)]) inter_ip = lambda x: '.'.join(
[str(x / (256**i) % 256) for i in
range(3, -1, -1)])
floating_ranges_with_int_ip = list() floating_ranges_with_int_ip = list()
sorted_floating_ranges = list() sorted_floating_ranges = list()
sorted_floating_ranges_with_int_ip = list() sorted_floating_ranges_with_int_ip = list()
for floating_ip in subnet['floating_ranges']: for floating_ip in subnet[
'floating_ranges']:
if len(floating_ip) != 2: if len(floating_ip) != 2:
return (_("Floating ip must be paris.")) return (
_("Floating ip must "
"be paris."))
ip_start = _ip_into_int(floating_ip[0]) ip_start = _ip_into_int(floating_ip[0])
ip_end = _ip_into_int(floating_ip[1]) ip_end = _ip_into_int(floating_ip[1])
if ip_start > ip_end: if ip_start > ip_end:
return (_("Wrong floating ip format.")) return (
floating_ranges_with_int_ip.append([ip_start, ip_end]) _("Wrong floating ip format."))
sorted_floating_ranges_with_int_ip = sorted(floating_ranges_with_int_ip, key=lambda x : x[0]) floating_ranges_with_int_ip.append(
for ip_range in sorted_floating_ranges_with_int_ip: [ip_start, ip_end])
sorted_floating_ranges_with_int_ip = \
sorted(floating_ranges_with_int_ip,
key=lambda x: x[0])
for ip_range in \
sorted_floating_ranges_with_int_ip:
ip_start = inter_ip(ip_range[0]) ip_start = inter_ip(ip_range[0])
ip_end = inter_ip(ip_range[1]) ip_end = inter_ip(ip_range[1])
sorted_floating_ranges.append([ip_start, ip_end]) sorted_floating_ranges.append(
[ip_start, ip_end])
last_rang_ip = [] last_rang_ip = []
for floating in sorted_floating_ranges: for floating in sorted_floating_ranges:
if not _is_in_network_range(floating[0], subnet['cidr']) \ if not _is_in_network_range(
or not _is_in_network_range(floating[1], subnet['cidr']): floating[0],
return (_("Floating ip or gateway is out of range cidr.")) subnet['cidr']) or not \
_is_in_network_range(
floating[1], subnet['cidr']):
return (
_("Floating ip or gateway "
"is out of range cidr."))
err_list = [err for err in last_rang_ip if _ip_into_int(floating[0]) < err] err_list = [
err for err in last_rang_ip if
_ip_into_int(
floating[0]) < err]
if last_rang_ip and 0 < len(err_list): if last_rang_ip and 0 < len(err_list):
return (_("Between floating ip range can not be overlap.")) return (
last_rang_ip.append(_ip_into_int(floating[1])) _("Between floating ip range "
subnets_in_logic_network[logic_network['name']].append(subnet['name']) "can not be overlap."))
last_rang_ip.append(
_ip_into_int(floating[1]))
subnets_in_logic_network[logic_network[
'name']].append(subnet['name'])
# check external logical network uniqueness # check external logical network uniqueness
if len(external_logic_network_name) > 1: if len(external_logic_network_name) > 1:
return (_("External logical network is uniqueness in the cluster.Got %s." % return (_("External logical network is uniqueness "
",".join(external_logic_network_name))) "in the cluster.Got %s." %
",".join(external_logic_network_name)))
# check logic_network_name uniqueness # check logic_network_name uniqueness
if len(logic_network_name_set) != len(set(logic_network_name_set)): if len(logic_network_name_set) != len(
return (_("Logic network name segment is repetition.")) set(logic_network_name_set)):
return (_("Logic network name segment "
"is repetition."))
# check subnet_name uniqueness # check subnet_name uniqueness
if len(subnet_name_set) != len(set(subnet_name_set)): if len(subnet_name_set) != len(set(subnet_name_set)):
@@ -375,36 +400,47 @@ class Controller(controller.BaseController):
            cluster_meta['logic_networks'] = unicode(logic_networks)

        # check routers------------------------------------------------
        subnet_name_set_deepcopy = copy.deepcopy(subnet_name_set)
        router_name_set = []  # record all routers name
        if cluster_meta.get('routers', None):
            router_data = eval(cluster_meta['routers'])
            for router in router_data:
                _check_param_nonull_and_valid(router, ['name'])

                # check relevance logic_network is valid
                external_logic_network_data = router.get(
                    'external_logic_network', None)
                if external_logic_network_data and \
                        external_logic_network_data not in \
                        logic_network_name_set:
                    return (_("Logic_network %s is not valid range." %
                              external_logic_network_data))

                router_name_set.append(router['name'])

                # check relevance subnets is valid
                for subnet in router.get('subnets', []):
                    if subnet not in subnet_name_set:
                        return (
                            _("Subnet %s is not valid range." %
                              subnet))
                    # subnet cann't relate with two routers
                    if subnet not in subnet_name_set_deepcopy:
                        return (
                            _("The subnet can't be related with "
                              "multiple routers."))
                    subnet_name_set_deepcopy.remove(subnet)

                if external_logic_network_data and \
                        subnets_in_logic_network[
                            external_logic_network_data] and \
                        set(subnets_in_logic_network[
                            external_logic_network_data]). \
                        issubset(set(router['subnets'])):
                    return (
                        _("Logic network's subnets is all related"
                          " with a router, it's not allowed."))

            # check subnet_name uniqueness
            if len(router_name_set) != len(set(router_name_set)):
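
The router checks above enforce two rules: a subnet may be attached to at most one router, and a router must not absorb every subnet of its external logic network. A plain-python sketch of the same rules (names and data shapes are illustrative, not the controller's API):

def check_routers(routers, subnet_names, subnets_in_logic_network):
    # Track which subnets have not been claimed by a router yet.
    unassigned = set(subnet_names)
    for router in routers:
        for subnet in router.get('subnets', []):
            if subnet not in subnet_names:
                return "Subnet %s is not valid range." % subnet
            if subnet not in unassigned:
                return "The subnet can't be related with multiple routers."
            unassigned.remove(subnet)
        external = router.get('external_logic_network')
        ext_subnets = set(subnets_in_logic_network.get(external, []))
        if external and ext_subnets and \
                ext_subnets.issubset(set(router.get('subnets', []))):
            return "Logic network's subnets is all related with a router."
    return None


print(check_routers([{'name': 'r1', 'subnets': ['s1']},
                     {'name': 'r2', 'subnets': ['s1']}],
                    ['s1', 's2'], {}))  # second router re-uses s1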
@@ -413,10 +449,13 @@ class Controller(controller.BaseController):
            _check_auto_scale(req, cluster_meta)
            check_result = _check_cluster_add_parameters(req, cluster_meta)
            if 0 != cmp(check_result, errmsg):
                LOG.exception(
                    _("Params check for cluster-add or cluster-update "
                      "is failed!"))
                raise HTTPBadRequest(explanation=check_result)

            LOG.debug(
                _("Params check for cluster-add or cluster-update is done!"))
            return f(*args, **kwargs)
        return wrapper
@@ -448,7 +487,8 @@ class Controller(controller.BaseController):
    def _raise_404_if_network_deleted(self, req, network_id):
        network = self.get_network_meta_or_404(req, network_id)
        if network['deleted']:
            msg = _("Network with identifier %s has been deleted.") % \
                network_id
            raise HTTPNotFound(msg)
        return network.get('name', None)

@@ -500,9 +540,10 @@ class Controller(controller.BaseController):
        if not cluster_name:
            raise ValueError('cluster name is null!')
        cluster_name_split = cluster_name.split('_')
        for cluster_name_info in cluster_name_split:
            if not cluster_name_info.isalnum():
                raise ValueError(
                    'cluster name must be numbers or letters or underscores !')
        if cluster_meta.get('nodes', None):
            orig_keys = list(eval(cluster_meta['nodes']))
            for host_id in orig_keys:

@@ -514,11 +555,15 @@ class Controller(controller.BaseController):
                    raise HTTPForbidden(explanation=msg)
                if node.get('interfaces', None):
                    interfaces = node['interfaces']
                    input_host_pxe_info = [
                        interface for interface in interfaces if interface.get(
                            'is_deployment', None) == 1]
                    if not input_host_pxe_info and node.get(
                            'os_status', None) != 'active':
                        msg = _(
                            "The host %s has more than one dhcp server, "
                            "please choose one interface for deployment") % \
                            host_id
                        raise HTTPServerError(explanation=msg)
        print cluster_name
        print cluster_meta
@@ -537,7 +582,7 @@ class Controller(controller.BaseController):
        """
        self._enforce(req, 'delete_cluster')
        # cluster = self.get_cluster_meta_or_404(req, id)
        print "delete_cluster:%s" % id
        try:
            registry.delete_cluster_metadata(req.context, id)

@@ -556,14 +601,15 @@ class Controller(controller.BaseController):
                                request=req,
                                content_type="text/plain")
        except exception.InUseByStore as e:
            msg = (_("cluster %(id)s could not be deleted because "
                     "it is in use: "
                     "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
            LOG.warn(msg)
            raise HTTPConflict(explanation=msg,
                               request=req,
                               content_type="text/plain")
        else:
            # self.notifier.info('cluster.delete', cluster)
            return Response(body='', status=200)

    @utils.mutating
@@ -619,26 +665,31 @@ class Controller(controller.BaseController):
        :retval Returns the updated cluster information as a mapping
        """
        self._enforce(req, 'update_cluster')
        if 'nodes' in cluster_meta:
            orig_keys = list(eval(cluster_meta['nodes']))
            for host_id in orig_keys:
                self._raise_404_if_host_deleted(req, host_id)
                node = registry.get_host_metadata(req.context, host_id)
                if node['status'] == 'in-cluster':
                    host_cluster = registry.get_host_clusters(
                        req.context, host_id)
                    if host_cluster[0]['cluster_id'] != id:
                        msg = _("Forbidden to add host %s with status "
                                "'in-cluster' in another cluster") % host_id
                        raise HTTPForbidden(explanation=msg)
                if node.get('interfaces', None):
                    interfaces = node['interfaces']
                    input_host_pxe_info = [
                        interface for interface in interfaces if interface.get(
                            'is_deployment', None) == 1]
                    if not input_host_pxe_info and node.get(
                            'os_status', None) != 'active':
                        msg = _(
                            "The host %s has more than one dhcp server, "
                            "please choose one interface for deployment") % \
                            host_id
                        raise HTTPServerError(explanation=msg)
        if 'networks' in cluster_meta:
            orig_keys = list(eval(cluster_meta['networks']))
            for network_id in orig_keys:
                self._raise_404_if_network_deleted(req, network_id)

@@ -687,6 +738,7 @@ class Controller(controller.BaseController):
        return {'cluster_meta': cluster_meta}


class ProjectDeserializer(wsgi.JSONRequestDeserializer):
    """Handles deserialization of specific controller method requests."""

@@ -701,6 +753,7 @@ class ProjectDeserializer(wsgi.JSONRequestDeserializer):
    def update_cluster(self, request):
        return self._deserialize(request)


class ProjectSerializer(wsgi.JSONResponseSerializer):
    """Handles serialization of specific controller method responses."""

@@ -727,6 +780,7 @@ class ProjectSerializer(wsgi.JSONResponseSerializer):
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(dict(cluster=cluster_meta))
        return response

    def get_cluster(self, response, result):
        cluster_meta = result['cluster_meta']
        response.status = 201

@@ -734,9 +788,9 @@ class ProjectSerializer(wsgi.JSONResponseSerializer):
        response.body = self.to_json(dict(cluster=cluster_meta))
        return response


def create_resource():
    """Projects resource factory method"""
    deserializer = ProjectDeserializer()
    serializer = ProjectSerializer()
    return wsgi.Resource(Controller(), deserializer, serializer)

View File
@@ -52,21 +52,25 @@ CONF.import_opt('container_formats', 'daisy.common.config',
                group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config')


class Controller(controller.BaseController):
    """
    WSGI controller for components resource in Daisy v1 API

    The components resource API is a RESTful web service for component data.
    The API is as follows::

        GET /components -- Returns a set of brief metadata about components
        GET /components/detail -- Returns a set of detailed metadata about
                                  components
        HEAD /components/<ID> --
        Return metadata about an component with id <ID>
        GET /components/<ID> --
        Return component data for component with id <ID>
        POST /components -- Store component data and return metadata about the
                            newly-stored component
        PUT /components/<ID> --
        Update component metadata and/or upload component
        data for a previously-reserved component
        DELETE /components/<ID> -- Delete the component with id <ID>
    """
@@ -132,15 +136,16 @@ class Controller(controller.BaseController):
        :raises HTTPBadRequest if x-component-name is missing
        """
        self._enforce(req, 'add_component')
        # component_id=component_meta["id"]
        # component_owner=component_meta["owner"]
        component_name = component_meta["name"]
        component_description = component_meta["description"]
        # print component_id
        # print component_owner
        print component_name
        print component_description
        component_meta = registry.add_component_metadata(
            req.context, component_meta)
        return {'component_meta': component_meta}

@@ -156,7 +161,7 @@ class Controller(controller.BaseController):
        """
        self._enforce(req, 'delete_component')
        # component = self.get_component_meta_or_404(req, id)
        print "delete_component:%s" % id
        try:
            registry.delete_component_metadata(req.context, id)

@@ -175,14 +180,15 @@ class Controller(controller.BaseController):
                                request=req,
                                content_type="text/plain")
        except exception.InUseByStore as e:
            msg = (_("component %(id)s could not be "
                     "deleted because it is in use: "
                     "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
            LOG.warn(msg)
            raise HTTPConflict(explanation=msg,
                               request=req,
                               content_type="text/plain")
        else:
            # self.notifier.info('component.delete', component)
            return Response(body='', status=200)

    @utils.mutating

@@ -280,6 +286,7 @@ class Controller(controller.BaseController):
        return {'component_meta': component_meta}


class ComponentDeserializer(wsgi.JSONRequestDeserializer):
    """Handles deserialization of specific controller method requests."""

@@ -294,6 +301,7 @@ class ComponentDeserializer(wsgi.JSONRequestDeserializer):
    def update_component(self, request):
        return self._deserialize(request)


class ComponentSerializer(wsgi.JSONResponseSerializer):
    """Handles serialization of specific controller method responses."""

@@ -313,6 +321,7 @@ class ComponentSerializer(wsgi.JSONResponseSerializer):
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(dict(component=component_meta))
        return response

    def get_component(self, response, result):
        component_meta = result['component_meta']
        response.status = 201

@@ -320,9 +329,9 @@ class ComponentSerializer(wsgi.JSONResponseSerializer):
        response.body = self.to_json(dict(component=component_meta))
        return response


def create_resource():
    """Components resource factory method"""
    deserializer = ComponentDeserializer()
    serializer = ComponentSerializer()
    return wsgi.Resource(Controller(), deserializer, serializer)

View File
@@ -52,21 +52,28 @@ CONF.import_opt('container_formats', 'daisy.common.config',
                group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config')


class Controller(controller.BaseController):
    """
    WSGI controller for config_files resource in Daisy v1 API

    The config_files resource API is a RESTful web service
    for config_file data. The API
    is as follows::

        GET /config_files --
        Returns a set of brief metadata about config_files
        GET /config_files/detail -- Returns a set of detailed metadata about
                                    config_files
        HEAD /config_files/<ID> --
        Return metadata about an config_file with id <ID>
        GET /config_files/<ID> --
        Return config_file data for config_file with id <ID>
        POST /config_files --
        Store config_file data and return metadata about the
        newly-stored config_file
        PUT /config_files/<ID> --
        Update config_file metadata and/or upload config_file
        data for a previously-reserved config_file
        DELETE /config_files/<ID> -- Delete the config_file with id <ID>
    """

@@ -132,13 +139,14 @@ class Controller(controller.BaseController):
        :raises HTTPBadRequest if x-config_file-name is missing
        """
        self._enforce(req, 'add_config_file')
        # config_file_id=config_file_meta["id"]
        config_file_name = config_file_meta["name"]
        config_file_description = config_file_meta["description"]
        # print config_file_id
        print config_file_name
        print config_file_description
        config_file_meta = registry.add_config_file_metadata(
            req.context, config_file_meta)
        return {'config_file_meta': config_file_meta}

@@ -171,14 +179,15 @@ class Controller(controller.BaseController):
                                request=req,
                                content_type="text/plain")
        except exception.InUseByStore as e:
            msg = (_("config_file %(id)s could not be "
                     "deleted because it is in use: "
                     "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
            LOG.warn(msg)
            raise HTTPConflict(explanation=msg,
                               request=req,
                               content_type="text/plain")
        else:
            # self.notifier.info('config_file.delete', config_file)
            return Response(body='', status=200)

    @utils.mutating

@@ -215,7 +224,8 @@ class Controller(controller.BaseController):
        self._enforce(req, 'get_config_files')
        params = self._get_query_params(req)
        try:
            config_files = registry.get_config_files_detail(
                req.context, **params)
        except exception.Invalid as e:
            raise HTTPBadRequest(explanation=e.msg, request=req)
        return dict(config_files=config_files)

@@ -241,9 +251,8 @@ class Controller(controller.BaseController):
                                request=req,
                                content_type="text/plain")
        try:
            config_file_meta = registry.update_config_file_metadata(
                req.context, id, config_file_meta)
        except exception.Invalid as e:
            msg = (_("Failed to update config_file metadata. Got error: %s") %

@@ -276,6 +285,7 @@ class Controller(controller.BaseController):
        return {'config_file_meta': config_file_meta}


class Config_fileDeserializer(wsgi.JSONRequestDeserializer):
    """Handles deserialization of specific controller method requests."""

@@ -290,6 +300,7 @@ class Config_fileDeserializer(wsgi.JSONRequestDeserializer):
    def update_config_file(self, request):
        return self._deserialize(request)


class Config_fileSerializer(wsgi.JSONResponseSerializer):
    """Handles serialization of specific controller method responses."""

@@ -317,9 +328,9 @@ class Config_fileSerializer(wsgi.JSONResponseSerializer):
        response.body = self.to_json(dict(config_file=config_file_meta))
        return response


def create_resource():
    """config_files resource factory method"""
    deserializer = Config_fileDeserializer()
    serializer = Config_fileSerializer()
    return wsgi.Resource(Controller(), deserializer, serializer)

View File
@@ -53,21 +53,26 @@ CONF.import_opt('container_formats', 'daisy.common.config',
                group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config')


class Controller(controller.BaseController):
    """
    WSGI controller for config_sets resource in Daisy v1 API

    The config_sets resource API is a RESTful web service for config_set data.
    The API is as follows::

        GET /config_sets -- Returns a set of brief metadata about config_sets
        GET /config_sets/detail -- Returns a set of detailed metadata about
                                   config_sets
        HEAD /config_sets/<ID> --
        Return metadata about an config_set with id <ID>
        GET /config_sets/<ID> --
        Return config_set data for config_set with id <ID>
        POST /config_sets --
        Store config_set data and return metadata about the
        newly-stored config_set
        PUT /config_sets/<ID> --
        Update config_set metadata and/or upload config_set
        data for a previously-reserved config_set
        DELETE /config_sets/<ID> -- Delete the config_set with id <ID>
    """

@@ -125,7 +130,8 @@ class Controller(controller.BaseController):
    def _raise_404_if_cluster_deleted(self, req, cluster_id):
        cluster = self.get_cluster_meta_or_404(req, cluster_id)
        if cluster['deleted']:
            msg = _("cluster with identifier %s has been deleted.") % \
                cluster_id
            raise HTTPNotFound(msg)

    @utils.mutating

@@ -139,13 +145,14 @@ class Controller(controller.BaseController):
        :raises HTTPBadRequest if x-config_set-name is missing
        """
        self._enforce(req, 'add_config_set')
        # config_set_id=config_set_meta["id"]
        config_set_name = config_set_meta["name"]
        config_set_description = config_set_meta["description"]
        # print config_set_id
        print config_set_name
        print config_set_description
        config_set_meta = registry.add_config_set_metadata(
            req.context, config_set_meta)
        return {'config_set_meta': config_set_meta}

@@ -171,21 +178,20 @@ class Controller(controller.BaseController):
                                request=req,
                                content_type="text/plain")
        except exception.Forbidden as e:
            LOG.warn(e)
            raise HTTPForbidden(explanation=e,
                                request=req,
                                content_type="text/plain")
        except exception.InUseByStore as e:
            msg = (_("config_set %(id)s could not be "
                     "deleted because it is in use: "
                     "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
            LOG.warn(msg)
            raise HTTPConflict(explanation=msg,
                               request=req,
                               content_type="text/plain")
        else:
            # self.notifier.info('config_set.delete', config_set)
            return Response(body='', status=200)

    @utils.mutating

@@ -222,7 +228,8 @@ class Controller(controller.BaseController):
        self._enforce(req, 'get_config_sets')
        params = self._get_query_params(req)
        try:
            config_sets = registry.get_config_sets_detail(
                req.context, **params)
        except exception.Invalid as e:
            raise HTTPBadRequest(explanation=e.msg, request=req)
        return dict(config_sets=config_sets)

@@ -248,9 +255,8 @@ class Controller(controller.BaseController):
                                request=req,
                                content_type="text/plain")
        try:
            config_set_meta = registry.update_config_set_metadata(
                req.context, id, config_set_meta)
        except exception.Invalid as e:
            msg = (_("Failed to update config_set metadata. Got error: %s") %

@@ -282,48 +288,51 @@ class Controller(controller.BaseController):
            self.notifier.info('config_set.update', config_set_meta)

        return {'config_set_meta': config_set_meta}

    def _raise_404_if_role_exist(self, req, config_set_meta):
        role_id_list = []
        try:
            roles = registry.get_roles_detail(req.context)
            for role in roles:
                for role_name in eval(config_set_meta['role']):
                    if role['cluster_id'] == config_set_meta[
                            'cluster'] and role['name'] == role_name:
                        role_id_list.append(role['id'])
                        break
        except exception.Invalid as e:
            raise HTTPBadRequest(explanation=e.msg, request=req)
        return role_id_list

    @utils.mutating
    def cluster_config_set_update(self, req, config_set_meta):
        if 'cluster' in config_set_meta:
            orig_cluster = str(config_set_meta['cluster'])
            self._raise_404_if_cluster_deleted(req, orig_cluster)
            try:
                if config_set_meta.get('role', None):
                    role_id_list = self._raise_404_if_role_exist(
                        req, config_set_meta)
                    if len(role_id_list) == len(eval(config_set_meta['role'])):
                        backend = manager.configBackend('clushshell', req)
                        backend.push_config_by_roles(role_id_list)
                    else:
                        msg = "the role is not exist"
                        LOG.error(msg)
                        raise HTTPNotFound(msg)
                else:
                    roles = registry.get_roles_detail(req.context)
                    role_id_list = []
                    for role in roles:
                        if role['cluster_id'] == config_set_meta['cluster']:
                            role_id_list.append(role['id'])
                    backend = manager.configBackend('clushshell', req)
                    backend.push_config_by_roles(role_id_list)
            except exception.Invalid as e:
                raise HTTPBadRequest(explanation=e.msg, request=req)

            config_status = {"status": "config successful"}
            return {'config_set': config_status}
        else:
            msg = "the cluster is not exist"
            LOG.error(msg)
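
The hunk above changes how configuration is pushed: instead of constructing one backend per role and calling push_config() repeatedly, the handler now collects all matching role ids and hands the whole list to a single configBackend via push_config_by_roles(). A minimal sketch of that batching idea, using an illustrative stand-in for the backend object:

class FakeBackend(object):
    # Illustrative stand-in for manager.configBackend('clushshell', req).
    def push_config_by_roles(self, role_id_list):
        print("pushing config for roles: %s" % ", ".join(role_id_list))


def push_cluster_config(roles, cluster_id, backend):
    # Gather every role that belongs to the cluster, then push once.
    role_id_list = [role['id'] for role in roles
                    if role['cluster_id'] == cluster_id]
    backend.push_config_by_roles(role_id_list)
    return {"status": "config successful"}


roles = [{'id': 'r-1', 'cluster_id': 'c-1'},
         {'id': 'r-2', 'cluster_id': 'c-1'},
         {'id': 'r-3', 'cluster_id': 'c-2'}]
print(push_cluster_config(roles, 'c-1', FakeBackend()))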
@@ -332,18 +341,22 @@ class Controller(controller.BaseController):
    @utils.mutating
    def cluster_config_set_progress(self, req, config_set_meta):
        role_list = []
        if 'cluster' in config_set_meta:
            orig_cluster = str(config_set_meta['cluster'])
            self._raise_404_if_cluster_deleted(req, orig_cluster)
            try:
                if config_set_meta.get('role', None):
                    role_id_list = self._raise_404_if_role_exist(
                        req, config_set_meta)
                    if len(role_id_list) == len(eval(config_set_meta['role'])):
                        for role_id in role_id_list:
                            role_info = {}
                            role_meta = registry.get_role_metadata(
                                req.context, role_id)
                            role_info['role-name'] = role_meta['name']
                            role_info['config_set_update_progress'] = \
                                role_meta[
                                    'config_set_update_progress']
                            role_list.append(role_info)
                    else:
                        msg = "the role is not exist"

@@ -354,19 +367,21 @@ class Controller(controller.BaseController):
                    for role in roles:
                        if role['cluster_id'] == config_set_meta['cluster']:
                            role_info = {}
                            role_info['role-name'] = role['name']
                            role_info['config_set_update_progress'] = role[
                                'config_set_update_progress']
                            role_list.append(role_info)
            except exception.Invalid as e:
                raise HTTPBadRequest(explanation=e.msg, request=req)

            return role_list
        else:
            msg = "the cluster is not exist"
            LOG.error(msg)
            raise HTTPNotFound(msg)


class Config_setDeserializer(wsgi.JSONRequestDeserializer):
    """Handles deserialization of specific controller method requests."""

@@ -387,6 +402,7 @@ class Config_setDeserializer(wsgi.JSONRequestDeserializer):
    def cluster_config_set_progress(self, request):
        return self._deserialize(request)


class Config_setSerializer(wsgi.JSONResponseSerializer):
    """Handles serialization of specific controller method responses."""

@@ -426,9 +442,9 @@ class Config_setSerializer(wsgi.JSONResponseSerializer):
        response.body = self.to_json(dict(config_set=result))
        return response


def create_resource():
    """config_sets resource factory method"""
    deserializer = Config_setDeserializer()
    serializer = Config_setSerializer()
    return wsgi.Resource(Controller(), deserializer, serializer)

View File
@@ -52,6 +52,7 @@ CONF.import_opt('container_formats', 'daisy.common.config',
                group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config')


class Controller(controller.BaseController):
    """
    WSGI controller for configs resource in Daisy v1 API

@@ -120,32 +121,40 @@ class Controller(controller.BaseController):
            if PARAM in req.params:
                params[PARAM] = req.params.get(PARAM)
        return params

    def _raise_404_if_config_set_delete(self, req, config_set_id):
        config_set = self.get_config_set_meta_or_404(req, config_set_id)
        if config_set['deleted']:
            msg = _("config_set with identifier %s has been deleted.") % \
                config_set_id
            raise HTTPNotFound(msg)

    def _raise_404_if_config_file_delete(self, req, config_file_id):
        config_file = self.get_config_file_meta_or_404(req, config_file_id)
        if config_file['deleted']:
            msg = _(
                "config_file with identifier %s has been deleted.") % \
                config_file_id
            raise HTTPNotFound(msg)

    def _raise_404_if_role_exist(self, req, config_meta):
        role_id = ""
        try:
            roles = registry.get_roles_detail(req.context)
            for role in roles:
                if role['cluster_id'] == config_meta[
                        'cluster'] and role['name'] == config_meta['role']:
                    role_id = role['id']
                    break
        except exception.Invalid as e:
            raise HTTPBadRequest(explanation=e.msg, request=req)
        return role_id

    def _raise_404_if_cluster_deleted(self, req, cluster_id):
        cluster = self.get_cluster_meta_or_404(req, cluster_id)
        if cluster['deleted']:
            msg = _("cluster with identifier %s has been deleted.") % \
                cluster_id
            raise HTTPNotFound(msg)

    @utils.mutating

@@ -159,19 +168,57 @@ class Controller(controller.BaseController):
        :raises HTTPBadRequest if x-config-name is missing
        """
        self._enforce(req, 'add_config')
        if ('role' in config_meta and
                'host_id' in config_meta):
            msg = "role name and host id only have one"
            LOG.error(msg)
            raise HTTPBadRequest(explanation=msg, request=req)
        elif 'role' in config_meta:
            # the first way to add config
            # when have 'role', config_set will be ignore
            if config_meta.get('cluster'):
                orig_cluster = str(config_meta['cluster'])
                self._raise_404_if_cluster_deleted(req, orig_cluster)
            else:
                msg = "cluster must be given when add config for role"
                LOG.error(msg)
                raise HTTPNotFound(msg)
            if config_meta['role']:
                role_id = self._raise_404_if_role_exist(req, config_meta)
                if not role_id:
                    msg = "the role name is not exist"
                    LOG.error(msg)
                    raise HTTPNotFound(msg)
            else:
                msg = "the role name can't be empty"
                LOG.error(msg)
                raise HTTPBadRequest(explanation=msg, request=req)
        elif 'host_id' in config_meta:
            # the second way to add config
            # when have 'host_id', config_set will be ignore
            if config_meta['host_id']:
                self.get_host_meta_or_404(req, config_meta['host_id'])
            else:
                msg = "the host id can't be empty"
                LOG.error(msg)
                raise HTTPBadRequest(explanation=msg, request=req)
        elif 'config_set' in config_meta:
            # the third way to add config
            if config_meta['config_set']:
                self.get_config_set_meta_or_404(req,
                                                config_meta['config_set'])
            else:
                msg = "config set id can't be empty"
                LOG.error(msg)
                raise HTTPBadRequest(explanation=msg, request=req)
        else:
            msg = "no way to add config"
            LOG.error(msg)
            raise HTTPBadRequest(explanation=msg, request=req)

        config_meta = registry.config_interface_metadata(
            req.context, config_meta)
        return config_meta
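
The rewritten add_config accepts exactly one of three mutually exclusive inputs: 'role' (which additionally requires 'cluster'), 'host_id', or 'config_set'; passing both 'role' and 'host_id' is rejected up front. A plain-python sketch of that precedence, without the WSGI/registry plumbing (the function name and example data are illustrative):

def classify_config_request(config_meta):
    # Mirror the branch order above: role and host_id are exclusive,
    # role implies cluster, and config_set is the fallback.
    if 'role' in config_meta and 'host_id' in config_meta:
        raise ValueError("role name and host id only have one")
    if 'role' in config_meta:
        if not config_meta.get('cluster'):
            raise ValueError("cluster must be given when add config for role")
        return 'by-role'
    if 'host_id' in config_meta:
        return 'by-host'
    if 'config_set' in config_meta:
        return 'by-config-set'
    raise ValueError("no way to add config")


print(classify_config_request({'role': 'demo_role', 'cluster': 'c-1'}))
print(classify_config_request({'host_id': 'h-1'}))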
    @utils.mutating

@@ -204,14 +251,15 @@ class Controller(controller.BaseController):
                                request=req,
                                content_type="text/plain")
        except exception.InUseByStore as e:
            msg = (_("config %(id)s could not be "
                     "deleted because it is in use: "
                     "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
            LOG.warn(msg)
            raise HTTPConflict(explanation=msg,
                               request=req,
                               content_type="text/plain")
        else:
            # self.notifier.info('config.delete', config)
            return Response(body='', status=200)

    @utils.mutating

@@ -253,6 +301,7 @@ class Controller(controller.BaseController):
            raise HTTPBadRequest(explanation=e.msg, request=req)
        return dict(configs=configs)


class ConfigDeserializer(wsgi.JSONRequestDeserializer):
    """Handles deserialization of specific controller method requests."""

@@ -263,10 +312,11 @@ class ConfigDeserializer(wsgi.JSONRequestDeserializer):
    def add_config(self, request):
        return self._deserialize(request)

    def delete_config(self, request):
        return self._deserialize(request)


class ConfigSerializer(wsgi.JSONResponseSerializer):
    """Handles serialization of specific controller method responses."""

@@ -293,9 +343,9 @@ class ConfigSerializer(wsgi.JSONResponseSerializer):
        response.body = self.to_json(dict(config=config_meta))
        return response


def create_resource():
    """configs resource factory method"""
    deserializer = ConfigDeserializer()
    serializer = ConfigSerializer()
    return wsgi.Resource(Controller(), deserializer, serializer)

View File
@@ -27,6 +27,7 @@ _ = i18n._


class BaseController(object):

    def get_image_meta_or_404(self, request, image_id):
        """
        Grabs the image metadata for an image with a supplied

@@ -101,6 +102,7 @@ class BaseController(object):
            raise webob.exc.HTTPForbidden(msg,
                                          request=request,
                                          content_type='text/plain')

    def get_component_meta_or_404(self, request, component_id):
        """
        Grabs the component metadata for an component with a supplied

@@ -175,7 +177,7 @@ class BaseController(object):
            raise webob.exc.HTTPForbidden(msg,
                                          request=request,
                                          content_type='text/plain')

    def get_network_meta_or_404(self, request, network_id):
        """
        Grabs the network metadata for an network with a supplied

@@ -199,7 +201,7 @@ class BaseController(object):
            LOG.debug(msg)
            raise webob.exc.HTTPForbidden(msg,
                                          request=request,
                                          content_type='text/plain')

    def get_active_image_meta_or_error(self, request, image_id):
        """

@@ -242,7 +244,7 @@ class BaseController(object):
            raise webob.exc.HTTPBadRequest(explanation=msg,
                                           request=req,
                                           content_type='text/plain')

    def get_config_file_meta_or_404(self, request, config_file_id):
        """
        Grabs the config_file metadata for an config_file with a supplied

@@ -291,7 +293,7 @@ class BaseController(object):
            LOG.debug(msg)
            raise webob.exc.HTTPForbidden(msg,
                                          request=request,
                                          content_type='text/plain')

    def get_config_meta_or_404(self, request, config_id):
        """

@@ -342,7 +344,7 @@ class BaseController(object):
            raise webob.exc.HTTPForbidden(msg,
                                          request=request,
                                          content_type='text/plain')

    def get_cinder_volume_meta_or_404(self, request, id):
        """
        Grabs the config metadata for an config with a supplied

@@ -366,4 +368,4 @@ class BaseController(object):
            LOG.debug(msg)
            raise webob.exc.HTTPForbidden(msg,
                                          request=request,
                                          content_type='text/plain')

View File
@@ -16,19 +16,15 @@
"""
/hosts endpoint for Daisy v1 API
"""
import ast

from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPForbidden
from webob.exc import HTTPNotFound
from webob import Response

from daisy import i18n
from daisy import notifier

@@ -43,10 +39,6 @@ import daisy.registry.client.v1.api as registry
from daisy.api.v1 import controller
from daisy.api.v1 import filters

LOG = logging.getLogger(__name__)
_ = i18n._

@@ -56,13 +48,15 @@ _LW = i18n._LW
SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
SERVICE_DISK_SERVICE = ('db', 'glance', 'db_backup', 'mongodb', 'nova')
DISK_LOCATION = ('local', 'share', 'share_cluster')
PROTOCOL_TYPE = ('FIBER', 'ISCSI', 'CEPH')
CINDER_VOLUME_BACKEND_PARAMS = ('management_ips', 'data_ips', 'pools',
                                'volume_driver', 'volume_type',
                                'role_id', 'user_name', 'user_pwd')
CINDER_VOLUME_BACKEND_DRIVER = ['KS3200_IPSAN', 'KS3200_FCSAN',
                                'FUJITSU_ETERNUS', 'HP3PAR_FCSAN']


class Controller(controller.BaseController):
    """

@@ -82,6 +76,7 @@ class Controller(controller.BaseController):
                        data for a previously-reserved host
        DELETE /hosts/<ID> -- Delete the host with id <ID>
    """

    def __init__(self):
        self.notifier = notifier.Notifier()
        registry.configure_registry_client()

@@ -99,7 +94,7 @@ class Controller(controller.BaseController):
            self.policy.enforce(req.context, action, target)
        except exception.Forbidden:
            raise HTTPForbidden()

    def _get_filters(self, req):
        """
        Return a dictionary of query param filters from the request
@@ -146,39 +141,57 @@ class Controller(controller.BaseController):
    def _raise_404_if_service_disk_deleted(self, req, service_disk_id):
        service_disk = self.get_service_disk_meta_or_404(req, service_disk_id)
        if service_disk is None or service_disk['deleted']:
            msg = _(
                "service_disk with identifier %s has been deleted.") % \
                service_disk_id
            raise HTTPNotFound(msg)

    def _default_value_set(self, disk_meta):
        if ('disk_location' not in disk_meta or
                not disk_meta['disk_location'] or
                disk_meta['disk_location'] == ''):
            disk_meta['disk_location'] = 'local'
        if 'lun' not in disk_meta:
            disk_meta['lun'] = 0
        if 'size' not in disk_meta:
            disk_meta['size'] = -1
        if 'protocol_type' not in disk_meta:
            disk_meta['protocol_type'] = 'ISCSI'

    def _unique_service_in_role(self, req, disk_meta):
        params = {'filters': {'role_id': disk_meta['role_id']}}
        service_disks = registry.list_service_disk_metadata(
            req.context, **params)
        if disk_meta['disk_location'] == 'share_cluster':
            for disk in service_disks:
                if disk['service'] == disk_meta['service'] and \
                        disk['disk_location'] != 'share_cluster':
                    id = disk['id']
                    registry.delete_service_disk_metadata(req.context, id)
        else:
            for service_disk in service_disks:
                if service_disk['disk_location'] == 'share_cluster' and \
                        service_disk['service'] == disk_meta['service']:
                    id = service_disk['id']
                    registry.delete_service_disk_metadata(req.context, id)
                elif service_disk['service'] == disk_meta['service']:
                    msg = "disk service %s has existed in role %s" % (
                        disk_meta['service'], disk_meta['role_id'])
                    LOG.error(msg)
                    raise HTTPBadRequest(explanation=msg,
                                         request=req,
                                         content_type="text/plain")

    def _service_disk_add_meta_valid(self, req, disk_meta):
        if 'role_id' not in disk_meta:
            msg = "'role_id' must be given"
            raise HTTPBadRequest(explanation=msg,
                                 request=req,
                                 content_type="text/plain")
        else:
            self._raise_404_if_role_deleted(req, disk_meta['role_id'])
        if 'service' not in disk_meta:
            msg = "'service' must be given"
            raise HTTPBadRequest(explanation=msg,
                                 request=req,
@@ -187,20 +200,22 @@ class Controller(controller.BaseController):
        if disk_meta['service'] not in SERVICE_DISK_SERVICE:
            msg = "service '%s' is not supported" % disk_meta['service']
            raise HTTPBadRequest(explanation=msg,
                                 request=req,
                                 content_type="text/plain")
        if disk_meta['disk_location'] not in DISK_LOCATION:
            msg = "disk_location %s is not supported" % disk_meta[
                'disk_location']
            raise HTTPBadRequest(explanation=msg,
                                 request=req,
                                 content_type="text/plain")
        if disk_meta['disk_location'] in ['share', 'share_cluster'] \
                and 'data_ips' not in disk_meta:
            msg = "'data_ips' must be given when disk_location was not local"
            raise HTTPBadRequest(explanation=msg,
                                 request=req,
                                 content_type="text/plain")
        if disk_meta['lun'] < 0:
            msg = "'lun' should not be less than 0"
            raise HTTPBadRequest(explanation=msg,

@@ -218,48 +233,65 @@ class Controller(controller.BaseController):
            raise HTTPBadRequest(explanation=msg,
                                 request=req,
                                 content_type="text/plain")
        if disk_meta.get('protocol_type', None) \
                and disk_meta['protocol_type'] not in PROTOCOL_TYPE:
            msg = "protocol type %s is not supported" % disk_meta[
                'protocol_type']
            raise HTTPBadRequest(explanation=msg,
                                 request=req,
                                 content_type="text/plain")
        self._unique_service_in_role(req, disk_meta)

    def _service_disk_update_meta_valid(self, req, id, disk_meta):
        orig_disk_meta = self.get_service_disk_meta_or_404(req, id)
        if 'role_id' in disk_meta:
            self._raise_404_if_role_deleted(req, disk_meta['role_id'])
        if 'service' in disk_meta:
            if disk_meta['service'] not in SERVICE_DISK_SERVICE:
                msg = "service '%s' is not supported" % disk_meta['service']
                raise HTTPBadRequest(explanation=msg,
                                     request=req,
                                     content_type="text/plain")
        if 'disk_location' in disk_meta:
            if disk_meta['disk_location'] not in DISK_LOCATION:
                msg = "disk_location '%s' is not supported" % disk_meta[
                    'disk_location']
                raise HTTPBadRequest(explanation=msg,
                                     request=req,
                                     content_type="text/plain")
            if (disk_meta['disk_location'] == 'share' and
                    'data_ips' not in disk_meta and
                    not orig_disk_meta['data_ips']):
                msg = "'data_ips' must be given when disk_location is share"
                raise HTTPBadRequest(explanation=msg,
                                     request=req,
                                     content_type="text/plain")
        if 'size' in disk_meta:
            disk_meta['size'] = ast.literal_eval(str(disk_meta['size']))
            if not isinstance(disk_meta['size'], int):
                msg = "'size' is not integer"
                raise HTTPBadRequest(explanation=msg,
                                     request=req,
                                     content_type="text/plain")
            if disk_meta['size'] < -1:
                msg = "'size' is invalid"
                raise HTTPBadRequest(explanation=msg,
                                     request=req,
                                     content_type="text/plain")
        if disk_meta.get('protocol_type', None) \
                and disk_meta['protocol_type'] not in PROTOCOL_TYPE:
            msg = "protocol type %s is not supported" % disk_meta[
                'protocol_type']
            raise HTTPBadRequest(explanation=msg,
                                 request=req,
                                 content_type="text/plain")
    @utils.mutating
    def service_disk_add(self, req, disk_meta):
        """
@@ -269,19 +301,19 @@ class Controller(controller.BaseController):
        :raises HTTPBadRequest if x-install-cluster is missing
        """
        self._enforce(req, 'service_disk_add')
        self._default_value_set(disk_meta)
        self._service_disk_add_meta_valid(req, disk_meta)
        service_disk_meta = registry.add_service_disk_metadata(
            req.context, disk_meta)
        return {'disk_meta': service_disk_meta}

    @utils.mutating
    def service_disk_delete(self, req, id):
        """
        Deletes a service_disk from Daisy.

        :param req: The WSGI/Webob Request object
        :param image_meta: Mapping of metadata about service_disk
@@ -305,7 +337,8 @@ class Controller(controller.BaseController):
                               request=req,
                               content_type="text/plain")
        except exception.InUseByStore as e:
            msg = (_("service_disk %(id)s could not be deleted "
                     "because it is in use: "
                     "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
            LOG.warn(msg)
            raise HTTPConflict(explanation=msg,
@@ -319,9 +352,8 @@ class Controller(controller.BaseController):
        self._enforce(req, 'service_disk_update')
        self._service_disk_update_meta_valid(req, id, disk_meta)
        try:
            service_disk_meta = registry.update_service_disk_metadata(
                req.context, id, disk_meta)

        except exception.Invalid as e:
            msg = (_("Failed to update role metadata. Got error: %s") %
@@ -353,7 +385,6 @@ class Controller(controller.BaseController):
            self.notifier.info('role.update', service_disk_meta)
        return {'disk_meta': service_disk_meta}

    @utils.mutating
    def service_disk_detail(self, req, id):
@@ -374,53 +405,61 @@ class Controller(controller.BaseController):
    def service_disk_list(self, req):
        self._enforce(req, 'service_disk_list')
        params = self._get_query_params(req)
        filters = params.get('filters', None)
        if 'role_id' in filters:
            role_id = filters['role_id']
            self._raise_404_if_role_deleted(req, role_id)
        try:
            service_disks = registry.list_service_disk_metadata(
                req.context, **params)
        except exception.Invalid as e:
            raise HTTPBadRequest(explanation=e.msg, request=req)
        return dict(disk_meta=service_disks)
    def _cinder_volume_list(self, req, params):
        try:
            cinder_volumes = registry.list_cinder_volume_metadata(
                req.context, **params)
        except exception.Invalid as e:
            raise HTTPBadRequest(explanation=e.msg, request=req)
        return cinder_volumes

    def _is_cinder_volume_repeat(self, req, array_disk_info, update_id=None):
        params = {'filters': {}}

        if update_id:
            cinder_volume_metal = self.get_cinder_volume_meta_or_404(
                req, update_id)
            new_management_ips = array_disk_info.get(
                'management_ips', cinder_volume_metal[
                    'management_ips']).split(",")
            new_pools = array_disk_info.get(
                'pools', cinder_volume_metal['pools']).split(",")
        else:
            new_management_ips = array_disk_info['management_ips'].split(",")
            new_pools = array_disk_info['pools'].split(",")

        org_cinder_volumes = self._cinder_volume_list(req, params)
        for cinder_volume in org_cinder_volumes:
            if (set(cinder_volume['management_ips'].split(",")) == set(
                    new_management_ips) and
                    set(cinder_volume['pools'].split(",")) == set(new_pools)):
                if cinder_volume['id'] != update_id:
                    msg = 'cinder_volume array disks ' \
                          'conflict with cinder_volume %s' % cinder_volume[
                              'id']
                    raise HTTPBadRequest(explanation=msg, request=req)
    def _get_cinder_volume_backend_index(self, req, disk_array):
        params = {'filters': {}}
        cinder_volumes = self._cinder_volume_list(req, params)
        index = 1
        while True:
            backend_index = "%s-%s" % (disk_array['volume_driver'], index)
            flag = True
            for cinder_volume in cinder_volumes:
                if backend_index == cinder_volume['backend_index']:
                    index = index + 1
                    flag = False
                    break
            if flag:
@@ -437,46 +476,52 @@ class Controller(controller.BaseController):
        :raises HTTPBadRequest if x-install-cluster is missing
        """
        self._enforce(req, 'cinder_volume_add')
        if 'role_id' not in disk_meta:
            msg = "'role_id' must be given"
            raise HTTPBadRequest(explanation=msg,
                                 request=req,
                                 content_type="text/plain")
        else:
            self._raise_404_if_role_deleted(req, disk_meta['role_id'])

        disk_arrays = eval(disk_meta['disk_array'])
        for disk_array in disk_arrays:
            for key in disk_array.keys():
                if (key not in CINDER_VOLUME_BACKEND_PARAMS and
                        key != 'data_ips'):
                    msg = "'%s' must be given for cinder volume config" % key
                    raise HTTPBadRequest(explanation=msg,
                                         request=req,
                                         content_type="text/plain")
            if disk_array[
                    'volume_driver'] not in CINDER_VOLUME_BACKEND_DRIVER:
                msg = "volume_driver %s is not supported" % disk_array[
                    'volume_driver']
                raise HTTPBadRequest(explanation=msg,
                                     request=req,
                                     content_type="text/plain")
            if (disk_array['volume_driver'] == 'FUJITSU_ETERNUS' and
                    ('data_ips' not in disk_array or
                     not disk_array['data_ips'])):
                msg = "data_ips must be given " \
                      "when using FUJITSU Disk Array"
                raise HTTPBadRequest(explanation=msg,
                                     request=req,
                                     content_type="text/plain")
            self._is_cinder_volume_repeat(req, disk_array)
            disk_array['role_id'] = disk_meta['role_id']
            disk_array['backend_index'] = \
                self._get_cinder_volume_backend_index(
                    req, disk_array)
            cinder_volumes = registry.add_cinder_volume_metadata(
                req.context, disk_array)
        return {'disk_meta': cinder_volumes}
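The loop above consumes disk_meta['disk_array'] as a string that eval() turns into a list of backend dictionaries. A sketch of such a request body is shown below; only the keys exercised by the checks in this hunk are grounded in the diff, everything else (including the full CINDER_VOLUME_BACKEND_PARAMS set) is an assumption.

# Illustrative request body for cinder_volume_add (values are assumptions).
cinder_volume_add_body = {
    'role_id': 'role-uuid',
    # Note: passed as a string and parsed with eval() on the server side.
    'disk_array': str([{
        'volume_driver': 'FUJITSU_ETERNUS',  # must be in CINDER_VOLUME_BACKEND_DRIVER
        'management_ips': '10.0.0.10,10.0.0.11',
        'pools': 'pool1,pool2',
        'data_ips': '10.0.1.10',             # mandatory for FUJITSU_ETERNUS
    }]),
}

Design note: eval() on client-supplied input is risky; ast.literal_eval (already imported for the size check) would be the safer drop-in for the same literal syntax.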
    @utils.mutating
    def cinder_volume_delete(self, req, id):
        """
        Deletes a service_disk from Daisy.

        :param req: The WSGI/Webob Request object
        :param image_meta: Mapping of metadata about service_disk
@@ -500,7 +545,8 @@ class Controller(controller.BaseController):
                               request=req,
                               content_type="text/plain")
        except exception.InUseByStore as e:
            msg = (_("cinder volume %(id)s could not "
                     "be deleted because it is in use: "
                     "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
            LOG.warn(msg)
            raise HTTPConflict(explanation=msg,
@@ -510,20 +556,21 @@ class Controller(controller.BaseController):
        return Response(body='', status=200)

    def _is_data_ips_valid(self, req, update_id, update_meta):
        orgin_cinder_volume = self.get_cinder_volume_meta_or_404(
            req, update_id)

        new_driver = update_meta.get('volume_driver',
                                     orgin_cinder_volume['volume_driver'])
        if new_driver != 'FUJITSU_ETERNUS':
            return

        new_data_ips = update_meta.get('data_ips',
                                       orgin_cinder_volume['data_ips'])
        if not new_data_ips:
            msg = "data_ips must be given when using FUJITSU Disk Array"
            raise HTTPBadRequest(explanation=msg,
                                 request=req,
                                 content_type="text/plain")

    @utils.mutating
    def cinder_volume_update(self, req, id, disk_meta):
@@ -531,28 +578,29 @@ class Controller(controller.BaseController):
            if key not in CINDER_VOLUME_BACKEND_PARAMS:
                msg = "'%s' must be given for cinder volume config" % key
                raise HTTPBadRequest(explanation=msg,
                                     request=req,
                                     content_type="text/plain")
        if 'role_id' in disk_meta:
            self._raise_404_if_role_deleted(req, disk_meta['role_id'])
        if ('volume_driver' in disk_meta and disk_meta[
                'volume_driver'] not in CINDER_VOLUME_BACKEND_DRIVER):
            msg = "volume_driver %s is not supported" % disk_meta[
                'volume_driver']
            raise HTTPBadRequest(explanation=msg,
                                 request=req,
                                 content_type="text/plain")

        self._is_cinder_volume_repeat(req, disk_meta, id)
        self._is_data_ips_valid(req, id, disk_meta)
        try:
            cinder_volume_meta = registry.update_cinder_volume_metadata(
                req.context, id, disk_meta)

        except exception.Invalid as e:
            msg = (
                _("Failed to update cinder_volume metadata. Got error: %s") %
                utils.exception_to_str(e))
            LOG.warn(msg)
            raise HTTPBadRequest(explanation=msg,
                                 request=req,
@@ -580,7 +628,7 @@ class Controller(controller.BaseController):
            self.notifier.info('cinder_volume.update', cinder_volume_meta)
        return {'disk_meta': cinder_volume_meta}
    @utils.mutating
    def cinder_volume_detail(self, req, id):
        """
@@ -595,17 +643,17 @@ class Controller(controller.BaseController):
        self._enforce(req, 'cinder_volume_detail')
        cinder_volume_meta = self.get_cinder_volume_meta_or_404(req, id)
        return {'disk_meta': cinder_volume_meta}

    def cinder_volume_list(self, req):
        self._enforce(req, 'cinder_volume_list')
        params = self._get_query_params(req)
        filters = params.get('filters', None)
        if 'role_id' in filters:
            role_id = filters['role_id']
            self._raise_404_if_role_deleted(req, role_id)
        cinder_volumes = self._cinder_volume_list(req, params)
        return dict(disk_meta=cinder_volumes)
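_get_cinder_volume_backend_index (only partially visible above) derives a unique backend label of the form '<volume_driver>-<n>'. A standalone sketch of that naming rule, written against a plain list of existing labels instead of the registry, is shown below for reference.

def pick_backend_index(volume_driver, existing_indexes):
    """Return the first '<driver>-<n>' label not already in use."""
    index = 1
    while True:
        candidate = "%s-%s" % (volume_driver, index)
        if candidate not in existing_indexes:
            return candidate
        index += 1

# e.g. pick_backend_index('FUJITSU_ETERNUS', ['FUJITSU_ETERNUS-1'])
# returns 'FUJITSU_ETERNUS-2'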
class DiskArrayDeserializer(wsgi.JSONRequestDeserializer):
    """Handles deserialization of specific controller method requests."""
@@ -614,19 +662,20 @@ class DiskArrayDeserializer(wsgi.JSONRequestDeserializer):
        result = {}
        result["disk_meta"] = utils.get_dict_meta(request)
        return result

    def service_disk_add(self, request):
        return self._deserialize(request)

    def service_disk_update(self, request):
        return self._deserialize(request)

    def cinder_volume_add(self, request):
        return self._deserialize(request)

    def cinder_volume_update(self, request):
        return self._deserialize(request)


class DiskArraySerializer(wsgi.JSONResponseSerializer):
    """Handles serialization of specific controller method responses."""
@@ -634,33 +683,30 @@ class DiskArraySerializer(wsgi.JSONResponseSerializer):
        self.notifier = notifier.Notifier()

    def service_disk_add(self, response, result):
        response.status = 201
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(result)
        return response

    def service_disk_update(self, response, result):
        response.status = 201
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(result)
        return response

    def cinder_volume_add(self, response, result):
        response.status = 201
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(result)
        return response

    def cinder_volume_update(self, response, result):
        response.status = 201
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(result)
        return response


def create_resource():
    """Image members resource factory method"""
    deserializer = DiskArrayDeserializer()

@@ -24,8 +24,8 @@ from webob.exc import HTTPConflict
from webob.exc import HTTPForbidden
from webob.exc import HTTPNotFound
from webob import Response
import copy
# import json

from daisy.api import policy
import daisy.api.v1
@@ -41,7 +41,7 @@ import daisy.registry.client.v1.api as registry
from daisy.registry.api.v1 import template
import daisy.api.backends.tecs.common as tecs_cmn

try:
    import simplejson as json
except ImportError:
@@ -64,21 +64,26 @@ CONF.import_opt('container_formats', 'daisy.common.config',
                group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config')


class Controller(controller.BaseController):
    """
    WSGI controller for Templates resource in Daisy v1 API

    The HostTemplates resource API is a RESTful web Template for Template data.
    The API is as follows::

        GET /HostTemplates -- Returns a set of brief metadata about Templates
        GET /HostTemplates/detail -- Returns a set of detailed metadata about
            HostTemplates
        HEAD /HostTemplates/<ID> --
            Return metadata about an Template with id <ID>
        GET /HostTemplates/<ID> --
            Return Template data for Template with id <ID>
        POST /HostTemplates --
            Store Template data and return metadata about the
            newly-stored Template
        PUT /HostTemplates/<ID> --
            Update Template metadata and/or upload Template
            data for a previously-reserved Template
        DELETE /HostTemplates/<ID> -- Delete the Template with id <ID>
    """
@@ -136,8 +141,9 @@ class Controller(controller.BaseController):
    def _raise_404_if_cluster_deleted(self, req, cluster_id):
        cluster = self.get_cluster_meta_or_404(req, cluster_id)
        if cluster['deleted']:
            msg = _("Cluster with identifier %s has been deleted.") % \
                cluster_id
            raise HTTPNotFound(msg)

    @utils.mutating
    def add_template(self, req, host_template):
        """
@@ -150,9 +156,9 @@ class Controller(controller.BaseController):
        :raises HTTPBadRequest if x-Template-name is missing
        """
        self._enforce(req, 'add_host_template')
        host_template = registry.add_host_template_metadata(
            req.context, host_template)

        return {'host_template': template}
@@ -167,7 +173,7 @@ class Controller(controller.BaseController):
        :retval Returns the updated image information as a mapping
        """
        self._enforce(req, 'update_host_template')
        # orig_Template_meta = self.get_Template_meta_or_404(req, id)
        '''
        if orig_Template_meta['deleted']:
            msg = _("Forbidden to update deleted Template.")
@@ -176,9 +182,8 @@ class Controller(controller.BaseController):
                                content_type="text/plain")
        '''
        try:
            host_template = registry.update_host_template_metadata(
                req.context, template_id, host_template)

        except exception.Invalid as e:
            msg = (_("Failed to update template metadata. Got error: %s") %
@@ -210,47 +215,51 @@ class Controller(controller.BaseController):
            self.notifier.info('host_template.update', host_template)
        return {'host_template': host_template}

    def _filter_params(self, host_meta):
        for key in host_meta.keys():
            if key == "id" or key == "updated_at" or key == "deleted_at" or \
                    key == "created_at" or key == "deleted":
                del host_meta[key]
        if "memory" in host_meta:
            del host_meta['memory']
        if "system" in host_meta:
            del host_meta['system']
        if "disks" in host_meta:
            del host_meta['disks']
        if "os_status" in host_meta:
            del host_meta['os_status']
        if "status" in host_meta:
            del host_meta['status']
        if "messages" in host_meta:
            del host_meta['messages']
        if "cpu" in host_meta:
            del host_meta['cpu']
        if "ipmi_addr" in host_meta:
            del host_meta['ipmi_addr']
        if "interfaces" in host_meta:
            for interface in host_meta['interfaces']:
                for key in interface.keys():
                    if key == "id" or key == "updated_at" or \
                            key == "deleted_at" \
                            or key == "created_at" or key == "deleted" or \
                            key == "current_speed" \
                            or key == "max_speed" or key == "host_id" or \
                            key == "state":
                        del interface[key]
                for assigned_network in interface['assigned_networks']:
                    if "ip" in assigned_network:
                        assigned_network['ip'] = ""
        return host_meta
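In short, _filter_params strips everything that is specific to one physical machine so the remaining dict can be reused as a template. A before/after illustration, with field values invented for the example, follows.

host_meta = {
    'id': 'uuid-1', 'name': 'host-1', 'status': 'active',
    'cpu': {'total': 24}, 'ipmi_addr': '10.0.0.5',
    'interfaces': [{'id': 'if-1', 'pci': '0000:01:00.0', 'type': 'ether',
                    'mac': 'fa:16:3e:00:00:01',
                    'assigned_networks': [{'name': 'MANAGEMENT',
                                           'ip': '192.168.1.2'}]}],
}
# After self._filter_params(host_meta) the dict is reduced (in place) to:
# {'name': 'host-1',
#  'interfaces': [{'pci': '0000:01:00.0', 'type': 'ether',
#                  'mac': 'fa:16:3e:00:00:01',
#                  'assigned_networks': [{'name': 'MANAGEMENT', 'ip': ''}]}]}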
    @utils.mutating
    def get_host_template_detail(self, req, template_id):
        """
@@ -263,7 +272,8 @@ class Controller(controller.BaseController):
        """
        self._enforce(req, 'get_host_template_detail')
        try:
            host_template = registry.host_template_detail_metadata(
                req.context, template_id)
            return {'host_template': host_template}
        except exception.NotFound as e:
            msg = (_("Failed to find host template: %s") %
@@ -280,30 +290,33 @@ class Controller(controller.BaseController):
                                request=req,
                                content_type="text/plain")
        except exception.InUseByStore as e:
            msg = (_("host template %(id)s could not be get "
                     "because it is in use: "
                     "%(exc)s") % {"id": template_id,
                                   "exc": utils.exception_to_str(e)})
            LOG.error(msg)
            raise HTTPConflict(explanation=msg,
                               request=req,
                               content_type="text/plain")
        else:
            # self.notifier.info('host.delete', host)
            return Response(body='', status=200)

    @utils.mutating
    def get_host_template_lists(self, req):
        self._enforce(req, 'get_template_lists')
        params = self._get_query_params(req)
        template_meta = {}
        try:
            host_template_lists = registry.host_template_lists_metadata(
                req.context, **params)
            if host_template_lists and host_template_lists[0]:
                template_meta = json.loads(host_template_lists[0]['hosts'])
            return {'host_template': template_meta}
        except exception.Invalid as e:
            raise HTTPBadRequest(explanation=e.msg, request=req)
        return dict(host_template=host_template_lists)
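get_host_template_lists presumably backs the listing endpoints named in the class docstring (GET /HostTemplates and GET /HostTemplates/detail). For reference, a minimal client call with python-requests could look like the sketch below; the base URL and port are deployment-specific assumptions, only the paths come from the docstring.

import requests

DAISY_API = 'http://127.0.0.1:19292/v1'   # assumed endpoint

# brief listing
resp = requests.get(DAISY_API + '/HostTemplates')
print(resp.status_code, resp.json())

# detailed listing
resp = requests.get(DAISY_API + '/HostTemplates/detail')
print(resp.json())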
    @utils.mutating
    def host_to_template(self, req, host_template):
        """
@@ -315,21 +328,32 @@ class Controller(controller.BaseController):
        """
        self._enforce(req, 'host_to_template')
        if host_template.get('host_id', None):
            origin_host_meta = self.get_host_meta_or_404(
                req, host_template['host_id'])
            host_meta = self._filter_params(origin_host_meta)
        if host_template.get(
                'host_template_name',
                None) and host_template.get(
                'cluster_name',
                None):
            host_meta['name'] = host_template['host_template_name']
            host_meta['description'] = host_template.get(
                'description', None)
            params = {
                'filters': {
                    'cluster_name': host_template['cluster_name']}}
            templates = registry.host_template_lists_metadata(
                req.context, **params)
            if templates and templates[0]:
                had_host_template = False
                if templates[0]['hosts']:
                    templates[0]['hosts'] = json.loads(
                        templates[0]['hosts'])
                else:
                    templates[0]['hosts'] = []
                for index in range(len(templates[0]['hosts'])):
                    if host_template['host_template_name'] == templates[
                            0]['hosts'][index]['name']:
                        had_host_template = True
                        templates[0]['hosts'][index] = host_meta
                        break
@@ -337,12 +361,15 @@ class Controller(controller.BaseController):
                    host_meta['name'] = host_template['host_template_name']
                    templates[0]['hosts'].append(host_meta)
                templates[0]['hosts'] = json.dumps(templates[0]['hosts'])
                host_template = registry.update_host_template_metadata(
                    req.context, templates[0]['id'], templates[0])
            else:
                param = {
                    "cluster_name": host_template['cluster_name'],
                    "hosts": json.dumps(
                        [host_meta])}
                host_template = registry.add_host_template_metadata(
                    req.context, param)

        return {'host_template': host_template}
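host_to_template therefore expects the host to copy from plus the template naming information. A request body sketch, with placeholder identifiers, is:

host_to_template_body = {
    'host_id': 'host-uuid',                 # host whose config is captured
    'host_template_name': 'compute-node',   # name stored inside the template set
    'cluster_name': 'cluster1',             # template sets are grouped per cluster
    'description': 'captured from host-uuid',
}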
    @utils.mutating
@@ -350,8 +377,9 @@ class Controller(controller.BaseController):
        if not host_template.get('cluster_name', None):
            msg = "cluster name is null"
            raise HTTPNotFound(explanation=msg)
        params = {'filters': {'cluster_name': host_template['cluster_name']}}
        templates = registry.host_template_lists_metadata(
            req.context, **params)
        hosts_param = []
        host_template_used = {}
        if templates and templates[0]:
@@ -362,66 +390,79 @@ class Controller(controller.BaseController):
                    break
        if not host_template_used:
            msg = "not host_template %s" % host_template['host_template_name']
            raise HTTPNotFound(
                explanation=msg,
                request=req,
                content_type="text/plain")
        if host_template.get('host_id', None):
            self.get_host_meta_or_404(req, host_template['host_id'])
        else:
            msg = "host_id is not null"
            raise HTTPBadRequest(explanation=msg)
        host_id = host_template['host_id']
        params = {'filters': {'name': host_template['cluster_name']}}
        clusters = registry.get_clusters_detail(req.context, **params)
        if clusters and clusters[0]:
            host_template_used['cluster'] = clusters[0]['id']
        if 'role' in host_template_used and host_template_used['role']:
            role_id_list = []
            host_role_list = []
            if 'cluster' in host_template_used:
                params = self._get_query_params(req)
                role_list = registry.get_roles_detail(req.context, **params)
                for role_name in role_list:
                    if role_name['cluster_id'] == host_template_used[
                            'cluster']:
                        host_role_list = list(host_template_used['role'])
                        if role_name['name'] in host_role_list:
                            role_id_list.append(role_name['id'])
                host_template_used['role'] = role_id_list
        if 'name' in host_template_used:
            host_template_used.pop('name')
        if 'dmi_uuid' in host_template_used:
            host_template_used.pop('dmi_uuid')
        if 'ipmi_user' in host_template_used:
            host_template_used.pop('ipmi_user')
        if 'ipmi_passwd' in host_template_used:
            host_template_used.pop('ipmi_passwd')
        if 'ipmi_addr' in host_template_used:
            host_template_used.pop('ipmi_addr')
        host_template_interfaces = host_template_used.get('interfaces', None)
        if host_template_interfaces:
            template_ether_interface = [
                interface for interface in host_template_interfaces if
                interface['type'] == "ether"]
            orig_host_meta = registry.get_host_metadata(req.context, host_id)
            orig_host_interfaces = orig_host_meta.get('interfaces', None)
            temp_orig_host_interfaces = [
                interface for interface in orig_host_interfaces if
                interface['type'] == "ether"]
            if len(temp_orig_host_interfaces) != len(template_ether_interface):
                msg = (_('host_id %s does not match the host_id host_template '
                         '%s.') % (host_id,
                                   host_template['host_template_name']))
                raise HTTPBadRequest(explanation=msg)
            interface_match_flag = 0
            for host_template_interface in host_template_interfaces:
                if host_template_interface['type'] == 'ether':
                    for orig_host_interface in orig_host_interfaces:
                        if orig_host_interface[
                                'pci'] == host_template_interface['pci']:
                            interface_match_flag += 1
                            host_template_interface[
                                'mac'] = orig_host_interface['mac']
                            if 'ip' in host_template_interface:
                                host_template_interface.pop('ip')
            if interface_match_flag != len(template_ether_interface):
                msg = (_('host_id %s does not match the host '
                         'host_template %s.') % (
                    host_id, host_template['host_template_name']))
                raise HTTPBadRequest(explanation=msg)
            host_template_used['interfaces'] = str(host_template_interfaces)
        host_template = registry.update_host_metadata(
            req.context, host_id, host_template_used)
        return {"host_template": host_template}
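The ether-interface matching above keys on the PCI address: a template only applies if every ether NIC in the template has a counterpart with the same PCI slot on the target host, and the template then inherits the host's real MACs. A condensed, standalone sketch of that rule, detached from the registry and request plumbing, is shown below.

def match_template_interfaces(template_ifaces, host_ifaces):
    """Return template ether NICs with MACs taken from the matching host NIC,
    or None if any PCI address cannot be matched."""
    host_by_pci = {i['pci']: i for i in host_ifaces if i['type'] == 'ether'}
    matched = []
    for iface in template_ifaces:
        if iface['type'] != 'ether':
            continue
        host_iface = host_by_pci.get(iface['pci'])
        if host_iface is None:
            return None                   # template does not fit this host
        iface = dict(iface, mac=host_iface['mac'])
        iface.pop('ip', None)             # IPs always come from the host side
        matched.append(iface)
    return matched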
    @utils.mutating
    def delete_host_template(self, req, host_template):
        """
@@ -437,8 +478,11 @@ class Controller(controller.BaseController):
            if not host_template.get('cluster_name', None):
                msg = "cluster name is null"
                raise HTTPNotFound(explanation=msg)
            params = {
                'filters': {
                    'cluster_name': host_template['cluster_name']}}
            host_templates = registry.host_template_lists_metadata(
                req.context, **params)
            template_param = []
            had_host_template = False
            if host_templates and host_templates[0]:
@@ -449,18 +493,20 @@ class Controller(controller.BaseController):
                        had_host_template = True
                        break
                if not had_host_template:
                    msg = "not host template name %s" % host_template[
                        'host_template_name']
                    raise HTTPNotFound(explanation=msg)
                else:
                    host_templates[0]['hosts'] = json.dumps(template_param)
                    host_template = registry.update_host_template_metadata(
                        req.context, host_templates[0]['id'],
                        host_templates[0])
                    return {"host_template": host_template}
            else:
                msg = "host template cluster name %s is null" % host_template[
                    'cluster_name']
                raise HTTPNotFound(explanation=msg)
        except exception.NotFound as e:
            msg = (_("Failed to find host template to delete: %s") %
                   utils.exception_to_str(e))
@@ -476,15 +522,18 @@ class Controller(controller.BaseController):
                                request=req,
                                content_type="text/plain")
        except exception.InUseByStore as e:
            msg = (_("template %(id)s could not be deleted "
                     "because it is in use: "
                     "%(exc)s") % {"id": host_template['host_id'],
                                   "exc": utils.exception_to_str(e)})
            LOG.error(msg)
            raise HTTPConflict(explanation=msg,
                               request=req,
                               content_type="text/plain")
        else:
            return Response(body='', status=200)
class HostTemplateDeserializer(wsgi.JSONRequestDeserializer):
    """Handles deserialization of specific controller method requests."""
@@ -492,13 +541,12 @@ class HostTemplateDeserializer(wsgi.JSONRequestDeserializer):
        result = {}
        result["host_template"] = utils.get_template_meta(request)
        return result

    def add_host_template(self, request):
        return self._deserialize(request)

    def update_host_template(self, request):
        return self._deserialize(request)

    def host_to_template(self, request):
        return self._deserialize(request)
@@ -509,6 +557,7 @@ class HostTemplateDeserializer(wsgi.JSONRequestDeserializer):
    def delete_host_template(self, request):
        return self._deserialize(request)


class HostTemplateSerializer(wsgi.JSONResponseSerializer):
    """Handles serialization of specific controller method responses."""
@@ -528,18 +577,20 @@ class HostTemplateSerializer(wsgi.JSONResponseSerializer):
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(dict(host_template=host_template))
        return response

    def get_host_template_detail(self, response, result):
        host_template = result['host_template']
        response.status = 201
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(dict(host_template=host_template))
        return response

    def update_host_template(self, response, result):
        host_template = result['host_template']
        response.status = 201
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(dict(host_template=host_template))
        return response

    def host_to_template(self, response, result):
        host_template = result['host_template']
@@ -560,7 +611,7 @@ class HostTemplateSerializer(wsgi.JSONResponseSerializer):
        response.status = 201
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(dict(host_template=host_template))


def create_resource():
    """Templates resource factory method"""

File diff suppressed because it is too large.

code/daisy/daisy/api/v1/hwms.py (new executable file, 347 lines added)

@@ -0,0 +1,347 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/Hwm endpoint for Daisy v1 API
"""
from oslo_config import cfg
from oslo_log import log as logging
import webob.exc
from webob.exc import HTTPBadRequest
from webob.exc import HTTPConflict
from webob.exc import HTTPForbidden
from webob.exc import HTTPNotFound
from webob import Response
from daisy.api import policy
import daisy.api.v1
from daisy.api.v1 import controller
from daisy.api.v1 import filters
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
from daisy import i18n
from daisy import notifier
import daisy.registry.client.v1.api as registry
from daisy.registry.api.v1 import hwms
import daisy.api.backends.tecs.common as tecs_cmn
daisy_tecs_path = tecs_cmn.daisy_tecs_path
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
SUPPORTED_PARAMS = hwms.SUPPORTED_PARAMS
SUPPORTED_FILTERS = hwms.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
CONF = cfg.CONF
CONF.import_opt('disk_formats', 'daisy.common.config',
group='image_format')
CONF.import_opt('container_formats', 'daisy.common.config',
group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config')
class Controller(controller.BaseController):
"""
WSGI controller for hwms resource in Daisy v1 API
The Templates resource API is a RESTful web Template for Template data.
The API is as follows::
GET /Templates -- Returns a set of brief metadata about Templates
GET /Templates/detail -- Returns a set of detailed metadata about
Templates
HEAD /Templates/<ID> -- Return metadata about an Template with id <ID>
GET /Templates/<ID> -- Return Template data for Template with id <ID>
POST /Templates -- Store Template data and return metadata about the
newly-stored Template
PUT /Templates/<ID> -- Update Template metadata and/or upload Template
data for a previously-reserved Template
DELETE /Templates/<ID> -- Delete the Template with id <ID>
"""
def __init__(self):
self.notifier = notifier.Notifier()
registry.configure_registry_client()
self.policy = policy.Enforcer()
if property_utils.is_property_protection_enabled():
self.prop_enforcer = property_utils.PropertyRules(self.policy)
else:
self.prop_enforcer = None
def _enforce(self, req, action, target=None):
"""Authorize an action against our policies"""
if target is None:
target = {}
try:
self.policy.enforce(req.context, action, target)
except exception.Forbidden:
raise HTTPForbidden()
def _get_filters(self, req):
"""
Return a dictionary of query param filters from the request
:param req: the Request object coming from the wsgi layer
:retval a dict of key/value filters
"""
query_filters = {}
for param in req.params:
if param in SUPPORTED_FILTERS:
query_filters[param] = req.params.get(param)
if not filters.validate(param, query_filters[param]):
raise HTTPBadRequest(_('Bad value passed to filter '
'%(filter)s got %(val)s')
% {'filter': param,
'val': query_filters[param]})
return query_filters
def _get_query_params(self, req):
"""
Extracts necessary query params from request.
:param req: the WSGI Request object
:retval dict of parameters that can be used by registry client
"""
params = {'filters': self._get_filters(req)}
for PARAM in SUPPORTED_PARAMS:
if PARAM in req.params:
params[PARAM] = req.params.get(PARAM)
return params
def _raise_404_if_cluster_deleted(self, req, cluster_id):
cluster = self.get_cluster_meta_or_404(req, cluster_id)
if cluster['deleted']:
msg = _("Cluster with identifier %s has been deleted.") % \
cluster_id
raise webob.exc.HTTPNotFound(msg)
def get_clusters_hwm_ip(self, req):
params = self._get_query_params(req)
clusters_hwm_ip = list()
clusters = registry.get_clusters_detail(req.context, **params)
for cluster in clusters:
clusters_hwm_ip.append(cluster.get('hwm_ip'))
return clusters_hwm_ip
@utils.mutating
def add_hwm(self, req, hwm):
"""
Adds a new hwm to Daisy.
:param req: The WSGI/Webob Request object
:param image_meta: Mapping of metadata about Template
:raises HTTPBadRequest if x-Template-name is missing
"""
self._enforce(req, 'add_template')
hwm = registry.add_hwm_metadata(req.context, hwm)
return {'hwm': hwm}
@utils.mutating
def update_hwm(self, req, id, hwm):
"""
Updates an existing hwm with the registry.
:param request: The WSGI/Webob Request object
:param id: The opaque image identifier
:retval Returns the updated image information as a mapping
"""
self._enforce(req, 'update_hwm')
hwm_meta = registry.hwm_detail_metadata(req.context, id)
hwm_ip = hwm_meta['hwm_ip']
clusters_hwm_ip = self.get_clusters_hwm_ip(req)
if hwm_ip in clusters_hwm_ip:
msg = (_("Hwm %s has already been used in a cluster, "
"it can not be updated. " % hwm_ip))
LOG.error(msg)
raise HTTPForbidden(explanation=msg, request=req,
content_type="text/plain")
try:
hwm = registry.update_hwm_metadata(req.context, id, hwm)
except exception.Invalid as e:
msg = (_("Failed to update hwm metadata. Got error: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
except exception.NotFound as e:
msg = (_("Failed to find hwm to update: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to update hwm: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except (exception.Conflict, exception.Duplicate) as e:
LOG.warn(utils.exception_to_str(e))
raise HTTPConflict(body=_('hwm operation conflicts'),
request=req,
content_type='text/plain')
else:
self.notifier.info('hwm.update', hwm)
return {'hwm': hwm}
@utils.mutating
def delete_hwm(self, req, id):
"""
Delete an existing hwm from the registry.
:param request: The WSGI/Webob Request object
:param id: The opaque image identifier
:retval Returns the updated image information as a mapping
"""
self._enforce(req, 'delete_hwm')
hwm_meta = registry.hwm_detail_metadata(req.context, id)
hwm_ip = hwm_meta['hwm_ip']
clusters_hwm_ip = self.get_clusters_hwm_ip(req)
if hwm_ip in clusters_hwm_ip:
msg = (_("Hwm %s has already been used in a cluster, "
"it can not be deleted. " % hwm_ip))
LOG.error(msg)
raise HTTPForbidden(explanation=msg, request=req,
content_type="text/plain")
try:
registry.delete_hwm_metadata(req.context, id)
except exception.NotFound as e:
msg = (_("Failed to find hwm to delete: %s") %
utils.exception_to_str(e))
LOG.error(msg)
raise HTTPNotFound(explanation=msg, request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to delete hwm: %s") %
utils.exception_to_str(e))
LOG.error(msg)
raise HTTPForbidden(explanation=msg, request=req,
content_type="text/plain")
except exception.InUseByStore as e:
msg = (_(
"hwm %(id)s could not be deleted because it is in "
"use:%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
LOG.error(msg)
raise HTTPConflict(explanation=msg, request=req,
content_type="text/plain")
else:
return Response(body='', status=200)
@utils.mutating
def detail(self, req, id):
"""
Get detail of an existing hwm from the registry.
:param request: The WSGI/Webob Request object
:param id: The opaque hwm identifier
:retval Returns the hwm information as a mapping
"""
self._enforce(req, 'detail')
context = req.context
try:
hwm_meta = registry.hwm_detail_metadata(context, id)
except exception.NotFound:
msg = "Hwm with identifier %s not found" % id
LOG.debug(msg)
raise webob.exc.HTTPNotFound(
msg, request=req, content_type='text/plain')
except exception.Forbidden:
msg = "Forbidden hwm access"
LOG.debug(msg)
raise webob.exc.HTTPForbidden(msg,
request=req,
content_type='text/plain')
return {'hwm': hwm_meta}
@utils.mutating
def list(self, req):
self._enforce(req, 'list')
params = self._get_query_params(req)
try:
hwm_list = registry.hwm_list_metadata(req.context, **params)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return dict(hwm=hwm_list)
class HwmDeserializer(wsgi.JSONRequestDeserializer):
"""Handles deserialization of specific controller method requests."""
def _deserialize(self, request):
result = {}
result["hwm"] = utils.get_hwm_meta(request)
return result
def add_hwm(self, request):
return self._deserialize(request)
def update_hwm(self, request):
return self._deserialize(request)
class HwmSerializer(wsgi.JSONResponseSerializer):
"""Handles serialization of specific controller method responses."""
def __init__(self):
self.notifier = notifier.Notifier()
def add_hwm(self, response, result):
hwm = result['hwm']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(hwm=hwm))
return response
def delete_hwm(self, response, result):
hwm = result['hwm']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(hwm=hwm))
return response
def get_detail(self, response, result):
hwm = result['hwm']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(hwm=hwm))
return response
def update_hwm(self, response, result):
hwm = result['hwm']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(hwm=hwm))
return response
def create_resource():
"""Templates resource factory method"""
deserializer = HwmDeserializer()
serializer = HwmSerializer()
return wsgi.Resource(Controller(), deserializer, serializer)
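The v1 router that exposes this resource is not part of the excerpt above; a typical wiring, with the URL paths and action names assumed purely for illustration, would look roughly like this:

# Sketch only: route paths and action mappings are assumptions,
# not taken from this commit.
from daisy.api.v1 import hwms


def add_hwm_routes(mapper):
    hwm_resource = hwms.create_resource()
    mapper.connect("/hwm", controller=hwm_resource,
                   action="add_hwm", conditions={'method': ['POST']})
    mapper.connect("/hwm/{id}", controller=hwm_resource,
                   action="update_hwm", conditions={'method': ['PUT']})
    mapper.connect("/hwm/{id}", controller=hwm_resource,
                   action="delete_hwm", conditions={'method': ['DELETE']})
    mapper.connect("/hwm/{id}", controller=hwm_resource,
                   action="detail", conditions={'method': ['GET']})
    mapper.connect("/hwm", controller=hwm_resource,
                   action="list", conditions={'method': ['GET']})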


@@ -905,8 +905,9 @@ class Controller(controller.BaseController):
            # Once an image is 'active' only an admin can
            # modify certain core metadata keys
            for key in ACTIVE_IMMUTABLE:
                if (orig_status == 'active' and
                        image_meta.get(key) is not None and
                        image_meta.get(key) != orig_image_meta.get(key)):
                    msg = _("Forbidden to modify '%s' of active image.") % key
                    raise HTTPForbidden(explanation=msg,
                                        request=req,

@@ -17,13 +17,11 @@
 /hosts endpoint for Daisy v1 API
 """
 import time
-import traceback
 import webob.exc
 from oslo_log import log as logging
 from webob.exc import HTTPBadRequest
 from webob.exc import HTTPForbidden
-from webob.exc import HTTPServerError
 from threading import Thread
@@ -44,10 +42,6 @@ import daisy.api.backends.common as daisy_cmn
 from daisy.api.backends import driver
 from daisy.api.backends import os as os_handle
-try:
-    import simplejson as json
-except ImportError:
-    import json
 LOG = logging.getLogger(__name__)
 _ = i18n._
@@ -67,31 +61,40 @@ BACKENDS_UNINSTALL_ORDER = []

 def get_deployment_backends(req, cluster_id, backends_order):
-    cluster_roles = daisy_cmn.get_cluster_roles_detail(req,cluster_id)
-    cluster_backends = set([role['deployment_backend'] for role in cluster_roles if daisy_cmn.get_hosts_of_role(req, role['id'])])
-    ordered_backends = [backend for backend in backends_order if backend in cluster_backends]
-    other_backends = [backend for backend in cluster_backends if backend not in backends_order]
-    deployment_backends =ordered_backends + other_backends
+    cluster_roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id)
+    cluster_backends = set([role['deployment_backend']
+                            for role in cluster_roles if
+                            daisy_cmn.get_hosts_of_role(req, role['id'])])
+    ordered_backends = [
+        backend for backend in backends_order if backend in cluster_backends]
+    other_backends = [
+        backend for backend in cluster_backends if
+        backend not in backends_order]
+    deployment_backends = ordered_backends + other_backends
     return deployment_backends


 class InstallTask(object):
     """
     Class for install OS and TECS.
     """
     """ Definition for install states."""

     def __init__(self, req, cluster_id):
         self.req = req
         self.cluster_id = cluster_id

     def _backends_install(self):
-        backends = get_deployment_backends(self.req, self.cluster_id, BACKENDS_INSTALL_ORDER)
+        backends = get_deployment_backends(
+            self.req, self.cluster_id, BACKENDS_INSTALL_ORDER)
         if not backends:
             LOG.info(_("No backends need to install."))
             return
         for backend in backends:
             backend_driver = driver.load_deployment_dirver(backend)
             backend_driver.install(self.req, self.cluster_id)
     # this will be raise raise all the exceptions of the thread to log file

     def run(self):
         try:
             self._run()
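The ordering rule in get_deployment_backends() above is easy to check on its own: backends named in the preferred order list come first, in that order, and any other backend referenced by the cluster's roles is appended after them. A small sketch follows; the order list and backend names are made up for illustration.

# Standalone sketch of the backend ordering used by get_deployment_backends().
PREFERRED_ORDER = ['tecs', 'zenic', 'proton']   # illustrative, not Daisy's list


def order_backends(cluster_backends, backends_order=PREFERRED_ORDER):
    ordered = [b for b in backends_order if b in cluster_backends]
    others = [b for b in cluster_backends if b not in backends_order]
    return ordered + others


if __name__ == '__main__':
    print(order_backends({'proton', 'tecs'}))    # ['tecs', 'proton']
    print(order_backends({'kolla', 'zenic'}))    # ['zenic', 'kolla']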
@@ -102,43 +105,57 @@ class InstallTask(object):
         """
         Exectue os installation with sync mode.
         :return:
         """
         # get hosts config which need to install OS
-        all_hosts_need_os = os_handle.get_cluster_hosts_config(self.req, self.cluster_id)
+        all_hosts_need_os = os_handle.get_cluster_hosts_config(
+            self.req, self.cluster_id)
         if all_hosts_need_os:
-            hosts_with_role_need_os = [host_detail for host_detail in all_hosts_need_os if host_detail['status'] == 'with-role']
-            hosts_without_role_need_os = [host_detail for host_detail in all_hosts_need_os if host_detail['status'] != 'with-role']
+            hosts_with_role_need_os = [
+                host_detail for host_detail in all_hosts_need_os if
+                host_detail['status'] == 'with-role']
+            hosts_without_role_need_os = [
+                host_detail for host_detail in all_hosts_need_os if
+                host_detail['status'] != 'with-role']
         else:
             LOG.info(_("No host need to install os, begin to install "
                        "backends for cluster %s." % self.cluster_id))
             self._backends_install()
             return

         run_once_flag = True
-        # if no hosts with role need os, install backend applications immediately
+        # if no hosts with role need os, install backend applications
+        # immediately
         if not hosts_with_role_need_os:
             run_once_flag = False
             role_hosts_need_os = []
             LOG.info(_("All of hosts with role is 'active', begin to install "
-                       "backend applications for cluster %s first." % self.cluster_id))
+                       "backend applications for cluster %s first." %
+                       self.cluster_id))
             self._backends_install()
         else:
-            role_hosts_need_os = [host_detail['id'] for host_detail in hosts_with_role_need_os]
+            role_hosts_need_os = [host_detail['id']
+                                  for host_detail in hosts_with_role_need_os]

         # hosts with role put the head of the list
-        order_hosts_need_os = hosts_with_role_need_os + hosts_without_role_need_os
+        order_hosts_need_os = hosts_with_role_need_os + \
+            hosts_without_role_need_os
         while order_hosts_need_os:
             os_install = os_handle.OSInstall(self.req, self.cluster_id)
-            #all os will be installed batch by batch with max_parallel_os_number which was set in daisy-api.conf
-            (order_hosts_need_os,role_hosts_need_os) = os_install.install_os(order_hosts_need_os,role_hosts_need_os)
-            # after a batch of os install over, judge if all role hosts install os completely,
+            # all os will be installed batch by batch with
+            # max_parallel_os_number which was set in daisy-api.conf
+            (order_hosts_need_os, role_hosts_need_os) = os_install.install_os(
+                order_hosts_need_os, role_hosts_need_os)
+            # after a batch of os install over, judge if all
+            # role hosts install os completely,
             # if role_hosts_need_os is empty, install TECS immediately
             if run_once_flag and not role_hosts_need_os:
                 run_once_flag = False
-                #wait to reboot os after new os installed
+                # wait to reboot os after new os installed
                 time.sleep(10)
                 LOG.info(_("All hosts with role install successfully, "
-                           "begin to install backend applications for cluster %s." % self.cluster_id))
+                           "begin to install backend applications "
+                           "for cluster %s." %
+                           self.cluster_id))
                 self._backends_install()
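The loop in _run() above installs operating systems batch by batch and triggers the backend installation as soon as no host that carries a role is still waiting for an OS. A toy model of that control flow follows; install_os() and the batch size are placeholders (the real max_parallel_os_number comes from daisy-api.conf).

# Toy model of the InstallTask._run() batching logic.
MAX_PARALLEL_OS_NUMBER = 2                       # illustrative batch size


def install_os(pending_hosts, role_hosts, batch=MAX_PARALLEL_OS_NUMBER):
    # pretend to install one batch and return what is still pending
    done, remaining = pending_hosts[:batch], pending_hosts[batch:]
    return remaining, [h for h in role_hosts if h not in done]


def run(order_hosts, role_hosts):
    backends_installed = not role_hosts          # no role hosts: install now
    if backends_installed:
        print("install backend applications first")
    while order_hosts:
        order_hosts, role_hosts = install_os(order_hosts, role_hosts)
        if not backends_installed and not role_hosts:
            backends_installed = True
            print("all role hosts have an OS, install backend applications")
    print("done")


if __name__ == '__main__':
    run(['ha-1', 'ha-2', 'compute-1'], role_hosts=['ha-1', 'ha-2'])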
@ -160,6 +177,7 @@ class Controller(controller.BaseController):
data for a previously-reserved host data for a previously-reserved host
DELETE /hosts/<ID> -- Delete the host with id <ID> DELETE /hosts/<ID> -- Delete the host with id <ID>
""" """
def __init__(self): def __init__(self):
self.notifier = notifier.Notifier() self.notifier = notifier.Notifier()
registry.configure_registry_client() registry.configure_registry_client()
@ -177,13 +195,14 @@ class Controller(controller.BaseController):
self.policy.enforce(req.context, action, target) self.policy.enforce(req.context, action, target)
except exception.Forbidden: except exception.Forbidden:
raise HTTPForbidden() raise HTTPForbidden()
def _raise_404_if_cluster_deleted(self, req, cluster_id): def _raise_404_if_cluster_deleted(self, req, cluster_id):
cluster = self.get_cluster_meta_or_404(req, cluster_id) cluster = self.get_cluster_meta_or_404(req, cluster_id)
if cluster['deleted']: if cluster['deleted']:
msg = _("Cluster with identifier %s has been deleted.") % cluster_id msg = _("Cluster with identifier %s has been deleted.") % \
cluster_id
raise webob.exc.HTTPNotFound(msg) raise webob.exc.HTTPNotFound(msg)
def _get_filters(self, req): def _get_filters(self, req):
""" """
Return a dictionary of query param filters from the request Return a dictionary of query param filters from the request
@@ -225,19 +244,24 @@ class Controller(controller.BaseController):
         :raises HTTPBadRequest if x-install-cluster is missing
         """
+        if 'deployment_interface' in install_meta:
+            os_handle.pxe_server_build(req, install_meta)
+            return {"status": "pxe is installed"}
+
         cluster_id = install_meta['cluster_id']
         self._enforce(req, 'install_cluster')
         self._raise_404_if_cluster_deleted(req, cluster_id)
-        if install_meta.get("deployment_interface", None):
-            os_handle.pxe_server_build(req, install_meta)
-            return {"status": "pxe is installed"}
+        daisy_cmn.set_role_status_and_progress(
+            req, cluster_id, 'install',
+            {'messages': 'Waiting for TECS installation', 'progress': '0'},
+            'tecs')

-        # if have hosts need to install os, TECS installataion executed in InstallTask
+        # if have hosts need to install os,
+        # TECS installataion executed in InstallTask
         os_install_obj = InstallTask(req, cluster_id)
         os_install_thread = Thread(target=os_install_obj.run)
         os_install_thread.start()
-        return {"status":"begin install"}
+        return {"status": "begin install"}

     @utils.mutating
     def uninstall_cluster(self, req, cluster_id):
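install_cluster() above returns a status dict immediately while the heavy work runs in a background Thread. A minimal sketch of that fire-and-forget pattern, with a placeholder worker standing in for InstallTask:

# Fire-and-forget pattern used by install_cluster()/uninstall_cluster().
import time
from threading import Thread


def long_running_install(cluster_id):
    time.sleep(1)                    # stands in for OS plus backend install
    print("cluster %s installed" % cluster_id)


def install_cluster(cluster_id):
    worker = Thread(target=long_running_install, args=(cluster_id,))
    worker.start()                   # no join: the API call returns right away
    return {"status": "begin install"}


if __name__ == '__main__':
    print(install_cluster("cluster-1"))   # printed before the worker finishes
    time.sleep(2)                         # keep the demo alive for the worker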
@ -251,20 +275,24 @@ class Controller(controller.BaseController):
self._enforce(req, 'uninstall_cluster') self._enforce(req, 'uninstall_cluster')
self._raise_404_if_cluster_deleted(req, cluster_id) self._raise_404_if_cluster_deleted(req, cluster_id)
backends = get_deployment_backends(req, cluster_id, BACKENDS_UNINSTALL_ORDER) backends = get_deployment_backends(
req, cluster_id, BACKENDS_UNINSTALL_ORDER)
for backend in backends: for backend in backends:
backend_driver = driver.load_deployment_dirver(backend) backend_driver = driver.load_deployment_dirver(backend)
uninstall_thread = Thread(target=backend_driver.uninstall, args=(req, cluster_id)) uninstall_thread = Thread(
target=backend_driver.uninstall, args=(
req, cluster_id))
uninstall_thread.start() uninstall_thread.start()
return {"status":"begin uninstall"} return {"status": "begin uninstall"}
@utils.mutating @utils.mutating
def uninstall_progress(self, req, cluster_id): def uninstall_progress(self, req, cluster_id):
self._enforce(req, 'uninstall_progress') self._enforce(req, 'uninstall_progress')
self._raise_404_if_cluster_deleted(req, cluster_id) self._raise_404_if_cluster_deleted(req, cluster_id)
all_nodes = {} all_nodes = {}
backends = get_deployment_backends(req, cluster_id, BACKENDS_UNINSTALL_ORDER) backends = get_deployment_backends(
req, cluster_id, BACKENDS_UNINSTALL_ORDER)
if not backends: if not backends:
LOG.info(_("No backends need to uninstall.")) LOG.info(_("No backends need to uninstall."))
return all_nodes return all_nodes
@ -274,7 +302,6 @@ class Controller(controller.BaseController):
all_nodes.update(nodes_process) all_nodes.update(nodes_process)
return all_nodes return all_nodes
@utils.mutating @utils.mutating
def update_cluster(self, req, cluster_id): def update_cluster(self, req, cluster_id):
""" """
@@ -287,29 +314,36 @@ class Controller(controller.BaseController):
         self._enforce(req, 'update_cluster')
         self._raise_404_if_cluster_deleted(req, cluster_id)
-        backends = get_deployment_backends(req, cluster_id, BACKENDS_UPGRADE_ORDER)
+        backends = get_deployment_backends(
+            req, cluster_id, BACKENDS_UPGRADE_ORDER)
         if not backends:
             LOG.info(_("No backends need to update."))
-            return {"status":""}
+            return {"status": ""}
+        daisy_cmn.set_role_status_and_progress(
+            req, cluster_id, 'upgrade',
+            {'messages': 'Waiting for TECS upgrading', 'progress': '0'},
+            'tecs')
         for backend in backends:
             backend_driver = driver.load_deployment_dirver(backend)
-            update_thread = Thread(target=backend_driver.upgrade, args=(req, cluster_id))
+            update_thread = Thread(target=backend_driver.upgrade,
+                                   args=(req, cluster_id))
             update_thread.start()
-        return {"status":"begin update"}
+        return {"status": "begin update"}

     @utils.mutating
     def update_progress(self, req, cluster_id):
         self._enforce(req, 'update_progress')
         self._raise_404_if_cluster_deleted(req, cluster_id)
-        backends = get_deployment_backends(req, cluster_id, BACKENDS_UPGRADE_ORDER)
+        backends = get_deployment_backends(
+            req, cluster_id, BACKENDS_UPGRADE_ORDER)
         all_nodes = {}
         for backend in backends:
             backend_driver = driver.load_deployment_dirver(backend)
             nodes_process = backend_driver.upgrade_progress(req, cluster_id)
             all_nodes.update(nodes_process)
         return all_nodes

     @utils.mutating
     def export_db(self, req, install_meta):
         """
@ -324,7 +358,8 @@ class Controller(controller.BaseController):
self._raise_404_if_cluster_deleted(req, cluster_id) self._raise_404_if_cluster_deleted(req, cluster_id)
all_config_files = {} all_config_files = {}
backends = get_deployment_backends(req, cluster_id, BACKENDS_INSTALL_ORDER) backends = get_deployment_backends(
req, cluster_id, BACKENDS_INSTALL_ORDER)
if not backends: if not backends:
LOG.info(_("No backends need to export.")) LOG.info(_("No backends need to export."))
return all_config_files return all_config_files
@ -345,17 +380,18 @@ class Controller(controller.BaseController):
""" """
self._enforce(req, 'update_disk_array') self._enforce(req, 'update_disk_array')
self._raise_404_if_cluster_deleted(req, cluster_id) self._raise_404_if_cluster_deleted(req, cluster_id)
tecs_backend_name = 'tecs' tecs_backend_name = 'tecs'
backends = get_deployment_backends(req, cluster_id, BACKENDS_UNINSTALL_ORDER) backends = get_deployment_backends(
req, cluster_id, BACKENDS_UNINSTALL_ORDER)
if tecs_backend_name not in backends: if tecs_backend_name not in backends:
message = "No tecs backend" message = "No tecs backend"
LOG.info(_(message)) LOG.info(_(message))
else: else:
backend_driver = driver.load_deployment_dirver(tecs_backend_name) backend_driver = driver.load_deployment_dirver(tecs_backend_name)
message = backend_driver.update_disk_array(req, cluster_id) message = backend_driver.update_disk_array(req, cluster_id)
return {'status':message} return {'status': message}
class InstallDeserializer(wsgi.JSONRequestDeserializer): class InstallDeserializer(wsgi.JSONRequestDeserializer):
"""Handles deserialization of specific controller method requests.""" """Handles deserialization of specific controller method requests."""
@ -367,13 +403,14 @@ class InstallDeserializer(wsgi.JSONRequestDeserializer):
def install_cluster(self, request): def install_cluster(self, request):
return self._deserialize(request) return self._deserialize(request)
def export_db(self, request): def export_db(self, request):
return self._deserialize(request) return self._deserialize(request)
def update_disk_array(self, request): def update_disk_array(self, request):
return {} return {}
class InstallSerializer(wsgi.JSONResponseSerializer): class InstallSerializer(wsgi.JSONResponseSerializer):
"""Handles serialization of specific controller method responses.""" """Handles serialization of specific controller method responses."""
@ -385,7 +422,7 @@ class InstallSerializer(wsgi.JSONResponseSerializer):
response.headers['Content-Type'] = 'application/json' response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(result) response.body = self.to_json(result)
return response return response
def export_db(self, response, result): def export_db(self, response, result):
response.status = 201 response.status = 201
response.headers['Content-Type'] = 'application/json' response.headers['Content-Type'] = 'application/json'
@ -397,7 +434,8 @@ class InstallSerializer(wsgi.JSONResponseSerializer):
response.headers['Content-Type'] = 'application/json' response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(result) response.body = self.to_json(result)
return response return response
def create_resource(): def create_resource():
"""Image members resource factory method""" """Image members resource factory method"""
deserializer = InstallDeserializer() deserializer = InstallDeserializer()

@ -53,36 +53,38 @@ class Controller(controller.BaseController):
def _raise_404_if_project_deleted(self, req, cluster_id): def _raise_404_if_project_deleted(self, req, cluster_id):
project = self.get_cluster_meta_or_404(req, cluster_id) project = self.get_cluster_meta_or_404(req, cluster_id)
if project['deleted']: if project['deleted']:
msg = _("Cluster with identifier %s has been deleted.") % cluster_id msg = _("Cluster with identifier %s has been deleted.") % \
cluster_id
raise webob.exc.HTTPNotFound(msg) raise webob.exc.HTTPNotFound(msg)
# def get_cluster_hosts(self, req, cluster_id, host_id=None): # def get_cluster_hosts(self, req, cluster_id, host_id=None):
# """ # """
# Return a list of dictionaries indicating the members of the # Return a list of dictionaries indicating the members of the
# image, i.e., those tenants the image is shared with. # image, i.e., those tenants the image is shared with.
# #
# :param req: the Request object coming from the wsgi layer # :param req: the Request object coming from the wsgi layer
# :param image_id: The opaque image identifier # :param image_id: The opaque image identifier
# :retval The response body is a mapping of the following form:: # :retval The response body is a mapping of the following form::
# {'members': [ # {'members': [
# {'host_id': <HOST>, ...}, ... # {'host_id': <HOST>, ...}, ...
# ]} # ]}
# """ # """
# self._enforce(req, 'get_cluster_hosts') # self._enforce(req, 'get_cluster_hosts')
# self._raise_404_if_project_deleted(req, cluster_id) # self._raise_404_if_project_deleted(req, cluster_id)
# #
# try: # try:
# members = registry.get_cluster_hosts(req.context, cluster_id, host_id) # members = registry.get_cluster_hosts(
# except exception.NotFound: # req.context, cluster_id, host_id)
# msg = _("Project with identifier %s not found") % cluster_id # except exception.NotFound:
# LOG.warn(msg) # msg = _("Project with identifier %s not found") % cluster_id
# raise webob.exc.HTTPNotFound(msg) # LOG.warn(msg)
# except exception.Forbidden: # raise webob.exc.HTTPNotFound(msg)
# msg = _("Unauthorized project access") # except exception.Forbidden:
# LOG.warn(msg) # msg = _("Unauthorized project access")
# raise webob.exc.HTTPForbidden(msg) # LOG.warn(msg)
# return dict(members=members) # raise webob.exc.HTTPForbidden(msg)
# return dict(members=members)
@utils.mutating @utils.mutating
def delete(self, req, image_id, id): def delete(self, req, image_id, id):
@ -104,7 +106,7 @@ class Controller(controller.BaseController):
raise webob.exc.HTTPNotFound(explanation=e.msg) raise webob.exc.HTTPNotFound(explanation=e.msg)
return webob.exc.HTTPNoContent() return webob.exc.HTTPNoContent()
@utils.mutating @utils.mutating
def add_cluster_host(self, req, cluster_id, host_id, body=None): def add_cluster_host(self, req, cluster_id, host_id, body=None):
""" """
@ -113,7 +115,7 @@ class Controller(controller.BaseController):
self._enforce(req, 'add_cluster_host') self._enforce(req, 'add_cluster_host')
self._raise_404_if_project_deleted(req, cluster_id) self._raise_404_if_project_deleted(req, cluster_id)
self._raise_404_if_host_deleted(req, host_id) self._raise_404_if_host_deleted(req, host_id)
try: try:
registry.add_cluster_host(req.context, cluster_id, host_id) registry.add_cluster_host(req.context, cluster_id, host_id)
except exception.Invalid as e: except exception.Invalid as e:
@ -127,7 +129,7 @@ class Controller(controller.BaseController):
raise webob.exc.HTTPNotFound(explanation=e.msg) raise webob.exc.HTTPNotFound(explanation=e.msg)
return webob.exc.HTTPNoContent() return webob.exc.HTTPNoContent()
@utils.mutating @utils.mutating
def delete_cluster_host(self, req, cluster_id, host_id): def delete_cluster_host(self, req, cluster_id, host_id):
""" """
@ -147,7 +149,7 @@ class Controller(controller.BaseController):
raise webob.exc.HTTPNotFound(explanation=e.msg) raise webob.exc.HTTPNotFound(explanation=e.msg)
return webob.exc.HTTPNoContent() return webob.exc.HTTPNoContent()
def default(self, req, image_id, id, body=None): def default(self, req, image_id, id, body=None):
"""This will cover the missing 'show' and 'create' actions""" """This will cover the missing 'show' and 'create' actions"""
raise webob.exc.HTTPMethodNotAllowed() raise webob.exc.HTTPMethodNotAllowed()

@ -36,6 +36,7 @@ from daisy.common import wsgi
from daisy import i18n from daisy import i18n
from daisy import notifier from daisy import notifier
import daisy.registry.client.v1.api as registry import daisy.registry.client.v1.api as registry
from functools import reduce
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
_ = i18n._ _ = i18n._
@ -52,9 +53,16 @@ CONF.import_opt('container_formats', 'daisy.common.config',
group='image_format') group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config') CONF.import_opt('image_property_quota', 'daisy.common.config')
SUPPORT_NETWORK_TYPE = ('PUBLIC', 'PRIVATE', 'STORAGE', 'MANAGEMENT', 'EXTERNAL', 'DEPLOYMENT', 'VXLAN') SUPPORT_NETWORK_TYPE = (
SUPPORT_NETWORK_TEMPLATE_TYPE = ('custom', 'template', 'default') 'PUBLICAPI',
SUPPORT_ML2_TYPE = ('ovs', 'sriov(direct)', 'sriov(macvtap)', 'DATAPLANE',
'STORAGE',
'MANAGEMENT',
'EXTERNAL',
'DEPLOYMENT',
'HEARTBEAT')
SUPPORT_NETWORK_TEMPLATE_TYPE = ('custom', 'template', 'default', 'system')
SUPPORT_ML2_TYPE = ('ovs', 'sriov(direct)', 'sriov(macvtap)',
'ovs,sriov(direct)', 'ovs,sriov(macvtap)') 'ovs,sriov(direct)', 'ovs,sriov(macvtap)')
SUPPORT_NETWORK_CAPABILITY = ('high', 'low') SUPPORT_NETWORK_CAPABILITY = ('high', 'low')
@ -99,12 +107,15 @@ class Controller(controller.BaseController):
def _raise_404_if_network_deleted(self, req, network_id): def _raise_404_if_network_deleted(self, req, network_id):
network = self.get_network_meta_or_404(req, network_id) network = self.get_network_meta_or_404(req, network_id)
if network['deleted']: if network['deleted']:
msg = _("Network with identifier %s has been deleted.") % network_id msg = _("Network with identifier %s has been deleted.") % \
network_id
raise HTTPNotFound(msg) raise HTTPNotFound(msg)
def _raise_404_if_cluster_delete(self, req, cluster_id): def _raise_404_if_cluster_delete(self, req, cluster_id):
cluster_id = self.get_cluster_meta_or_404(req, cluster_id) cluster_id = self.get_cluster_meta_or_404(req, cluster_id)
if cluster_id['deleted']: if cluster_id['deleted']:
msg = _("cluster_id with identifier %s has been deleted.") % cluster_id msg = _("cluster_id with identifier %s has been deleted.") % \
cluster_id
raise HTTPNotFound(msg) raise HTTPNotFound(msg)
def _get_network_name_by_cluster_id(self, context, cluster_id): def _get_network_name_by_cluster_id(self, context, cluster_id):
@ -114,7 +125,6 @@ class Controller(controller.BaseController):
network_name_list.append(network['name']) network_name_list.append(network['name'])
return network_name_list return network_name_list
def _get_filters(self, req): def _get_filters(self, req):
""" """
Return a dictionary of query param filters from the request Return a dictionary of query param filters from the request
@ -146,7 +156,7 @@ class Controller(controller.BaseController):
if PARAM in req.params: if PARAM in req.params:
params[PARAM] = req.params.get(PARAM) params[PARAM] = req.params.get(PARAM)
return params return params
def validate_ip_format(self, ip_str): def validate_ip_format(self, ip_str):
''' '''
valid ip_str format = '10.43.178.9' valid ip_str format = '10.43.178.9'
@@ -157,21 +167,21 @@
             '10.43.1789', invalid format
         '''
         valid_fromat = False
-        if ip_str.count('.') == 3 and \
-            all(num.isdigit() and 0<=int(num)<256 for num in ip_str.rstrip().split('.')):
+        if ip_str.count('.') == 3 and all(num.isdigit() and 0 <= int(
+                num) < 256 for num in ip_str.rstrip().split('.')):
             valid_fromat = True
-        if valid_fromat == False:
+        if not valid_fromat:
             msg = (_("%s invalid ip format!") % ip_str)
             LOG.warn(msg)
             raise HTTPForbidden(msg)

     def _ip_into_int(self, ip):
         """
         Switch ip string to decimalism integer..
         :param ip: ip string
         :return: decimalism integer
         """
-        return reduce(lambda x, y: (x<<8)+y, map(int, ip.split('.')))
+        return reduce(lambda x, y: (x << 8) + y, map(int, ip.split('.')))

     def _is_in_network_range(self, ip, network):
         """
@@ -182,9 +192,13 @@
         """
         network = network.split('/')
         mask = ~(2**(32 - int(network[1])) - 1)
-        return (self._ip_into_int(ip) & mask) == (self._ip_into_int(network[0]) & mask)
+        return (
+            self._ip_into_int(ip) & mask) == (
+            self._ip_into_int(
+                network[0]) & mask)

-    def _verify_uniqueness_of_network_name(self, req, network_list, network_meta, is_update = False):
+    def _verify_uniqueness_of_network_name(
+            self, req, network_list, network_meta, is_update=False):
         """
         Network name is match case and uniqueness in cluster.
         :param req:
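_ip_into_int() above packs a dotted-quad address into a 32-bit integer, and _is_in_network_range() compares masked values to test CIDR membership. The same arithmetic can be exercised standalone and cross-checked against the stdlib ipaddress module; the sample addresses are illustrative.

# Standalone check of the IP/CIDR helpers, verified against ipaddress.
import ipaddress
from functools import reduce


def ip_into_int(ip):
    # '192.160.0.10' -> 32-bit integer, same fold as the controller helper
    return reduce(lambda x, y: (x << 8) + y, map(int, ip.split('.')))


def is_in_network_range(ip, cidr):
    net, prefix = cidr.split('/')
    mask = ~(2 ** (32 - int(prefix)) - 1)
    return (ip_into_int(ip) & mask) == (ip_into_int(net) & mask)


if __name__ == '__main__':
    for ip in ('192.160.0.10', '192.160.2.10'):
        same = is_in_network_range(ip, '192.160.0.0/23')
        stdlib = ipaddress.ip_address(u'%s' % ip) in \
            ipaddress.ip_network(u'192.160.0.0/23')
        print(ip, same, stdlib)      # both computations agree: True then False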
@ -192,50 +206,137 @@ class Controller(controller.BaseController):
:param network_meta: network plane need be verified :param network_meta: network plane need be verified
:return: :return:
""" """
if not network_list or not network_meta or not network_meta.get('name', None): if not network_list or not network_meta or not network_meta.get(
msg = _("Input params invalid for verifying uniqueness of network name.") 'name', None):
msg = _("Input params invalid for verifying uniqueness of "
"network name.")
raise HTTPBadRequest(msg, request=req, content_type="text/plain") raise HTTPBadRequest(msg, request=req, content_type="text/plain")
network_name = network_meta['name'] network_name = network_meta['name']
for network in network_list['networks']: for network in network_list['networks']:
if (is_update and if (is_update and
network_name == network['name'] and network_name == network['name'] and
network_meta['id'] == network['id']): network_meta['id'] == network['id']):
return return
# network name don't match case # network name don't match case
network_name_list = [network['name'].lower() for network in network_name_list = [network['name'].lower() for network in
network_list['networks'] if network.get('name', None)] network_list['networks'] if
network.get('name', None)]
if network_name.lower() in network_name_list: if network_name.lower() in network_name_list:
msg = _("Name of network isn't match case and %s already exits in the cluster." % network_name) msg = _(
"Name of network isn't match case and %s already exits "
"in the cluster." %
network_name)
raise HTTPConflict(msg, request=req, content_type="text/plain") raise HTTPConflict(msg, request=req, content_type="text/plain")
if not is_update: if not is_update:
# Input networks type can't be same with db record which is all ready exit, # Input networks type can't be same with db record
# which is all ready exit,
# except PRIVATE network. # except PRIVATE network.
network_type_exist_list = \ network_type_exist_list = \
[network['network_type'] for network in network_list['networks'] [network['network_type'] for network in
if network.get('network_type', None) and network['network_type'] != "PRIVATE" network_list['networks']
and network['network_type'] != "STORAGE"] if network.get('network_type', None) and
if network_meta.get("network_type", None) in network_type_exist_list: network['network_type'] != "DATAPLANE" and
msg = _("The %s network plane %s must be only, except PRIVATE network." % network['network_type'] != "STORAGE" and
(network_meta['network_type'], network_name)) network['network_type'] != "HEARTBEAT"]
if network_meta.get(
"network_type",
None) in network_type_exist_list:
msg = _(
"The %s network plane %s must be unique, "
"except DATAPLANE/STORAGE/HEARTBEAT network." %
(network_meta['network_type'], network_name))
raise HTTPConflict(msg, request=req, content_type="text/plain") raise HTTPConflict(msg, request=req, content_type="text/plain")
def _valid_vlan_range(self, req, network_meta): def _valid_network_range(self, req, network_meta):
if ((network_meta.has_key('vlan_start') and not network_meta.has_key('vlan_end')) or if (('vlan_start' in network_meta and 'vlan_end' not in
(not network_meta.has_key('vlan_start') and network_meta.has_key('vlan_end'))): network_meta) or (
raise HTTPBadRequest(explanation="vlan-start and vlan-end must be appeared at the same time", request=req) 'vlan_start' not in network_meta and
if network_meta.has_key('vlan_start'): 'vlan_end' in network_meta)):
if not (int(network_meta['vlan_start']) >= 1 and msg = "vlan-start and vlan-end must be appeared "\
"at the same time"
LOG.error(msg)
raise HTTPBadRequest(explanation=msg, request=req)
if 'vlan_start' in network_meta:
if not (int(network_meta['vlan_start']) >= 1 and
int(network_meta['vlan_start']) <= 4094): int(network_meta['vlan_start']) <= 4094):
raise HTTPBadRequest(explanation="vlan-start must be a integer in '1~4096'", request=req) msg = "vlan_start must be a integer in 1~4096"
if network_meta.has_key('vlan_end'): LOG.error(msg)
if not (int(network_meta['vlan_end']) >= 1 and raise HTTPBadRequest(explanation=msg, request=req)
if 'vlan_end' in network_meta:
if not (int(network_meta['vlan_end']) >= 1 and
int(network_meta['vlan_end']) <= 4094): int(network_meta['vlan_end']) <= 4094):
raise HTTPBadRequest(explanation="vlan-end must be a integer in '1~4096'", request=req) msg = "vlan_end must be a integer in 1~4096"
LOG.error(msg)
raise HTTPBadRequest(explanation=msg, request=req)
if int(network_meta['vlan_start']) > int(network_meta['vlan_end']): if int(network_meta['vlan_start']) > int(network_meta['vlan_end']):
raise HTTPBadRequest(explanation="vlan-start must be less than vlan-end", request=req) msg = "vlan_start must be less than vlan_end"
LOG.error(msg)
raise HTTPBadRequest(explanation=msg, request=req)
if (('vni_start' in network_meta and 'vni_end' not in
network_meta) or (
'vni_start' not in network_meta and
'vni_end' in network_meta)):
msg = "vni_start and vni_end must be appeared at the same time"
LOG.error(msg)
raise HTTPBadRequest(explanation=msg, request=req)
if 'vni_start' in network_meta:
if not (int(network_meta['vni_start']) >= 1 and
int(network_meta['vni_start']) <= 16777216):
msg = "vni_start must be a integer in 1~16777216"
LOG.error(msg)
raise HTTPBadRequest(explanation=msg, request=req)
if 'vni_end' in network_meta:
if not (int(network_meta['vni_end']) >= 1 and
int(network_meta['vni_end']) <= 16777216):
msg = "vni_end must be a integer in 1~16777216"
LOG.error(msg)
raise HTTPBadRequest(explanation=msg, request=req)
if int(network_meta['vni_start']) > int(network_meta['vni_end']):
msg = "vni_start must be less than vni_end"
LOG.error(msg)
raise HTTPBadRequest(explanation=msg, request=req)
if (('gre_id_start' in network_meta and 'gre_id_end' not in
network_meta) or (
'gre_id_start' not in network_meta and
'gre_id_end' in network_meta)):
msg = "gre_id_start and gre_id_end must"\
"be appeared at the same time"
LOG.error(msg)
raise HTTPBadRequest(explanation=msg, request=req)
if 'gre_id_start' in network_meta:
if not (int(network_meta['gre_id_start']) >= 1 and
int(network_meta['gre_id_start']) <= 4094):
msg = "gre_id_start must be a integer in 1~4094"
LOG.error(msg)
raise HTTPBadRequest(explanation=msg, request=req)
if 'gre_id_end' in network_meta:
if not (int(network_meta['gre_id_end']) >= 1 and
int(network_meta['gre_id_end']) <= 4094):
msg = "gre_id_end must be a integer in 1~4094"
LOG.error(msg)
raise HTTPBadRequest(explanation=msg, request=req)
if int(network_meta['gre_id_start']) >\
int(network_meta['gre_id_end']):
msg = "gre_id_start must be less than gre_id_end"
LOG.error(msg)
raise HTTPBadRequest(explanation=msg, request=req)
def _verify_heartbeat_network(self, req, network_list, network_meta):
heartbeat_networks = [
network for network in network_list['networks'] if network.get(
'network_type',
None) and network['network_type'] == "HEARTBEAT"]
if len(heartbeat_networks) >= 2:
raise HTTPBadRequest(
explanation="HEARTBEAT network plane number must be "
"less than two",
request=req)
@utils.mutating @utils.mutating
def add_network(self, req, network_meta): def add_network(self, req, network_meta):
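_valid_network_range() above repeats the same three checks for the vlan, vni and gre_id pairs: both ends must be supplied together, each end must lie inside the allowed bounds, and start must not exceed end. Below is a generic sketch of that rule, reusing the bounds enforced by the code above; the helper name and dict-based interface are made up for illustration.

# Generic form of the paired-range validation in _valid_network_range().
RANGE_BOUNDS = {
    'vlan': (1, 4094),
    'vni': (1, 16777216),
    'gre_id': (1, 4094),
}


def check_range(meta, prefix):
    start_key, end_key = prefix + '_start', prefix + '_end'
    if (start_key in meta) != (end_key in meta):
        raise ValueError('%s and %s must be given together'
                         % (start_key, end_key))
    if start_key not in meta:
        return                       # neither bound given, nothing to check
    low, high = RANGE_BOUNDS[prefix]
    start, end = int(meta[start_key]), int(meta[end_key])
    for value, key in ((start, start_key), (end, end_key)):
        if not low <= value <= high:
            raise ValueError('%s must be an integer in %d~%d'
                             % (key, low, high))
    if start > end:
        raise ValueError('%s must be less than %s' % (start_key, end_key))


if __name__ == '__main__':
    check_range({'vlan_start': '100', 'vlan_end': '200'}, 'vlan')   # passes
    try:
        check_range({'vni_start': '10'}, 'vni')
    except ValueError as exc:
        print(exc)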
@ -248,46 +349,62 @@ class Controller(controller.BaseController):
:raises HTTPBadRequest if x-host-name is missing :raises HTTPBadRequest if x-host-name is missing
""" """
self._enforce(req, 'add_network') self._enforce(req, 'add_network')
cluster_id = network_meta.get('cluster_id',None) cluster_id = network_meta.get('cluster_id', None)
if cluster_id: if cluster_id:
self._raise_404_if_cluster_delete(req, cluster_id) self._raise_404_if_cluster_delete(req, cluster_id)
network_list = self.detail(req, cluster_id) network_list = self.detail(req, cluster_id)
self._verify_uniqueness_of_network_name(req, network_list, network_meta) self._verify_uniqueness_of_network_name(
req, network_list, network_meta)
if 'network_type' in network_meta and network_meta[
'network_type'] == "HEARTBEAT":
self._verify_heartbeat_network(req, network_list, network_meta)
# else: # else:
# if network_meta.get('type',None) != "template": # if network_meta.get('type',None) != "template":
# raise HTTPBadRequest(explanation="cluster id must be given", request=req) # raise HTTPBadRequest(explanation="cluster id must be given",
network_name=network_meta.get('name',None) # request=req)
network_name = network_meta.get('name', None)
network_name_split = network_name.split('_') network_name_split = network_name.split('_')
for network_name_info in network_name_split : for network_name_info in network_name_split:
if not network_name_info.isalnum(): if not network_name_info.isalnum():
raise ValueError('network name must be numbers or letters or underscores !') raise ValueError(
if not network_meta.has_key('network_type'): 'network name must be numbers or letters or underscores !')
raise HTTPBadRequest(explanation="network-type must be given", request=req) if 'network_type' not in network_meta:
if network_meta['network_type'] not in SUPPORT_NETWORK_TYPE: raise HTTPBadRequest(
raise HTTPBadRequest(explanation="unsupported network-type", request=req) explanation="network-type must be given",
request=req)
if network_meta['network_type'] not in SUPPORT_NETWORK_TYPE:
if (network_meta.has_key('type') and raise HTTPBadRequest(
network_meta['type'] not in SUPPORT_NETWORK_TEMPLATE_TYPE): explanation="unsupported network-type",
raise HTTPBadRequest(explanation="unsupported type", request=req) request=req)
if (network_meta.has_key('capability') and
network_meta['capability'] not in SUPPORT_NETWORK_CAPABILITY):
raise HTTPBadRequest(explanation="unsupported capability type", request=req)
self._valid_vlan_range(req, network_meta)
if network_meta.get('ip_ranges', None): if ('type' in network_meta and
network_meta['type'] not in SUPPORT_NETWORK_TEMPLATE_TYPE):
raise HTTPBadRequest(explanation="unsupported type", request=req)
if ('capability' in network_meta and
network_meta['capability'] not in SUPPORT_NETWORK_CAPABILITY):
raise HTTPBadRequest(
explanation="unsupported capability type",
request=req)
self._valid_network_range(req, network_meta)
if network_meta.get('ip_ranges', None) and \
eval(network_meta['ip_ranges']):
cidr = None cidr = None
if not network_meta.has_key('cidr'): if 'cidr' not in network_meta:
msg = (_("When ip range was specified, the CIDR parameter can not be empty.")) msg = (
_("When ip range was specified, the CIDR parameter "
"can not be empty."))
LOG.warn(msg) LOG.warn(msg)
raise HTTPForbidden(msg) raise HTTPForbidden(msg)
else: else:
cidr = network_meta['cidr'] cidr = network_meta['cidr']
cidr_division = cidr.split('/') cidr_division = cidr.split('/')
if len(cidr_division) != 2 or ( cidr_division[1] \ if len(cidr_division) != 2 or (
and int(cidr_division[1]) > 32 or int(cidr_division[1]) < 0): cidr_division[1] and int(
cidr_division[1]) > 32 or int(
cidr_division[1]) < 0):
msg = (_("Wrong CIDR format.")) msg = (_("Wrong CIDR format."))
LOG.warn(msg) LOG.warn(msg)
raise HTTPForbidden(msg) raise HTTPForbidden(msg)
@ -299,39 +416,50 @@ class Controller(controller.BaseController):
sorted_int_ip_ranges_list = list() sorted_int_ip_ranges_list = list()
for ip_pair in ip_ranges: for ip_pair in ip_ranges:
if ['start', 'end'] != ip_pair.keys(): if ['start', 'end'] != ip_pair.keys():
msg = (_("IP range was not start with 'start:' or end with 'end:'.")) msg = (
_("IP range was not start with 'start:' or "
"end with 'end:'."))
LOG.warn(msg) LOG.warn(msg)
raise HTTPForbidden(msg) raise HTTPForbidden(msg)
ip_start = ip_pair['start'] ip_start = ip_pair['start']
ip_end = ip_pair['end'] ip_end = ip_pair['end']
self.validate_ip_format(ip_start) #check ip format self.validate_ip_format(ip_start) # check ip format
self.validate_ip_format(ip_end) self.validate_ip_format(ip_end)
if not self._is_in_network_range(ip_start, cidr): if not self._is_in_network_range(ip_start, cidr):
msg = (_("IP address %s was not in the range of CIDR %s." % (ip_start, cidr))) msg = (
_("IP address %s was not in the range "
"of CIDR %s." % (ip_start, cidr)))
LOG.warn(msg) LOG.warn(msg)
raise HTTPForbidden(msg) raise HTTPForbidden(msg)
if not self._is_in_network_range(ip_end, cidr): if not self._is_in_network_range(ip_end, cidr):
msg = (_("IP address %s was not in the range of CIDR %s." % (ip_end, cidr))) msg = (
_("IP address %s was not in the range "
"of CIDR %s." % (ip_end, cidr)))
LOG.warn(msg) LOG.warn(msg)
raise HTTPForbidden(msg) raise HTTPForbidden(msg)
#transform ip format to int when the string format is valid # transform ip format to int when the string format is
# valid
int_ip_start = self._ip_into_int(ip_start) int_ip_start = self._ip_into_int(ip_start)
int_ip_end = self._ip_into_int(ip_end) int_ip_end = self._ip_into_int(ip_end)
if int_ip_start > int_ip_end: if int_ip_start > int_ip_end:
msg = (_("Wrong ip range format.")) msg = (_("Wrong ip range format."))
LOG.warn(msg) LOG.warn(msg)
raise HTTPForbidden(msg) raise HTTPForbidden(msg)
int_ip_ranges_list.append([int_ip_start, int_ip_end]) int_ip_ranges_list.append([int_ip_start, int_ip_end])
sorted_int_ip_ranges_list = sorted(int_ip_ranges_list, key=lambda x : x[0]) sorted_int_ip_ranges_list = sorted(
int_ip_ranges_list, key=lambda x: x[0])
for int_ip_range in sorted_int_ip_ranges_list: for int_ip_range in sorted_int_ip_ranges_list:
if last_ip_range_end and last_ip_range_end >= int_ip_range[0]: if last_ip_range_end and last_ip_range_end >= int_ip_range[
0]:
msg = (_("Between ip ranges can not be overlap.")) msg = (_("Between ip ranges can not be overlap."))
LOG.warn(msg) # such as "[10, 15], [12, 16]", last_ip_range_end >= int_ip_range[0], this ip ranges were overlap # such as "[10, 15], [12, 16]", last_ip_range_end >=
# int_ip_range[0], this ip ranges were overlap
LOG.warn(msg)
raise HTTPForbidden(msg) raise HTTPForbidden(msg)
else: else:
last_ip_range_end = int_ip_range[1] last_ip_range_end = int_ip_range[1]
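The ip_ranges handling above converts each start/end pair to integers, sorts by range start, and rejects the request when one range begins at or before the end of the previous one. A standalone version with illustrative sample ranges:

# Standalone version of the ip_ranges overlap check.
from functools import reduce


def ip_into_int(ip):
    return reduce(lambda x, y: (x << 8) + y, map(int, ip.split('.')))


def ranges_overlap(ip_ranges):
    int_ranges = sorted(
        [[ip_into_int(r['start']), ip_into_int(r['end'])] for r in ip_ranges],
        key=lambda pair: pair[0])
    last_end = None
    for start, end in int_ranges:
        if last_end is not None and last_end >= start:
            return True              # e.g. [10, 15] and [12, 16] overlap
        last_end = end
    return False


if __name__ == '__main__':
    ok = [{'start': '192.160.0.10', 'end': '192.160.0.20'},
          {'start': '192.160.0.30', 'end': '192.160.0.40'}]
    bad = [{'start': '192.160.0.10', 'end': '192.160.0.20'},
           {'start': '192.160.0.15', 'end': '192.160.0.25'}]
    print(ranges_overlap(ok), ranges_overlap(bad))    # False True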
@ -353,16 +481,33 @@ class Controller(controller.BaseController):
'have the same cidr')) 'have the same cidr'))
raise HTTPBadRequest(explanation=msg) raise HTTPBadRequest(explanation=msg)
if network_meta.get('gateway', None) and network_meta.get('cidr', None): if network_meta.get(
'gateway',
None) and network_meta.get(
'cidr',
None):
gateway = network_meta['gateway'] gateway = network_meta['gateway']
cidr = network_meta['cidr'] cidr = network_meta['cidr']
self.validate_ip_format(gateway) self.validate_ip_format(gateway)
return_flag = self._is_in_network_range(gateway, cidr) return_flag = self._is_in_network_range(gateway, cidr)
if not return_flag: if not return_flag:
msg = (_('The gateway %s was not in the same segment with the cidr %s of management network.' % (gateway, cidr))) msg = (
_(
'The gateway %s was not in the same segment '
'with the cidr %s of management network.' %
(gateway, cidr)))
raise HTTPBadRequest(explanation=msg) raise HTTPBadRequest(explanation=msg)
if network_meta.get('cluster_id') and network_meta.get('gateway'):
networks = registry.get_networks_detail(req.context, cluster_id)
gateways = [network['gateway'] for network in networks
if network['name'] != network_meta['name'] and
network['gateway']]
if gateways:
msg = (_('More than one gateway found in cluster.'))
LOG.error(msg)
raise HTTPConflict(explanation=msg)
network_meta = registry.add_network_metadata(req.context, network_meta) network_meta = registry.add_network_metadata(req.context, network_meta)
return {'network_meta': network_meta} return {'network_meta': network_meta}
@ -377,14 +522,16 @@ class Controller(controller.BaseController):
:raises HTTPBadRequest if x-host-name is missing :raises HTTPBadRequest if x-host-name is missing
""" """
self._enforce(req, 'delete_network') self._enforce(req, 'delete_network')
#self._raise_404_if_cluster_deleted(req, cluster_id) # self._raise_404_if_cluster_deleted(req, cluster_id)
#self._raise_404_if_network_deleted(req, network_id) # self._raise_404_if_network_deleted(req, network_id)
network = self.get_network_meta_or_404(req, network_id) network = self.get_network_meta_or_404(req, network_id)
if network['deleted']: if network['deleted']:
msg = _("Network with identifier %s has been deleted.") % network_id msg = _("Network with identifier %s has been deleted.") % \
network_id
raise HTTPNotFound(msg) raise HTTPNotFound(msg)
if network['type'] != 'custom': if network['type'] != 'custom':
msg = _("Type of network was not custom, can not delete this network.") msg = _("Type of network was not custom, can not "
"delete this network.")
raise HTTPForbidden(msg) raise HTTPForbidden(msg)
try: try:
registry.delete_network_metadata(req.context, network_id) registry.delete_network_metadata(req.context, network_id)
@ -403,14 +550,15 @@ class Controller(controller.BaseController):
request=req, request=req,
content_type="text/plain") content_type="text/plain")
except exception.InUseByStore as e: except exception.InUseByStore as e:
msg = (_("Network %(id)s could not be deleted because it is in use: " msg = (_("Network %(id)s could not be deleted "
"because it is in use: "
"%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)}) "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
LOG.warn(msg) LOG.warn(msg)
raise HTTPConflict(explanation=msg, raise HTTPConflict(explanation=msg,
request=req, request=req,
content_type="text/plain") content_type="text/plain")
else: else:
#self.notifier.info('host.delete', host) # self.notifier.info('host.delete', host)
return Response(body='', status=200) return Response(body='', status=200)
@utils.mutating @utils.mutating
@ -436,10 +584,19 @@ class Controller(controller.BaseController):
""" """
self._enforce(req, 'get_all_network') self._enforce(req, 'get_all_network')
params = self._get_query_params(req) params = self._get_query_params(req)
filters = params.get('filters')
if filters and filters.get('type'):
if filters['type'] not in SUPPORT_NETWORK_TEMPLATE_TYPE:
msg = "type '%s' is not support." % filters['type']
LOG.error(msg)
raise HTTPBadRequest(explanation=msg, request=req)
try: try:
networks = registry.get_all_networks(req.context,**params) networks = registry.get_all_networks(req.context, **params)
except Exception: except Exception:
raise HTTPBadRequest(explanation="Get all networks failed.", request=req) raise HTTPBadRequest(
explanation="Get all networks failed.",
request=req)
return dict(networks=networks) return dict(networks=networks)
def detail(self, req, id): def detail(self, req, id):
@ -458,15 +615,15 @@ class Controller(controller.BaseController):
'deleted_at': <TIMESTAMP>|<NONE>,}, ... 'deleted_at': <TIMESTAMP>|<NONE>,}, ...
]} ]}
""" """
cluster_id = self._raise_404_if_cluster_delete(req, id) self._raise_404_if_cluster_delete(req, id)
self._enforce(req, 'get_networks') self._enforce(req, 'get_networks')
params = self._get_query_params(req) params = self._get_query_params(req)
try: try:
networks = registry.get_networks_detail(req.context, id,**params) networks = registry.get_networks_detail(req.context, id, **params)
except exception.Invalid as e: except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req) raise HTTPBadRequest(explanation=e.msg, request=req)
return dict(networks=networks) return dict(networks=networks)
@utils.mutating @utils.mutating
def update_network(self, req, network_id, network_meta): def update_network(self, req, network_id, network_meta):
""" """
@ -477,14 +634,16 @@ class Controller(controller.BaseController):
:retval Returns the updated image information as a mapping :retval Returns the updated image information as a mapping
""" """
if network_meta.has_key('name'): if 'name' in network_meta:
network_name=network_meta.get('name',None) network_name = network_meta.get('name', None)
network_name_split = network_name.split('_') network_name_split = network_name.split('_')
for network_name_info in network_name_split : for network_name_info in network_name_split:
if not network_name_info.isalnum(): if not network_name_info.isalnum():
raise ValueError('network name must be numbers or letters or underscores !') raise ValueError(
'network name must be numbers or '
'letters or underscores !')
self._enforce(req, 'update_network') self._enforce(req, 'update_network')
#orig_cluster_meta = self.get_cluster_meta_or_404(req, cluster_id) # orig_cluster_meta = self.get_cluster_meta_or_404(req, cluster_id)
orig_network_meta = self.get_network_meta_or_404(req, network_id) orig_network_meta = self.get_network_meta_or_404(req, network_id)
# Do not allow any updates on a deleted network. # Do not allow any updates on a deleted network.
if orig_network_meta['deleted']: if orig_network_meta['deleted']:
@ -492,23 +651,27 @@ class Controller(controller.BaseController):
raise HTTPForbidden(explanation=msg, raise HTTPForbidden(explanation=msg,
request=req, request=req,
content_type="text/plain") content_type="text/plain")
if (network_meta.has_key('network_type') and if ('network_type' in network_meta and
network_meta['network_type'] not in SUPPORT_NETWORK_TYPE): network_meta['network_type'] not in SUPPORT_NETWORK_TYPE):
raise HTTPBadRequest(explanation="unsupported network-type", request=req) raise HTTPBadRequest(
if (network_meta.has_key('type') and explanation="unsupported network-type",
network_meta['type'] not in SUPPORT_NETWORK_TEMPLATE_TYPE): request=req)
raise HTTPBadRequest(explanation="unsupported type", request=req) if ('type' in network_meta and
if (network_meta.has_key('type') and network_meta['type'] not in SUPPORT_NETWORK_TEMPLATE_TYPE):
network_meta['type'] == 'template'): raise HTTPBadRequest(explanation="unsupported type", request=req)
raise HTTPBadRequest(explanation="network template type is not allowed to update", request=req) if ('type' in network_meta and
network_meta['type'] == 'template'):
raise HTTPBadRequest(
explanation="network template type is not allowed to update",
if (network_meta.has_key('capability') and request=req)
network_meta['capability'] not in SUPPORT_NETWORK_CAPABILITY):
raise HTTPBadRequest(explanation="unsupported capability type", request=req)
self._valid_vlan_range(req, network_meta) if ('capability' in network_meta and
network_meta['capability'] not in SUPPORT_NETWORK_CAPABILITY):
raise HTTPBadRequest(
explanation="unsupported capability type",
request=req)
self._valid_network_range(req, network_meta)
network_name = network_meta.get('name', None) network_name = network_meta.get('name', None)
cluster_id = orig_network_meta['cluster_id'] cluster_id = orig_network_meta['cluster_id']
@ -516,17 +679,20 @@ class Controller(controller.BaseController):
network_updated = copy.deepcopy(network_meta) network_updated = copy.deepcopy(network_meta)
network_updated['id'] = network_id network_updated['id'] = network_id
network_type = network_meta.get('network_type', None) network_type = network_meta.get('network_type', None)
network_updated['network_type'] = \ network_updated['network_type'] = orig_network_meta[
orig_network_meta['network_type'] if not network_type else network_type 'network_type'] if not network_type else network_type
network_list = self.detail(req, cluster_id) network_list = self.detail(req, cluster_id)
self._verify_uniqueness_of_network_name(req, network_list, network_updated, True) self._verify_uniqueness_of_network_name(
req, network_list, network_updated, True)
cidr = network_meta.get('cidr', orig_network_meta['cidr']) cidr = network_meta.get('cidr', orig_network_meta['cidr'])
vlan_id = network_meta.get('vlan_id', orig_network_meta['vlan_id']) vlan_id = network_meta.get('vlan_id', orig_network_meta['vlan_id'])
if cidr: if cidr:
cidr_division = cidr.split('/') cidr_division = cidr.split('/')
if len(cidr_division) != 2 or ( cidr_division[1] \ if len(cidr_division) != 2 or (
and int(cidr_division[1]) > 32 or int(cidr_division[1]) < 0): cidr_division[1] and int(
cidr_division[1]) > 32 or int(
cidr_division[1]) < 0):
msg = (_("Wrong CIDR format.")) msg = (_("Wrong CIDR format."))
LOG.warn(msg) LOG.warn(msg)
raise HTTPForbidden(msg) raise HTTPForbidden(msg)
@ -549,9 +715,12 @@ class Controller(controller.BaseController):
'have the same cidr')) 'have the same cidr'))
raise HTTPBadRequest(explanation=msg) raise HTTPBadRequest(explanation=msg)
if network_meta.get('ip_ranges', None): if network_meta.get('ip_ranges', None) and \
eval(network_meta['ip_ranges']):
if not cidr: if not cidr:
msg = (_("When ip range was specified, the CIDR parameter can not be empty.")) msg = (
_("When ip range was specified, "
"the CIDR parameter can not be empty."))
LOG.warn(msg) LOG.warn(msg)
raise HTTPForbidden(msg) raise HTTPForbidden(msg)
ip_ranges = eval(network_meta['ip_ranges']) ip_ranges = eval(network_meta['ip_ranges'])
@ -560,53 +729,81 @@ class Controller(controller.BaseController):
sorted_int_ip_ranges_list = list() sorted_int_ip_ranges_list = list()
for ip_pair in ip_ranges: for ip_pair in ip_ranges:
if ['start', 'end'] != ip_pair.keys(): if ['start', 'end'] != ip_pair.keys():
msg = (_("IP range was not start with 'start:' or end with 'end:'.")) msg = (
_("IP range was not start with 'start:' "
"or end with 'end:'."))
LOG.warn(msg) LOG.warn(msg)
raise HTTPForbidden(msg) raise HTTPForbidden(msg)
ip_start = ip_pair['start'] ip_start = ip_pair['start']
ip_end = ip_pair['end'] ip_end = ip_pair['end']
self.validate_ip_format(ip_start) #check ip format self.validate_ip_format(ip_start) # check ip format
self.validate_ip_format(ip_end) self.validate_ip_format(ip_end)
if not self._is_in_network_range(ip_start, cidr): if not self._is_in_network_range(ip_start, cidr):
msg = (_("IP address %s was not in the range of CIDR %s." % (ip_start, cidr))) msg = (
_("IP address %s was not in the "
"range of CIDR %s." % (ip_start, cidr)))
LOG.warn(msg) LOG.warn(msg)
raise HTTPForbidden(msg) raise HTTPForbidden(msg)
if not self._is_in_network_range(ip_end, cidr): if not self._is_in_network_range(ip_end, cidr):
msg = (_("IP address %s was not in the range of CIDR %s." % (ip_end, cidr))) msg = (
_("IP address %s was not in the "
"range of CIDR %s." % (ip_end, cidr)))
LOG.warn(msg) LOG.warn(msg)
raise HTTPForbidden(msg) raise HTTPForbidden(msg)
#transform ip format to int when the string format is valid # transform ip format to int when the string format is valid
int_ip_start = self._ip_into_int(ip_start) int_ip_start = self._ip_into_int(ip_start)
int_ip_end = self._ip_into_int(ip_end) int_ip_end = self._ip_into_int(ip_end)
if int_ip_start > int_ip_end: if int_ip_start > int_ip_end:
msg = (_("Wrong ip range format.")) msg = (_("Wrong ip range format."))
LOG.warn(msg) LOG.warn(msg)
raise HTTPForbidden(msg) raise HTTPForbidden(msg)
int_ip_ranges_list.append([int_ip_start, int_ip_end]) int_ip_ranges_list.append([int_ip_start, int_ip_end])
sorted_int_ip_ranges_list = sorted(int_ip_ranges_list, key=lambda x : x[0]) sorted_int_ip_ranges_list = sorted(
LOG.warn("sorted_int_ip_ranges_list: "% sorted_int_ip_ranges_list) int_ip_ranges_list, key=lambda x: x[0])
#check ip ranges overlap LOG.warn("sorted_int_ip_ranges_list: " % sorted_int_ip_ranges_list)
# check ip ranges overlap
for int_ip_range in sorted_int_ip_ranges_list: for int_ip_range in sorted_int_ip_ranges_list:
if last_ip_range_end and last_ip_range_end >= int_ip_range[0]: if last_ip_range_end and last_ip_range_end >= int_ip_range[0]:
msg = (_("Between ip ranges can not be overlap.")) msg = (_("Between ip ranges can not be overlap."))
LOG.warn(msg) # such as "[10, 15], [12, 16]", last_ip_range_end >= int_ip_range[0], this ip ranges were overlap # such as "[10, 15], [12, 16]", last_ip_range_end >=
# int_ip_range[0], this ip ranges were overlap
LOG.warn(msg)
raise HTTPForbidden(msg) raise HTTPForbidden(msg)
else: else:
last_ip_range_end = int_ip_range[1] last_ip_range_end = int_ip_range[1]
if network_meta.get('gateway', orig_network_meta['gateway']) and network_meta.get('cidr', orig_network_meta['cidr']): if network_meta.get(
'gateway',
orig_network_meta['gateway']) and network_meta.get(
'cidr',
orig_network_meta['cidr']):
gateway = network_meta.get('gateway', orig_network_meta['gateway']) gateway = network_meta.get('gateway', orig_network_meta['gateway'])
cidr = network_meta.get('cidr', orig_network_meta['cidr']) cidr = network_meta.get('cidr', orig_network_meta['cidr'])
self.validate_ip_format(gateway) self.validate_ip_format(gateway)
return_flag = self._is_in_network_range(gateway, cidr) return_flag = self._is_in_network_range(gateway, cidr)
if not return_flag: if not return_flag:
msg = (_('The gateway %s was not in the same segment with the cidr %s of management network.' % (gateway, cidr))) msg = (
_(
'The gateway %s was not in the same '
'segment with the cidr %s of management network.' %
(gateway, cidr)))
raise HTTPBadRequest(explanation=msg) raise HTTPBadRequest(explanation=msg)
# allow one gateway in one cluster
if network_meta.get('cluster_id') and (network_meta.get('gateway')):
networks = registry.get_networks_detail(req.context, cluster_id)
gateways = [network['gateway'] for network in networks
if network['name'] != orig_network_meta['name'] and
network['gateway']]
if gateways:
msg = (_('More than one gateway found in cluster.'))
LOG.error(msg)
raise HTTPConflict(explanation=msg)
try: try:
network_meta = registry.update_network_metadata(req.context, network_meta = registry.update_network_metadata(req.context,
network_id, network_id,
@ -626,12 +823,8 @@ class Controller(controller.BaseController):
request=req, request=req,
content_type="text/plain") content_type="text/plain")
except exception.Forbidden as e: except exception.Forbidden as e:
msg = (_("Forbidden to update network: %s") % LOG.warn(e)
utils.exception_to_str(e)) raise HTTPForbidden(e)
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except (exception.Conflict, exception.Duplicate) as e: except (exception.Conflict, exception.Duplicate) as e:
LOG.warn(utils.exception_to_str(e)) LOG.warn(utils.exception_to_str(e))
raise HTTPConflict(body=_('Network operation conflicts'), raise HTTPConflict(body=_('Network operation conflicts'),
@ -642,6 +835,7 @@ class Controller(controller.BaseController):
return {'network_meta': network_meta} return {'network_meta': network_meta}
class HostDeserializer(wsgi.JSONRequestDeserializer): class HostDeserializer(wsgi.JSONRequestDeserializer):
"""Handles deserialization of specific controller method requests.""" """Handles deserialization of specific controller method requests."""
@ -656,6 +850,7 @@ class HostDeserializer(wsgi.JSONRequestDeserializer):
    def update_network(self, request):
        return self._deserialize(request)


class HostSerializer(wsgi.JSONResponseSerializer):
    """Handles serialization of specific controller method responses."""
@ -683,9 +878,9 @@ class HostSerializer(wsgi.JSONResponseSerializer):
        response.body = self.to_json(dict(network=network_meta))
        return response


def create_resource():
    """Hosts resource factory method"""
    deserializer = HostDeserializer()
    serializer = HostSerializer()
    return wsgi.Resource(Controller(), deserializer, serializer)
View File
@ -46,8 +46,16 @@ SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
SUPPORTED_DEPLOYMENT_BACKENDS = ('tecs', 'zenic', 'proton')
SUPPORTED_ROLE = (
    'CONTROLLER_LB',
    'CONTROLLER_HA',
    'COMPUTER',
    'ZENIC_CTL',
    'ZENIC_NFM',
    'ZENIC_MDB',
    'PROTON',
    'CHILD_CELL_1_COMPUTER',
    'CONTROLLER_CHILD_CELL_1')
SUPPORT_DISK_LOCATION = ('local', 'share')

CONF = cfg.CONF
@ -56,6 +64,7 @@ CONF.import_opt('container_formats', 'daisy.common.config',
                group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config')


class Controller(controller.BaseController):
    """
    WSGI controller for roles resource in Daisy v1 API
@ -130,86 +139,97 @@ class Controller(controller.BaseController):
        if host['deleted']:
            msg = _("Node with identifier %s has been deleted.") % host_id
            raise HTTPNotFound(msg)

    def _raise_404_if_service_deleted(self, req, service_id):
        service = self.get_service_meta_or_404(req, service_id)
        if service['deleted']:
            msg = _("Service with identifier %s has been deleted.") % \
                service_id
            raise HTTPNotFound(msg)

    def _raise_404_if_config_set_deleted(self, req, config_set_id):
        config_set = self.get_config_set_meta_or_404(req, config_set_id)
        if config_set['deleted']:
            msg = _("Config_Set with identifier %s has been deleted.") % \
                config_set_id
            raise HTTPNotFound(msg)
    def _raise_404_if_cluster_deleted(self, req, cluster_id):
        cluster = self.get_cluster_meta_or_404(req, cluster_id)
        if cluster['deleted']:
            msg = _("cluster with identifier %s has been deleted.") % \
                cluster_id
            raise HTTPNotFound(msg)

    def _get_service_name_list(self, req, role_service_id_list):
        service_name_list = []
        for service_id in role_service_id_list:
            service_meta = registry.get_service_metadata(
                req.context, service_id)
            service_name_list.append(service_meta['name'])
        return service_name_list
    def _get_host_disk_except_os_disk_by_info(self, host_info):
        '''
        type(host_info): <type 'dict'>
        host_disk_except_os_disk_lists: disk_size , type = int
        '''
        # import pdb;pdb.set_trace()
        host_disk_except_os_disk_lists = 0
        os_disk_m = host_info.get('root_lv_size', 102400)
        swap_size_m = host_info.get('swap_lv_size', None)
        if swap_size_m:
            swap_size_m = (swap_size_m / 4) * 4
        else:
            swap_size_m = 0
        boot_partition_m = 400
        redundant_partiton_m = 600
        if not os_disk_m:
            os_disk_m = 102400
        # host_disk = 1024
        host_disks = host_info.get('disks', None)
        host_disk_size_m = 0
        if host_disks:
            for key, value in host_disks.items():
                disk_size_b = str(value.get('size', None))
                disk_size_b_str = disk_size_b.strip().split()[0]
                if disk_size_b_str:
                    disk_size_b_int = int(disk_size_b_str)
                    disk_size_m = disk_size_b_int // (1024 * 1024)
                    host_disk_size_m = host_disk_size_m + disk_size_m
        host_disk_except_os_disk_lists = host_disk_size_m - os_disk_m - \
            swap_size_m - boot_partition_m - redundant_partiton_m
        LOG.warn(
            '----start----host_disk_except_os_disk_lists: %s -----end--' %
            host_disk_except_os_disk_lists)
        return host_disk_except_os_disk_lists
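The method above sums the host's disks in MB and subtracts the root LV (default 102400 MB), the swap LV rounded down to a multiple of 4 MB, a 400 MB boot partition and a 600 MB reserve. A standalone sketch of that arithmetic, with made-up numbers rather than values from a real host:

# Sketch of the size arithmetic above (all values in MB); the inputs are
# only an example, not defaults taken from a real host.
def disk_left_for_data(disks_mb, root_lv_mb=102400, swap_lv_mb=0,
                       boot_mb=400, redundant_mb=600):
    swap_lv_mb = (swap_lv_mb // 4) * 4  # align down to 4 MB, as above
    return sum(disks_mb) - root_lv_mb - swap_lv_mb - boot_mb - redundant_mb


# Two 500 GiB disks with a 16 GiB swap leave 904216 MB (about 883 GiB)
# for the db/glance/nova logical volumes.
print(disk_left_for_data([512000, 512000], swap_lv_mb=16384))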
    def _check_host_validity(self, **paras):
        '''
        paras['db_lv_size'], paras['glance_lv_size'] , paras['disk_size']
        '''
        disk_size = paras.get('disk_size', None)
        LOG.warn('--------disk_size:----- %s' % disk_size)
        if disk_size:
            disk_size_m = int(disk_size)
        else:
            disk_size_m = 0
        if disk_size_m == 0:  # Host hard disk size was 0,
            # think that the host does not need to install the system
            return  # Don't need to check the validity of hard disk size
        db_lv_size_m = paras.get('db_lv_size', 300)
        if db_lv_size_m:
            db_lv_size_m = int(db_lv_size_m)
        else:
            db_lv_size_m = 0
        glance_lv_size_m = paras.get('glance_lv_size', 17100)
        if glance_lv_size_m:
            glance_lv_size_m = int(glance_lv_size_m)
        else:
            glance_lv_size_m = 0
        nova_lv_size_m = paras.get('nova_lv_size', 0)
        if nova_lv_size_m:
            nova_lv_size_m = int(nova_lv_size_m)
@ -217,11 +237,13 @@ class Controller(controller.BaseController):
            nova_lv_size_m = 0
        if nova_lv_size_m == -1:
            nova_lv_size_m = 0
        glance_lv_size_m = (glance_lv_size_m / 4) * 4
        db_lv_size_m = (db_lv_size_m / 4) * 4
        nova_lv_size_m = (nova_lv_size_m / 4) * 4
        if glance_lv_size_m + db_lv_size_m + nova_lv_size_m > disk_size_m:
            msg = _("There isn't enough disk space to specify database or "
                    "glance or nova disk, please specify database or "
                    "glance or nova disk size again")
            LOG.debug(msg)
            raise HTTPForbidden(msg)
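_check_host_validity rounds each requested LV size down to a multiple of 4 MB and then requires the sum to fit inside the usable disk size. A small worked example of that arithmetic; the numbers are made up:

# Each LV size is aligned down to 4 MB before the capacity comparison.
disk_size_m = 20000
db_lv_size_m = (4097 // 4) * 4        # -> 4096
glance_lv_size_m = (17100 // 4) * 4   # -> 17100
nova_lv_size_m = 0
fits = db_lv_size_m + glance_lv_size_m + nova_lv_size_m <= disk_size_m
print(fits)  # False: 21196 MB requested against a 20000 MB budget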
@ -236,7 +258,12 @@ class Controller(controller.BaseController):
    def _check_config_set_id_exist(self, req, config_set_id):
        self._raise_404_if_config_set_deleted(req, config_set_id)

    def _check_glance_lv_value(
            self,
            req,
            glance_lv_value,
            role_name,
            service_name_list):
        if int(glance_lv_value) < 0 and int(glance_lv_value) != -1:
            msg = _("glance_lv_size can't be negative except -1.")
            raise HTTPForbidden(explanation=msg,
@ -250,150 +277,170 @@ class Controller(controller.BaseController):
content_type="text/plain") content_type="text/plain")
def _check_db_lv_size(self, req, db_lv_size, service_name_list): def _check_db_lv_size(self, req, db_lv_size, service_name_list):
if int(db_lv_size) < 0 and int(db_lv_size) != -1 : if int(db_lv_size) < 0 and int(db_lv_size) != -1:
msg = _("The size of database disk can't be negative except -1.") msg = _("The size of database disk can't be negative except -1.")
LOG.debug(msg) LOG.debug(msg)
raise HTTPForbidden(msg) raise HTTPForbidden(msg)
#Only the role with database service can be formulated the size of a database. # Only the role with database service can be formulated the size of
if 'mariadb' not in service_name_list and 'mongodb' not in service_name_list: # a database.
if 'mariadb' not in service_name_list and 'mongodb' not in \
service_name_list:
msg = _('The role without database service is unable ' msg = _('The role without database service is unable '
'to specify the size of the database!') 'to specify the size of the database!')
LOG.debug(msg) LOG.debug(msg)
raise HTTPForbidden(explanation=msg, raise HTTPForbidden(explanation=msg,
request=req, request=req,
content_type="text/plain") content_type="text/plain")
    def _check_nova_lv_size(self, req, nova_lv_size, role_name):
        if role_name != "COMPUTER":
            msg = _("The role is not COMPUTER, it can't set logic "
                    "volume disk for nova.")
            raise HTTPForbidden(explanation=msg,
                                request=req,
                                content_type="text/plain")
        try:
            if int(nova_lv_size) < 0 and int(nova_lv_size) != -1:
                msg = _("The nova_lv_size must be -1 or [0, N).")
                raise HTTPForbidden(explanation=msg,
                                    request=req,
                                    content_type="text/plain")
        except:
            msg = _("The nova_lv_size must be -1 or [0, N).")
            raise HTTPForbidden(explanation=msg,
                                request=req,
                                content_type="text/plain")
    def _check_all_lv_size(self, req, db_lv_size, glance_lv_size, nova_lv_size,
                           host_id_list, cluster_id, argws):
        if db_lv_size or glance_lv_size or nova_lv_size:
            for host_id in host_id_list:
                host_disk_db_glance_nova_size = \
                    self.get_host_disk_db_glance_nova_size(
                        req, host_id, cluster_id)
                if host_disk_db_glance_nova_size['db_lv_size'] and \
                        db_lv_size and int(db_lv_size) < \
                        int(host_disk_db_glance_nova_size['db_lv_size']):
                    argws['db_lv_size'] = \
                        host_disk_db_glance_nova_size['db_lv_size']
                else:
                    argws['db_lv_size'] = db_lv_size
                if host_disk_db_glance_nova_size['glance_lv_size'] and \
                        glance_lv_size and int(glance_lv_size) < \
                        int(host_disk_db_glance_nova_size['glance_lv_size']):
                    argws['glance_lv_size'] = \
                        host_disk_db_glance_nova_size['glance_lv_size']
                else:
                    argws['glance_lv_size'] = glance_lv_size
                if host_disk_db_glance_nova_size['nova_lv_size'] and \
                        nova_lv_size and int(nova_lv_size) < \
                        int(host_disk_db_glance_nova_size['nova_lv_size']):
                    argws['nova_lv_size'] = \
                        host_disk_db_glance_nova_size['nova_lv_size']
                else:
                    argws['nova_lv_size'] = nova_lv_size
                argws['disk_size'] = \
                    host_disk_db_glance_nova_size['disk_size']
                LOG.warn(
                    '--------host(%s) check_host_validity argws:----- %s' %
                    (host_id, argws))
                self._check_host_validity(**argws)
    def _check_deployment_backend(self, req, deployment_backend):
        if deployment_backend not in SUPPORTED_DEPLOYMENT_BACKENDS:
            msg = "deployment backend '%s' is not supported." % \
                deployment_backend
            raise HTTPBadRequest(explanation=msg,
                                 request=req,
                                 content_type="text/plain")
    def _check_role_type_in_update_role(self, req, role_type, orig_role_meta):
        if orig_role_meta['type'].lower() != role_type.lower():
            msg = _("Role type can not be updated to other type.")
            LOG.debug(msg)
            raise HTTPForbidden(msg)

    def _check_cluster_id_in_role_update(
            self, req, role_cluster, orig_role_meta):
        if orig_role_meta['type'].lower() == 'template':
            msg = _("The template role does not belong to any cluster.")
            LOG.debug(msg)
            raise HTTPForbidden(msg)
        orig_role_cluster = orig_role_meta['cluster_id']
        if orig_role_cluster != role_cluster:
            # Can not change the cluster which the role belongs to
            msg = _("Can't update the cluster of the role.")
            LOG.debug(msg)
            raise HTTPForbidden(msg)
        else:
            self._raise_404_if_cluster_deleted(req, role_cluster)
    def _check_role_name_in_role_update(self, req, role_meta, orig_role_meta):
        role_name = role_meta['name']
        cluster_id = role_meta.get('cluster_id', orig_role_meta['cluster_id'])
        if cluster_id:
            self.check_cluster_role_name_repetition(req, role_name, cluster_id)
        else:  # role type was template, cluster id was None
            self.check_template_role_name_repetition(req, role_name)
    def _check_all_lv_size_of_nodes_with_role_in_role_update(
            self, req, role_meta, orig_role_meta, role_host_id_list):
        # check host with this role at the same time
        cluster_id = role_meta.get('cluster_id', None)
        if not cluster_id:  # role with cluster
            cluster_id = orig_role_meta['cluster_id']
            if not cluster_id:  # without cluster id, raise Error
                msg = _("The cluster_id parameter can not be None!")
                LOG.debug(msg)
                raise HTTPForbidden(msg)
        argws = dict()
        if 'db_lv_size' in role_meta:
            db_lv_size = role_meta['db_lv_size']
        else:  # The db_lv_size has been specified before.
            db_lv_size = orig_role_meta.get('db_lv_size')
        if 'glance_lv_size' in role_meta:
            glance_lv_size = role_meta['glance_lv_size']
        else:
            glance_lv_size = orig_role_meta.get('glance_lv_size')
        if 'nova_lv_size' in role_meta:
            nova_lv_size = role_meta['nova_lv_size']
        else:
            nova_lv_size = orig_role_meta.get('nova_lv_size')
        if 'nodes' in role_meta:
            host_id_list = list(eval(role_meta['nodes'])) + role_host_id_list
        else:
            host_id_list = role_host_id_list
        self._check_all_lv_size(req, db_lv_size, glance_lv_size,
                                nova_lv_size, host_id_list, cluster_id, argws)
    def _check_ntp_server(self, req, role_name):
        if role_name != 'CONTROLLER_HA':
            msg = 'The role %s need no ntp_server' % role_name
            raise HTTPForbidden(explanation=msg)
    def _check_role_type_in_role_add(self, req, role_meta):
        # role_type == None or not template, cluster id must not be None
        role_type = role_meta['type']
        if role_type.lower() != 'template':
            role_cluster_id = role_meta.get('cluster_id', None)
            if not role_cluster_id:
                # add role without cluster id parameter, raise error
                msg = _(
                    "The cluster_id parameter can not be None "
                    "if role was not a template type.")
                LOG.debug(msg)
                raise HTTPForbidden(msg)
        else:  # role_type == template, cluster id is not necessary
            if 'cluster_id' in role_meta:
                msg = _("The template role cannot be added to any cluster.")
                LOG.debug(msg)
                raise HTTPForbidden(msg)
    def _check_all_lv_size_with_role_in_role_add(self, req, role_meta):
        cluster_id = role_meta.get('cluster_id', None)
        if not cluster_id:  # without cluster id, raise Error
            msg = _("The cluster_id parameter can not be None!")
            LOG.debug(msg)
            raise HTTPForbidden(msg)
@ -403,87 +450,104 @@ class Controller(controller.BaseController):
        nova_lv_size = role_meta.get('nova_lv_size', 0)
        host_id_list = list(eval(role_meta['nodes']))
        self._check_all_lv_size(req, db_lv_size, glance_lv_size,
                                nova_lv_size, host_id_list, cluster_id, argws)
    def get_host_disk_db_glance_nova_size(self, req, host_id, cluster_id):
        '''
        return :
        host_disk_db_glance_nova_size['disk_size'] = 1024000
        host_disk_db_glance_nova_size['db_lv_size'] = 1011
        host_disk_db_glance_nova_size['glance_lv_size'] = 1011
        host_disk_db_glance_nova_size['nova_lv_size'] = 1011
        '''
        # import pdb;pdb.set_trace()
        host_disk_db_glance_nova_size = dict()
        db_lv_size = list()
        glance_lv_size = list()
        nova_lv_size = list()
        # disk_size = list()
        host_info = self.get_host_meta_or_404(req, host_id)
        if host_info:
            if 'deleted' in host_info and host_info['deleted']:
                msg = _("Node with identifier %s has been deleted.") % \
                    host_info['id']
                LOG.debug(msg)
                raise HTTPNotFound(msg)
            # get host disk information
            host_disk = self._get_host_disk_except_os_disk_by_info(host_info)
            host_disk_db_glance_nova_size['disk_size'] = host_disk
            # get role_host db/glance/nova information
            cluster_info = self.get_cluster_meta_or_404(req, cluster_id)
            if 'cluster' in host_info:  # host with cluster
                if host_info['cluster'] != cluster_info['name']:
                    # type(host_info['cluster']) = list,
                    # type(cluster_info['name']) = str
                    msg = _("Role and hosts belong to different cluster.")
                    LOG.debug(msg)
                    raise HTTPNotFound(msg)
                else:
                    all_roles = registry.get_roles_detail(req.context)
                    cluster_roles = [
                        role for role in all_roles
                        if role['cluster_id'] == cluster_id]
                    # roles information saved in cluster_roles
                    if 'role' in host_info and host_info['role']:
                        # host with role
                        for role in cluster_roles:
                            if role['name'] in host_info['role'] and \
                                    cluster_roles:
                                db_lv_size.append(role.get('db_lv_size', None))
                                glance_lv_size.append(
                                    role.get('glance_lv_size', None))
                                nova_lv_size.append(
                                    role.get('nova_lv_size', None))
        if db_lv_size:
            host_disk_db_glance_nova_size['db_lv_size'] = max(db_lv_size)
        else:  # host without cluster
            host_disk_db_glance_nova_size['db_lv_size'] = 0
        if glance_lv_size:
            host_disk_db_glance_nova_size['glance_lv_size'] = \
                max(glance_lv_size)
        else:
            host_disk_db_glance_nova_size['glance_lv_size'] = 0
        if nova_lv_size:
            host_disk_db_glance_nova_size['nova_lv_size'] = max(nova_lv_size)
        else:
            host_disk_db_glance_nova_size['nova_lv_size'] = 0
        LOG.warn('--------host(%s)disk_db_glance_nova_size:----- %s' %
                 (host_id, host_disk_db_glance_nova_size))
        return host_disk_db_glance_nova_size
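The aggregation above boils down to taking the largest db/glance/nova LV request across the roles already assigned to the host, falling back to 0 when nothing is requested. A small self-contained sketch with made-up role dicts:

# Sketch of the per-role max aggregation above; role data is illustrative.
roles_on_host = [
    {'db_lv_size': 1011, 'glance_lv_size': 0, 'nova_lv_size': None},
    {'db_lv_size': 2048, 'glance_lv_size': 17100, 'nova_lv_size': None},
]


def biggest(key):
    values = [r[key] for r in roles_on_host if r[key]]
    return max(values) if values else 0


print(biggest('db_lv_size'), biggest('glance_lv_size'),
      biggest('nova_lv_size'))  # 2048 17100 0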
    def check_cluster_role_name_repetition(self, req, role_name, cluster_id):
        all_roles = registry.get_roles_detail(req.context)
        cluster_roles = [role for role in all_roles
                         if role['cluster_id'] == cluster_id]
        cluster_roles_name = [role['name'].lower() for role in cluster_roles]
        if role_name.lower() in cluster_roles_name:
            msg = _(
                "The role %s has already been in the cluster %s!" %
                (role_name, cluster_id))
            LOG.debug(msg)
            raise HTTPForbidden(msg)
    def check_template_role_name_repetition(self, req, role_name):
        all_roles = registry.get_roles_detail(req.context)
        template_roles = [
            role for role in all_roles if role['cluster_id'] is None]
        template_roles_name = [role['name'].lower() for role in template_roles]
        if role_name.lower() in template_roles_name:
            msg = _(
                "The role %s has already been in the template role." %
                role_name)
            LOG.debug(msg)
            raise HTTPForbidden(msg)
    def _check_disk_parameters(self, req, role_meta):
        if ('disk_location' in role_meta and
                role_meta['disk_location'] not in SUPPORT_DISK_LOCATION):
            msg = _("value of disk_location is not supported.")
            raise HTTPForbidden(msg)
@ -496,69 +560,82 @@ class Controller(controller.BaseController):
                                       role_service_id_list,
                                       role_host_id_list):
        role_name = orig_role_meta['name']
        if role_meta.get('type', None):
            self._check_role_type_in_update_role(
                req, role_meta['type'], orig_role_meta)
        if 'ntp_server' in role_meta:
            self._check_ntp_server(req, role_name)
        if 'nodes' in role_meta:
            self._check_nodes_exist(req, list(eval(role_meta['nodes'])))
        if 'services' in role_meta:
            self._check_services_exist(req, list(eval(role_meta['services'])))
            role_service_id_list.extend(list(eval(role_meta['services'])))
        if 'config_set_id' in role_meta:
            self._check_config_set_id_exist(
                req, str(role_meta['config_set_id']))
        if 'cluster_id' in role_meta:
            self._check_cluster_id_in_role_update(
                req, str(role_meta['cluster_id']), orig_role_meta)
        if 'name' in role_meta:
            self._check_role_name_in_role_update(
                req, role_meta, orig_role_meta)
        service_name_list = self._get_service_name_list(
            req, role_service_id_list)
        glance_lv_value = role_meta.get(
            'glance_lv_size', orig_role_meta['glance_lv_size'])
        if glance_lv_value:
            self._check_glance_lv_value(
                req, glance_lv_value, role_name, service_name_list)
        if role_meta.get('db_lv_size', None) and role_meta['db_lv_size']:
            self._check_db_lv_size(
                req, role_meta['db_lv_size'], service_name_list)
        if role_meta.get('nova_lv_size', None):
            self._check_nova_lv_size(req, role_meta['nova_lv_size'], role_name)
        if 'nodes' in role_meta or role_host_id_list:
            self._check_all_lv_size_of_nodes_with_role_in_role_update(
                req, role_meta, orig_role_meta, role_host_id_list)
        self._check_disk_parameters(req, role_meta)
        if 'deployment_backend' in role_meta:
            self._check_deployment_backend(
                req, role_meta['deployment_backend'])
        if role_meta.get('role_type', None):
            self._check_type_role_reasonable(req, role_meta)
    def _check_role_add_parameters(self, req, role_meta, role_service_id_list):
        role_name = role_meta.get('name', None)
        if role_meta.get('type', None):
            self._check_role_type_in_role_add(req, role_meta)
        if 'nodes' in role_meta:
            self._check_nodes_exist(req, list(eval(role_meta['nodes'])))
        if 'services' in role_meta:
            self._check_services_exist(req, list(eval(role_meta['services'])))
            role_service_id_list.extend(list(eval(role_meta['services'])))
        if 'config_set_id' in role_meta:
            self._check_config_set_id_exist(
                req, str(role_meta['config_set_id']))
        if 'cluster_id' in role_meta:
            orig_cluster = str(role_meta['cluster_id'])
            self._raise_404_if_cluster_deleted(req, orig_cluster)
            self.check_cluster_role_name_repetition(
                req, role_name, orig_cluster)
        else:
            self.check_template_role_name_repetition(req, role_name)
        service_name_list = self._get_service_name_list(
            req, role_service_id_list)
        glance_lv_value = role_meta.get('glance_lv_size', None)
        if glance_lv_value:
            self._check_glance_lv_value(
                req, glance_lv_value, role_name, service_name_list)
        if role_meta.get('db_lv_size', None) and role_meta['db_lv_size']:
            self._check_db_lv_size(
                req, role_meta['db_lv_size'], service_name_list)
        if role_meta.get('nova_lv_size', None):
            self._check_nova_lv_size(req, role_meta['nova_lv_size'], role_name)
        if 'nodes' in role_meta:
            self._check_all_lv_size_with_role_in_role_add(req, role_meta)
        self._check_disk_parameters(req, role_meta)
        if 'deployment_backend' in role_meta:
            self._check_deployment_backend(
                req, role_meta['deployment_backend'])
        else:
            role_meta['deployment_backend'] = 'tecs'
        if role_meta.get('role_type', None):
@ -591,7 +668,7 @@ class Controller(controller.BaseController):
    def delete_role(self, req, id):
        """
        Deletes a role from Daisy.

        :param req: The WSGI/Webob Request object
        :param image_meta: Mapping of metadata about role
@ -599,7 +676,7 @@ class Controller(controller.BaseController):
""" """
self._enforce(req, 'delete_role') self._enforce(req, 'delete_role')
#role = self.get_role_meta_or_404(req, id) # role = self.get_role_meta_or_404(req, id)
print "delete_role:%s" % id print "delete_role:%s" % id
try: try:
registry.delete_role_metadata(req.context, id) registry.delete_role_metadata(req.context, id)
@ -625,7 +702,7 @@ class Controller(controller.BaseController):
                                request=req,
                                content_type="text/plain")
        else:
            # self.notifier.info('role.delete', role)
            return Response(body='', status=200)

    @utils.mutating
@ -661,11 +738,11 @@ class Controller(controller.BaseController):
""" """
self._enforce(req, 'get_roles') self._enforce(req, 'get_roles')
params = self._get_query_params(req) params = self._get_query_params(req)
filters=params.get('filters',None) filters = params.get('filters', None)
if 'cluster_id' in filters: if 'cluster_id' in filters:
cluster_id=filters['cluster_id'] cluster_id = filters['cluster_id']
self._raise_404_if_cluster_deleted(req, cluster_id) self._raise_404_if_cluster_deleted(req, cluster_id)
try: try:
roles = registry.get_roles_detail(req.context, **params) roles = registry.get_roles_detail(req.context, **params)
except exception.Invalid as e: except exception.Invalid as e:
@ -684,13 +761,28 @@ class Controller(controller.BaseController):
""" """
orig_role_meta = self.get_role_meta_or_404(req, id) orig_role_meta = self.get_role_meta_or_404(req, id)
role_service_list = registry.get_role_services(req.context, id) role_service_list = registry.get_role_services(req.context, id)
role_service_id_list = [ role_service['service_id'] for role_service in role_service_list ] role_service_id_list = [role_service['service_id']
for role_service in role_service_list]
role_host_info_list = registry.get_role_host_metadata(req.context, id) role_host_info_list = registry.get_role_host_metadata(req.context, id)
role_host_id_list = [role_host['host_id'] for role_host in role_host_info_list] role_host_id_list = [role_host['host_id']
self._check_role_update_parameters(req, role_meta, orig_role_meta, role_service_id_list, role_host_id_list) for role_host in role_host_info_list]
self._check_role_update_parameters(
req,
role_meta,
orig_role_meta,
role_service_id_list,
role_host_id_list)
if orig_role_meta['role_type'] == "CONTROLLER_HA":
cluster_meta = {}
cluster_meta['public_vip'] = role_meta.get(
'public_vip') or role_meta.get('vip')
if cluster_meta['public_vip']:
cluster_meta = registry.update_cluster_metadata(
req.context, orig_role_meta['cluster_id'], cluster_meta)
        self._enforce(req, 'modify_image')
        # orig_role_meta = self.get_role_meta_or_404(req, id)

        # Do not allow any updates on a deleted image.
        # Fix for LP Bug #1060930
@ -735,6 +827,7 @@ class Controller(controller.BaseController):
        return {'role_meta': role_meta}


class RoleDeserializer(wsgi.JSONRequestDeserializer):
    """Handles deserialization of specific controller method requests."""
@ -749,6 +842,7 @@ class RoleDeserializer(wsgi.JSONRequestDeserializer):
    def update_role(self, request):
        return self._deserialize(request)


class RoleSerializer(wsgi.JSONResponseSerializer):
    """Handles serialization of specific controller method responses."""
@ -768,6 +862,7 @@ class RoleSerializer(wsgi.JSONResponseSerializer):
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(dict(role=role_meta))
        return response

    def get_role(self, response, result):
        role_meta = result['role_meta']
        response.status = 201
@ -775,6 +870,7 @@ class RoleSerializer(wsgi.JSONResponseSerializer):
        response.body = self.to_json(dict(role=role_meta))
        return response


def create_resource():
    """Roles resource factory method"""
    deserializer = RoleDeserializer()
View File
@ -14,7 +14,7 @@
# under the License.

# from daisy.api.v1 import images
from daisy.api.v1 import hosts
from daisy.api.v1 import clusters
from daisy.api.v1 import template
@ -29,14 +29,17 @@ from daisy.api.v1 import networks
from daisy.api.v1 import install
from daisy.api.v1 import disk_array
from daisy.api.v1 import host_template
from daisy.api.v1 import hwms
from daisy.common import wsgi
from daisy.api.v1 import backup_restore


class API(wsgi.Router):
    """WSGI router for Glance v1 API requests."""

    def __init__(self, mapper):
        wsgi.Resource(wsgi.RejectMethodController())
        '''images_resource = images.create_resource()
@ -126,7 +129,6 @@ class API(wsgi.Router):
                       controller=members_resource,
                       action="index_shared_images")'''

        hosts_resource = hosts.create_resource()
        mapper.connect("/nodes",
@ -145,11 +147,17 @@ class API(wsgi.Router):
                       controller=hosts_resource,
                       action='detail',
                       conditions={'method': ['GET']})
        mapper.connect("/nodes/{id}",
                       controller=hosts_resource,
                       action='get_host',
                       conditions={'method': ['GET']})
mapper.connect("/hwm_nodes",
controller=hosts_resource,
action='update_hwm_host',
conditions={'method': ['POST']})
mapper.connect("/discover_host/", mapper.connect("/discover_host/",
controller=hosts_resource, controller=hosts_resource,
action='discover_host', action='discover_host',
@ -159,17 +167,17 @@ class API(wsgi.Router):
                       controller=hosts_resource,
                       action='add_discover_host',
                       conditions={'method': ['POST']})
        mapper.connect("/discover/nodes/{id}",
                       controller=hosts_resource,
                       action='delete_discover_host',
                       conditions={'method': ['DELETE']})
        mapper.connect("/discover/nodes",
                       controller=hosts_resource,
                       action='detail_discover_host',
                       conditions={'method': ['GET']})
        mapper.connect("/discover/nodes/{id}",
                       controller=hosts_resource,
                       action='update_discover_host',
@ -179,9 +187,43 @@ class API(wsgi.Router):
                       controller=hosts_resource,
                       action='get_discover_host_detail',
                       conditions={'method': ['GET']})
mapper.connect("/pxe_discover/nodes",
controller=hosts_resource,
action='add_pxe_host',
conditions={'method': ['POST']})
mapper.connect("/pxe_discover/nodes/{id}",
controller=hosts_resource,
action='update_pxe_host',
conditions={'method': ['PUT']})
hwms_resource = hwms.create_resource()
mapper.connect("/hwm",
controller=hwms_resource,
action='add_hwm',
conditions={'method': ['POST']})
mapper.connect("/hwm/{id}",
controller=hwms_resource,
action='delete_hwm',
conditions={'method': ['DELETE']})
mapper.connect("/hwm/{id}",
controller=hwms_resource,
action='update_hwm',
conditions={'method': ['PUT']})
mapper.connect("/hwm",
controller=hwms_resource,
action='list',
conditions={'method': ['GET']})
mapper.connect("/hwm/{id}",
controller=hwms_resource,
action='detail',
conditions={'method': ['GET']})
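The new /hwm routes map plain CRUD verbs onto the hwms controller. A hedged client-side sketch follows: only the paths and HTTP methods come from the mapper above, while the endpoint address, token header and payload fields are assumptions for illustration.

# Client-side sketch of the /hwm routes registered above.
import requests

BASE = 'http://127.0.0.1:19292/v1'         # assumed daisy-api endpoint
HEADERS = {'X-Auth-Token': 'ADMIN_TOKEN'}  # assumed auth header

# POST /hwm -> add_hwm (body fields are assumptions)
resp = requests.post(BASE + '/hwm', json={'hwm': {'hwm_ip': '10.43.0.10'}},
                     headers=HEADERS)
hwm_id = resp.json().get('hwm', {}).get('id')

# GET /hwm -> list, GET /hwm/{id} -> detail
requests.get(BASE + '/hwm', headers=HEADERS)
requests.get(BASE + '/hwm/%s' % hwm_id, headers=HEADERS)

# PUT /hwm/{id} -> update_hwm, DELETE /hwm/{id} -> delete_hwm
requests.put(BASE + '/hwm/%s' % hwm_id,
             json={'hwm': {'description': 'rack-1 hwm'}}, headers=HEADERS)
requests.delete(BASE + '/hwm/%s' % hwm_id, headers=HEADERS)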
        clusters_resource = clusters.create_resource()
        mapper.connect("/clusters",
                       controller=clusters_resource,
                       action='add_cluster',
@ -193,56 +235,54 @@ class API(wsgi.Router):
mapper.connect("/clusters/{id}", mapper.connect("/clusters/{id}",
controller=clusters_resource, controller=clusters_resource,
action='update_cluster', action='update_cluster',
conditions={'method': ['PUT']}) conditions={'method': ['PUT']})
mapper.connect("/clusters", mapper.connect("/clusters",
controller=clusters_resource, controller=clusters_resource,
action='detail', action='detail',
conditions={'method': ['GET']}) conditions={'method': ['GET']})
mapper.connect("/clusters/{id}", mapper.connect("/clusters/{id}",
controller=clusters_resource, controller=clusters_resource,
action='get_cluster', action='get_cluster',
conditions={'method': ['GET']}) conditions={'method': ['GET']})
mapper.connect("/clusters/{id}", mapper.connect("/clusters/{id}",
controller=clusters_resource, controller=clusters_resource,
action='update_cluster', action='update_cluster',
conditions={'method': ['PUT']}) conditions={'method': ['PUT']})
        template_resource = template.create_resource()
        mapper.connect("/template",
                       controller=template_resource,
                       action='add_template',
                       conditions={'method': ['POST']})
        mapper.connect("/template/{template_id}",
                       controller=template_resource,
                       action='update_template',
                       conditions={'method': ['PUT']})
        mapper.connect("/template/{template_id}",
                       controller=template_resource,
                       action='delete_template',
                       conditions={'method': ['DELETE']})
        mapper.connect("/template/lists",
                       controller=template_resource,
                       action='get_template_lists',
                       conditions={'method': ['GET']})
        mapper.connect("/template/{template_id}",
                       controller=template_resource,
                       action='get_template_detail',
                       conditions={'method': ['GET']})
        mapper.connect("/export_db_to_json",
                       controller=template_resource,
                       action='export_db_to_json',
                       conditions={'method': ['POST']})
        mapper.connect("/import_json_to_template",
                       controller=template_resource,
                       action='import_json_to_template',
@ -253,7 +293,6 @@ class API(wsgi.Router):
                       action='import_template_to_db',
                       conditions={'method': ['POST']})

        host_template_resource = host_template.create_resource()
        mapper.connect("/host_template",
                       controller=host_template_resource,
@ -262,7 +301,7 @@ class API(wsgi.Router):
mapper.connect("/host_template/{template_id}", mapper.connect("/host_template/{template_id}",
controller=host_template_resource, controller=host_template_resource,
action='update_host_template', action='update_host_template',
conditions={'method': ['PUT']}) conditions={'method': ['PUT']})
mapper.connect("/host_template", mapper.connect("/host_template",
controller=host_template_resource, controller=host_template_resource,
action='delete_host_template', action='delete_host_template',
@ -270,11 +309,11 @@ class API(wsgi.Router):
mapper.connect("/host_template/lists", mapper.connect("/host_template/lists",
controller=host_template_resource, controller=host_template_resource,
action='get_host_template_lists', action='get_host_template_lists',
conditions={'method': ['GET']}) conditions={'method': ['GET']})
mapper.connect("/host_template/{template_id}", mapper.connect("/host_template/{template_id}",
controller=host_template_resource, controller=host_template_resource,
action='get_host_template_detail', action='get_host_template_detail',
conditions={'method': ['GET']}) conditions={'method': ['GET']})
mapper.connect("/host_to_template", mapper.connect("/host_to_template",
controller=host_template_resource, controller=host_template_resource,
action='host_to_template', action='host_to_template',
@ -283,7 +322,7 @@ class API(wsgi.Router):
                       controller=host_template_resource,
                       action='template_to_host',
                       conditions={'method': ['PUT']})

        components_resource = components.create_resource()
        mapper.connect("/components",
                       controller=components_resource,
@ -296,16 +335,16 @@ class API(wsgi.Router):
mapper.connect("/components/detail", mapper.connect("/components/detail",
controller=components_resource, controller=components_resource,
action='detail', action='detail',
conditions={'method': ['GET']}) conditions={'method': ['GET']})
mapper.connect("/components/{id}", mapper.connect("/components/{id}",
controller=components_resource, controller=components_resource,
action='get_component', action='get_component',
conditions={'method': ['GET']}) conditions={'method': ['GET']})
mapper.connect("/components/{id}", mapper.connect("/components/{id}",
controller=components_resource, controller=components_resource,
action='update_component', action='update_component',
conditions={'method': ['PUT']}) conditions={'method': ['PUT']})
services_resource = services.create_resource() services_resource = services.create_resource()
mapper.connect("/services", mapper.connect("/services",
controller=services_resource, controller=services_resource,
@ -318,15 +357,15 @@ class API(wsgi.Router):
mapper.connect("/services/detail", mapper.connect("/services/detail",
controller=services_resource, controller=services_resource,
action='detail', action='detail',
conditions={'method': ['GET']}) conditions={'method': ['GET']})
mapper.connect("/services/{id}", mapper.connect("/services/{id}",
controller=services_resource, controller=services_resource,
action='get_service', action='get_service',
conditions={'method': ['GET']}) conditions={'method': ['GET']})
mapper.connect("/services/{id}", mapper.connect("/services/{id}",
controller=services_resource, controller=services_resource,
action='update_service', action='update_service',
conditions={'method': ['PUT']}) conditions={'method': ['PUT']})
roles_resource = roles.create_resource() roles_resource = roles.create_resource()
mapper.connect("/roles", mapper.connect("/roles",
@ -340,15 +379,15 @@ class API(wsgi.Router):
mapper.connect("/roles/detail", mapper.connect("/roles/detail",
controller=roles_resource, controller=roles_resource,
action='detail', action='detail',
conditions={'method': ['GET']}) conditions={'method': ['GET']})
mapper.connect("/roles/{id}", mapper.connect("/roles/{id}",
controller=roles_resource, controller=roles_resource,
action='get_role', action='get_role',
conditions={'method': ['GET']}) conditions={'method': ['GET']})
mapper.connect("/roles/{id}", mapper.connect("/roles/{id}",
controller=roles_resource, controller=roles_resource,
action='update_role', action='update_role',
conditions={'method': ['PUT']}) conditions={'method': ['PUT']})
members_resource = members.create_resource() members_resource = members.create_resource()
mapper.connect("/clusters/{cluster_id}/nodes/{host_id}", mapper.connect("/clusters/{cluster_id}/nodes/{host_id}",
@ -359,102 +398,102 @@ class API(wsgi.Router):
                       controller=members_resource,
                       action="delete_cluster_host",
                       conditions={'method': ['DELETE']})
        # mapper.connect("/clusters/{cluster_id}/nodes/{host_id}",
        #                controller=members_resource,
        #                action="get_cluster_hosts",
        #                conditions={'method': ['GET']})
        # mapper.connect("/clusters/{cluster_id}/nodes",
        #                controller=members_resource,
        #                action="get_cluster_hosts",
        #                conditions={'method': ['GET']})
        # mapper.connect("/multi_clusters/nodes/{host_id}",
        #                controller=members_resource,
        #                action="get_host_clusters",
        #                conditions={'method': ['GET']})

        config_files_resource = config_files.create_resource()
        mapper.connect("/config_files",
                       controller=config_files_resource,
                       action="add_config_file",
                       conditions={'method': ['POST']})
        mapper.connect("/config_files/{id}",
                       controller=config_files_resource,
                       action="delete_config_file",
                       conditions={'method': ['DELETE']})
        mapper.connect("/config_files/{id}",
                       controller=config_files_resource,
                       action="update_config_file",
                       conditions={'method': ['PUT']})
        mapper.connect("/config_files/detail",
                       controller=config_files_resource,
                       action="detail",
                       conditions={'method': ['GET']})
        mapper.connect("/config_files/{id}",
                       controller=config_files_resource,
                       action="get_config_file",
                       conditions=dict(method=["GET"]))

        config_sets_resource = config_sets.create_resource()
        mapper.connect("/config_sets",
                       controller=config_sets_resource,
                       action="add_config_set",
                       conditions={'method': ['POST']})
        mapper.connect("/config_sets/{id}",
                       controller=config_sets_resource,
                       action="delete_config_set",
                       conditions={'method': ['DELETE']})
        mapper.connect("/config_sets/{id}",
                       controller=config_sets_resource,
                       action="update_config_set",
                       conditions={'method': ['PUT']})
        mapper.connect("/config_sets/detail",
                       controller=config_sets_resource,
                       action="detail",
                       conditions={'method': ['GET']})
        mapper.connect("/config_sets/{id}",
                       controller=config_sets_resource,
                       action="get_config_set",
                       conditions=dict(method=["GET"]))
        mapper.connect("/cluster_config_set_update",
                       controller=config_sets_resource,
                       action="cluster_config_set_update",
                       conditions={'method': ['POST']})
        mapper.connect("/cluster_config_set_progress",
                       controller=config_sets_resource,
                       action="cluster_config_set_progress",
                       conditions={'method': ['POST']})

        configs_resource = configs.create_resource()
        mapper.connect("/configs",
                       controller=configs_resource,
                       action="add_config",
                       conditions={'method': ['POST']})
        mapper.connect("/configs_delete",
                       controller=configs_resource,
                       action="delete_config",
                       conditions={'method': ['DELETE']})
        mapper.connect("/configs/detail",
                       controller=configs_resource,
                       action="detail",
                       conditions={'method': ['GET']})
        mapper.connect("/configs/{id}",
                       controller=configs_resource,
                       action="get_config",
                       conditions=dict(method=["GET"]))

        networks_resource = networks.create_resource()
@ -474,16 +513,16 @@ class API(wsgi.Router):
controller=networks_resource, controller=networks_resource,
action='detail', action='detail',
conditions={'method': ['GET']}) conditions={'method': ['GET']})
mapper.connect("/networks/{id}", mapper.connect("/networks/{id}",
controller=networks_resource, controller=networks_resource,
action='get_network', action='get_network',
conditions={'method': ['GET']}) conditions={'method': ['GET']})
mapper.connect("/networks", mapper.connect("/networks",
controller=networks_resource, controller=networks_resource,
action='get_all_network', action='get_all_network',
conditions={'method': ['GET']}) conditions={'method': ['GET']})
install_resource = install.create_resource() install_resource = install.create_resource()
@ -491,12 +530,12 @@ class API(wsgi.Router):
controller=install_resource, controller=install_resource,
action='install_cluster', action='install_cluster',
conditions={'method': ['POST']}) conditions={'method': ['POST']})
mapper.connect("/export_db", mapper.connect("/export_db",
controller=install_resource, controller=install_resource,
action='export_db', action='export_db',
conditions={'method': ['POST']}) conditions={'method': ['POST']})
mapper.connect("/uninstall/{cluster_id}", mapper.connect("/uninstall/{cluster_id}",
controller=install_resource, controller=install_resource,
action='uninstall_cluster', action='uninstall_cluster',
@ -510,23 +549,23 @@ class API(wsgi.Router):
controller=install_resource, controller=install_resource,
action='update_cluster', action='update_cluster',
conditions={'method': ['POST']}) conditions={'method': ['POST']})
mapper.connect("/update/{cluster_id}", mapper.connect("/update/{cluster_id}",
controller=install_resource, controller=install_resource,
action='update_progress', action='update_progress',
conditions={'method': ['GET']}) conditions={'method': ['GET']})
mapper.connect("/disk_array/{cluster_id}", mapper.connect("/disk_array/{cluster_id}",
controller=install_resource, controller=install_resource,
action='update_disk_array', action='update_disk_array',
conditions={'method': ['POST']}) conditions={'method': ['POST']})
#mapper.connect("/update/{cluster_id}/versions/{versions_id}", # mapper.connect("/update/{cluster_id}/versions/{versions_id}",
# controller=update_resource, # controller=update_resource,
# action='update_cluster_version', # action='update_cluster_version',
# conditions={'method': ['POST']}) # conditions={'method': ['POST']})
array_resource = disk_array.create_resource() array_resource = disk_array.create_resource()
mapper.connect("/service_disk", mapper.connect("/service_disk",
controller=array_resource, controller=array_resource,
action='service_disk_add', action='service_disk_add',
@ -547,7 +586,7 @@ class API(wsgi.Router):
controller=array_resource, controller=array_resource,
action='service_disk_detail', action='service_disk_detail',
conditions={'method': ['GET']}) conditions={'method': ['GET']})
mapper.connect("/cinder_volume", mapper.connect("/cinder_volume",
controller=array_resource, controller=array_resource,
action='cinder_volume_add', action='cinder_volume_add',
@ -568,7 +607,23 @@ class API(wsgi.Router):
controller=array_resource, controller=array_resource,
action='cinder_volume_detail', action='cinder_volume_detail',
conditions={'method': ['GET']}) conditions={'method': ['GET']})
backup_restore_resource = backup_restore.create_resource()
mapper.connect("/backup",
controller=backup_restore_resource,
action='backup',
conditions={'method': ['POST']})
mapper.connect("/restore",
controller=backup_restore_resource,
action='restore',
conditions={'method': ['POST']})
mapper.connect("/backup_file_version",
controller=backup_restore_resource,
action='get_backup_file_version',
conditions={'method': ['POST']})
mapper.connect("/version",
controller=backup_restore_resource,
action='version',
conditions={'method': ['POST']})
super(API, self).__init__(mapper) super(API, self).__init__(mapper)
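The four routes added above (/backup, /restore, /backup_file_version and /version) are all POST-only and dispatch to the backup_restore controller. Below is a minimal client-side sketch of how they could be exercised; the base URL, port and the backup_file_path payload key are assumptions for illustration, not defined by this diff:

```python
# Illustrative only: the base URL, port and payload keys below are assumptions,
# not taken from the routes registered in this commit.
import requests

DAISY_API = "http://127.0.0.1:19292/v1"

# Trigger a backup of the daisy database.
resp = requests.post("%s/backup" % DAISY_API, json={})
print(resp.status_code)

# Ask which daisy version a previously produced backup file belongs to.
resp = requests.post("%s/backup_file_version" % DAISY_API,
                     json={"backup_file_path": "/home/daisy_backup.tar.gz"})
print(resp.text)

# Restore the database from that backup file.
resp = requests.post("%s/restore" % DAISY_API,
                     json={"backup_file_path": "/home/daisy_backup.tar.gz"})
print(resp.status_code)
```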
View File
@ -52,12 +52,13 @@ CONF.import_opt('container_formats', 'daisy.common.config',
group='image_format') group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config') CONF.import_opt('image_property_quota', 'daisy.common.config')
class Controller(controller.BaseController): class Controller(controller.BaseController):
""" """
WSGI controller for services resource in Daisy v1 API WSGI controller for services resource in Daisy v1 API
-    The services resource API is a RESTful web service for service data. The API
-    is as follows::
+    The services resource API is a RESTful web service for service data.
+    The API is as follows::
GET /services -- Returns a set of brief metadata about services GET /services -- Returns a set of brief metadata about services
GET /services/detail -- Returns a set of detailed metadata about GET /services/detail -- Returns a set of detailed metadata about
@ -124,7 +125,8 @@ class Controller(controller.BaseController):
def _raise_404_if_component_deleted(self, req, component_id): def _raise_404_if_component_deleted(self, req, component_id):
component = self.get_component_meta_or_404(req, component_id) component = self.get_component_meta_or_404(req, component_id)
if component['deleted']: if component['deleted']:
msg = _("Component with identifier %s has been deleted.") % component_id msg = _("Component with identifier %s has been deleted.") % \
component_id
raise HTTPNotFound(msg) raise HTTPNotFound(msg)
@utils.mutating @utils.mutating
@ -141,7 +143,7 @@ class Controller(controller.BaseController):
service_name = service_meta["name"] service_name = service_meta["name"]
service_description = service_meta["description"] service_description = service_meta["description"]
-        if service_meta.has_key('component_id'):
+        if 'component_id' in service_meta:
orig_component_id = str(service_meta['component_id']) orig_component_id = str(service_meta['component_id'])
self._raise_404_if_component_deleted(req, orig_component_id) self._raise_404_if_component_deleted(req, orig_component_id)
@ -163,7 +165,7 @@ class Controller(controller.BaseController):
""" """
self._enforce(req, 'delete_service') self._enforce(req, 'delete_service')
-        #service = self.get_service_meta_or_404(req, id)
+        # service = self.get_service_meta_or_404(req, id)
print "delete_service:%s" % id print "delete_service:%s" % id
try: try:
registry.delete_service_metadata(req.context, id) registry.delete_service_metadata(req.context, id)
@ -182,14 +184,15 @@ class Controller(controller.BaseController):
request=req, request=req,
content_type="text/plain") content_type="text/plain")
except exception.InUseByStore as e: except exception.InUseByStore as e:
msg = (_("service %(id)s could not be deleted because it is in use: " msg = (_("service %(id)s could not be deleted "
"because it is in use: "
"%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)}) "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
LOG.warn(msg) LOG.warn(msg)
raise HTTPConflict(explanation=msg, raise HTTPConflict(explanation=msg,
request=req, request=req,
content_type="text/plain") content_type="text/plain")
else: else:
-            #self.notifier.info('service.delete', service)
+            # self.notifier.info('service.delete', service)
return Response(body='', status=200) return Response(body='', status=200)
@utils.mutating @utils.mutating
@ -287,6 +290,7 @@ class Controller(controller.BaseController):
return {'service_meta': service_meta} return {'service_meta': service_meta}
class ServiceDeserializer(wsgi.JSONRequestDeserializer): class ServiceDeserializer(wsgi.JSONRequestDeserializer):
"""Handles deserialization of specific controller method requests.""" """Handles deserialization of specific controller method requests."""
@ -301,6 +305,7 @@ class ServiceDeserializer(wsgi.JSONRequestDeserializer):
def update_service(self, request): def update_service(self, request):
return self._deserialize(request) return self._deserialize(request)
class ServiceSerializer(wsgi.JSONResponseSerializer): class ServiceSerializer(wsgi.JSONResponseSerializer):
"""Handles serialization of specific controller method responses.""" """Handles serialization of specific controller method responses."""
@ -320,6 +325,7 @@ class ServiceSerializer(wsgi.JSONResponseSerializer):
response.headers['Content-Type'] = 'application/json' response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(service=service_meta)) response.body = self.to_json(dict(service=service_meta))
return response return response
def get_service(self, response, result): def get_service(self, response, result):
service_meta = result['service_meta'] service_meta = result['service_meta']
response.status = 201 response.status = 201
@ -327,6 +333,7 @@ class ServiceSerializer(wsgi.JSONResponseSerializer):
response.body = self.to_json(dict(service=service_meta)) response.body = self.to_json(dict(service=service_meta))
return response return response
def create_resource(): def create_resource():
"""Services resource factory method""" """Services resource factory method"""
deserializer = ServiceDeserializer() deserializer = ServiceDeserializer()
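The change from service_meta.has_key('component_id') to 'component_id' in service_meta in the add_service path above is the standard Python 3 compatibility fix: dict.has_key() was removed in Python 3, while the in operator works on both interpreters. A tiny standalone illustration (the dictionary contents are hypothetical):

```python
service_meta = {"name": "nova-api", "component_id": "1234"}

# Python 2 only -- dict.has_key() no longer exists on Python 3:
#     if service_meta.has_key('component_id'): ...

# Portable membership test used by the updated code:
if 'component_id' in service_meta:
    orig_component_id = str(service_meta['component_id'])
    print(orig_component_id)
```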
View File
@ -42,10 +42,6 @@ from daisy.registry.api.v1 import template
import daisy.api.backends.tecs.common as tecs_cmn import daisy.api.backends.tecs.common as tecs_cmn
import daisy.api.backends.common as daisy_cmn import daisy.api.backends.common as daisy_cmn
-try:
-    import simplejson as json
-except ImportError:
-    import json
daisy_tecs_path = tecs_cmn.daisy_tecs_path daisy_tecs_path = tecs_cmn.daisy_tecs_path
@ -64,12 +60,13 @@ CONF.import_opt('container_formats', 'daisy.common.config',
group='image_format') group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config') CONF.import_opt('image_property_quota', 'daisy.common.config')
class Controller(controller.BaseController): class Controller(controller.BaseController):
""" """
WSGI controller for Templates resource in Daisy v1 API WSGI controller for Templates resource in Daisy v1 API
-    The Templates resource API is a RESTful web Template for Template data. The API
-    is as follows::
+    The Templates resource API is a RESTful web Template for Template data.
+    The API is as follows::
GET /Templates -- Returns a set of brief metadata about Templates GET /Templates -- Returns a set of brief metadata about Templates
GET /Templates/detail -- Returns a set of detailed metadata about GET /Templates/detail -- Returns a set of detailed metadata about
@ -136,8 +133,9 @@ class Controller(controller.BaseController):
def _raise_404_if_cluster_deleted(self, req, cluster_id): def _raise_404_if_cluster_deleted(self, req, cluster_id):
cluster = self.get_cluster_meta_or_404(req, cluster_id) cluster = self.get_cluster_meta_or_404(req, cluster_id)
if cluster['deleted']: if cluster['deleted']:
msg = _("Cluster with identifier %s has been deleted.") % cluster_id msg = _("Cluster with identifier %s has been deleted.") % \
raise webob.exc.HTTPNotFound(msg) cluster_id
raise HTTPNotFound(msg)
@utils.mutating @utils.mutating
def add_template(self, req, template): def add_template(self, req, template):
@ -150,8 +148,7 @@ class Controller(controller.BaseController):
:raises HTTPBadRequest if x-Template-name is missing :raises HTTPBadRequest if x-Template-name is missing
""" """
self._enforce(req, 'add_template') self._enforce(req, 'add_template')
template_name = template["name"]
template = registry.add_template_metadata(req.context, template) template = registry.add_template_metadata(req.context, template)
return {'template': template} return {'template': template}
@ -169,8 +166,8 @@ class Controller(controller.BaseController):
self._enforce(req, 'update_template') self._enforce(req, 'update_template')
try: try:
template = registry.update_template_metadata(req.context, template = registry.update_template_metadata(req.context,
template_id, template_id,
template) template)
except exception.Invalid as e: except exception.Invalid as e:
msg = (_("Failed to update template metadata. Got error: %s") % msg = (_("Failed to update template metadata. Got error: %s") %
@ -202,6 +199,7 @@ class Controller(controller.BaseController):
self.notifier.info('template.update', template) self.notifier.info('template.update', template)
return {'template': template} return {'template': template}
@utils.mutating @utils.mutating
def delete_template(self, req, template_id): def delete_template(self, req, template_id):
""" """
@ -230,23 +228,25 @@ class Controller(controller.BaseController):
request=req, request=req,
content_type="text/plain") content_type="text/plain")
except exception.InUseByStore as e: except exception.InUseByStore as e:
msg = (_("template %(id)s could not be deleted because it is in use: " msg = (_("template %(id)s could not be deleted "
"%(exc)s") % {"id": template_id, "exc": utils.exception_to_str(e)}) "because it is in use: "
"%(exc)s") % {"id": template_id,
"exc": utils.exception_to_str(e)})
LOG.error(msg) LOG.error(msg)
raise HTTPConflict(explanation=msg, raise HTTPConflict(explanation=msg,
request=req, request=req,
content_type="text/plain") content_type="text/plain")
else: else:
return Response(body='', status=200) return Response(body='', status=200)
-    def _del_general_params(self,param):
+    def _del_general_params(self, param):
del param['created_at'] del param['created_at']
del param['updated_at'] del param['updated_at']
del param['deleted'] del param['deleted']
del param['deleted_at'] del param['deleted_at']
del param['id'] del param['id']
-    def _del_cluster_params(self,cluster):
+    def _del_cluster_params(self, cluster):
del cluster['networks'] del cluster['networks']
del cluster['vlan_start'] del cluster['vlan_start']
del cluster['vlan_end'] del cluster['vlan_end']
@ -259,7 +259,27 @@ class Controller(controller.BaseController):
del cluster['segmentation_type'] del cluster['segmentation_type']
del cluster['base_mac'] del cluster['base_mac']
del cluster['name'] del cluster['name']
def _get_cinder_volumes(self, req, role):
cinder_volume_params = {'filters': {'role_id': role['id']}}
cinder_volumes = registry.list_cinder_volume_metadata(
req.context, **cinder_volume_params)
for cinder_volume in cinder_volumes:
if cinder_volume.get('role_id', None):
cinder_volume['role_id'] = role['name']
self._del_general_params(cinder_volume)
return cinder_volumes
def _get_services_disk(self, req, role):
params = {'filters': {'role_id': role['id']}}
services_disk = registry.list_service_disk_metadata(
req.context, **params)
for service_disk in services_disk:
if service_disk.get('role_id', None):
service_disk['role_id'] = role['name']
self._del_general_params(service_disk)
return services_disk
@utils.mutating @utils.mutating
def export_db_to_json(self, req, template): def export_db_to_json(self, req, template):
""" """
@ -267,40 +287,45 @@ class Controller(controller.BaseController):
:param req: The WSGI/Webob Request object :param req: The WSGI/Webob Request object
:raises HTTPBadRequest if x-Template-cluster is missing :raises HTTPBadRequest if x-Template-cluster is missing
""" """
-        cluster_name = template.get('cluster_name',None)
-        type = template.get('type',None)
-        description = template.get('description',None)
-        template_name = template.get('template_name',None)
+        cluster_name = template.get('cluster_name', None)
+        type = template.get('type', None)
+        description = template.get('description', None)
+        template_name = template.get('template_name', None)
         self._enforce(req, 'export_db_to_json')
         cinder_volume_list = []
+        service_disk_list = []
         template_content = {}
         template_json = {}
         template_id = ""
         if not type or type == "tecs":
             try:
-                params = {'filters': {'name':cluster_name}}
+                params = {'filters': {'name': cluster_name}}
                 clusters = registry.get_clusters_detail(req.context, **params)
                 if clusters:
                     cluster_id = clusters[0]['id']
                 else:
-                    msg = "the cluster %s is not exist"%cluster_name
+                    msg = "the cluster %s is not exist" % cluster_name
                     LOG.error(msg)
-                    raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain")
-                params = {'filters': {'cluster_id':cluster_id}}
-                cluster = registry.get_cluster_metadata(req.context, cluster_id)
+                    raise HTTPForbidden(
+                        explanation=msg,
+                        request=req,
+                        content_type="text/plain")
+                params = {'filters': {'cluster_id': cluster_id}}
+                cluster = registry.get_cluster_metadata(
+                    req.context, cluster_id)
                 roles = registry.get_roles_detail(req.context, **params)
-                networks = registry.get_networks_detail(req.context, cluster_id,**params)
+                networks = registry.get_networks_detail(
+                    req.context, cluster_id, **params)
                 for role in roles:
-                    cinder_volume_params = {'filters': {'role_id':role['id']}}
-                    cinder_volumes = registry.list_cinder_volume_metadata(req.context, **cinder_volume_params)
-                    for cinder_volume in cinder_volumes:
-                        if cinder_volume.get('role_id',None):
-                            cinder_volume['role_id'] = role['name']
-                        self._del_general_params(cinder_volume)
-                        cinder_volume_list.append(cinder_volume)
-                    if role.get('config_set_id',None):
-                        config_set = registry.get_config_set_metadata(req.context, role['config_set_id'])
+                    cinder_volumes = self._get_cinder_volumes(req, role)
+                    cinder_volume_list += cinder_volumes
+                    services_disk = self._get_services_disk(req, role)
+                    service_disk_list += services_disk
+                    if role.get('config_set_id', None):
+                        config_set = registry.get_config_set_metadata(
+                            req.context, role['config_set_id'])
                         role['config_set_id'] = config_set['name']
del role['cluster_id'] del role['cluster_id']
del role['status'] del role['status']
@ -309,16 +334,17 @@ class Controller(controller.BaseController):
del role['config_set_update_progress'] del role['config_set_update_progress']
self._del_general_params(role) self._del_general_params(role)
                 for network in networks:
-                    network_detail = registry.get_network_metadata(req.context, network['id'])
-                    if network_detail.get('ip_ranges',None):
+                    network_detail = registry.get_network_metadata(
+                        req.context, network['id'])
+                    if network_detail.get('ip_ranges', None):
                         network['ip_ranges'] = network_detail['ip_ranges']
                     del network['cluster_id']
                     self._del_general_params(network)
-                if cluster.get('routers',None):
+                if cluster.get('routers', None):
                     for router in cluster['routers']:
                         del router['cluster_id']
                         self._del_general_params(router)
-                if cluster.get('logic_networks',None):
+                if cluster.get('logic_networks', None):
for logic_network in cluster['logic_networks']: for logic_network in cluster['logic_networks']:
for subnet in logic_network['subnets']: for subnet in logic_network['subnets']:
del subnet['logic_network_id'] del subnet['logic_network_id']
@ -326,7 +352,7 @@ class Controller(controller.BaseController):
self._del_general_params(subnet) self._del_general_params(subnet)
del logic_network['cluster_id'] del logic_network['cluster_id']
self._del_general_params(logic_network) self._del_general_params(logic_network)
-                if cluster.get('nodes',None):
+                if cluster.get('nodes', None):
del cluster['nodes'] del cluster['nodes']
self._del_general_params(cluster) self._del_general_params(cluster)
self._del_cluster_params(cluster) self._del_cluster_params(cluster)
@ -334,140 +360,226 @@ class Controller(controller.BaseController):
template_content['roles'] = roles template_content['roles'] = roles
template_content['networks'] = networks template_content['networks'] = networks
template_content['cinder_volumes'] = cinder_volume_list template_content['cinder_volumes'] = cinder_volume_list
template_content['services_disk'] = service_disk_list
template_json['content'] = json.dumps(template_content) template_json['content'] = json.dumps(template_content)
template_json['type'] = 'tecs' template_json['type'] = 'tecs'
template_json['name'] = template_name template_json['name'] = template_name
template_json['description'] = description template_json['description'] = description
template_host_params = {'cluster_name':cluster_name} template_host_params = {'cluster_name': cluster_name}
template_hosts = registry.host_template_lists_metadata(req.context, **template_host_params) template_hosts = registry.host_template_lists_metadata(
req.context, **template_host_params)
if template_hosts: if template_hosts:
template_json['hosts'] = template_hosts[0]['hosts'] template_json['hosts'] = template_hosts[0]['hosts']
else: else:
template_json['hosts'] = "[]" template_json['hosts'] = "[]"
template_params = {'filters': {'name':template_name}} template_params = {'filters': {'name': template_name}}
template_list = registry.template_lists_metadata(req.context, **template_params) template_list = registry.template_lists_metadata(
req.context, **template_params)
if template_list: if template_list:
update_template = registry.update_template_metadata(req.context, template_list[0]['id'], template_json) registry.update_template_metadata(
req.context, template_list[0]['id'], template_json)
template_id = template_list[0]['id'] template_id = template_list[0]['id']
else: else:
add_template = registry.add_template_metadata(req.context, template_json) add_template = registry.add_template_metadata(
req.context, template_json)
template_id = add_template['id'] template_id = add_template['id']
if template_id: if template_id:
template_detail = registry.template_detail_metadata(req.context, template_id) template_detail = registry.template_detail_metadata(
req.context, template_id)
self._del_general_params(template_detail) self._del_general_params(template_detail)
template_detail['content'] = json.loads(template_detail['content']) template_detail['content'] = json.loads(
template_detail['content'])
if template_detail['hosts']: if template_detail['hosts']:
template_detail['hosts'] = json.loads(template_detail['hosts']) template_detail['hosts'] = json.loads(
template_detail['hosts'])
-                tecs_json = daisy_tecs_path + "%s.json"%template_name
+                tecs_json = daisy_tecs_path + "%s.json" % template_name
cmd = 'rm -rf %s' % (tecs_json,) cmd = 'rm -rf %s' % (tecs_json,)
daisy_cmn.subprocess_call(cmd) daisy_cmn.subprocess_call(cmd)
with open(tecs_json, "w+") as fp: with open(tecs_json, "w+") as fp:
-                    fp.write(json.dumps(template_detail))
+                    json.dump(template_detail, fp, indent=2)
except exception.Invalid as e: except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req) raise HTTPBadRequest(explanation=e.msg, request=req)
return {"template":template_detail} return {"template": template_detail}
@utils.mutating @utils.mutating
def import_json_to_template(self, req, template): def import_json_to_template(self, req, template):
template_id = "" template_id = ""
template = json.loads(template.get('template',None)) template = json.loads(template.get('template', None))
template_cluster = copy.deepcopy(template) template_cluster = copy.deepcopy(template)
template_name = template_cluster.get('name',None) template_name = template_cluster.get('name', None)
template_params = {'filters': {'name':template_name}} template_params = {'filters': {'name': template_name}}
try: try:
if template_cluster.get('content',None): if template_cluster.get('content', None):
template_cluster['content'] = json.dumps(template_cluster['content']) template_cluster['content'] = json.dumps(
if template_cluster.get('hosts',None): template_cluster['content'])
template_cluster['hosts'] = json.dumps(template_cluster['hosts']) if template_cluster.get('hosts', None):
template_cluster['hosts'] = json.dumps(
template_cluster['hosts'])
else: else:
template_cluster['hosts'] = "[]" template_cluster['hosts'] = "[]"
template_list = registry.template_lists_metadata(req.context, **template_params) template_list = registry.template_lists_metadata(
req.context, **template_params)
if template_list: if template_list:
update_template_cluster = registry.update_template_metadata(req.context, template_list[0]['id'], template_cluster) registry.update_template_metadata(
req.context, template_list[0]['id'], template_cluster)
template_id = template_list[0]['id'] template_id = template_list[0]['id']
else: else:
add_template_cluster = registry.add_template_metadata(req.context, template_cluster) add_template_cluster = registry.add_template_metadata(
req.context, template_cluster)
template_id = add_template_cluster['id'] template_id = add_template_cluster['id']
if template_id: if template_id:
template_detail = registry.template_detail_metadata(req.context, template_id) template_detail = registry.template_detail_metadata(
req.context, template_id)
del template_detail['deleted'] del template_detail['deleted']
del template_detail['deleted_at'] del template_detail['deleted_at']
except exception.Invalid as e: except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req) raise HTTPBadRequest(explanation=e.msg, request=req)
return {"template":template_detail} return {"template": template_detail}
def _import_cinder_volumes_to_db(self, req,
template_cinder_volumes, roles):
for template_cinder_volume in template_cinder_volumes:
has_template_role = False
for role in roles:
if template_cinder_volume['role_id'] == role['name']:
has_template_role = True
template_cinder_volume['role_id'] = role['id']
break
if has_template_role:
registry.add_cinder_volume_metadata(req.context,
template_cinder_volume)
else:
msg = "can't find role %s in new cluster when\
import cinder_volumes from template"\
% template_cinder_volume['role_id']
raise HTTPBadRequest(explanation=msg, request=req)
def _import_services_disk_to_db(self, req,
template_services_disk, roles):
for template_service_disk in template_services_disk:
has_template_role = False
for role in roles:
if template_service_disk['role_id'] == role['name']:
has_template_role = True
template_service_disk['role_id'] = role['id']
break
if has_template_role:
registry.add_service_disk_metadata(req.context,
template_service_disk)
else:
msg = "can't find role %s in new cluster when\
import service_disks from template"\
% template_service_disk['role_id']
raise HTTPBadRequest(explanation=msg, request=req)
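Both import helpers above resolve the role name stored in the template back to the id of the corresponding role in the freshly created cluster before writing the cinder_volume and service_disk rows. The shared lookup boils down to the sketch below (a distilled illustration, not code from the diff; the helper name is invented):

```python
def _resolve_role_id(template_item, roles):
    """Map the role *name* recorded in a template back to a role id.

    template_item is a cinder_volume or service_disk dict whose 'role_id'
    field still holds a role name; roles is the list returned by
    registry.get_roles_detail() for the new cluster.
    """
    for role in roles:
        if template_item['role_id'] == role['name']:
            template_item['role_id'] = role['id']
            return True
    return False  # callers raise HTTPBadRequest when the role is missing
```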
@utils.mutating @utils.mutating
def import_template_to_db(self, req, template): def import_template_to_db(self, req, template):
cluster_id = "" cluster_id = ""
template_cluster = {} template_cluster = {}
cluster_meta = {} cluster_meta = {}
template_meta = copy.deepcopy(template) template_meta = copy.deepcopy(template)
template_name = template_meta.get('name',None) template_name = template_meta.get('name', None)
cluster_name = template_meta.get('cluster',None) cluster_name = template_meta.get('cluster', None)
template_params = {'filters': {'name':template_name}} template_params = {'filters': {'name': template_name}}
template_list = registry.template_lists_metadata(req.context, **template_params) template_list = registry.template_lists_metadata(
req.context, **template_params)
if template_list: if template_list:
template_cluster = template_list[0] template_cluster = template_list[0]
else: else:
msg = "the template %s is not exist" % template_name msg = "the template %s is not exist" % template_name
LOG.error(msg) LOG.error(msg)
raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") raise HTTPForbidden(
explanation=msg,
request=req,
content_type="text/plain")
try: try:
template_content = json.loads(template_cluster['content']) template_content = json.loads(template_cluster['content'])
template_content_cluster = template_content['cluster'] template_content_cluster = template_content['cluster']
template_content_cluster['name'] = cluster_name template_content_cluster['name'] = cluster_name
template_content_cluster['networking_parameters'] = str(template_content_cluster['networking_parameters']) template_content_cluster['networking_parameters'] = str(
template_content_cluster['logic_networks'] = str(template_content_cluster['logic_networks']) template_content_cluster['networking_parameters'])
template_content_cluster['logic_networks'] = template_content_cluster['logic_networks'].replace("\'true\'","True") template_content_cluster['logic_networks'] = str(
template_content_cluster['routers'] = str(template_content_cluster['routers']) template_content_cluster['logic_networks'])
template_content_cluster['logic_networks'] = \
template_content_cluster[
'logic_networks'].replace("\'true\'", "True")
template_content_cluster['routers'] = str(
template_content_cluster['routers'])
if template_cluster['hosts']: if template_cluster['hosts']:
template_hosts = json.loads(template_cluster['hosts']) template_hosts = json.loads(template_cluster['hosts'])
template_host_params = {'cluster_name':cluster_name} template_host_params = {'cluster_name': cluster_name}
template_host_list = registry.host_template_lists_metadata(req.context, **template_host_params) template_host_list = registry.host_template_lists_metadata(
req.context, **template_host_params)
if template_host_list: if template_host_list:
update_template_meta = {"cluster_name": cluster_name, "hosts":json.dumps(template_hosts)} update_template_meta = {
registry.update_host_template_metadata(req.context, template_host_list[0]['id'], update_template_meta) "cluster_name": cluster_name,
"hosts": json.dumps(template_hosts)}
registry.update_host_template_metadata(
req.context, template_host_list[0]['id'],
update_template_meta)
else: else:
template_meta = {"cluster_name": cluster_name, "hosts":json.dumps(template_hosts)} template_meta = {
registry.add_host_template_metadata(req.context, template_meta) "cluster_name": cluster_name,
"hosts": json.dumps(template_hosts)}
cluster_params = {'filters': {'name':cluster_name}} registry.add_host_template_metadata(
clusters = registry.get_clusters_detail(req.context, **cluster_params) req.context, template_meta)
cluster_params = {'filters': {'name': cluster_name}}
clusters = registry.get_clusters_detail(
req.context, **cluster_params)
if clusters: if clusters:
msg = "the cluster %s is exist" % clusters[0]['name'] msg = "the cluster %s is exist" % clusters[0]['name']
LOG.error(msg) LOG.error(msg)
raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") raise HTTPForbidden(
explanation=msg,
request=req,
content_type="text/plain")
else: else:
cluster_meta = registry.add_cluster_metadata(req.context, template_content['cluster']) if template_content_cluster.get('auto_scale', None) == 1:
params = {'filters': ''}
clusters_list = registry.get_clusters_detail(
req.context, **params)
for cluster in clusters_list:
if cluster.get('auto_scale', None) == 1:
template_content_cluster['auto_scale'] = 0
break
cluster_meta = registry.add_cluster_metadata(
req.context, template_content['cluster'])
cluster_id = cluster_meta['id'] cluster_id = cluster_meta['id']
params = {'filters':{}} params = {'filters': {}}
networks = registry.get_networks_detail(req.context, cluster_id,**params) networks = registry.get_networks_detail(
req.context, cluster_id, **params)
template_content_networks = template_content['networks'] template_content_networks = template_content['networks']
for template_content_network in template_content_networks: for template_content_network in template_content_networks:
template_content_network['ip_ranges'] = str(template_content_network['ip_ranges']) template_content_network['ip_ranges'] = str(
template_content_network['ip_ranges'])
network_exist = 'false' network_exist = 'false'
for network in networks: for network in networks:
if template_content_network['name'] == network['name']: if template_content_network['name'] == network['name']:
update_network_meta = registry.update_network_metadata(req.context, network['id'], template_content_network) registry.update_network_metadata(
req.context, network['id'],
template_content_network)
network_exist = 'true' network_exist = 'true'
if network_exist == 'false': if network_exist == 'false':
template_content_network['cluster_id'] = cluster_id template_content_network['cluster_id'] = cluster_id
add_network_meta = registry.add_network_metadata(req.context, template_content_network) registry.add_network_metadata(
req.context, template_content_network)
params = {'filters': {'cluster_id':cluster_id}}
params = {'filters': {'cluster_id': cluster_id}}
roles = registry.get_roles_detail(req.context, **params) roles = registry.get_roles_detail(req.context, **params)
template_content_roles = template_content['roles'] template_content_roles = template_content['roles']
for template_content_role in template_content_roles: for template_content_role in template_content_roles:
@ -475,34 +587,25 @@ class Controller(controller.BaseController):
del template_content_role['config_set_id'] del template_content_role['config_set_id']
for role in roles: for role in roles:
if template_content_role['name'] == role['name']: if template_content_role['name'] == role['name']:
update_role_meta = registry.update_role_metadata(req.context, role['id'], template_content_role) registry.update_role_metadata(
req.context, role['id'], template_content_role)
role_exist = 'true' role_exist = 'true'
if role_exist == 'false': if role_exist == 'false':
template_content_role['cluster_id'] = cluster_id template_content_role['cluster_id'] = cluster_id
add_role_meta = registry.add_role_metadata(req.context, template_content_role) registry.add_role_metadata(
req.context, template_content_role)
cinder_volumes = registry.list_cinder_volume_metadata(req.context, **params)
template_content_cinder_volumes = template_content['cinder_volumes'] self._import_cinder_volumes_to_db(
for template_content_cinder_volume in template_content_cinder_volumes: req, template_content['cinder_volumes'], roles)
cinder_volume_exist = 'false' self._import_services_disk_to_db(req,
roles = registry.get_roles_detail(req.context, **params) template_content['services_disk'],
for role in roles: roles)
if template_content_cinder_volume['role_id'] == role['name']:
template_content_cinder_volume['role_id'] = role['id']
for cinder_volume in cinder_volumes:
if template_content_cinder_volume['role_id'] == cinder_volume['role_id']:
update_cinder_volume_meta = registry.update_cinder_volume_metadata(req.context, cinder_volume['id'], template_content_cinder_volume)
cinder_volume_exist = 'true'
if cinder_volume_exist == 'false':
add_cinder_volumes = registry.add_cinder_volume_metadata(req.context, template_content_cinder_volume)
except exception.Invalid as e: except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req) raise HTTPBadRequest(explanation=e.msg, request=req)
return {"template":cluster_meta} return {"template": cluster_meta}
@utils.mutating @utils.mutating
def get_template_detail(self, req, template_id): def get_template_detail(self, req, template_id):
""" """
@ -513,7 +616,8 @@ class Controller(controller.BaseController):
""" """
self._enforce(req, 'get_template_detail') self._enforce(req, 'get_template_detail')
try: try:
-            template = registry.template_detail_metadata(req.context, template_id)
+            template = registry.template_detail_metadata(
+                req.context, template_id)
return {'template': template} return {'template': template}
except exception.NotFound as e: except exception.NotFound as e:
msg = (_("Failed to find template: %s") % msg = (_("Failed to find template: %s") %
@ -531,97 +635,104 @@ class Controller(controller.BaseController):
content_type="text/plain") content_type="text/plain")
except exception.InUseByStore as e: except exception.InUseByStore as e:
msg = (_("template %(id)s could not be get because it is in use: " msg = (_("template %(id)s could not be get because it is in use: "
"%(exc)s") % {"id": template_id, "exc": utils.exception_to_str(e)}) "%(exc)s") % {"id": template_id,
"exc": utils.exception_to_str(e)})
LOG.error(msg) LOG.error(msg)
raise HTTPConflict(explanation=msg, raise HTTPConflict(explanation=msg,
request=req, request=req,
content_type="text/plain") content_type="text/plain")
else: else:
return Response(body='', status=200) return Response(body='', status=200)
@utils.mutating @utils.mutating
def get_template_lists(self, req): def get_template_lists(self, req):
self._enforce(req, 'get_template_lists') self._enforce(req, 'get_template_lists')
params = self._get_query_params(req) params = self._get_query_params(req)
try: try:
-            template_lists = registry.template_lists_metadata(req.context, **params)
+            template_lists = registry.template_lists_metadata(
+                req.context, **params)
except exception.Invalid as e: except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req) raise HTTPBadRequest(explanation=e.msg, request=req)
return dict(template=template_lists) return dict(template=template_lists)
class TemplateDeserializer(wsgi.JSONRequestDeserializer): class TemplateDeserializer(wsgi.JSONRequestDeserializer):
"""Handles deserialization of specific controller method requests.""" """Handles deserialization of specific controller method requests."""
def _deserialize(self, request): def _deserialize(self, request):
result = {} result = {}
result["template"] = utils.get_template_meta(request) result["template"] = utils.get_template_meta(request)
return result return result
def add_template(self, request): def add_template(self, request):
return self._deserialize(request) return self._deserialize(request)
def update_template(self, request): def update_template(self, request):
return self._deserialize(request) return self._deserialize(request)
def export_db_to_json(self, request): def export_db_to_json(self, request):
return self._deserialize(request) return self._deserialize(request)
def import_json_to_template(self, request): def import_json_to_template(self, request):
return self._deserialize(request) return self._deserialize(request)
def import_template_to_db(self, request): def import_template_to_db(self, request):
return self._deserialize(request) return self._deserialize(request)
class TemplateSerializer(wsgi.JSONResponseSerializer): class TemplateSerializer(wsgi.JSONResponseSerializer):
"""Handles serialization of specific controller method responses.""" """Handles serialization of specific controller method responses."""
def __init__(self): def __init__(self):
self.notifier = notifier.Notifier() self.notifier = notifier.Notifier()
def add_template(self, response, result): def add_template(self, response, result):
template = result['template'] template = result['template']
response.status = 201 response.status = 201
response.headers['Content-Type'] = 'application/json' response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(template=template)) response.body = self.to_json(dict(template=template))
return response return response
def delete_template(self, response, result): def delete_template(self, response, result):
template = result['template'] template = result['template']
response.status = 201 response.status = 201
response.headers['Content-Type'] = 'application/json' response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(template=template)) response.body = self.to_json(dict(template=template))
return response return response
def get_template_detail(self, response, result): def get_template_detail(self, response, result):
template = result['template'] template = result['template']
response.status = 201 response.status = 201
response.headers['Content-Type'] = 'application/json' response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(template=template)) response.body = self.to_json(dict(template=template))
return response return response
def update_template(self, response, result): def update_template(self, response, result):
template = result['template'] template = result['template']
response.status = 201 response.status = 201
response.headers['Content-Type'] = 'application/json' response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(template=template)) response.body = self.to_json(dict(template=template))
return response return response
def export_db_to_json(self, response, result): def export_db_to_json(self, response, result):
response.status = 201 response.status = 201
response.headers['Content-Type'] = 'application/json' response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(result) response.body = self.to_json(result)
return response return response
def import_json_to_template(self, response, result): def import_json_to_template(self, response, result):
response.status = 201 response.status = 201
response.headers['Content-Type'] = 'application/json' response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(result) response.body = self.to_json(result)
return response return response
def import_template_to_db(self, response, result): def import_template_to_db(self, response, result):
response.status = 201 response.status = 201
response.headers['Content-Type'] = 'application/json' response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(result) response.body = self.to_json(result)
return response return response
def create_resource(): def create_resource():
"""Templates resource factory method""" """Templates resource factory method"""
deserializer = TemplateDeserializer() deserializer = TemplateDeserializer()
View File
@ -32,6 +32,7 @@ _LI = i18n._LI
class ImageActionsController(object): class ImageActionsController(object):
def __init__(self, db_api=None, policy_enforcer=None, notifier=None, def __init__(self, db_api=None, policy_enforcer=None, notifier=None,
store_api=None): store_api=None):
self.db_api = db_api or daisy.db.get_api() self.db_api = db_api or daisy.db.get_api()
@ -39,7 +40,7 @@ class ImageActionsController(object):
self.notifier = notifier or daisy.notifier.Notifier() self.notifier = notifier or daisy.notifier.Notifier()
self.store_api = store_api or glance_store self.store_api = store_api or glance_store
self.gateway = daisy.gateway.Gateway(self.db_api, self.store_api, self.gateway = daisy.gateway.Gateway(self.db_api, self.store_api,
self.notifier, self.policy) self.notifier, self.policy)
@utils.mutating @utils.mutating
def deactivate(self, req, image_id): def deactivate(self, req, image_id):
View File
@ -33,6 +33,7 @@ _LE = i18n._LE
class ImageDataController(object): class ImageDataController(object):
def __init__(self, db_api=None, store_api=None, def __init__(self, db_api=None, store_api=None,
policy_enforcer=None, notifier=None, policy_enforcer=None, notifier=None,
gateway=None): gateway=None):
@ -42,7 +43,7 @@ class ImageDataController(object):
policy = policy_enforcer or daisy.api.policy.Enforcer() policy = policy_enforcer or daisy.api.policy.Enforcer()
notifier = notifier or daisy.notifier.Notifier() notifier = notifier or daisy.notifier.Notifier()
gateway = daisy.gateway.Gateway(db_api, store_api, gateway = daisy.gateway.Gateway(db_api, store_api,
notifier, policy) notifier, policy)
self.gateway = gateway self.gateway = gateway
def _restore(self, image_repo, image): def _restore(self, image_repo, image):
View File
@ -38,6 +38,7 @@ _ = i18n._
class ImageMembersController(object): class ImageMembersController(object):
def __init__(self, db_api=None, policy_enforcer=None, notifier=None, def __init__(self, db_api=None, policy_enforcer=None, notifier=None,
store_api=None): store_api=None):
self.db_api = db_api or daisy.db.get_api() self.db_api = db_api or daisy.db.get_api()
@ -45,7 +46,7 @@ class ImageMembersController(object):
self.notifier = notifier or daisy.notifier.Notifier() self.notifier = notifier or daisy.notifier.Notifier()
self.store_api = store_api or glance_store self.store_api = store_api or glance_store
self.gateway = daisy.gateway.Gateway(self.db_api, self.store_api, self.gateway = daisy.gateway.Gateway(self.db_api, self.store_api,
self.notifier, self.policy) self.notifier, self.policy)
@utils.mutating @utils.mutating
def create(self, req, image_id, member_id): def create(self, req, image_id, member_id):
@ -250,6 +251,7 @@ class RequestDeserializer(wsgi.JSONRequestDeserializer):
class ResponseSerializer(wsgi.JSONResponseSerializer): class ResponseSerializer(wsgi.JSONResponseSerializer):
def __init__(self, schema=None): def __init__(self, schema=None):
super(ResponseSerializer, self).__init__() super(ResponseSerializer, self).__init__()
self.schema = schema or get_schema() self.schema = schema or get_schema()
View File
@ -31,6 +31,7 @@ _ = i18n._
class Controller(object): class Controller(object):
def __init__(self, db_api=None, policy_enforcer=None, notifier=None, def __init__(self, db_api=None, policy_enforcer=None, notifier=None,
store_api=None): store_api=None):
self.db_api = db_api or daisy.db.get_api() self.db_api = db_api or daisy.db.get_api()
@ -38,7 +39,7 @@ class Controller(object):
self.notifier = notifier or daisy.notifier.Notifier() self.notifier = notifier or daisy.notifier.Notifier()
self.store_api = store_api or glance_store self.store_api = store_api or glance_store
self.gateway = daisy.gateway.Gateway(self.db_api, self.store_api, self.gateway = daisy.gateway.Gateway(self.db_api, self.store_api,
self.notifier, self.policy) self.notifier, self.policy)
@utils.mutating @utils.mutating
def update(self, req, image_id, tag_value): def update(self, req, image_id, tag_value):
@ -85,6 +86,7 @@ class Controller(object):
class ResponseSerializer(wsgi.JSONResponseSerializer): class ResponseSerializer(wsgi.JSONResponseSerializer):
def update(self, response, result): def update(self, response, result):
response.status_int = 204 response.status_int = 204
View File
@ -46,6 +46,7 @@ CONF.import_opt('container_formats', 'daisy.common.config',
class ImagesController(object): class ImagesController(object):
def __init__(self, db_api=None, policy_enforcer=None, notifier=None, def __init__(self, db_api=None, policy_enforcer=None, notifier=None,
store_api=None): store_api=None):
self.db_api = db_api or daisy.db.get_api() self.db_api = db_api or daisy.db.get_api()
@ -53,7 +54,7 @@ class ImagesController(object):
self.notifier = notifier or daisy.notifier.Notifier() self.notifier = notifier or daisy.notifier.Notifier()
self.store_api = store_api or glance_store self.store_api = store_api or glance_store
self.gateway = daisy.gateway.Gateway(self.db_api, self.store_api, self.gateway = daisy.gateway.Gateway(self.db_api, self.store_api,
self.notifier, self.policy) self.notifier, self.policy)
@utils.mutating @utils.mutating
def create(self, req, image, extra_properties, tags): def create(self, req, image, extra_properties, tags):
@ -188,8 +189,8 @@ class ImagesController(object):
self._do_add_locations(image, path[1], value) self._do_add_locations(image, path[1], value)
else: else:
             if ((hasattr(image, path_root) or
-                    path_root in image.extra_properties)
-                    and json_schema_version == 4):
+                    path_root in image.extra_properties) and
+                    json_schema_version == 4):
msg = _("Property %s already present.") msg = _("Property %s already present.")
raise webob.exc.HTTPConflict(msg % path_root) raise webob.exc.HTTPConflict(msg % path_root)
if hasattr(image, path_root): if hasattr(image, path_root):
@ -681,6 +682,7 @@ class RequestDeserializer(wsgi.JSONRequestDeserializer):
class ResponseSerializer(wsgi.JSONResponseSerializer): class ResponseSerializer(wsgi.JSONResponseSerializer):
def __init__(self, schema=None): def __init__(self, schema=None):
super(ResponseSerializer, self).__init__() super(ResponseSerializer, self).__init__()
self.schema = schema or get_schema() self.schema = schema or get_schema()
View File
@ -48,13 +48,14 @@ CONF = cfg.CONF
class NamespaceController(object): class NamespaceController(object):
def __init__(self, db_api=None, policy_enforcer=None, notifier=None): def __init__(self, db_api=None, policy_enforcer=None, notifier=None):
self.db_api = db_api or daisy.db.get_api() self.db_api = db_api or daisy.db.get_api()
self.policy = policy_enforcer or policy.Enforcer() self.policy = policy_enforcer or policy.Enforcer()
self.notifier = notifier or daisy.notifier.Notifier() self.notifier = notifier or daisy.notifier.Notifier()
self.gateway = daisy.gateway.Gateway(db_api=self.db_api, self.gateway = daisy.gateway.Gateway(db_api=self.db_api,
notifier=self.notifier, notifier=self.notifier,
policy_enforcer=self.policy) policy_enforcer=self.policy)
self.ns_schema_link = '/v2/schemas/metadefs/namespace' self.ns_schema_link = '/v2/schemas/metadefs/namespace'
self.obj_schema_link = '/v2/schemas/metadefs/object' self.obj_schema_link = '/v2/schemas/metadefs/object'
self.tag_schema_link = '/v2/schemas/metadefs/tag' self.tag_schema_link = '/v2/schemas/metadefs/tag'
@ -486,6 +487,7 @@ class RequestDeserializer(wsgi.JSONRequestDeserializer):
class ResponseSerializer(wsgi.JSONResponseSerializer): class ResponseSerializer(wsgi.JSONResponseSerializer):
def __init__(self, schema=None): def __init__(self, schema=None):
super(ResponseSerializer, self).__init__() super(ResponseSerializer, self).__init__()
self.schema = schema self.schema = schema
@ -781,20 +783,20 @@ def get_collection_schema():
def get_namespace_href(namespace): def get_namespace_href(namespace):
base_href = '/v2/metadefs/namespaces/%s' % namespace.namespace base_href = '/v2/metadefs/namespaces/%s' % namespace.namespace
return base_href return base_href
def get_object_href(namespace_name, metadef_object): def get_object_href(namespace_name, metadef_object):
base_href = ('/v2/metadefs/namespaces/%s/objects/%s' % base_href = ('/v2/metadefs/namespaces/%s/objects/%s' %
(namespace_name, metadef_object.name)) (namespace_name, metadef_object.name))
return base_href return base_href
def get_tag_href(namespace_name, metadef_tag): def get_tag_href(namespace_name, metadef_tag):
base_href = ('/v2/metadefs/namespaces/%s/tags/%s' % base_href = ('/v2/metadefs/namespaces/%s/tags/%s' %
(namespace_name, metadef_tag.name)) (namespace_name, metadef_tag.name))
return base_href return base_href
def create_resource(): def create_resource():
View File
@ -42,13 +42,14 @@ CONF = cfg.CONF
class MetadefObjectsController(object): class MetadefObjectsController(object):
def __init__(self, db_api=None, policy_enforcer=None, notifier=None): def __init__(self, db_api=None, policy_enforcer=None, notifier=None):
self.db_api = db_api or daisy.db.get_api() self.db_api = db_api or daisy.db.get_api()
self.policy = policy_enforcer or policy.Enforcer() self.policy = policy_enforcer or policy.Enforcer()
self.notifier = notifier or daisy.notifier.Notifier() self.notifier = notifier or daisy.notifier.Notifier()
self.gateway = daisy.gateway.Gateway(db_api=self.db_api, self.gateway = daisy.gateway.Gateway(db_api=self.db_api,
notifier=self.notifier, notifier=self.notifier,
policy_enforcer=self.policy) policy_enforcer=self.policy)
self.obj_schema_link = '/v2/schemas/metadefs/object' self.obj_schema_link = '/v2/schemas/metadefs/object'
def create(self, req, metadata_object, namespace): def create(self, req, metadata_object, namespace):
@ -294,6 +295,7 @@ class RequestDeserializer(wsgi.JSONRequestDeserializer):
class ResponseSerializer(wsgi.JSONResponseSerializer): class ResponseSerializer(wsgi.JSONResponseSerializer):
def __init__(self, schema=None): def __init__(self, schema=None):
super(ResponseSerializer, self).__init__() super(ResponseSerializer, self).__init__()
self.schema = schema or get_schema() self.schema = schema or get_schema()
@ -324,9 +326,9 @@ class ResponseSerializer(wsgi.JSONResponseSerializer):
def get_object_href(namespace_name, metadef_object): def get_object_href(namespace_name, metadef_object):
base_href = ('/v2/metadefs/namespaces/%s/objects/%s' % base_href = ('/v2/metadefs/namespaces/%s/objects/%s' %
(namespace_name, metadef_object.name)) (namespace_name, metadef_object.name))
return base_href return base_href
def create_resource(): def create_resource():
View File
@ -40,13 +40,14 @@ _LI = i18n._LI
class NamespacePropertiesController(object): class NamespacePropertiesController(object):
def __init__(self, db_api=None, policy_enforcer=None, notifier=None): def __init__(self, db_api=None, policy_enforcer=None, notifier=None):
self.db_api = db_api or daisy.db.get_api() self.db_api = db_api or daisy.db.get_api()
self.policy = policy_enforcer or policy.Enforcer() self.policy = policy_enforcer or policy.Enforcer()
self.notifier = notifier or daisy.notifier.Notifier() self.notifier = notifier or daisy.notifier.Notifier()
self.gateway = daisy.gateway.Gateway(db_api=self.db_api, self.gateway = daisy.gateway.Gateway(db_api=self.db_api,
notifier=self.notifier, notifier=self.notifier,
policy_enforcer=self.policy) policy_enforcer=self.policy)
def _to_dict(self, model_property_type): def _to_dict(self, model_property_type):
# Convert the model PropertyTypes dict to a JSON encoding # Convert the model PropertyTypes dict to a JSON encoding
@ -213,6 +214,7 @@ class RequestDeserializer(wsgi.JSONRequestDeserializer):
class ResponseSerializer(wsgi.JSONResponseSerializer): class ResponseSerializer(wsgi.JSONResponseSerializer):
def __init__(self, schema=None): def __init__(self, schema=None):
super(ResponseSerializer, self).__init__() super(ResponseSerializer, self).__init__()
self.schema = schema self.schema = schema
@ -288,7 +290,7 @@ def get_collection_schema():
# individual property schema inside property collections # individual property schema inside property collections
namespace_properties_schema.required.remove('name') namespace_properties_schema.required.remove('name')
return daisy.schema.DictCollectionSchema('properties', return daisy.schema.DictCollectionSchema('properties',
namespace_properties_schema) namespace_properties_schema)
def create_resource(): def create_resource():
View File
@ -40,13 +40,14 @@ _LI = i18n._LI
class ResourceTypeController(object): class ResourceTypeController(object):
def __init__(self, db_api=None, policy_enforcer=None, notifier=None): def __init__(self, db_api=None, policy_enforcer=None, notifier=None):
self.db_api = db_api or daisy.db.get_api() self.db_api = db_api or daisy.db.get_api()
self.policy = policy_enforcer or policy.Enforcer() self.policy = policy_enforcer or policy.Enforcer()
self.notifier = notifier or daisy.notifier.Notifier() self.notifier = notifier or daisy.notifier.Notifier()
self.gateway = daisy.gateway.Gateway(db_api=self.db_api, self.gateway = daisy.gateway.Gateway(db_api=self.db_api,
notifier=self.notifier, notifier=self.notifier,
policy_enforcer=self.policy) policy_enforcer=self.policy)
def index(self, req): def index(self, req):
try: try:
@ -167,6 +168,7 @@ class RequestDeserializer(wsgi.JSONRequestDeserializer):
class ResponseSerializer(wsgi.JSONResponseSerializer): class ResponseSerializer(wsgi.JSONResponseSerializer):
def __init__(self, schema=None): def __init__(self, schema=None):
super(ResponseSerializer, self).__init__() super(ResponseSerializer, self).__init__()
self.schema = schema self.schema = schema
@ -253,7 +255,7 @@ def get_schema():
def get_collection_schema(): def get_collection_schema():
resource_type_schema = get_schema() resource_type_schema = get_schema()
return daisy.schema.CollectionSchema('resource_type_associations', return daisy.schema.CollectionSchema('resource_type_associations',
resource_type_schema) resource_type_schema)
def create_resource(): def create_resource():

View File

@ -46,8 +46,8 @@ class TagsController(object):
self.policy = policy_enforcer or policy.Enforcer() self.policy = policy_enforcer or policy.Enforcer()
self.notifier = notifier or daisy.notifier.Notifier() self.notifier = notifier or daisy.notifier.Notifier()
self.gateway = daisy.gateway.Gateway(db_api=self.db_api, self.gateway = daisy.gateway.Gateway(db_api=self.db_api,
notifier=self.notifier, notifier=self.notifier,
policy_enforcer=self.policy) policy_enforcer=self.policy)
self.tag_schema_link = '/v2/schemas/metadefs/tag' self.tag_schema_link = '/v2/schemas/metadefs/tag'
def create(self, req, namespace, tag_name): def create(self, req, namespace, tag_name):

View File

@ -53,7 +53,7 @@ class TasksController(object):
self.notifier = notifier or daisy.notifier.Notifier() self.notifier = notifier or daisy.notifier.Notifier()
self.store_api = store_api or glance_store self.store_api = store_api or glance_store
self.gateway = daisy.gateway.Gateway(self.db_api, self.store_api, self.gateway = daisy.gateway.Gateway(self.db_api, self.store_api,
self.notifier, self.policy) self.notifier, self.policy)
def create(self, req, task): def create(self, req, task):
task_factory = self.gateway.get_task_factory(req.context) task_factory = self.gateway.get_task_factory(req.context)
@ -229,8 +229,8 @@ class ResponseSerializer(wsgi.JSONResponseSerializer):
def __init__(self, task_schema=None, partial_task_schema=None): def __init__(self, task_schema=None, partial_task_schema=None):
super(ResponseSerializer, self).__init__() super(ResponseSerializer, self).__init__()
self.task_schema = task_schema or get_task_schema() self.task_schema = task_schema or get_task_schema()
self.partial_task_schema = (partial_task_schema self.partial_task_schema = (partial_task_schema or
or _get_partial_task_schema()) _get_partial_task_schema())
def _inject_location_header(self, response, task): def _inject_location_header(self, response, task):
location = self._get_task_location(task) location = self._get_task_location(task)

View File

@ -27,6 +27,17 @@ import sys
import eventlet import eventlet
from daisy.common import utils from daisy.common import utils
import glance_store
from oslo_config import cfg
from oslo_log import log as logging
import osprofiler.notifier
import osprofiler.web
from daisy.common import config
from daisy.common import exception
from daisy.common import wsgi
from daisy import notifier
from daisy.openstack.common import systemd
# Monkey patch socket, time, select, threads # Monkey patch socket, time, select, threads
eventlet.patcher.monkey_patch(all=False, socket=True, time=True, eventlet.patcher.monkey_patch(all=False, socket=True, time=True,
@ -40,17 +51,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir) sys.path.insert(0, possible_topdir)
import glance_store
from oslo_config import cfg
from oslo_log import log as logging
import osprofiler.notifier
import osprofiler.web
from daisy.common import config
from daisy.common import exception
from daisy.common import wsgi
from daisy import notifier
from daisy.openstack.common import systemd
CONF = cfg.CONF CONF = cfg.CONF
CONF.import_group("profiler", "daisy.common.wsgi") CONF.import_group("profiler", "daisy.common.wsgi")

View File

@ -31,8 +31,9 @@ period, we automatically sweep it up.
import os import os
import sys import sys
from oslo_log import log as logging from oslo_log import log as logging
from daisy.common import config
from daisy.image_cache import cleaner
# If ../glance/__init__.py exists, add ../ to Python search path, so that # If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python... # it will override what happens to be installed in /usr/(local/)lib/python...
@ -42,8 +43,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir) sys.path.insert(0, possible_topdir)
from daisy.common import config
from daisy.image_cache import cleaner
CONF = config.CONF CONF = config.CONF
logging.register_options(CONF) logging.register_options(CONF)

View File

@ -19,16 +19,16 @@
A simple cache management utility for daisy. A simple cache management utility for daisy.
""" """
from __future__ import print_function from __future__ import print_function
import functools import functools
import optparse import optparse
import os import os
import sys import sys
import time import time
from oslo_utils import timeutils from oslo_utils import timeutils
from daisy.common import utils from daisy.common import utils
from daisy.common import exception
import daisy.image_cache.client
from daisy.version import version_info as version
# If ../glance/__init__.py exists, add ../ to Python search path, so that # If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python... # it will override what happens to be installed in /usr/(local/)lib/python...
@ -38,10 +38,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir) sys.path.insert(0, possible_topdir)
from daisy.common import exception
import daisy.image_cache.client
from daisy.version import version_info as version
SUCCESS = 0 SUCCESS = 0
FAILURE = 1 FAILURE = 1

View File

@ -24,6 +24,11 @@ images to be pretched.
import os import os
import sys import sys
import glance_store
from oslo_log import log as logging
from daisy.common import config
from daisy.image_cache import prefetcher
# If ../glance/__init__.py exists, add ../ to Python search path, so that # If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python... # it will override what happens to be installed in /usr/(local/)lib/python...
@ -33,11 +38,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir) sys.path.insert(0, possible_topdir)
import glance_store
from oslo_log import log as logging
from daisy.common import config
from daisy.image_cache import prefetcher
CONF = config.CONF CONF = config.CONF
logging.register_options(CONF) logging.register_options(CONF)

View File

@ -25,6 +25,8 @@ import os
import sys import sys
from oslo_log import log as logging from oslo_log import log as logging
from daisy.common import config
from daisy.image_cache import pruner
# If ../glance/__init__.py exists, add ../ to Python search path, so that # If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python... # it will override what happens to be installed in /usr/(local/)lib/python...
@ -34,8 +36,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir) sys.path.insert(0, possible_topdir)
from daisy.common import config
from daisy.image_cache import pruner
CONF = config.CONF CONF = config.CONF
logging.register_options(CONF) logging.register_options(CONF)

View File

@ -30,6 +30,12 @@ import subprocess
import sys import sys
import tempfile import tempfile
import time import time
from oslo_config import cfg
from oslo_utils import units
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
from daisy.common import config
from daisy import i18n
# If ../glance/__init__.py exists, add ../ to Python search path, so that # If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python... # it will override what happens to be installed in /usr/(local/)lib/python...
@ -39,13 +45,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir) sys.path.insert(0, possible_topdir)
from oslo_config import cfg
from oslo_utils import units
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
from daisy.common import config
from daisy import i18n
_ = i18n._ _ = i18n._

View File

@ -29,15 +29,6 @@ from __future__ import print_function
import os import os
import sys import sys
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'daisy', '__init__.py')):
sys.path.insert(0, possible_topdir)
from oslo_config import cfg from oslo_config import cfg
from oslo_db.sqlalchemy import migration from oslo_db.sqlalchemy import migration
from oslo_log import log as logging from oslo_log import log as logging
@ -52,6 +43,14 @@ from daisy.db.sqlalchemy import api as db_api
from daisy.db.sqlalchemy import metadata from daisy.db.sqlalchemy import metadata
from daisy import i18n from daisy import i18n
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'daisy', '__init__.py')):
sys.path.insert(0, possible_topdir)
CONF = cfg.CONF CONF = cfg.CONF
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)

View File

@ -23,8 +23,14 @@ Reference implementation server for Daisy orchestration
import os import os
import sys import sys
import eventlet import eventlet
from oslo_config import cfg
from oslo_log import log as logging
from daisy.common import exception
from daisy.common import config
from daisy.openstack.common import loopingcall
from daisy.orchestration.manager import OrchestrationManager
import six
# Monkey patch socket and time # Monkey patch socket and time
eventlet.patcher.monkey_patch(all=False, socket=True, time=True, thread=True) eventlet.patcher.monkey_patch(all=False, socket=True, time=True, thread=True)
@ -37,23 +43,12 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'daisy', '__init__.py')): if os.path.exists(os.path.join(possible_topdir, 'daisy', '__init__.py')):
sys.path.insert(0, possible_topdir) sys.path.insert(0, possible_topdir)
from oslo_config import cfg
from oslo_log import log as logging
import osprofiler.notifier
import osprofiler.web
from daisy.common import exception
from daisy.common import config
from daisy.common import utils
from daisy.common import wsgi
from daisy import notifier
from daisy.openstack.common import systemd
from daisy.openstack.common import loopingcall
from daisy.orchestration.manager import OrchestrationManager
CONF = cfg.CONF CONF = cfg.CONF
scale_opts = [ scale_opts = [
cfg.StrOpt('auto_scale_interval', default=60, cfg.StrOpt('auto_scale_interval', default=60,
help='Number of seconds between two checkings to compute auto scale status'), help='Number of seconds between two '
'checkings to compute auto scale status'),
] ]
CONF.register_opts(scale_opts, group='orchestration') CONF.register_opts(scale_opts, group='orchestration')
logging.register_options(CONF) logging.register_options(CONF)
@ -62,10 +57,11 @@ logging.register_options(CONF)
def fail(returncode, e): def fail(returncode, e):
sys.stderr.write("ERROR: %s\n" % six.text_type(e)) sys.stderr.write("ERROR: %s\n" % six.text_type(e))
def main(): def main():
try: try:
config.parse_args() config.parse_args()
logging.setup(CONF,'daisy') logging.setup(CONF, 'daisy')
timer = loopingcall.FixedIntervalLoopingCall( timer = loopingcall.FixedIntervalLoopingCall(
OrchestrationManager.find_auto_scale_cluster) OrchestrationManager.find_auto_scale_cluster)
timer.start(float(CONF.orchestration.auto_scale_interval)).wait() timer.start(float(CONF.orchestration.auto_scale_interval)).wait()

View File

@ -25,6 +25,16 @@ import os
import sys import sys
import eventlet import eventlet
from oslo_config import cfg
from oslo_log import log as logging
import osprofiler.notifier
import osprofiler.web
from daisy.common import config
from daisy.common import utils
from daisy.common import wsgi
from daisy import notifier
from daisy.openstack.common import systemd
# Monkey patch socket and time # Monkey patch socket and time
eventlet.patcher.monkey_patch(all=False, socket=True, time=True, thread=True) eventlet.patcher.monkey_patch(all=False, socket=True, time=True, thread=True)
@ -37,16 +47,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'daisy', '__init__.py')): if os.path.exists(os.path.join(possible_topdir, 'daisy', '__init__.py')):
sys.path.insert(0, possible_topdir) sys.path.insert(0, possible_topdir)
from oslo_config import cfg
from oslo_log import log as logging
import osprofiler.notifier
import osprofiler.web
from daisy.common import config
from daisy.common import utils
from daisy.common import wsgi
from daisy import notifier
from daisy.openstack.common import systemd
CONF = cfg.CONF CONF = cfg.CONF
CONF.import_group("profiler", "daisy.common.wsgi") CONF.import_group("profiler", "daisy.common.wsgi")

View File

@ -21,6 +21,13 @@ Glance Scrub Service
import os import os
import sys import sys
import glance_store
from oslo_config import cfg
from oslo_log import log as logging
from daisy.common import config
from daisy.openstack.common import systemd
from daisy import scrubber
# If ../glance/__init__.py exists, add ../ to Python search path, so that # If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python... # it will override what happens to be installed in /usr/(local/)lib/python...
@ -30,14 +37,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir) sys.path.insert(0, possible_topdir)
import glance_store
from oslo_config import cfg
from oslo_log import log as logging
from daisy.common import config
from daisy.openstack.common import systemd
from daisy import scrubber
CONF = cfg.CONF CONF = cfg.CONF
logging.register_options(CONF) logging.register_options(CONF)

View File

@ -27,6 +27,15 @@ import sys
import eventlet import eventlet
from daisy.common import utils from daisy.common import utils
from oslo.config import cfg
from oslo_log import log as logging
import osprofiler.notifier
import osprofiler.web
from daisy.common import config
from daisy.common import exception
from daisy.common import wsgi
from daisy import notifier
# Monkey patch socket, time, select, threads # Monkey patch socket, time, select, threads
eventlet.patcher.monkey_patch(socket=True, time=True, select=True, eventlet.patcher.monkey_patch(socket=True, time=True, select=True,
@ -40,15 +49,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir) sys.path.insert(0, possible_topdir)
from oslo.config import cfg
from oslo_log import log as logging
import osprofiler.notifier
import osprofiler.web
from daisy.common import config
from daisy.common import exception
from daisy.common import wsgi
from daisy import notifier
CONF = cfg.CONF CONF = cfg.CONF
CONF.import_group("profiler", "daisy.common.wsgi") CONF.import_group("profiler", "daisy.common.wsgi")

View File

@ -399,6 +399,7 @@ class PropertyDefinition(AttributeDefinition):
class RelationDefinition(AttributeDefinition): class RelationDefinition(AttributeDefinition):
"""A base class for Attributes defining cross-artifact relations""" """A base class for Attributes defining cross-artifact relations"""
def __init__(self, internal=False, **kwargs): def __init__(self, internal=False, **kwargs):
self.internal = internal self.internal = internal
kwargs.setdefault('mutable', False) kwargs.setdefault('mutable', False)
@ -482,6 +483,7 @@ class ArtifactPropertyDescriptor(object):
class ArtifactAttributes(object): class ArtifactAttributes(object):
"""A container class storing description of Artifact Type attributes""" """A container class storing description of Artifact Type attributes"""
def __init__(self): def __init__(self):
self.properties = {} self.properties = {}
self.dependencies = {} self.dependencies = {}

View File

@ -121,7 +121,7 @@ class SemVerString(String):
super(SemVerString, super(SemVerString,
self).__init__(validators=[(validate, self).__init__(validators=[(validate,
"Invalid semver string")], "Invalid semver string")],
**kwargs) **kwargs)
@ -436,7 +436,7 @@ class ArtifactReference(declarative.RelationDefinition):
if artifact.type_name not in type_names: if artifact.type_name not in type_names:
return False return False
if (type_version is not None and if (type_version is not None and
artifact.type_version != type_version): artifact.type_version != type_version):
return False return False
return True return True
@ -477,6 +477,7 @@ class ArtifactReferenceList(declarative.ListAttributeDefinition,
class Blob(object): class Blob(object):
"""A Binary object being part of the Artifact""" """A Binary object being part of the Artifact"""
def __init__(self, size=0, locations=None, checksum=None, item_key=None): def __init__(self, size=0, locations=None, checksum=None, item_key=None):
"""Initializes a new Binary Object for an Artifact """Initializes a new Binary Object for an Artifact

View File

@ -48,6 +48,7 @@ CONF.register_opts(plugins_opts)
class ArtifactsPluginLoader(object): class ArtifactsPluginLoader(object):
def __init__(self, namespace): def __init__(self, namespace):
self.mgr = enabled.EnabledExtensionManager( self.mgr = enabled.EnabledExtensionManager(
check_func=self._gen_check_func(), check_func=self._gen_check_func(),

View File

@ -44,6 +44,7 @@ _ = i18n._
class BaseStrategy(object): class BaseStrategy(object):
def __init__(self): def __init__(self):
self.auth_token = None self.auth_token = None
# TODO(sirp): Should expose selecting public/internal/admin URL. # TODO(sirp): Should expose selecting public/internal/admin URL.
@ -62,6 +63,7 @@ class BaseStrategy(object):
class NoAuthStrategy(BaseStrategy): class NoAuthStrategy(BaseStrategy):
def authenticate(self): def authenticate(self):
pass pass

View File

@ -27,6 +27,7 @@ _FATAL_EXCEPTION_FORMAT_ERRORS = False
class RedirectException(Exception): class RedirectException(Exception):
def __init__(self, url): def __init__(self, url):
self.url = urlparse.urlparse(url) self.url = urlparse.urlparse(url)
@ -336,13 +337,16 @@ class TaskException(DaisyException):
class BadTaskConfiguration(DaisyException): class BadTaskConfiguration(DaisyException):
message = _("Task was not configured properly") message = _("Task was not configured properly")
class InstallException(DaisyException): class InstallException(DaisyException):
message = _("Cluster installtation raise exception") message = _("Cluster installtation raise exception")
class InstallTimeoutException(DaisyException): class InstallTimeoutException(DaisyException):
message = _( message = _(
"Time out, during install TECS components to cluster %(cluster_id)s") "Time out, during install TECS components to cluster %(cluster_id)s")
class TaskNotFound(TaskException, NotFound): class TaskNotFound(TaskException, NotFound):
message = _("Task with the given id %(task_id)s was not found") message = _("Task with the given id %(task_id)s was not found")
@ -566,23 +570,32 @@ class InvalidJsonPatchPath(JsonPatchException):
class InvalidNetworkConfig(DaisyException): class InvalidNetworkConfig(DaisyException):
pass pass
class InvalidIP(DaisyException): class InvalidIP(DaisyException):
pass pass
class OSInstallFailed(DaisyException): class OSInstallFailed(DaisyException):
message = _("os installtation failed.") message = _("os installtation failed.")
class IMPIOprationFailed(DaisyException): class IMPIOprationFailed(DaisyException):
message = _("ipmi command failed.") message = _("ipmi command failed.")
class ThreadBinException(DaisyException): class ThreadBinException(DaisyException):
def __init__(self, *args): def __init__(self, *args):
super(ThreadBinException, self).__init__(*args) super(ThreadBinException, self).__init__(*args)
class SubprocessCmdFailed(DaisyException): class SubprocessCmdFailed(DaisyException):
message = _("suprocess command failed.") message = _("suprocess command failed.")
class DeleteConstrainted(DaisyException): class DeleteConstrainted(DaisyException):
message = _("delete is not allowed.") message = _("delete is not allowed.")
class TrustMeFailed(DaisyException):
message = _("Trust me script failed.")

View File

@ -251,8 +251,8 @@ class RPCClient(client.BaseClient):
# checking if content contains the '_error' key, # checking if content contains the '_error' key,
# verify if it is an instance of dict - since the # verify if it is an instance of dict - since the
# RPC call may have returned something different. # RPC call may have returned something different.
if self.raise_exc and (isinstance(content, dict) if self.raise_exc and (isinstance(content, dict) and
and '_error' in content): '_error' in content):
error = content['_error'] error = content['_error']
try: try:
exc_cls = imp.import_class(error['cls']) exc_cls = imp.import_class(error['cls'])

View File

@ -12,11 +12,6 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
__all__ = [
'run',
]
from oslo_concurrency import lockutils from oslo_concurrency import lockutils
from oslo_log import log as logging from oslo_log import log as logging
from oslo_utils import excutils from oslo_utils import excutils
@ -28,6 +23,9 @@ from daisy.common.scripts import utils as script_utils
from daisy.common import store_utils from daisy.common import store_utils
from daisy.common import utils as common_utils from daisy.common import utils as common_utils
from daisy import i18n from daisy import i18n
__all__ = [
'run',
]
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -150,7 +148,7 @@ def set_image_data(image, uri, task_id):
data_iter = None data_iter = None
try: try:
LOG.info(_LI("Task %(task_id)s: Got image data uri %(data_uri)s to be " LOG.info(_LI("Task %(task_id)s: Got image data uri %(data_uri)s to be "
"imported") % {"data_uri": uri, "task_id": task_id}) "imported") % {"data_uri": uri, "task_id": task_id})
data_iter = script_utils.get_image_data_iter(uri) data_iter = script_utils.get_image_data_iter(uri)
image.set_data(data_iter) image.set_data(data_iter)
except Exception as e: except Exception as e:

View File

@ -12,7 +12,12 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import urllib2
from oslo_log import log as logging
from daisy.common import exception
from daisy import i18n
__all__ = [ __all__ = [
'get_task', 'get_task',
'unpack_task_input', 'unpack_task_input',
@ -22,14 +27,6 @@ __all__ = [
] ]
import urllib2
from oslo_log import log as logging
from daisy.common import exception
from daisy import i18n
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
_ = i18n._ _ = i18n._
_LE = i18n._LE _LE = i18n._LE
@ -100,7 +97,7 @@ def validate_location_uri(location):
"source of image data.") "source of image data.")
# NOTE: raise Exception and let the encompassing block save # NOTE: raise Exception and let the encompassing block save
# the error msg in the task.message. # the error msg in the task.message.
raise StandardError(msg) raise Exception(msg)
else: else:
# TODO(nikhil): add other supported uris # TODO(nikhil): add other supported uris

View File

@ -25,6 +25,7 @@ _ = i18n._
class DBVersion(object): class DBVersion(object):
def __init__(self, components_long, prerelease, build): def __init__(self, components_long, prerelease, build):
""" """
Creates a DBVersion object out of 3 component fields. This initializer Creates a DBVersion object out of 3 component fields. This initializer
@ -54,8 +55,8 @@ class DBVersion(object):
other.version == self.version) other.version == self.version)
def __ne__(self, other): def __ne__(self, other):
return (not isinstance(other, DBVersion) return (not isinstance(other, DBVersion) or
or self.version != other.version) self.version != other.version)
def __composite_values__(self): def __composite_values__(self):
long_version = _version_to_long(self.version) long_version = _version_to_long(self.version)

View File

@ -63,6 +63,7 @@ def is_multiple_swift_store_accounts_enabled():
class SwiftParams(object): class SwiftParams(object):
def __init__(self): def __init__(self):
if is_multiple_swift_store_accounts_enabled(): if is_multiple_swift_store_accounts_enabled():
self.params = self._load_config() self.params = self._load_config()
@ -71,8 +72,8 @@ class SwiftParams(object):
def _form_default_params(self): def _form_default_params(self):
default = {} default = {}
if (CONF.swift_store_user and CONF.swift_store_key if (CONF.swift_store_user and CONF.swift_store_key and
and CONF.swift_store_auth_address): CONF.swift_store_auth_address):
default['user'] = CONF.swift_store_user default['user'] = CONF.swift_store_user
default['key'] = CONF.swift_store_key default['key'] = CONF.swift_store_key
default['auth_address'] = CONF.swift_store_auth_address default['auth_address'] = CONF.swift_store_auth_address

View File

@ -21,6 +21,7 @@ System-level utilities and helper functions.
""" """
import errno import errno
from functools import reduce
try: try:
from eventlet import sleep from eventlet import sleep
@ -46,9 +47,11 @@ from oslo_utils import netutils
from oslo_utils import strutils from oslo_utils import strutils
import six import six
from webob import exc from webob import exc
import ConfigParser
from daisy.common import exception from daisy.common import exception
from daisy import i18n from daisy import i18n
from ironicclient import client as ironic_client
CONF = cfg.CONF CONF = cfg.CONF
@ -73,6 +76,11 @@ IMAGE_META_HEADERS = ['x-image-meta-location', 'x-image-meta-size',
DAISY_TEST_SOCKET_FD_STR = 'DAISY_TEST_SOCKET_FD' DAISY_TEST_SOCKET_FD_STR = 'DAISY_TEST_SOCKET_FD'
DISCOVER_DEFAULTS = {
'listen_port': '5050',
'ironic_url': 'http://127.0.0.1:6385/v1',
}
def chunkreadable(iter, chunk_size=65536): def chunkreadable(iter, chunk_size=65536):
""" """
@ -135,6 +143,7 @@ MAX_COOP_READER_BUFFER_SIZE = 134217728 # 128M seems like a sane buffer limit
class CooperativeReader(object): class CooperativeReader(object):
""" """
An eventlet thread friendly class for reading in image data. An eventlet thread friendly class for reading in image data.
@ -144,6 +153,7 @@ class CooperativeReader(object):
starvation, ie allows all threads to be scheduled periodically rather than starvation, ie allows all threads to be scheduled periodically rather than
having the same thread be continuously active. having the same thread be continuously active.
""" """
def __init__(self, fd): def __init__(self, fd):
""" """
:param fd: Underlying image file object :param fd: Underlying image file object
@ -223,10 +233,12 @@ class CooperativeReader(object):
class LimitingReader(object): class LimitingReader(object):
""" """
Reader designed to fail when reading image data past the configured Reader designed to fail when reading image data past the configured
allowable amount. allowable amount.
""" """
def __init__(self, data, limit): def __init__(self, data, limit):
""" """
:param data: Underlying image data object :param data: Underlying image data object
@ -330,72 +342,91 @@ def get_image_meta_from_headers(response):
result[key] = strutils.bool_from_string(result[key]) result[key] = strutils.bool_from_string(result[key])
return result return result
def get_host_meta(response): def get_host_meta(response):
result = {} result = {}
for key,value in response.json.items(): for key, value in response.json.items():
result[key] = value result[key] = value
return result return result
def get_hwm_meta(response):
result = {}
for key, value in response.json.items():
result[key] = value
return result
def get_cluster_meta(response): def get_cluster_meta(response):
result = {} result = {}
for key,value in response.json.items(): for key, value in response.json.items():
result[key] = value result[key] = value
return result return result
def get_component_meta(response): def get_component_meta(response):
result = {} result = {}
for key,value in response.json.items(): for key, value in response.json.items():
result[key] = value result[key] = value
return result return result
def get_service_meta(response): def get_service_meta(response):
result = {} result = {}
for key,value in response.json.items(): for key, value in response.json.items():
result[key] = value result[key] = value
return result return result
def get_template_meta(response): def get_template_meta(response):
result = {} result = {}
for key,value in response.json.items(): for key, value in response.json.items():
result[key] = value result[key] = value
return result return result
def get_role_meta(response): def get_role_meta(response):
result = {} result = {}
for key,value in response.json.items(): for key, value in response.json.items():
result[key] = value result[key] = value
return result return result
def get_config_file_meta(response): def get_config_file_meta(response):
result = {} result = {}
for key,value in response.json.items(): for key, value in response.json.items():
result[key] = value result[key] = value
return result return result
def get_config_set_meta(response): def get_config_set_meta(response):
result = {} result = {}
for key,value in response.json.items(): for key, value in response.json.items():
result[key] = value result[key] = value
return result return result
def get_config_meta(response): def get_config_meta(response):
result = {} result = {}
for key,value in response.json.items(): for key, value in response.json.items():
result[key] = value
return result
def get_network_meta(response):
result = {}
for key,value in response.json.items():
result[key] = value result[key] = value
return result return result
def get_dict_meta(response):
def get_network_meta(response):
result = {} result = {}
for key,value in response.json.items(): for key, value in response.json.items():
result[key] = value result[key] = value
return result return result
def get_dict_meta(response):
result = {}
for key, value in response.json.items():
result[key] = value
return result
def create_mashup_dict(image_meta): def create_mashup_dict(image_meta):
""" """
Returns a dictionary-like mashup of the image core properties Returns a dictionary-like mashup of the image core properties
@ -434,6 +465,7 @@ def safe_remove(path):
class PrettyTable(object): class PrettyTable(object):
"""Creates an ASCII art table for use in bin/glance """Creates an ASCII art table for use in bin/glance
Example: Example:
@ -442,6 +474,7 @@ class PrettyTable(object):
--- ----------------- ------------ ----- --- ----------------- ------------ -----
122 image 22 0 122 image 22 0
""" """
def __init__(self): def __init__(self):
self.columns = [] self.columns = []
@ -506,8 +539,9 @@ def get_terminal_size():
try: try:
height_width = struct.unpack('hh', fcntl.ioctl(sys.stderr.fileno(), height_width = struct.unpack('hh', fcntl.ioctl(sys.stderr.fileno(),
termios.TIOCGWINSZ, termios.TIOCGWINSZ,
struct.pack('HH', 0, 0))) struct.pack(
'HH', 0, 0)))
except Exception: except Exception:
pass pass
@ -802,3 +836,254 @@ def get_search_plugins():
ext_manager = stevedore.extension.ExtensionManager( ext_manager = stevedore.extension.ExtensionManager(
namespace, invoke_on_load=True) namespace, invoke_on_load=True)
return ext_manager.extensions return ext_manager.extensions
def get_host_min_mac(host_interfaces):
if not isinstance(host_interfaces, list):
host_interfaces = eval(host_interfaces)
macs = [interface['mac'] for interface in host_interfaces
if interface['type'] == 'ether' and interface['mac']]
min_mac = min(macs)
return min_mac
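# Standalone sketch of the selection above: among ethernet interfaces with a
# non-empty MAC, the lexicographically smallest MAC string wins. The interface
# dicts here are illustrative only.
host_interfaces = [
    {'type': 'ether', 'mac': '4c:09:b4:b2:78:8c'},
    {'type': 'ether', 'mac': '4c:09:b4:b2:78:8a'},
    {'type': 'bond', 'mac': ''},
]
macs = [i['mac'] for i in host_interfaces if i['type'] == 'ether' and i['mac']]
print(min(macs))  # 4c:09:b4:b2:78:8a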
def ip_into_int(ip):
"""
Switch ip string to decimalism integer..
:param ip: ip string
:return: decimalism integer
"""
return reduce(lambda x, y: (x << 8) + y, map(int, ip.split('.')))
def is_ip_in_cidr(ip, cidr):
"""
Check ip is in cidr
:param ip: Ip will be checked, like:192.168.1.2.
:param cidr: Ip range,like:192.168.0.0/24.
:return: If ip in cidr, return True, else return False.
"""
network = cidr.split('/')
mask = ~(2**(32 - int(network[1])) - 1)
return (ip_into_int(ip) & mask) == (ip_into_int(network[0]) & mask)
def is_ip_in_ranges(ip, ip_ranges):
"""
Check ip is in range
: ip: Ip will be checked, like:192.168.1.2.
: ip_ranges : Ip ranges, like:
[{'start':'192.168.0.10', 'end':'192.168.0.20'}
{'start':'192.168.0.50', 'end':'192.168.0.60'}]
:return: If ip in ip_ranges, return True, else return False.
"""
for ip_range in ip_ranges:
start_ip_int = ip_into_int(ip_range['start'])
end_ip_int = ip_into_int(ip_range['end'])
ip_int = ip_into_int(ip)
if ip_int >= start_ip_int and ip_int <= end_ip_int:
return True
return False
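# Self-contained sketch of the same arithmetic as ip_into_int / is_ip_in_cidr /
# is_ip_in_ranges above, with illustrative addresses.
from functools import reduce

def _ip_to_int(ip):
    return reduce(lambda x, y: (x << 8) + y, map(int, ip.split('.')))

def _in_cidr(ip, cidr):
    network, prefix = cidr.split('/')
    mask = ~(2 ** (32 - int(prefix)) - 1)
    return (_ip_to_int(ip) & mask) == (_ip_to_int(network) & mask)

def _in_ranges(ip, ip_ranges):
    return any(_ip_to_int(r['start']) <= _ip_to_int(ip) <= _ip_to_int(r['end'])
               for r in ip_ranges)

print(_ip_to_int('192.168.1.2'))                  # 3232235778
print(_in_cidr('192.168.1.2', '192.168.0.0/23'))  # True
print(_in_ranges('192.168.0.55',
                 [{'start': '192.168.0.10', 'end': '192.168.0.20'},
                  {'start': '192.168.0.50', 'end': '192.168.0.60'}]))  # True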
def get_ironicclient(): # pragma: no cover
"""Get Ironic client instance."""
config_discoverd = ConfigParser.ConfigParser(defaults=DISCOVER_DEFAULTS)
config_discoverd.read("/etc/ironic-discoverd/discoverd.conf")
ironic_url = config_discoverd.get("discoverd", "ironic_url")
args = {'os_auth_token': 'fake',
'ironic_url': ironic_url}
return ironic_client.get_client(1, **args)
def get_host_hw_info(host_interface):
host_hw_config = {}
ironicclient = get_ironicclient()
if host_interface:
min_mac = get_host_min_mac(host_interface)
try:
host_obj = ironicclient.physical_node.get(min_mac)
host_hw_config = dict([(f, getattr(host_obj, f, ''))
for f in ['system', 'memory', 'cpu',
'disks', 'interfaces',
'pci', 'devices']])
except Exception:
LOG.exception(_LE("Unable to find ironic data %s")
% Exception)
return host_hw_config
def get_dvs_interfaces(host_interfaces):
dvs_interfaces = []
if not isinstance(host_interfaces, list):
host_interfaces = eval(host_interfaces)
for interface in host_interfaces:
if not isinstance(interface, dict):
interface = eval(interface)
if ('vswitch_type' in interface and
interface['vswitch_type'] == 'dvs'):
dvs_interfaces.append(interface)
return dvs_interfaces
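# Small standalone sketch of the filter above: keep only interfaces whose
# vswitch_type is 'dvs'. The interface dicts are illustrative.
host_interfaces = [
    {'name': 'eth0', 'type': 'ether', 'vswitch_type': 'dvs'},
    {'name': 'eth1', 'type': 'ether', 'vswitch_type': 'ovs'},
    {'name': 'bond0', 'type': 'bond'},
]
dvs = [i for i in host_interfaces if i.get('vswitch_type') == 'dvs']
print([i['name'] for i in dvs])  # ['eth0']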
def get_clc_pci_info(pci_info):
clc_pci = []
flag1 = 'Intel Corporation Coleto Creek PCIe Endpoint'
flag2 = '8086:0435'
for pci in pci_info:
if flag1 in pci or flag2 in pci:
clc_pci.append(pci.split()[0])
return clc_pci
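# Standalone sketch of the match above: collect the bus address of any
# lspci-style line that mentions the Coleto Creek endpoint or the 8086:0435
# vendor:device id. The sample lines are illustrative.
pci_info = [
    '05:00.0 Co-processor: Intel Corporation Coleto Creek PCIe Endpoint (rev 30)',
    '06:00.0 Ethernet controller: Intel Corporation 82599ES',
]
flags = ('Intel Corporation Coleto Creek PCIe Endpoint', '8086:0435')
clc = [line.split()[0] for line in pci_info if any(f in line for f in flags)]
print(clc)  # ['05:00.0']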
def cpu_str_to_list(spec):
"""Parse a CPU set specification.
:param spec: cpu set string eg "1-4,^3,6"
Each element in the list is either a single
CPU number, a range of CPU numbers, or a
caret followed by a CPU number to be excluded
from a previous range.
:returns: a set of CPU indexes
"""
cpusets = []
if not spec:
return cpusets
cpuset_ids = set()
cpuset_reject_ids = set()
for rule in spec.split(','):
rule = rule.strip()
# Handle multi ','
if len(rule) < 1:
continue
# Note the count limit in the .split() call
range_parts = rule.split('-', 1)
if len(range_parts) > 1:
# So, this was a range; start by converting the parts to ints
try:
start, end = [int(p.strip()) for p in range_parts]
except ValueError:
raise exception.Invalid(_("Invalid range expression %r")
% rule)
# Make sure it's a valid range
if start > end:
raise exception.Invalid(_("Invalid range expression %r")
% rule)
# Add available CPU ids to set
cpuset_ids |= set(range(start, end + 1))
elif rule[0] == '^':
# Not a range, the rule is an exclusion rule; convert to int
try:
cpuset_reject_ids.add(int(rule[1:].strip()))
except ValueError:
raise exception.Invalid(_("Invalid exclusion "
"expression %r") % rule)
else:
# OK, a single CPU to include; convert to int
try:
cpuset_ids.add(int(rule))
except ValueError:
raise exception.Invalid(_("Invalid inclusion "
"expression %r") % rule)
# Use sets to handle the exclusion rules for us
cpuset_ids -= cpuset_reject_ids
cpusets = list(cpuset_ids)
cpusets.sort()
return cpusets
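# Compact standalone equivalent of the parser above (error handling omitted),
# reproducing the example from the docstring: "1-4,^3,6" -> [1, 2, 4, 6].
def _parse_cpu_spec(spec):
    include, exclude = set(), set()
    for rule in filter(None, (r.strip() for r in spec.split(','))):
        if rule.startswith('^'):
            exclude.add(int(rule[1:]))                 # exclusion rule
        elif '-' in rule:
            start, end = (int(p) for p in rule.split('-', 1))
            include |= set(range(start, end + 1))      # range rule
        else:
            include.add(int(rule))                     # single CPU
    return sorted(include - exclude)

print(_parse_cpu_spec("1-4,^3,6"))  # [1, 2, 4, 6]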
def cpu_list_to_str(cpu_list):
"""Parse a CPU list to string.
:param cpu_list: eg "[1,2,3,4,6,7]"
:returns: a string of CPU ranges, eg 1-4,6,7
"""
spec = ''
if not cpu_list:
return spec
cpu_list.sort()
count = 0
group_cpus = []
tmp_cpus = []
for cpu in cpu_list:
if count == 0:
init = cpu
tmp_cpus.append(cpu)
else:
if cpu == (init + count):
tmp_cpus.append(cpu)
else:
group_cpus.append(tmp_cpus)
tmp_cpus = []
count = 0
init = cpu
tmp_cpus.append(cpu)
count += 1
group_cpus.append(tmp_cpus)
for group in group_cpus:
if len(group) > 2:
group_spec = ("%s-%s" % (group[0], group[0]+len(group)-1))
else:
group_str = [str(num) for num in group]
group_spec = ','.join(group_str)
if spec:
spec += ',' + group_spec
else:
spec = group_spec
return spec
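# Standalone sketch of the inverse operation above: collapse consecutive CPU
# ids into ranges, reproducing the docstring example [1,2,3,4,6,7] -> "1-4,6,7".
def _cpus_to_spec(cpu_list):
    if not cpu_list:
        return ''
    cpus = sorted(cpu_list)
    groups, group = [], [cpus[0]]
    for cpu in cpus[1:]:
        if cpu == group[-1] + 1:
            group.append(cpu)          # still consecutive
        else:
            groups.append(group)       # close the previous run
            group = [cpu]
    groups.append(group)
    return ','.join("%s-%s" % (g[0], g[-1]) if len(g) > 2
                    else ','.join(str(c) for c in g)
                    for g in groups)

print(_cpus_to_spec([1, 2, 3, 4, 6, 7]))  # 1-4,6,7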
def simple_subprocess_call(cmd):
return_code = subprocess.call(cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return return_code
def translate_quotation_marks_for_shell(orig_str):
translated_str = ''
quotation_marks = '"'
quotation_marks_count = orig_str.count(quotation_marks)
if quotation_marks_count > 0:
replace_marks = '\\"'
translated_str = orig_str.replace(quotation_marks, replace_marks)
else:
translated_str = orig_str
return translated_str
def get_numa_node_cpus(host_cpu):
numa = {}
if 'numa_node0' in host_cpu:
numa['numa_node0'] = cpu_str_to_list(host_cpu['numa_node0'])
if 'numa_node1' in host_cpu:
numa['numa_node1'] = cpu_str_to_list(host_cpu['numa_node1'])
return numa
def get_numa_node_from_cpus(numa, str_cpus):
numa_nodes = []
cpu_list = cpu_str_to_list(str_cpus)
for cpu in cpu_list:
if cpu in numa['numa_node0']:
numa_nodes.append(0)
if cpu in numa['numa_node1']:
numa_nodes.append(1)
numa_nodes = list(set(numa_nodes))
numa_nodes.sort()
return numa_nodes
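# Short standalone sketch of the lookup above, with an assumed two-node CPU
# layout: report which NUMA nodes a CPU list touches.
numa = {'numa_node0': list(range(0, 8)), 'numa_node1': list(range(8, 16))}
cpu_list = [2, 3, 9]
numa_nodes = sorted({n for cpu in cpu_list
                     for n in (0, 1)
                     if cpu in numa['numa_node%d' % n]})
print(numa_nodes)  # [0, 1]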

View File

@ -0,0 +1,392 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2014 SoftLayer Technologies, Inc.
# Copyright 2015 Mirantis, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
from oslo_config import cfg
from oslo_log import log as logging
from webob import exc
from daisy.common import utils
from daisy import i18n
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
def get_total_cpus_for_numa(numa_cpus):
all_cpus = []
for value in numa_cpus.values():
all_cpus.extend(value)
return all_cpus
def get_default_os_num(host_roles_name):
if (('CONTROLLER_LB' in host_roles_name or
'CONTROLLER_HA' in host_roles_name) and
'COMPUTER' in host_roles_name):
# host with role of CONTROLLER and COMPUTER,
# isolate 4 cpu cores default for OS and TECS
os_cpu_num = 4
elif 'COMPUTER' in host_roles_name:
# host with role of COMPUTER only,
# isolate 2 cpu cores default for OS and TECS
os_cpu_num = 2
elif ('CONTROLLER_LB' in host_roles_name or
'CONTROLLER_HA' in host_roles_name):
# host with role of CONTROLLER only,
# don't isolate cpu for OS and TECS
os_cpu_num = 0
else:
os_cpu_num = 0
return os_cpu_num
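# Hedged usage sketch, assuming this snippet runs in the same module as the
# function above: a combined controller/compute host reserves 4 cores, a pure
# compute host 2, and a pure controller none.
print(get_default_os_num(['CONTROLLER_HA', 'COMPUTER']))  # 4
print(get_default_os_num(['COMPUTER']))                   # 2
print(get_default_os_num(['CONTROLLER_LB']))              # 0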
def pci_get_cpu_sets(numa_cpus, pci_info, device_numa_node):
high_pci_cpu_set = {}
msg = ''
return_code = 0
status = {'rc': 0, 'msg': ''}
if not numa_cpus:
msg = "The architecture of CPU does not supported"
LOG.info(msg)
return_code = 0
status['rc'] = return_code
status['msg'] = msg
return (status, high_pci_cpu_set)
# get Intel Corporation Coleto Creek PCIe Endpoint
clc_pci_lines = utils.get_clc_pci_info(pci_info)
if not clc_pci_lines:
msg = "No CLC card in system"
LOG.info(msg)
return_code = 0
status['rc'] = return_code
status['msg'] = msg
return (status, high_pci_cpu_set)
high_pci_cpusets = []
for clc_pci_line in clc_pci_lines:
numa_node = device_numa_node['0000:' + clc_pci_line]
numa_key = 'numa_node' + str(numa_node)
if numa_key not in numa_cpus:
msg = "Can't find numa_node '%s' for CLC" % numa_node
return_code = 1
status['rc'] = return_code
status['msg'] = msg
return (status, high_pci_cpu_set)
high_pci_cpusets += numa_cpus[numa_key]
high_pci_cpu_set['high'] = list(set(high_pci_cpusets))
total_cpus = get_total_cpus_for_numa(numa_cpus)
high_pci_cpu_set['low'] =\
list(set(total_cpus) - set(high_pci_cpu_set['high']))
LOG.debug("high_pci_cpu_set:%s" % high_pci_cpu_set)
return (status, high_pci_cpu_set)
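# Standalone sketch of the high/low split computed above: CPUs on the CLC
# card's NUMA node become the 'high' set and the remaining CPUs the 'low' set.
# The node assignment and CPU layout here are assumed for illustration.
numa_cpus = {'numa_node0': [0, 1, 2, 3], 'numa_node1': [4, 5, 6, 7]}
clc_numa_node = 1                       # assumed: the CLC card sits on node 1
high = sorted(set(numa_cpus['numa_node%d' % clc_numa_node]))
total = [c for cpus in numa_cpus.values() for c in cpus]
low = sorted(set(total) - set(high))
print({'high': high, 'low': low})       # {'high': [4, 5, 6, 7], 'low': [0, 1, 2, 3]}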
# if numa codes are not same, return -1
def get_numa_by_nic(nic_info, device_numa_node):
numa = []
try:
for nic in nic_info:
numa.append(device_numa_node[nic['bus']])
numa = list(set(numa))
numa_info = (-1 if len(numa) > 1 else numa[0])
except Exception:
numa_info = -1
return numa_info
def dvs_get_cpu_sets(dic_numas, nic_info, device_numa_node, cpu_num=4):
dvs_cpu_set = []
total_cpus = []
high_cpu_set = []
low_cpu_set = []
cpu_set = {}
msg = ''
return_code = 0
status = {}
if not dic_numas:
msg = "The architecture of CPU not supported"
LOG.info(msg)
return_code = 1
status['rc'] = return_code
status['msg'] = msg
return (status, cpu_set)
numa_node = get_numa_by_nic(nic_info, device_numa_node)
if numa_node < 0:
msg = 'Invalid numa node:%s' % numa_node
LOG.info(msg)
return_code = 3
status['rc'] = return_code
status['msg'] = msg
return (status, cpu_set)
numa_key = "numa_node%s" % numa_node
if numa_key not in dic_numas:
msg = "Can't find numa node '%s' for DVS" % numa_node
return_code = 4
status['rc'] = return_code
status['msg'] = msg
return (status, cpu_set)
if len(dic_numas[numa_key]) < (cpu_num + 1):
msg = "CPU on %s is not enough" % numa_key
LOG.info(msg)
return_code = 5
status['rc'] = return_code
status['msg'] = msg
return (status, cpu_set)
total_cpus = get_total_cpus_for_numa(dic_numas)
LOG.debug("total_cpu:%s" % total_cpus)
# sort
dic_numas[numa_key] = sorted(dic_numas[numa_key], reverse=True)
for i in dic_numas[numa_key][0:cpu_num]:
dvs_cpu_set.append(i)
high_cpu_set = dic_numas[numa_key]
low_cpu_set =\
list(set(total_cpus).difference(set(dic_numas[numa_key])))
LOG.debug("cpu used by dvs:%s" % dvs_cpu_set)
LOG.debug("low_cpu_set:%s" % low_cpu_set)
LOG.debug("high_cpu_set:%s" % high_cpu_set)
cpu_set['dvs'] = dvs_cpu_set
cpu_set['high'] = high_cpu_set
cpu_set['low'] = low_cpu_set
LOG.debug("cpu_set:%s" % cpu_set)
msg = 'Success'
status['rc'] = return_code
status['msg'] = msg
LOG.debug("status:%s" % status)
return (status, cpu_set)
def get_dvs_cpusets(numa_cpus, host_detail, host_hw_info):
dvs_nics_name = []
dvs_interfaces = utils.get_dvs_interfaces(host_detail['interfaces'])
for dvs_interface in dvs_interfaces:
if dvs_interface['type'] == 'ether':
dvs_nics_name.append(dvs_interface['name'])
if dvs_interface['type'] == 'bond':
if dvs_interface.get('slaves', None):
dvs_nics_name.extend(dvs_interface['slaves'])
elif dvs_interface.get('slave1', None) and \
dvs_interface.get('slave2', None):
slave_list = []
slave_list.append(dvs_interface['slave1'])
slave_list.append(dvs_interface['slave2'])
dvs_nics_name.extend(slave_list)
dvs_cpusets = {}
if dvs_nics_name:
nics_info = [{'name': nic_name, 'bus': interface['pci']}
for nic_name in dvs_nics_name
for interface in host_hw_info['interfaces'].values()
if nic_name == interface['name']]
dvs_cpu_num = 4
device_numa = {}
for device in host_hw_info['devices'].values():
device_numa.update(device)
LOG.info("DVS netcard info: '%s'" % nics_info)
(status, dvs_cpusets) = \
dvs_get_cpu_sets(numa_cpus,
nics_info,
device_numa,
dvs_cpu_num)
if status['rc'] != 0:
msg = "Get dvs cpu sets for host '%s' failed,\
detail error is '%s'"\
% (host_detail['name'], status['msg'])
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
return dvs_cpusets
def get_pci_cpusets(numa_cpus, host_hw_info):
device_numa = {}
for device in host_hw_info['devices'].values():
device_numa.update(device)
(status, pci_cpusets) = pci_get_cpu_sets(numa_cpus,
host_hw_info['pci'].values(),
device_numa)
if status['rc'] != 0:
LOG.error(status['msg'])
raise exc.HTTPBadRequest(explanation=status['msg'])
return pci_cpusets
def allocate_os_cpus(roles_name, pci_cpusets, dvs_cpusets):
os_cpus = []
if not dvs_cpusets and not pci_cpusets:
return os_cpus
os_cpu_num = get_default_os_num(roles_name)
if os_cpu_num == 0:
return os_cpus
os_available_cpuset = []
if ((pci_cpusets and pci_cpusets.get('high')) and
(not dvs_cpusets or not dvs_cpusets.get('high'))):
# if only pci exist, get OS cores from pci lowset
cpus_low = pci_cpusets.get('low', [])
cpus_high = pci_cpusets.get('high', [])
if dvs_cpusets and dvs_cpusets.get('high'):
# if only dvs exist, get OS cores from dvs lowset.
# if pci and dvs exist at the same time,
# get OS cores from lowset from dvs lowset.
cpus_low = list(set(dvs_cpusets.get('low', [])) -
set(dvs_cpusets.get('dvs', [])))
cpus_high = list(set(dvs_cpusets.get('high', [])) -
set(dvs_cpusets.get('dvs', [])))
cpus_low.sort()
cpus_high.sort()
os_available_cpuset = cpus_low + cpus_high
if not os_available_cpuset:
return os_cpus
if (len(os_available_cpuset) < os_cpu_num):
msg = 'cpus are not enough'
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
# cpu core 0 must give OS
cpu0 = 0
if cpu0 in os_available_cpuset:
os_available_cpuset.remove(cpu0)
os_available_cpuset = [cpu0] + os_available_cpuset
os_cpus = os_available_cpuset[:os_cpu_num]
return os_cpus
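# Standalone sketch of the core selection at the end of allocate_os_cpus above:
# candidates come from the 'low' set first, CPU 0 is forced to the front, and
# the first os_cpu_num entries are reserved for the OS. The sets below are
# illustrative only.
os_cpu_num = 4
cpus_low, cpus_high = [1, 2, 3, 8], [9, 10, 11]
available = sorted(cpus_low) + sorted(cpus_high)
if 0 in available:
    available.remove(0)
available = [0] + available            # CPU 0 always goes to the OS
print(available[:os_cpu_num])          # [0, 1, 2, 3]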
# when config role 'COMPUTER', allocate cpus for CLC
def allocate_clc_cpus(host_detail):
pci_cpu_sets = {}
if 'COMPUTER' not in host_detail.get('role', []):
return pci_cpu_sets
host_interfaces = host_detail.get('interfaces')
if host_interfaces:
host_hw_info = utils.get_host_hw_info(host_interfaces)
else:
return pci_cpu_sets
host_id = host_detail.get('id')
clc_pci = utils.get_clc_pci_info(host_hw_info['pci'].values())
if not clc_pci:
return pci_cpu_sets
else:
LOG.info("CLC card pci number: '%s'" % clc_pci)
numa_cpus = utils.get_numa_node_cpus(host_hw_info.get('cpu', {}))
if not numa_cpus or not numa_cpus['numa_node0']:
msg = "No NUMA CPU found from of host '%s'" % host_id
LOG.info(msg)
return pci_cpu_sets
LOG.info("Get CLC cpusets of host '%s'" % host_id)
pci_cpu_sets = get_pci_cpusets(numa_cpus, host_hw_info)
if not pci_cpu_sets or not pci_cpu_sets.get('high'):
msg = "Can't get CLC cpusets of host '%s'" % host_id
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
return pci_cpu_sets
# when config DVS on network plane mapping, allocate cpus for dvs
def allocate_dvs_cpus(host_detail):
dvs_cpu_sets = {}
host_interfaces = host_detail.get('interfaces')
if not host_interfaces:
return dvs_cpu_sets
dvs_interfaces = utils.get_dvs_interfaces(host_interfaces)
if not dvs_interfaces:
return dvs_cpu_sets
host_id = host_detail.get('id')
host_hw_info = utils.get_host_hw_info(host_interfaces)
numa_cpus = utils.get_numa_node_cpus(host_hw_info.get('cpu', {}))
if not numa_cpus or not numa_cpus['numa_node0']:
msg = "No NUMA CPU found from of host '%s'" % host_id
LOG.info(msg)
return dvs_cpu_sets
LOG.info("Get dvs cpusets of host '%s'" % host_id)
dvs_cpu_sets = get_dvs_cpusets(numa_cpus,
host_detail,
host_hw_info)
if not dvs_cpu_sets or not dvs_cpu_sets.get('high'):
msg = "Can't get dvs high cpusets of host '%s'" % host_id
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
return dvs_cpu_sets
def allocate_cpus(host_detail):
host_cpu_sets = {'dvs_high_cpuset': '',
'pci_high_cpuset': '',
'suggest_dvs_cpus': '',
'suggest_os_cpus': ''}
dvs_cpusets = allocate_dvs_cpus(host_detail)
pci_cpusets = allocate_clc_cpus(host_detail)
# no CLC and no DVS
if (not pci_cpusets and not dvs_cpusets):
return host_cpu_sets
host_roles_name = host_detail.get('role', [])
os_cpus = allocate_os_cpus(host_roles_name,
pci_cpusets,
dvs_cpusets)
host_cpu_sets['dvs_high_cpuset'] =\
utils.cpu_list_to_str(dvs_cpusets.get('high', []))
host_cpu_sets['pci_high_cpuset'] =\
utils.cpu_list_to_str(pci_cpusets.get('high', []))
host_cpu_sets['suggest_dvs_cpus'] =\
utils.cpu_list_to_str(dvs_cpusets.get('dvs', []))
host_cpu_sets['suggest_os_cpus'] = utils.cpu_list_to_str(os_cpus)
LOG.info("NUMA CPU usage for host %s: %s"
% (host_detail['id'], host_cpu_sets))
return host_cpu_sets

View File

@ -244,6 +244,7 @@ class Server(object):
This class requires initialize_glance_store set to True if This class requires initialize_glance_store set to True if
glance store needs to be initialized. glance store needs to be initialized.
""" """
def __init__(self, threads=1000, initialize_glance_store=False): def __init__(self, threads=1000, initialize_glance_store=False):
os.umask(0o27) # ensure files are created with the correct privileges os.umask(0o27) # ensure files are created with the correct privileges
self._logger = logging.getLogger("eventlet.wsgi.server") self._logger = logging.getLogger("eventlet.wsgi.server")
@ -638,6 +639,7 @@ class APIMapper(routes.Mapper):
class RejectMethodController(object): class RejectMethodController(object):
def reject(self, req, allowed_methods, *args, **kwargs): def reject(self, req, allowed_methods, *args, **kwargs):
LOG.debug("The method %s is not allowed for this resource" % LOG.debug("The method %s is not allowed for this resource" %
req.environ['REQUEST_METHOD']) req.environ['REQUEST_METHOD'])

View File

@ -40,7 +40,7 @@ class WSMEModelTransformer(object):
for name in names: for name in names:
value = getattr(db_entity, name, None) value = getattr(db_entity, name, None)
if value is not None: if value is not None:
if type(value) == datetime: if isinstance(value, datetime):
iso_datetime_value = timeutils.isotime(value) iso_datetime_value = timeutils.isotime(value)
values.update({name: iso_datetime_value}) values.update({name: iso_datetime_value})
else: else:

View File

@ -52,8 +52,8 @@ class ImageAsAnArtifact(v1_1.ImageAsAnArtifact):
if service['name'] == 'glance') if service['name'] == 'glance')
try: try:
client = daisyclient.Client(version=2, client = daisyclient.Client(version=2,
endpoint=glance_endpoint, endpoint=glance_endpoint,
token=context.auth_token) token=context.auth_token)
legacy_image = client.images.get(self.legacy_image_id) legacy_image = client.images.get(self.legacy_image_id)
except Exception: except Exception:
raise exception.InvalidArtifactPropertyValue( raise exception.InvalidArtifactPropertyValue(

View File

@ -607,4 +607,3 @@ def artifact_publish(client, artifact_id,
return client.artifact_publish(artifact_id=artifact_id, return client.artifact_publish(artifact_id=artifact_id,
type_name=type_name, type_name=type_name,
type_version=type_version) type_version=type_version)

View File

@ -272,8 +272,8 @@ def _filter_images(images, filters, context,
elif visibility == 'private': elif visibility == 'private':
if image['is_public']: if image['is_public']:
continue continue
if not (has_ownership or (context.is_admin if not (has_ownership or (context.is_admin and not
and not admin_as_user)): admin_as_user)):
continue continue
elif visibility == 'shared': elif visibility == 'shared':
if not is_member: if not is_member:
@ -387,8 +387,8 @@ def _image_get(context, image_id, force_show_deleted=False, status=None):
LOG.warn(_LW('Could not find image %s') % image_id) LOG.warn(_LW('Could not find image %s') % image_id)
raise exception.NotFound() raise exception.NotFound()
if image['deleted'] and not (force_show_deleted if image['deleted'] and not (force_show_deleted or
or context.can_see_deleted): context.can_see_deleted):
LOG.warn(_LW('Unable to get deleted image')) LOG.warn(_LW('Unable to get deleted image'))
raise exception.NotFound() raise exception.NotFound()
@ -625,7 +625,7 @@ def _image_locations_delete_all(context, image_id, delete_time=None):
delete_time=delete_time) delete_time=delete_time)
for i, loc in enumerate(DATA['locations']): for i, loc in enumerate(DATA['locations']):
if image_id == loc['image_id'] and loc['deleted'] == False: if image_id == loc['image_id'] and loc['deleted'] is False:
del DATA['locations'][i] del DATA['locations'][i]

File diff suppressed because it is too large.

Some files were not shown because too many files have changed in this diff.