Add support for CSV import and deployment

also add two packages to requirements.txt and test-requirements.txt.

Change-Id: I591e3958b4897537f9dba8c0ab38e7fe0159f11f
This commit is contained in:
grace.yu 2014-02-20 16:03:34 -08:00
parent 7f080e4e51
commit 7c4ef4a0d6
16 changed files with 802 additions and 36 deletions

307
bin/csvdeploy.py Executable file
View File

@ -0,0 +1,307 @@
#!/usr/bin/python
import os
import re
import csv
import ast
import sys
from copy import deepcopy
from multiprocessing import Process, Queue
from optparse import OptionParser
try:
from compass.apiclient.restful import Client
except ImportError:
curr_dir = os.path.dirname(os.path.realpath(__file__))
apiclient_dir = os.path.dirname(curr_dir) + '/compass/apiclient'
sys.path.append(apiclient_dir)
from restful import Client
DELIMITER = ","
# Sqlite tables
TABLES = {
'switch_config': {'columns': ['id', 'ip', 'filter_port']},
'switch': {'columns': ['id', 'ip', 'credential_data']},
'machine': {'columns': ['id', 'mac', 'port', 'vlan', 'switch_id']},
'cluster': {'columns': ['id', 'name', 'security_config',
'networking_config', 'partition_config',
'adapter_id', 'state']},
'cluster_host': {'columns': ['id', 'cluster_id', 'hostname', 'machine_id',
'config_data', 'state']},
'adapter': {'columns': ['id', 'name', 'os', 'target_system']},
'role': {'columns': ['id', 'name', 'target_system', 'description']}
}
def start(csv_dir, compass_url):
    """Start deploy both failed clusters and new clusters.

    Reads cluster.csv and cluster_host.csv from *csv_dir*, groups the
    hosts under their clusters, deploys each cluster in its own process
    via the Compass API at *compass_url*, and writes the collected
    progress URLs to <csv_dir>/progress.csv.

    :param csv_dir: directory containing the exported CSV files.
    :param compass_url: base URL of the Compass server.
    """
    # Get clusters and hosts data from CSV
    clusters_data = get_csv('cluster.csv', csv_dir)
    hosts_data = get_csv('cluster_host.csv', csv_dir)
    # Index clusters by id, each entry carrying its own list of hosts.
    data = {}
    for cluster in clusters_data:
        tmp = {}
        tmp['cluster_data'] = cluster
        tmp['hosts_data'] = []
        data[cluster['id']] = tmp
    for host in hosts_data:
        cluster_id = host['cluster_id']
        if cluster_id not in data:
            # A host referencing an unknown cluster is a fatal data error.
            print ("Unknown cluster_id=%s of the host! host_id=%s!"
                   % (cluster_id, host['id']))
            sys.exit(1)
        data[cluster_id]['hosts_data'].append(host)
    apiClient = _APIClient(compass_url)
    # Each worker process pushes its deploy response onto this queue.
    results_q = Queue()
    ps = []
    for elem in data:
        cluster_data = data[elem]['cluster_data']
        # NOTE: rebinds the outer hosts_data name; safe because the
        # grouping loop above has already finished with it.
        hosts_data = data[elem]['hosts_data']
        p = Process(target=apiClient.execute,
                    args=(cluster_data, hosts_data, results_q))
        ps.append(p)
        p.start()
    # Wait for every per-cluster deployment process to finish.
    for p in ps:
        p.join()
    progress_file = '/'.join((csv_dir, 'progress.csv'))
    cluster_headers = ['cluster_id', 'progress_url']
    host_headers = ['host_id', 'progress_url']
    with open(progress_file, 'wb') as f:
        print "Writing all progress information to %s......" % progress_file
        writer = csv.writer(f, delimiter=DELIMITER, quoting=csv.QUOTE_MINIMAL)
        # Drain the queue: one header+row pair per cluster, followed by
        # a header and one row per host of that cluster.
        while not results_q.empty():
            record = results_q.get()
            hosts = []
            cluster = [record['deployment']['cluster']['cluster_id'],
                       record['deployment']['cluster']['url']]
            writer.writerow(cluster_headers)
            writer.writerow(cluster)
            for elem in record['deployment']['hosts']:
                host = [elem['host_id'], elem['url']]
                hosts.append(host)
            writer.writerow(host_headers)
            writer.writerows(hosts)
    print "Done!\n"
def get_csv(fname, csv_dir):
    """Parse a CSV file into a list of row dictionaries.

    Dotted column headers (e.g. ``security_config.server.username``)
    describe nested fields exported from the database and are
    re-assembled into nested dictionaries.

    :param fname: CSV file name
    :param csv_dir: CSV files directory

    Return a list of dict which key is column name and value is its data.
    """
    headers = []
    rows = []
    file_dir = '/'.join((csv_dir, fname))
    with open(file_dir) as f:
        reader = csv.reader(f, delimiter=DELIMITER, quoting=csv.QUOTE_MINIMAL)
        # next(reader) works on both Python 2.6+ and Python 3;
        # the original reader.next() is Python 2 only.
        headers = next(reader)
        rows = [x for x in reader]
    result = []
    for row in rows:
        data = {}
        for col_name, value in zip(headers, row):
            if re.match(r'^[\d]+$', value):
                # the value should be an integer
                value = int(value)
            elif re.match(r'^\[(\'\w*\'){1}(\s*,\s*\'\w*\')*\]$', value):
                # the value should be a list of single-quoted words
                value = ast.literal_eval(value)
            elif value == 'None':
                # 'None' placeholders from the export become empty strings
                value = ''
            if col_name.find('.') > 0:
                # Rebuild a nested dict from the dotted column name,
                # starting at the innermost key, then merge it into the
                # row dict so sibling columns share subtrees.
                tmp_result = {}
                tmp_result[col_name.split('.')[-1]] = value
                keys = col_name.split('.')[::-1][1:]
                for key in keys:
                    tmp = {}
                    tmp[key] = tmp_result
                    tmp_result = tmp
                merge_dict(data, tmp_result)
            else:
                data[col_name] = value
        result.append(data)
    return result
def merge_dict(lhs, rhs, override=True):
    """Recursively merge the nested dict *rhs* into *lhs* in place.

    :param lhs: dict to be merged into.
    :type lhs: dict
    :param rhs: dict to merge from.
    :type rhs: dict
    :param override: when True, values from rhs replace existing
        values in lhs; when False, existing lhs values are kept.
    :type override: bool
    :raises: TypeError if lhs or rhs is not a dict.
    """
    if not rhs:
        # Nothing to merge; note this also skips the type checks below.
        return
    if not isinstance(lhs, dict):
        raise TypeError('lhs type is %s while expected is dict' % type(lhs),
                        lhs)
    if not isinstance(rhs, dict):
        raise TypeError('rhs type is %s while expected is dict' % type(rhs),
                        rhs)
    for key, new_value in rhs.items():
        current = lhs.get(key)
        if isinstance(new_value, dict) and isinstance(current, dict):
            # Both sides hold dicts for this key: descend and merge.
            merge_dict(current, new_value, override)
        elif override or key not in lhs:
            # Copy defensively so later mutation of rhs cannot leak in.
            lhs[key] = deepcopy(new_value)
class _APIClient(Client):
def __init__(self, url, headers=None, proxies=None, stream=None):
super(_APIClient, self).__init__(url, headers, proxies, stream)
def set_cluster_resource(self, cluster_id, resource, data):
url = "/clusters/%d/%s" % (cluster_id, resource)
return self._put(url, data=data)
def execute(self, cluster_data, hosts_data, resp_results):
""" The process including create or update a cluster and the cluster
configuration, add or update a host in the cluster, and deploy
the updated hosts.
:param cluster_data: the dictionary of cluster data
"""
cluster_id = cluster_data['id']
code, resp = self.get_cluster(cluster_id)
if code == 404:
# Create a new cluster
name = cluster_data['name']
adapter_id = cluster_data['adapter_id']
code, resp = self.add_cluster(name, adapter_id)
if code != 200:
print ("Failed to create the cluster which name is "
"%s!\nError message: %s" % (name, resp['message']))
sys.exit(1)
# Update the config(security, networking, partition) of the cluster
security_req = {}
networking_req = {}
partition_req = {}
security_req['security'] = cluster_data['security_config']
networking_req['networking'] = cluster_data['networking_config']
partition_req['partition'] = cluster_data['partition_config']
print "Update Security config......."
code, resp = self.set_cluster_resource(cluster_id, 'security',
security_req)
if code != 200:
print ("Failed to update Security config for cluster id=%s!\n"
"Error message: " % (cluster_id, resp['message']))
sys.exit(1)
print "Update Networking config......."
code, resp = self.set_cluster_resource(cluster_id, 'networking',
networking_req)
if code != 200:
print ("Failed to update Networking config for cluster id=%s!\n"
"Error message: %s" % (cluster_id, resp['message']))
sys.exit(1)
print "Update Partition config......."
code, resp = self.set_cluster_resource(cluster_id, 'partition',
partition_req)
if code != 200:
print ("Failed to update Partition config for cluster id=%s!\n"
"Error message: " % (cluster_id, resp['message']))
sys.exit(1)
deploy_list = []
deploy_hosts_data = []
machines_list = []
new_hosts_data = []
for record in hosts_data:
if record['state'] and int(record['deploy_action']):
deploy_list.append(record['id'])
deploy_hosts_data.append(record)
elif int(record['deploy_action']):
machines_list.append(record['machine_id'])
new_hosts_data.append(record)
if machines_list:
# add new hosts to the cluster
code, resp = self.add_hosts(cluster_id, machines_list)
if code != 200:
print ("Failed to add hosts to the cluster id=%s!\n"
"Error message: %s.\nfailed hosts are %s"
% (cluster_id, resp['message'], resp['failedMachines']))
sys.exit(1)
for record, host in zip(new_hosts_data, resp['cluster_hosts']):
record['id'] = host['id']
deploy_list.append(host['id'])
deploy_hosts_data.append(record)
# Update the config of each host in the cluster
for host in deploy_hosts_data:
req = {}
host_id = host['id']
print "Updating the config of host id=%s" % host['id']
req['hostname'] = host['hostname']
req.update(host['config_data'])
code, resp = self.update_host_config(int(host_id), raw_data=req)
if code != 200:
print ("Failed to update the config of the host id=%s!\n"
"Error message: %s" % (host_id, resp['message']))
sys.exit(1)
# Start to deploy the cluster
print "Start to deploy the cluster!....."
deploy_req = {"deploy": deploy_list}
code, resp = self.deploy_hosts(cluster_id, raw_data=deploy_req)
print "======>resp-----%s" % resp
print "---Cluster Info---"
print "cluster_id url"
print (" %s %s"
% (resp['deployment']['cluster']['cluster_id'],
resp['deployment']['cluster']['url']))
print "---Hosts Info-----"
print "host_id url"
for host in resp['deployment']['hosts']:
print " %s %s" % (host['host_id'], host['url'])
print "---------------------------------------------------------------"
print "\n"
resp_results.put(resp)
if __name__ == "__main__":
usage = "usage: %prog [options]"
parser = OptionParser(usage)
parser.add_option("-d", "--csv-dir", dest="csv_dir",
help="The directory of CSV files used for depolyment")
parser.add_option("-u", "--compass-url", dest="compass_url",
help="The URL of Compass server")
(options, args) = parser.parse_args()
if not os.exists(options.csv_dir):
print "Cannot find the directory: %s" % options.csv_dir
start(options.csv_dir, options.compass_url)

View File

@ -7,7 +7,7 @@ import os
from compass.apiclient.restful import Client
COMPASS_SERVER_URL = 'http://127.0.0.1'
COMPASS_SERVER_URL = 'http://127.0.0.1/api'
SWITCH_IP = '10.145.81.220'
SWITCH_SNMP_VERSION = 'v2c'
SWITCH_SNMP_COMMUNITY = 'public'

View File

@ -118,7 +118,7 @@ class Client(object):
if limit:
params['limit'] = limit
return self._get('/api/switches', params=params)
return self._get('/switches', params=params)
def get_switch(self, switch_id):
"""Lists details for a specified switch.
@ -126,7 +126,7 @@ class Client(object):
:param switch_id: switch id.
:type switch_id: int.
"""
return self._get('/api/switches/%s' % switch_id)
return self._get('/switches/%s' % switch_id)
def add_switch(self, switch_ip, version=None, community=None,
username=None, password=None, raw_data=None):
@ -167,7 +167,7 @@ class Client(object):
if password:
data['switch']['credential']['password'] = password
return self._post('/api/switches', data=data)
return self._post('/switches', data=data)
def update_switch(self, switch_id, ip_addr=None,
version=None, community=None,
@ -213,11 +213,11 @@ class Client(object):
if password:
data['switch']['credential']['password'] = password
return self._put('/api/switches/%s' % switch_id, data=data)
return self._put('/switches/%s' % switch_id, data=data)
def delete_switch(self, switch_id):
"""Not implemented in api."""
return self._delete('api/switches/%s' % switch_id)
return self._delete('/switches/%s' % switch_id)
def get_machines(self, switch_id=None, vlan_id=None,
port=None, limit=None):
@ -250,7 +250,7 @@ class Client(object):
if limit:
params['limit'] = limit
return self._get('/api/machines', params=params)
return self._get('/machines', params=params)
def get_machine(self, machine_id):
"""Lists the details for a specified machine.
@ -258,12 +258,12 @@ class Client(object):
:param machine_id: Return machine with the id.
:type machine_id: int.
"""
return self._get('/api/machines/%s' % machine_id)
return self._get('/machines/%s' % machine_id)
def get_clusters(self):
"""Lists the details for all clusters.
"""
return self._get('/api/clusters')
return self._get('/clusters')
def get_cluster(self, cluster_id):
"""Lists the details of the specified cluster.
@ -271,7 +271,7 @@ class Client(object):
:param cluster_id: cluster id.
:type cluster_id: int.
"""
return self._get('/api/clusters/%s' % cluster_id)
return self._get('/clusters/%d' % cluster_id)
def add_cluster(self, cluster_name, adapter_id, raw_data=None):
"""Creates a cluster by specified name and given adapter id.
@ -288,7 +288,7 @@ class Client(object):
data['cluster'] = {}
data['cluster']['name'] = cluster_name
data['cluster']['adapter_id'] = adapter_id
return self._post('/api/clusters', data=data)
return self._post('/clusters', data=data)
def add_hosts(self, cluster_id, machine_ids, raw_data=None):
"""add the specified machine(s) as the host(s) to the cluster.
@ -303,7 +303,7 @@ class Client(object):
data = raw_data
else:
data['addHosts'] = machine_ids
return self._post('/api/clusters/%s/action' % cluster_id, data=data)
return self._post('/clusters/%d/action' % cluster_id, data=data)
def remove_hosts(self, cluster_id, host_ids, raw_data=None):
"""remove the specified host(s) from the cluster.
@ -318,7 +318,7 @@ class Client(object):
data = raw_data
else:
data['removeHosts'] = host_ids
return self._post('/api/clusters/%s/action' % cluster_id, data=data)
return self._post('/clusters/%s/action' % cluster_id, data=data)
def replace_hosts(self, cluster_id, machine_ids, raw_data=None):
"""replace the cluster hosts with the specified machine(s).
@ -333,7 +333,7 @@ class Client(object):
data = raw_data
else:
data['replaceAllHosts'] = machine_ids
return self._post('/api/clusters/%s/action' % cluster_id, data=data)
return self._post('/clusters/%s/action' % cluster_id, data=data)
def deploy_hosts(self, cluster_id, raw_data=None):
"""Deploy the cluster.
@ -346,7 +346,7 @@ class Client(object):
data = raw_data
else:
data['deploy'] = []
return self._post('/api/clusters/%s/action' % cluster_id, data=data)
return self._post('/clusters/%d/action' % cluster_id, data=data)
@classmethod
def parse_security(cls, kwargs):
@ -376,7 +376,7 @@ class Client(object):
"""
data = {}
data['security'] = self.parse_security(kwargs)
return self._put('/api/clusters/%s/security' % cluster_id, data=data)
return self._put('/clusters/%d/security' % cluster_id, data=data)
@classmethod
def parse_networking(cls, kwargs):
@ -429,7 +429,7 @@ class Client(object):
"""
data = {}
data['networking'] = self.parse_networking(kwargs)
return self._put('/api/clusters/%s/networking' % cluster_id, data=data)
return self._put('/clusters/%d/networking' % cluster_id, data=data)
@classmethod
def parse_partition(cls, kwargs):
@ -462,7 +462,7 @@ class Client(object):
"""
data = {}
data['partition'] = self.parse_partition(kwargs)
return self._put('/api/clusters/%s/partition' % cluster_id, data=data)
return self._put('/clusters/%s/partition' % cluster_id, data=data)
def get_hosts(self, hostname=None, clustername=None):
"""Lists the details of hosts.
@ -484,7 +484,7 @@ class Client(object):
if clustername:
params['clustername'] = clustername
return self._get('/api/clusterhosts', params=params)
return self._get('/clusterhosts', params=params)
def get_host(self, host_id):
"""Lists the details for the specified host.
@ -492,7 +492,7 @@ class Client(object):
:param host_id: host id.
:type host_id: int.
"""
return self._get('/api/clusterhosts/%s' % host_id)
return self._get('/clusterhosts/%s' % host_id)
def get_host_config(self, host_id):
"""Lists the details of the config for the specified host.
@ -500,7 +500,7 @@ class Client(object):
:param host_id: host id.
:type host_id: int.
"""
return self._get('/api/clusterhosts/%s/config' % host_id)
return self._get('/clusterhosts/%s/config' % host_id)
def update_host_config(self, host_id, hostname=None,
roles=None, raw_data=None, **kwargs):
@ -564,7 +564,7 @@ class Client(object):
if roles:
data['roles'] = roles
return self._put('/api/clusterhosts/%s/config' % host_id, data)
return self._put('/clusterhosts/%s/config' % host_id, data)
def delete_from_host_config(self, host_id, delete_key):
"""Deletes one key in config for the host.
@ -574,7 +574,7 @@ class Client(object):
:param delete_key: the key in host config to be deleted.
:type delete_key: str.
"""
return self._delete('/api/clusterhosts/%s/config/%s' % (
return self._delete('/clusterhosts/%s/config/%s' % (
host_id, delete_key))
def get_adapters(self, name=None):
@ -590,7 +590,7 @@ class Client(object):
if name:
params['name'] = name
return self._get('/api/adapters', params=params)
return self._get('/adapters', params=params)
def get_adapter(self, adapter_id):
"""Lists details for the specified adapter.
@ -598,7 +598,7 @@ class Client(object):
:param adapter_id: adapter id.
:type adapter_id: int.
"""
return self._get('/api/adapters/%s' % adapter_id)
return self._get('/adapters/%s' % adapter_id)
def get_adapter_roles(self, adapter_id):
"""Lists roles to assign to hosts for the specified adapter.
@ -606,7 +606,7 @@ class Client(object):
:param adapter_id: adapter id.
:type adapter_id: int.
"""
return self._get('/api/adapters/%s/roles' % adapter_id)
return self._get('/adapters/%s/roles' % adapter_id)
def get_host_installing_progress(self, host_id):
"""Lists progress details for the specified host.
@ -614,7 +614,7 @@ class Client(object):
:param host_id: host id.
:type host_id: int.
"""
return self._get('/api/clusterhosts/%s/progress' % host_id)
return self._get('/clusterhosts/%s/progress' % host_id)
def get_cluster_installing_progress(self, cluster_id):
"""Lists progress details for the specified cluster.
@ -623,7 +623,7 @@ class Client(object):
:param cluster_id: int.
"""
return self._get('/api/clusters/%s/progress' % cluster_id)
return self._get('/clusters/%s/progress' % cluster_id)
def get_dashboard_links(self, cluster_id):
"""Lists links for dashboards of deployed cluster.
@ -633,4 +633,4 @@ class Client(object):
"""
params = {}
params['cluster_id'] = cluster_id
return self._get('/api/dashboardlinks', params)
return self._get('/dashboardlinks', params)

View File

@ -722,7 +722,7 @@ class TestClusterAPI(ApiTestCase):
host_id = json.loads(return_value.get_data())["cluster_hosts"][0]["id"]
deploy_request = json.dumps({"deploy": [host_id]})
return_value = self.app.post(url, data=deploy_request)
return_value = self.test_client.post(url, data=deploy_request)
self.assertEqual(202, return_value.status_code)
cluster_state = session.query(ClusterState).filter_by(id=1).first()
@ -836,13 +836,13 @@ class ClusterHostAPITest(ApiTestCase):
incorrect_conf['hostname'] = 'host_02'
incorrect_conf[
'networking']['interfaces']['management']['ip'] = 'xxx'
return_vlaue = self.app.put(
return_vlaue = self.test_client.put(
url, data=json.dumps(incorrect_conf))
self.assertEqual(400, return_vlaue.status_code)
# 3. Config put sucessfully
config['hostname'] = 'host_02'
return_value = self.app.put(url, data=json.dumps(config))
return_value = self.test_client.put(url, data=json.dumps(config))
self.assertEqual(200, return_value.status_code)
with database.session() as session:
host = session.query(ClusterHost).filter_by(id=2).first()
@ -1516,8 +1516,8 @@ class TestExport(ApiTestCase):
'role', 'switch_config']
for tname in talbes:
url = '/'.join(('/export', tname))
rv = self.app.get(url)
resp_data = rv.get_data()
return_value = self.test_client.get(url)
resp_data = return_value.get_data()
resp_data = resp_data.split('\n')
resp_data = csv.DictReader(resp_data)
expected_file = '/'.join((self.CSV_EXCEPTED_OUTPUT_DIR,

View File

View File

@ -0,0 +1,278 @@
#!/usr/bin/env python
from copy import deepcopy
import simplejson as json
import sys
import os
curr_dir = os.path.dirname(os.path.realpath(__file__))
compass_dir = os.path.dirname(os.path.dirname(os.path.dirname(curr_dir)))
sys.path.append(compass_dir)
from compass.api import app
from compass.db import database
from compass.db.model import Switch
from compass.db.model import Machine
from compass.db.model import Cluster
from compass.db.model import ClusterState
from compass.db.model import ClusterHost
from compass.db.model import HostState
from compass.db.model import Adapter
from compass.db.model import Role
from compass.db.model import SwitchConfig
from compass.utils import util
def setupDb():
    """Populate the test database with fixture data.

    Creates one switch_config row, one role, one adapter, four switches,
    eleven machines, two clusters (with merged networking configs), six
    cluster hosts, and the cluster/host state rows the CSV-deploy tests
    expect.  All inserts happen in a single database session.
    """
    # Cluster-level security credentials shared by every fixture cluster.
    SECURITY_CONFIG = {
        "security": {
            "server_credentials": {
                "username": "root",
                "password": "root"},
            "service_credentials": {
                "username": "service",
                "password": "admin"},
            "console_credentials": {
                "username": "console",
                "password": "admin"}
        }
    }
    # Baseline networking config; per-cluster IP ranges are merged in below.
    NET_CONFIG = {
        "networking": {
            "interfaces": {
                "management": {
                    "ip_start": "10.120.8.100",
                    "ip_end": "10.120.8.200",
                    "netmask": "255.255.255.0",
                    "gateway": "",
                    "nic": "eth0",
                    "promisc": 1
                },
                "tenant": {
                    "ip_start": "192.168.10.100",
                    "ip_end": "192.168.10.200",
                    "netmask": "255.255.255.0",
                    "gateway": "",
                    "nic": "eth1",
                    "promisc": 0
                },
                "public": {
                    "ip_start": "12.145.68.100",
                    "ip_end": "12.145.68.200",
                    "netmask": "255.255.255.0",
                    "gateway": "",
                    "nic": "eth2",
                    "promisc": 0
                },
                "storage": {
                    "ip_start": "172.29.8.100",
                    "ip_end": "172.29.8.200",
                    "netmask": "255.255.255.0",
                    "gateway": "",
                    "nic": "eth3",
                    "promisc": 0
                }
            },
            "global": {
                "nameservers": "8.8.8.8",
                "search_path": "ods.com",
                "gateway": "192.168.1.1",
                "proxy": "http://127.0.0.1:3128",
                "ntp_server": "127.0.0.1"
            }
        }
    }
    PAR_CONFIG = {
        "partition": "/home 20%;/tmp 10%;/var 30%;"
    }
    # Per-host config template; the "%s" IP placeholders are filled in
    # per host further down.
    HOST_CONFIG = {
        "networking": {
            "interfaces": {
                "management": {
                    "ip": "%s"
                },
                "tenant": {
                    "ip": "%s"
                }
            }
        },
        "roles": ["base"]
    }
    print "Setting up DB ..."
    with database.session() as session:
        # populate switch_config
        switch_config = SwitchConfig(ip='192.168.1.10', filter_port='1')
        session.add(switch_config)
        # populate role table
        role = Role(name='compute', target_system='openstack')
        session.add(role)
        # Populate one adapter to DB
        adapter = Adapter(name='Centos_openstack', os='Centos',
                          target_system='openstack')
        session.add(adapter)
        #Populate switches info to DB
        switches = [Switch(ip="192.168.2.1",
                           credential={"version": "2c",
                                       "community": "public"},
                           vendor="huawei",
                           state="under_monitoring"),
                    Switch(ip="192.168.2.2",
                           credential={"version": "2c",
                                       "community": "public"},
                           vendor="huawei",
                           state="under_monitoring"),
                    Switch(ip="192.168.2.3",
                           credential={"version": "2c",
                                       "community": "public"},
                           vendor="huawei",
                           state="under_monitoring"),
                    Switch(ip="192.168.2.4",
                           credential={"version": "2c",
                                       "community": "public"},
                           vendor="huawei",
                           state="under_monitoring")]
        session.add_all(switches)
        # Populate machines info to DB (ids 1-11, spread over switches 1-4)
        machines = [
            Machine(mac='00:0c:27:88:0c:a1', port='1', vlan='1',
                    switch_id=1),
            Machine(mac='00:0c:27:88:0c:a2', port='2', vlan='1',
                    switch_id=1),
            Machine(mac='00:0c:27:88:0c:a3', port='3', vlan='1',
                    switch_id=1),
            Machine(mac='00:0c:27:88:0c:b1', port='1', vlan='1',
                    switch_id=2),
            Machine(mac='00:0c:27:88:0c:b2', port='2', vlan='1',
                    switch_id=2),
            Machine(mac='00:0c:27:88:0c:b3', port='3', vlan='1',
                    switch_id=2),
            Machine(mac='00:0c:27:88:0c:c1', port='1', vlan='1',
                    switch_id=3),
            Machine(mac='00:0c:27:88:0c:c2', port='2', vlan='1',
                    switch_id=3),
            Machine(mac='00:0c:27:88:0c:c3', port='3', vlan='1',
                    switch_id=3),
            Machine(mac='00:0c:27:88:0c:d1', port='1', vlan='1',
                    switch_id=4),
            Machine(mac='00:0c:27:88:0c:d2', port='2', vlan='1',
                    switch_id=4),
        ]
        session.add_all(machines)
        # Popluate clusters into DB
        """
        a. cluster #1: a new machine will be added to it.
        b. cluster #2: a failed machine needs to be re-deployed.
        c. cluster #3: a new cluster with 3 hosts will be deployed.
        """
        clusters_networking_config = [
            {"networking":
                {"interfaces": {"management": {"ip_start": "10.120.1.100",
                                               "ip_end": "10.120.1.200"},
                                "tenant": {"ip_start": "192.168.1.100",
                                           "ip_end": "192.168.1.200"},
                                "public": {"ip_start": "12.145.1.100",
                                           "ip_end": "12.145.1.200"},
                                "storage": {"ip_start": "172.29.1.100",
                                            "ip_end": "172.29.1.200"}}}},
            {"networking":
                {"interfaces": {"management": {"ip_start": "10.120.2.100",
                                               "ip_end": "10.120.2.200"},
                                "tenant": {"ip_start": "192.168.2.100",
                                           "ip_end": "192.168.2.200"},
                                "public": {"ip_start": "12.145.2.100",
                                           "ip_end": "12.145.2.200"},
                                "storage": {"ip_start": "172.29.2.100",
                                            "ip_end": "172.29.2.200"}}}}
        ]
        cluster_names = ['cluster_01', 'cluster_02']
        for name, networking_config in zip(cluster_names,
                                           clusters_networking_config):
            # Overlay the per-cluster IP ranges on the baseline template.
            nconfig = deepcopy(NET_CONFIG)
            util.merge_dict(nconfig, networking_config)
            c = Cluster(
                name=name, adapter_id=1,
                security_config=json.dumps(SECURITY_CONFIG['security']),
                networking_config=json.dumps(nconfig['networking']),
                partition_config=json.dumps(PAR_CONFIG['partition']))
            session.add(c)
        # Populate hosts to each cluster: three hosts per cluster with
        # paired management/tenant IPs.
        host_mips = ['10.120.1.100', '10.120.1.101', '10.120.1.102',
                     '10.120.2.100', '10.120.2.101', '10.120.2.102']
        host_tips = ['192.168.1.100', '192.168.1.101', '192.168.1.102',
                     '192.168.2.100', '192.168.2.101', '192.168.2.102']
        hosts_config = []
        for mip, tip in zip(host_mips, host_tips):
            config = deepcopy(HOST_CONFIG)
            config['networking']['interfaces']['management']['ip'] = mip
            config['networking']['interfaces']['tenant']['ip'] = tip
            hosts_config.append(json.dumps(config))
        hosts = [
            ClusterHost(hostname='host_01', machine_id=1, cluster_id=1,
                        config_data=hosts_config[0]),
            ClusterHost(hostname='host_02', machine_id=2, cluster_id=1,
                        config_data=hosts_config[1]),
            ClusterHost(hostname='host_03', machine_id=3, cluster_id=1,
                        config_data=hosts_config[2]),
            ClusterHost(hostname='host_01', machine_id=4, cluster_id=2,
                        config_data=hosts_config[3]),
            ClusterHost(hostname='host_02', machine_id=5, cluster_id=2,
                        config_data=hosts_config[4]),
            ClusterHost(hostname='host_03', machine_id=6, cluster_id=2,
                        config_data=hosts_config[5])
        ]
        session.add_all(hosts)
        # Populate cluster state and host state: cluster 1 fully deployed,
        # cluster 2 failed (hosts 4 and 6 in ERROR).
        cluster_states = [
            ClusterState(id=1, state="READY", progress=1.0,
                         message="Successfully!"),
            ClusterState(id=2, state="ERROR", progress=0.5,
                         message="Failed!")
        ]
        session.add_all(cluster_states)
        host_states = [
            HostState(id=1, state="READY", progress=1.0,
                      message="Successfully!"),
            HostState(id=2, state="READY", progress=1.0,
                      message="Successfully!"),
            HostState(id=3, state="READY", progress=1.0,
                      message="Successfully!"),
            HostState(id=4, state="ERROR", progress=0.5,
                      message="Failed!"),
            HostState(id=5, state="READY", progress=1.0,
                      message="Successfully!"),
            HostState(id=6, state="ERROR", progress=1.0,
                      message="Failed!")
        ]
        session.add_all(host_states)
if __name__ == '__main__':
db_url, port = sys.argv[1:]
print db_url
try:
database.init(db_url)
database.create_db()
except Exception as e:
print "=====> Failed to create database"
print e
try:
setupDb()
except:
pass
print "Starting server ....."
print "port is ", port
app.run(use_reloader=False, host="0.0.0.0", port=port)

View File

@ -0,0 +1,141 @@
import simplejson as json
import os
import sys
import unittest2
import tempfile
import subprocess
import shutil
import signal
import socket
import time
from mock import Mock
curr_dir = os.path.dirname(os.path.realpath(__file__))
api_cmd_path = '/'.join((
os.path.dirname(os.path.dirname(os.path.dirname(curr_dir))), 'bin'))
sys.path.append(api_cmd_path)
import csvdeploy
os.environ['COMPASS_IGNORE_SETTING'] = 'true'
from compass.utils import setting_wrapper as setting
reload(setting)
from compass.db import database
from compass.db.model import Cluster
from compass.db.model import ClusterHost
from compass.utils import flags
from compass.utils import logsetting
from compass.apiclient.restful import Client
class ApiTestCase(unittest2.TestCase):
    """Base test case that boots a real API server subprocess.

    setUp creates a throwaway SQLite database, picks a free TCP port,
    and launches run_server.py against both; tearDown kills the server
    process group and removes the database directory.
    """

    def setUp(self):
        super(ApiTestCase, self).setUp()
        # Create database file
        try:
            self.db_dir = tempfile.mkdtemp()
            self.db_file = '/'.join((self.db_dir, 'app.db'))
        except Exception:
            sys.exit(2)
        database_url = '/'.join(('sqlite://', self.db_file))
        # Get a free random port for app server by binding port 0 and
        # reading back the kernel-assigned port.
        try:
            tmp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            tmp_socket.bind(('', 0))
            self.port = tmp_socket.getsockname()[-1]
            tmp_socket.close()
            # Give the OS time to release the port before reuse.
            time.sleep(5)
        except socket.error:
            sys.exit(1)
        cmd = '%s run_server.py %s %d' % (sys.executable, database_url,
                                          self.port)
        # os.setsid puts the server in its own process group so tearDown
        # can kill it (and any children) with one killpg call.
        self.proc = subprocess.Popen(cmd, shell=True,
                                     stderr=subprocess.PIPE,
                                     preexec_fn=os.setsid,
                                     cwd=curr_dir)
        # Wait for the server subprocess to finish starting up.
        time.sleep(5)
        # Initial database
        try:
            database.init(database_url)
        except Exception as e:
            print "======>", e

    def tearDown(self):
        super(ApiTestCase, self).tearDown()
        # Drop pooled connections before deleting the SQLite file.
        database.ENGINE.dispose()
        os.killpg(self.proc.pid, signal.SIGTERM)
        try:
            if os.path.exists(self.db_dir):
                shutil.rmtree(self.db_dir)
        except Exception:
            sys.exit(1)
class TestAPICommand(ApiTestCase):
    """End-to-end test of bin/csvdeploy.py against fixture CSV files."""

    # Directory holding the cluster.csv / cluster_host.csv fixtures.
    CSV_IMPORT_DIR = '/'.join((curr_dir, 'test_files'))

    def setUp(self):
        super(TestAPICommand, self).setUp()
        # Canned "202 Accepted" payload returned by the mocked deploy call.
        self.deploy_return_val = {
            'status': 'accepted',
            'deployment': {'cluster': {'cluster_id': 1,
                                       'url': '/clusters/1/progress'},
                           'hosts': [{'host_id': 1,
                                      'url': '/cluster_hosts/1/progress'}]}}

    def tearDown(self):
        super(TestAPICommand, self).tearDown()

    def test_start(self):
        """Run the CSV import, then verify the DB rows match the CSVs."""
        # The actual deployment is mocked out; only the import and the
        # cluster/host configuration calls hit the real server.
        Client.deploy_hosts = Mock(return_value=(202, self.deploy_return_val))
        url = "http://127.0.0.1:%d" % self.port
        csvdeploy.start(self.CSV_IMPORT_DIR, url)
        clusters = csvdeploy.get_csv('cluster.csv',
                                     csv_dir=self.CSV_IMPORT_DIR)
        with database.session() as session:
            # Each CSV cluster row must have a matching DB row whose
            # JSON config fields decode to the CSV's nested dicts.
            for csv_cluster in clusters:
                cluster_id = csv_cluster['id']
                cluster = session.query(Cluster)\
                                 .filter_by(id=cluster_id).first()
                self.assertIsNotNone(cluster)
                self.assertEqual(csv_cluster['name'], cluster.name)
                self.assertDictEqual(csv_cluster['security_config'],
                                     json.loads(cluster.security_config))
                self.maxDiff = None
                self.assertDictEqual(csv_cluster['networking_config'],
                                     json.loads(cluster.networking_config))
                self.assertEqual(csv_cluster['partition_config'],
                                 json.loads(cluster.partition_config))
                self.assertEqual(csv_cluster['adapter_id'], cluster.adapter_id)
                self.maxDiff = None
            # Same check for every host row in cluster_host.csv.
            hosts = csvdeploy.get_csv('cluster_host.csv',
                                      csv_dir=self.CSV_IMPORT_DIR)
            for csv_host in hosts:
                cluster_id = csv_host['cluster_id']
                hostname = csv_host['hostname']
                host_in_db = session.query(ClusterHost)\
                                    .filter_by(cluster_id=cluster_id,
                                               hostname=hostname).first()
                self.assertIsNotNone(host_in_db)
                self.assertEqual(csv_host['hostname'], host_in_db.hostname)
                self.assertEqual(csv_host['machine_id'],
                                 host_in_db.machine_id)
                self.assertDictEqual(csv_host['config_data'],
                                     json.loads(host_in_db.config_data))
                self.maxDiff = None
if __name__ == '__main__':
    # Initialize flag parsing and logging before running the test suite.
    flags.init()
    logsetting.init()
    unittest2.main()

View File

@ -0,0 +1,2 @@
id,name,os,target_system
1,Centos_openstack,Centos,openstack
1 id name os target_system
2 1 Centos_openstack Centos openstack

View File

@ -0,0 +1,4 @@
id,name,security_config.server_credentials.username,security_config.server_credentials.password,security_config.service_credentials.username,security_config.service_credentials.password,security_config.console_credentials.username,security_config.console_credentials.password,networking_config.interfaces.management.nic,networking_config.interfaces.management.netmask,networking_config.interfaces.management.promisc,networking_config.interfaces.management.ip_end,networking_config.interfaces.management.gateway,networking_config.interfaces.management.ip_start,networking_config.interfaces.storage.nic,networking_config.interfaces.storage.netmask,networking_config.interfaces.storage.promisc,networking_config.interfaces.storage.ip_end,networking_config.interfaces.storage.gateway,networking_config.interfaces.storage.ip_start,networking_config.interfaces.public.nic,networking_config.interfaces.public.netmask,networking_config.interfaces.public.promisc,networking_config.interfaces.public.ip_end,networking_config.interfaces.public.gateway,networking_config.interfaces.public.ip_start,networking_config.interfaces.tenant.nic,networking_config.interfaces.tenant.netmask,networking_config.interfaces.tenant.promisc,networking_config.interfaces.tenant.ip_end,networking_config.interfaces.tenant.gateway,networking_config.interfaces.tenant.ip_start,networking_config.global.nameservers,networking_config.global.ntp_server,networking_config.global.gateway,networking_config.global.proxy,networking_config.global.search_path,partition_config,adapter_id,state
1,cluster_01,root,root,service,admin,console,admin,eth0,255.255.255.0,1,10.120.1.200,None,10.120.1.100,eth3,255.255.255.0,0,172.29.1.200,None,172.29.1.100,eth2,255.255.255.0,0,12.145.1.200,None,12.145.1.100,eth1,255.255.255.0,0,192.168.1.200,None,192.168.1.100,8.8.8.8,127.0.0.1,192.168.1.1,http://127.0.0.1:3128,ods.com,"/home 20%;/tmp 10%;/var 30%;",1,READY
2,cluster_02,root,root,service,admin,console,admin,eth0,255.255.255.0,1,10.120.2.200,None,10.120.2.100,eth3,255.255.255.0,0,172.29.2.200,None,172.29.2.100,eth2,255.255.255.0,0,12.145.2.200,None,12.145.2.100,eth1,255.255.255.0,0,192.168.2.200,None,192.168.2.100,8.8.8.8,127.0.0.1,192.168.1.1,http://127.0.0.1:3128,ods.com,"/home 20%;/tmp 10%;/var 30%;",1,ERROR
3,cluster_03,root,admin,service,admin,console,admin,eth0,255.255.255.0,1,10.120.3.200,None,10.120.3.100,eth3,255.255.255.0,0,172.29.3.200,None,172.29.3.100,eth2,255.255.255.0,0,12.145.3.200,None,12.145.3.100,eth1,255.255.255.0,0,192.168.3.200,None,192.168.3.100,8.8.8.8,120.0.0.1,192.168.1.1,http://localhost:3128,ods.com,"/home 40%;/tmp 20%;/var 30%;",1,None
1 id name security_config.server_credentials.username security_config.server_credentials.password security_config.service_credentials.username security_config.service_credentials.password security_config.console_credentials.username security_config.console_credentials.password networking_config.interfaces.management.nic networking_config.interfaces.management.netmask networking_config.interfaces.management.promisc networking_config.interfaces.management.ip_end networking_config.interfaces.management.gateway networking_config.interfaces.management.ip_start networking_config.interfaces.storage.nic networking_config.interfaces.storage.netmask networking_config.interfaces.storage.promisc networking_config.interfaces.storage.ip_end networking_config.interfaces.storage.gateway networking_config.interfaces.storage.ip_start networking_config.interfaces.public.nic networking_config.interfaces.public.netmask networking_config.interfaces.public.promisc networking_config.interfaces.public.ip_end networking_config.interfaces.public.gateway networking_config.interfaces.public.ip_start networking_config.interfaces.tenant.nic networking_config.interfaces.tenant.netmask networking_config.interfaces.tenant.promisc networking_config.interfaces.tenant.ip_end networking_config.interfaces.tenant.gateway networking_config.interfaces.tenant.ip_start networking_config.global.nameservers networking_config.global.ntp_server networking_config.global.gateway networking_config.global.proxy networking_config.global.search_path partition_config adapter_id state
2 1 cluster_01 root root service admin console admin eth0 255.255.255.0 1 10.120.1.200 None 10.120.1.100 eth3 255.255.255.0 0 172.29.1.200 None 172.29.1.100 eth2 255.255.255.0 0 12.145.1.200 None 12.145.1.100 eth1 255.255.255.0 0 192.168.1.200 None 192.168.1.100 8.8.8.8 127.0.0.1 192.168.1.1 http://127.0.0.1:3128 ods.com /home 20%;/tmp 10%;/var 30%; 1 READY
3 2 cluster_02 root root service admin console admin eth0 255.255.255.0 1 10.120.2.200 None 10.120.2.100 eth3 255.255.255.0 0 172.29.2.200 None 172.29.2.100 eth2 255.255.255.0 0 12.145.2.200 None 12.145.2.100 eth1 255.255.255.0 0 192.168.2.200 None 192.168.2.100 8.8.8.8 127.0.0.1 192.168.1.1 http://127.0.0.1:3128 ods.com /home 20%;/tmp 10%;/var 30%; 1 ERROR
4 3 cluster_03 root admin service admin console admin eth0 255.255.255.0 1 10.120.3.200 None 10.120.3.100 eth3 255.255.255.0 0 172.29.3.200 None 172.29.3.100 eth2 255.255.255.0 0 12.145.3.200 None 12.145.3.100 eth1 255.255.255.0 0 192.168.3.200 None 192.168.3.100 8.8.8.8 120.0.0.1 192.168.1.1 http://localhost:3128 ods.com /home 40%;/tmp 20%;/var 30%; 1 None

View File

@ -0,0 +1,12 @@
id,cluster_id,hostname,machine_id,config_data.networking.interfaces.management.ip,config_data.networking.interfaces.tenant.ip,config_data.roles,state,deploy_action
1,1,host_01,1,10.120.1.100,192.168.1.100,['base'],READY,0
2,1,host_02,2,10.120.1.101,192.168.1.101,['base'],READY,0
3,1,host_03,3,10.120.1.102,192.168.1.102,['base'],READY,0
4,2,host_01,4,10.120.2.100,192.168.2.100,['base'],ERROR,1
5,2,host_02,5,10.120.2.101,192.168.2.101,['base'],READY,0
6,2,host_03,6,10.120.2.102,192.168.2.102,['base'],ERROR,1
7,3,host_01,7,10.120.3.100,192.168.3.100,['base'],None,1
8,3,host_02,8,10.120.3.101,192.168.3.102,['base'],None,1
9,3,host_03,9,10.120.3.102,192.168.3.102,['base'],None,1
10,1,host_04,10,10.120.1.103,192.168.1.103,['base'],None,1
11,2,host_04,11,10.120.2.104,192.168.2.104,['base'],None,1
1 id cluster_id hostname machine_id config_data.networking.interfaces.management.ip config_data.networking.interfaces.tenant.ip config_data.roles state deploy_action
2 1 1 host_01 1 10.120.1.100 192.168.1.100 ['base'] READY 0
3 2 1 host_02 2 10.120.1.101 192.168.1.101 ['base'] READY 0
4 3 1 host_03 3 10.120.1.102 192.168.1.102 ['base'] READY 0
5 4 2 host_01 4 10.120.2.100 192.168.2.100 ['base'] ERROR 1
6 5 2 host_02 5 10.120.2.101 192.168.2.101 ['base'] READY 0
7 6 2 host_03 6 10.120.2.102 192.168.2.102 ['base'] ERROR 1
8 7 3 host_01 7 10.120.3.100 192.168.3.100 ['base'] None 1
9 8 3 host_02 8 10.120.3.101 192.168.3.102 ['base'] None 1
10 9 3 host_03 9 10.120.3.102 192.168.3.102 ['base'] None 1
11 10 1 host_04 10 10.120.1.103 192.168.1.103 ['base'] None 1
12 11 2 host_04 11 10.120.2.104 192.168.2.104 ['base'] None 1

View File

@ -0,0 +1,12 @@
id,mac,port,vlan,switch_id
1,00:0c:27:88:0c:a1,1,1,1
2,00:0c:27:88:0c:a2,2,1,1
3,00:0c:27:88:0c:a3,3,1,1
4,00:0c:27:88:0c:b1,1,1,2
5,00:0c:27:88:0c:b2,2,1,2
6,00:0c:27:88:0c:b3,3,1,2
7,00:0c:27:88:0c:c1,1,1,3
8,00:0c:27:88:0c:c2,2,1,3
9,00:0c:27:88:0c:c3,3,1,3
10,00:0c:27:88:0c:d1,1,1,4
11,00:0c:27:88:0c:d2,2,1,4
1 id mac port vlan switch_id
2 1 00:0c:27:88:0c:a1 1 1 1
3 2 00:0c:27:88:0c:a2 2 1 1
4 3 00:0c:27:88:0c:a3 3 1 1
5 4 00:0c:27:88:0c:b1 1 1 2
6 5 00:0c:27:88:0c:b2 2 1 2
7 6 00:0c:27:88:0c:b3 3 1 2
8 7 00:0c:27:88:0c:c1 1 1 3
9 8 00:0c:27:88:0c:c2 2 1 3
10 9 00:0c:27:88:0c:c3 3 1 3
11 10 00:0c:27:88:0c:d1 1 1 4
12 11 00:0c:27:88:0c:d2 2 1 4

View File

@ -0,0 +1,2 @@
id,name,target_system,description
1,compute,openstack,None
1 id name target_system description
2 1 compute openstack None

View File

@ -0,0 +1,5 @@
id,ip,credential_data.version,credential_data.community
1,192.168.2.1,2c,public
2,192.168.2.2,2c,public
3,192.168.2.3,2c,public
4,192.168.2.4,2c,public
1 id ip credential_data.version credential_data.community
2 1 192.168.2.1 2c public
3 2 192.168.2.2 2c public
4 3 192.168.2.3 2c public
5 4 192.168.2.4 2c public

View File

@ -0,0 +1,3 @@
id,ip,filter_port
1,192.168.1.10,1
2,192.168.1.11,2
1 id ip filter_port
2 1 192.168.1.10 1
3 2 192.168.1.11 2

View File

@ -6,5 +6,4 @@ celery
netaddr
paramiko
simplejson
requests

View File

@ -2,3 +2,4 @@ discover
mock
unittest2
testrepository>=0.0.17
mimeparse