deployment node type

Change-Id: I412969ee3f25ed7e02609a0457e67fb12e526740
Kanagaraj Manickam 2016-03-30 20:32:19 +05:30
parent 97d84d1b45
commit 350f37bbc5
13 changed files with 1134 additions and 367 deletions


@ -7,3 +7,4 @@ namespace = oslo.middleware
namespace = oslo.db
namespace = oslo.log
namespace = oslo.service.service
namespace = os_namos


@ -11,7 +11,7 @@ rabbit_hosts = 172.241.0.101
connection = mysql+pymysql://root:password@172.241.0.101/namos?charset=utf8
[os_manager]
workers=3
workers=20
[os_namos]
region_name=RegionTwo
region_name=RegionTwo


@ -1,27 +1,45 @@
# List of config generator conf files for syncing the conf with namos
heat=/opt/stack/heat/config-generator.conf
namos=/home/manickan/workspace/namos/openstack/namos/config-generator.conf
os_namos=/home/manickan/workspace/namos/openstack/os-namos/config-generator.conf
keystone=/opt/stack/keystone/config-generator/keystone.conf
neutron-bgp-dragent=/opt/stack/neutron/etc/oslo-config-generator/bgp_dragent.ini
neutron-dhcp-agent=/opt/stack/neutron/etc/oslo-config-generator/dhcp_agent.ini
neutron-l3-agent=/opt/stack/neutron/etc/oslo-config-generator/l3_agent.ini
neutron-linuxbridge-agent=/opt/stack/neutron/etc/oslo-config-generator/linuxbridge_agent.ini
neutron-metadata-agent=/opt/stack/neutron/etc/oslo-config-generator/metadata_agent.ini
neutron-metering-agent=/opt/stack/neutron/etc/oslo-config-generator/metering_agent.ini
neutron-ml2=/opt/stack/neutron/etc/oslo-config-generator/ml2_conf.ini
neutron-ml2-sriov=/opt/stack/neutron/etc/oslo-config-generator/ml2_conf_sriov.ini
neutron=/opt/stack/neutron/etc/oslo-config-generator/neutron.conf
neutron-openvswitch-agent=/opt/stack/neutron/etc/oslo-config-generator/openvswitch_agent.ini
neutron-sriov-agent=/opt/stack/neutron/etc/oslo-config-generator/sriov_agent.ini
lbaas-agent=/opt/stack/neutron-lbaas/etc/oslo-config-generator/lbaas_agent.ini
neutron-lbaas=/opt/stack/neutron-lbaas/etc/oslo-config-generator/neutron_lbaas.conf
services-lbaas=/opt/stack/neutron-lbaas/etc/oslo-config-generator/services_lbaas.conf
glance-api=/opt/stack/glance/etc/oslo-config-generator/glance-api.conf
glance-cache=/opt/stack/glance/etc/oslo-config-generator/glance-cache.conf
glance-glare=/opt/stack/glance/etc/oslo-config-generator/glance-glare.conf
glance-registry=/opt/stack/glance/etc/oslo-config-generator/glance-registry.conf
glance-scrubber=/opt/stack/glance/etc/oslo-config-generator/glance-scrubber.conf
glance-manage=/opt/stack/glance/etc/oslo-config-generator/glance-manage.conf
nova=/opt/stack/nova/etc/nova/nova-config-generator.conf
cinder=/opt/stack/cinder/cinder/config/cinder-config-generator.conf
[namos]
namos.conf=/home/manickan/workspace/namos/openstack/namos/config-generator.conf
[heat]
heat.conf=/opt/stack/heat/config-generator.conf
[keystone]
keystone.conf=/opt/stack/keystone/config-generator/keystone.conf
[neutron]
bgp_dragent.ini=/opt/stack/neutron/etc/oslo-config-generator/bgp_dragent.ini
dhcp_agent.ini=/opt/stack/neutron/etc/oslo-config-generator/dhcp_agent.ini
l3_agent.ini=/opt/stack/neutron/etc/oslo-config-generator/l3_agent.ini
linuxbridge_agent.ini=/opt/stack/neutron/etc/oslo-config-generator/linuxbridge_agent.ini
metadata_agent.ini=/opt/stack/neutron/etc/oslo-config-generator/metadata_agent.ini
metering_agent.ini=/opt/stack/neutron/etc/oslo-config-generator/metering_agent.ini
ml2_conf.ini=/opt/stack/neutron/etc/oslo-config-generator/ml2_conf.ini
ml2_conf_sriov.ini=/opt/stack/neutron/etc/oslo-config-generator/ml2_conf_sriov.ini
neutron.conf=/opt/stack/neutron/etc/oslo-config-generator/neutron.conf
openvswitch_agent.ini=/opt/stack/neutron/etc/oslo-config-generator/openvswitch_agent.ini
sriov_agent.ini=/opt/stack/neutron/etc/oslo-config-generator/sriov_agent.ini
lbaas_agent.ini=/opt/stack/neutron-lbaas/etc/oslo-config-generator/lbaas_agent.ini
neutron_lbaas.conf=/opt/stack/neutron-lbaas/etc/oslo-config-generator/neutron_lbaas.conf
services_lbaas.conf=/opt/stack/neutron-lbaas/etc/oslo-config-generator/services_lbaas.conf
[glance]
glance-api.conf=/opt/stack/glance/etc/oslo-config-generator/glance-api.conf
glance-cache.conf=/opt/stack/glance/etc/oslo-config-generator/glance-cache.conf
glance-glare.conf=/opt/stack/glance/etc/oslo-config-generator/glance-glare.conf
glance-registry.conf=/opt/stack/glance/etc/oslo-config-generator/glance-registry.conf
glance-scrubber.conf=/opt/stack/glance/etc/oslo-config-generator/glance-scrubber.conf
glance-manage.conf=/opt/stack/glance/etc/oslo-config-generator/glance-manage.conf
[nova]
nova.conf=/opt/stack/nova/etc/nova/nova-config-generator.conf
[cinder]
cinder.conf=/opt/stack/cinder/cinder/config/cinder-config-generator.conf
[ceilometer]
ceilometer.conf=/opt/stack/ceilometer/etc/ceilometer/ceilometer-config-generator.conf
[aodh]
aodh.conf=/opt/stack/aodh/aodh-config-generator.conf
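Each [service] section above maps a target conf file to its oslo-config-generator input. A minimal sketch of consuming this layout with the file_to_configs helper added to namos/common/utils.py in this change (the path below is illustrative):

from namos.common import utils

# illustrative path; namos-manage receives the real one via --syncfile
sync_map = utils.file_to_configs('/etc/namos/oslo-config-schema-sync.conf')
for service, confs in sync_map.items():
    for conf_name, generator_conf in confs.items():
        print('%s: %s -> %s' % (service, conf_name, generator_conf))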

namos/cmd/api.py (new file, 83 lines)

@ -0,0 +1,83 @@
# -*- encoding: utf-8 -*-
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from bottle import route
from bottle import run
from namos.conductor import rpcapi
from oslo_context import context
@route('/v1/regions')
def regions():
regions = rpcapi.ConductorAPI().region_get_all(
context.get_admin_context())
return {'regions': regions}
@route('/v1/view_360')
def infra():
_infra = rpcapi.ConductorAPI().view_360(
context.get_admin_context(), True, True, True)
return {'view_360': _infra}
@route('/v1/config_schema/<service>')
def config_schema(service):
schema = dict()
c = rpcapi.ConductorAPI()
schema = c.config_schema(context.RequestContext(),
project=service,
with_file_link=True)
return dict(config_schema=schema)
# OpenStack view
@route('/v1/regions/<region_id>/services')
def perspective_region_service_list(region_id):
_region = rpcapi.ConductorAPI().region_perspective_get(
context.get_admin_context(), region_id)
return {'services': _region['services']}
# Data center view
@route('/v1/regions/<region_id>/devices')
def perspective_region_device_list(region_id):
_region = rpcapi.ConductorAPI().region_perspective_get(
context.get_admin_context(), region_id)
return {'devices': _region['devices']}
# OpenStack service view
@route('/v1/services/<service_id>')
def perspective_region_service(service_id):
_srv = rpcapi.ConductorAPI().service_perspective_get(
context.get_admin_context(), service_id)
return {'service': _srv}
# Data center device view
@route('/v1/devices/<device_id>')
def perspective_region_device(device_id):
_dvc = rpcapi.ConductorAPI().device_perspective_get(
context.get_admin_context(), device_id)
return {'device': _dvc}
run(host='localhost', port=9999)
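A hedged usage sketch for the routes above, assuming the Bottle app is running on localhost:9999 as configured and that python-requests is available; 'nova' is only an example project name:

import requests  # assumption: python-requests is installed

base = 'http://localhost:9999'
print(requests.get(base + '/v1/regions').json())
print(requests.get(base + '/v1/view_360').json())
print(requests.get(base + '/v1/config_schema/nova').json())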


@ -19,6 +19,7 @@ from oslo_config import cfg
from namos.common import config
from namos.common import exception
from namos.common import utils
from namos.db import api
from namos.db import sample
from namos.db.sqlalchemy import migration
@ -30,30 +31,37 @@ MANAGE_COMMAND_NAME = 'namos-manage'
class HeartBeat(object):
def report_status(self):
# TODO(mrkanag) Make like Node: Service: worker: status
for sw in api.service_worker_get_all(None):
# TODO(mrkanag) Move this to db layer and query non deleted entries
if sw.deleted_at is not None:
continue
msg = '[%s] [%s] %s %s' % (
'T' if sw.is_launcher else 'F',
'T' if utils.find_status(sw) else 'F',
sw.name,
sw.host)
print (msg)
print_format = "%-20s%-15s%-15s%-35s%-10s"
strip = 90 * '-'
print(strip)
print(print_format % ('Node',
'Type',
'Service',
'Component',
'Status'))
print(strip)
for k, s in api.get_status(None).items():
print(print_format % (s['node'],
s['type'],
s['service'],
s['component'],
s['status']))
print(strip)
class OsloConfigSchemaManager(object):
def gen_schema(self):
import json
cfg_ns = dict()
for cfg_ in api.config_schema_get_all(None):
if cfg_.namespace not in cfg_ns:
cfg_ns[cfg_.namespace] = dict()
if cfg_.group_name not in cfg_ns[cfg_.namespace]:
cfg_ns[cfg_.namespace][cfg_.group_name] = dict()
cfg_ns[cfg_.namespace][cfg_.group_name][cfg_.name] = cfg_.to_dict()
if CONF.command.by_namespace:
for cfg_ in api.config_schema_get_all(None):
if cfg_.namespace not in cfg_ns:
cfg_ns[cfg_.namespace] = dict()
if cfg_.group_name not in cfg_ns[cfg_.namespace]:
cfg_ns[cfg_.namespace][cfg_.group_name] = dict()
cfg_ns[cfg_.namespace][cfg_.group_name][
cfg_.name] = cfg_.to_dict()
open(CONF.command.outputfile, 'w').write(json.dumps(cfg_ns))
@ -62,53 +70,54 @@ class OsloConfigSchemaManager(object):
self.gen_schema()
return
sync_map = {}
with open(CONF.command.syncfile) as f:
for line in f:
if line.startswith("#"):
continue
kv = line.split("=")
sync_map[kv[0]] = kv[1].replace("\n", "")
sync_map = utils.file_to_configs(CONF.command.syncfile)
for k, v in sync_map.items():
out_file = '%s/%s.json' % (CONF.command.outputdir or '/tmp', k)
cmd = ('oslo-config-generator --config-file %s '
'--output-file %s --output-format json' %
(v, out_file))
print ("\nSyncing %s " % cmd)
import os
os.system(cmd)
for srv, confs in sync_map.items():
for conf, gen_conf in confs.items():
out_file = '%s/%s.json' % (CONF.command.outputdir or '/tmp',
conf)
cmd = ('oslo-config-generator --config-file %s '
'--output-file %s --output-format json' %
(gen_conf, out_file))
print ("\nSyncing %s " % cmd)
import os
os.system(cmd)
if CONF.command.dbsync:
import json
conf_dict = json.loads(open(out_file).read())
for grp, namespaces in conf_dict.items():
for namespace, opts in namespaces.items():
for name, opt in opts.items():
conf_ = dict(
namespace=namespace,
group_name=grp,
name=name,
default_value=opt['default'],
type=opt['type']['name'],
help=opt['help'],
required=opt['required'],
secret=opt['secret'],
mutable=opt['mutable']
)
if CONF.command.dbsync:
import json
conf_dict = json.loads(open(out_file).read())
for grp, namespaces in conf_dict.items():
for namespace, opts in namespaces.items():
for name, opt in opts.items():
conf_ = dict(
namespace=namespace,
project=srv,
file_name=conf,
group_name=grp,
name=name.replace('-', '_'),
default_value=opt['default'],
type=opt['type']['name'],
help=opt['help'],
required=opt['required'],
secret=opt['secret'],
mutable=opt['mutable']
)
try:
api.config_schema_create(None,
conf_)
_a = 'T'
except exception.AlreadyExist:
_a = 'F'
try:
api.config_schema_create(None,
conf_)
_a = 'T'
except exception.AlreadyExist:
_a = 'F'
msg = '[%s] %s::%s::%s' % (_a,
namespace,
grp,
name)
print (msg)
msg = '[%s] %s::%s::%s::%s::%s' % (
_a,
srv,
conf,
namespace,
grp,
name)
print (msg)
class DBCommand(object):
@ -180,6 +189,7 @@ def add_command_parsers(subparsers):
parser.add_argument('-j', '--outputfile')
parser.add_argument('-s', '--dbsync', action='store_true')
parser.add_argument('-g', '--gen', action='store_true')
parser.add_argument('-n', '--by-namespace', action='store_true')
parser.set_defaults(func=OsloConfigSchemaManager().sync)
parser = subparsers.add_parser('status')


@ -21,8 +21,35 @@ PROJECT_NAME = 'namos'
VERSION = namos.__version__
MESSAGE_QUEUE_CONDUCTOR_TOPIC = '%s.conductor' % PROJECT_NAME
CONF = cfg.CONF
SCvsNT = dict(
controller=['nova-api', 'nova-cert', 'nova-conductor', 'nova-consoleauth',
'nova-scheduler',
'cinder-api', 'cinder-scheduler',
'neutron-server',
'glance-api', 'glance-registry',
'keystone-all',
'heat-api', 'heat-api-cfn', 'heat-api-cloudwatch',
'heat-engine',
'namos-manager',
'ceilometer-api', 'ceilometer-polling', 'ceilometer-collector',
'ceilometer-agent-notification',
'tacker-server'
],
compute=['nova-compute', 'neutron-openvswitch-agent',
'ceilometer-agent-compute'],
storage=['cinder-volume'],
network=['neutron-dhcp-agent', 'neutron-l3-agent', 'neutron-lbaas-agent',
'neutron-metadata-agent']
)
def find_type(name):
for key, value in SCvsNT.items():
if name in value:
return key
return 'UNKNOWN'
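A quick sanity sketch of find_type against the SCvsNT mapping above:

from namos.common.config import find_type

assert find_type('nova-compute') == 'compute'
assert find_type('cinder-volume') == 'storage'
assert find_type('neutron-server') == 'controller'
assert find_type('mysqld') == 'UNKNOWN'  # anything not listed falls back to UNKNOWN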
conductor_opts = [
cfg.IntOpt('workers',
default=1,


@ -27,3 +27,51 @@ def find_status(sw, report_interval=60):
status = True
return status
def _to_list(list_in_str):
'''string [a,b,c] -> python list [a, b, c].'''
def strip_out(s):
start_idx = 0
end_idx = len(s)
if s[start_idx] == '[' \
or s[start_idx] == '\'' \
or s[start_idx] == '"':
start_idx += 1
if s[end_idx - 1] == ']' \
or s[end_idx - 1] == '\'' \
or s[end_idx - 1] == '"':
end_idx -= 1
return s[start_idx:end_idx]
l = []
for s in strip_out(list_in_str.strip()).split(','):
s = str(strip_out(s.strip()))
l.append(s)
return l
def file_to_configs(file_path):
with open(file_path, 'r') as file:
section = ''
conf_dict = dict()
for line in file:
if len(line.strip()) == 0:
continue
if line.strip().startswith('#'):
continue
if line.strip().startswith('['):
section = line.replace('[', '').replace(']', '').strip()
conf_dict[section] = dict()
continue
if section:
kv = line.strip().split('=')
# TODO(mrkanag) if values required, enable it here
conf_dict[section][kv[0].strip()] = kv[1].strip().replace(
"\n", "")
return conf_dict
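Hedged usage sketches for the two helpers above; the list string, path and file contents are illustrative:

from namos.common import utils

utils._to_list("['a', 'b', 'c']")  # -> ['a', 'b', 'c']

# For an INI-style file containing:
#   [os_manager]
#   workers=20
# file_to_configs returns {'os_manager': {'workers': '20'}}
print(utils.file_to_configs('/tmp/sample.conf'))  # illustrative path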


@ -19,7 +19,7 @@ from oslo_context import context
from oslo_log import log
from oslo_utils import timeutils
from namos.common import config
from namos.common import config as namos_config
from namos.common import exception
from namos.common import messaging
from namos.common import utils
@ -28,7 +28,7 @@ from namos.db import openstack_drivers
LOG = log.getLogger(__name__)
config.register_conductor_opts()
namos_config.register_conductor_opts()
CONF = cfg.CONF
@ -46,73 +46,125 @@ def request_context(func):
class ConductorManager(object):
RPC_API_VERSION = '1.0'
TOPIC = config.MESSAGE_QUEUE_CONDUCTOR_TOPIC
TOPIC = namos_config.MESSAGE_QUEUE_CONDUCTOR_TOPIC
@request_context
def add_region(self, context, region):
# Move this try/except to a wrapper fn of the db layer
try:
db_api.region_create(context, region)
except: # noqa
raise exception.NamosException()
return db_api.region_create(context, region)
@request_context
def region_get(self, context, region_id):
return db_api.region_get(context, region_id)
@request_context
def region_update(self, context, region_id, region):
return db_api.region_update(context, region_id, region)
@request_context
def region_get_all(self, context):
return db_api.region_get_all(context)
@request_context
def region_delete(self, context, region_id):
return db_api.region_delete(context, region_id)
@request_context
def add_service_node(self, context, service_node):
return db_api.service_node_create(context, service_node)
@request_context
def service_node_get(self, context, service_node_id):
return db_api.service_node_get(context, service_node_id)
@request_context
def service_node_update(self, context, service_node_id, service_node):
return db_api.service_node_update(context,
service_node_id,
service_node)
@request_context
def service_node_get_all(self, context):
return db_api.service_node_get_all(context)
@request_context
def service_node_delete(self, context, service_node_id):
return db_api.service_node_delete(context, service_node_id)
@request_context
def register_myself(self, context, registration_info):
LOG.info("REGISTER [%s.%s.%s] START" % (
LOG.info("REGISTER [%s.%s.%s] START\n%s" % (
registration_info['project_name'],
registration_info['prog_name'],
registration_info['identification']
registration_info['identification'],
registration_info
))
# Region processing
rp = RegionProcessor(context,
self,
registration_info)
region_id = rp.process_region()
# Service processing
sp = ServiceProcessor(context,
self,
region_id,
registration_info)
service_component_id, service_worker_id = sp.process_service(context)
service_component_id, service_worker_id = sp.process_service()
# Config processing
cp = ConfigProcessor(context,
self,
registration_info,
service_worker_id)
cp.process_configs()
# Device Driver processing
dp = DriverProcessor(service_worker_id,
registration_info['config_dict'])
dp.process_drivers(context)
# TODO(mrkanag) is this to be per service component?
dp = DriverProcessor(context,
self,
service_worker_id,
region_id)
dp.process_drivers()
self._regisgration_ackw(context,
registration_info['identification'])
LOG.info("REGISTER [%s.%s.%s] DONE" % (
registration_info['project_name'],
registration_info['prog_name'],
registration_info['identification']
))
self._regisgration_ackw(context,
registration_info['identification'])
sp.cleanup(service_component_id)
# TODO(mrkanag) Move this to a periodic task; before deleting each
# sw, make sure it was created at least 5 mins before
# sp.cleanup(service_component_id)
return service_worker_id
def _regisgration_ackw(self, context, identification):
client = messaging.get_rpc_client(topic='namos.CONF.%s' %
identification,
version=self.RPC_API_VERSION,
exchange=config.PROJECT_NAME)
exchange=namos_config.PROJECT_NAME)
client.cast(context,
'regisgration_ackw',
identification=identification)
LOG.info("REGISTER [%s] ACK" % identification)
def _ping(self, context, identification):
@request_context
def ping(self, context, identification):
client = messaging.get_rpc_client(
topic='namos.CONF.%s' % identification,
version=self.RPC_API_VERSION,
exchange=config.PROJECT_NAME)
exchange=namos_config.PROJECT_NAME)
try:
client.call(context,
'ping_me',
identification=identification)
LOG.debug("PING [%s] SUCCESSFUL" % identification)
LOG.info("PING [%s] SUCCESSFUL" % identification)
return True
except: # noqa
LOG.debug("PING [%s] FAILED" % identification)
LOG.info("PING [%s] FAILED" % identification)
return False
@request_context
@ -169,8 +221,20 @@ class ConductorManager(object):
return db_api.infra_perspective_get(context)
@request_context
def view_360(self, context):
return db_api.view_360(context)
def view_360(self, context,
include_conf_file=False,
include_status=False,
include_file_entry=False):
view = db_api.view_360(context, include_conf_file, include_status)
if include_file_entry:
view['config_file_entry'] = dict()
for f in list(view['config_file']):
view['config_file_entry'][f] = self.config_file_get(
context,
config_file_id=f
)['entry']
return view
@request_context
def config_get_by_name_for_service_worker(self,
@ -183,8 +247,45 @@ class ConductorManager(object):
name,
only_configured)
@request_context
def get_status(self, context):
return db_api.get_status(context)
class ServiceProcessor(object):
@request_context
def config_file_get(self, context, config_file_id):
file = db_api.config_file_get(context, config_file_id)
cfg_es = db_api.config_file_entry_get_all_by(
context,
oslo_config_file_id=config_file_id
)
return dict(file=file, entry=cfg_es)
@request_context
def config_schema(self, context, project, with_file_link=False):
# provide the manage oslo_config_schema --gen
file_schema = dict()
for cfg_s in db_api.config_schema_get_all_by(context, project=project):
if cfg_s.file_name not in file_schema:
file_schema[cfg_s.file_name] = dict()
if cfg_s.group_name not in file_schema[cfg_s.file_name]:
file_schema[cfg_s.file_name][cfg_s.group_name] = dict()
file_schema[cfg_s.file_name][cfg_s.group_name][cfg_s.name] = cfg_s
if with_file_link:
cfg_es = db_api.config_file_entry_get_all_by(
context,
oslo_config_schema_id=cfg_s.id
)
file_schema[cfg_s.file_name][cfg_s.group_name][
cfg_s.name]['entries'] = cfg_es
return file_schema
class RegionProcessor(object):
def __init__(self,
context,
manager,
@ -193,30 +294,7 @@ class ServiceProcessor(object):
self.manager = manager
self.context = context
def file_to_configs(self, file_content):
tmp_file_path = '/tmp/sample-namos-config.conf'
with open(tmp_file_path, 'w') as file:
file.write(file_content)
with open(tmp_file_path, 'r') as file:
section = ''
conf_dict = dict()
for line in file:
if line.strip().startswith('['):
section = line.replace('[', '').replace(']', '').strip()
continue
if section:
kv = line.strip().split('=')
conf_dict[
'%s::%s' % (section, kv[0].strip())] = None
import os
os.remove(tmp_file_path)
return conf_dict
def process_service(self, context):
def process_region(self):
# region
# If region is not provided, make it as belongs to namos's region
if not self.registration_info.get('region_name'):
@ -225,31 +303,46 @@ class ServiceProcessor(object):
try:
region = db_api.region_create(
context,
self.context,
dict(name=self.registration_info.get('region_name'))
)
LOG.info('Region %s is created' % region)
except exception.AlreadyExist:
region = db_api.region_get_by_name(
context,
self.context,
name=self.registration_info.get('region_name')
)
LOG.info('Region %s is existing' % region)
return region.id
class ServiceProcessor(object):
def __init__(self,
context,
manager,
region_id,
registration_info):
self.registration_info = registration_info
self.manager = manager
self.context = context
self.region_id = region_id
def process_service(self):
# Service Node
try:
# TODO(mrkanag) use proper node name instead of fqdn
node = db_api.service_node_create(
context,
self.context,
dict(name=self.registration_info.get('fqdn'),
fqdn=self.registration_info.get('fqdn'),
region_id=region.id))
region_id=self.region_id,
extra={'ips': self.registration_info.get('ips')}))
LOG.info('Service node %s is created' % node)
except exception.AlreadyExist:
# TODO(mrkanag) is this to be a region specific search
node = db_api.service_node_get_by_name(
context,
self.context,
self.registration_info.get('fqdn'))
LOG.info('Service node %s is existing' % node)
@ -257,7 +350,7 @@ class ServiceProcessor(object):
try:
s_id = 'b9c2549f-f685-4bc2-92e9-ba8af9c18591'
service = db_api.service_create(
context,
self.context,
# TODO(mrkanag) use keystone python client and
# use real service id here
dict(name=self.registration_info.get('project_name'),
@ -266,22 +359,24 @@ class ServiceProcessor(object):
LOG.info('Service %s is created' % service)
except exception.AlreadyExist:
service = db_api.service_get_by_name(
context,
self.context,
self.registration_info.get('project_name'))
LOG.info('Service %s is existing' % service)
# Service Component
try:
service_component = db_api.service_component_create(
context,
self.context,
dict(name=self.registration_info['prog_name'],
node_id=node.id,
service_id=service.id))
service_id=service.id,
type=namos_config.find_type(self.registration_info[
'prog_name'])))
LOG.info('Service Component %s is created' % service_component)
except exception.AlreadyExist:
service_components = \
db_api.service_component_get_all_by_node_for_service(
context,
self.context,
node_id=node.id,
service_id=service.id,
name=self.registration_info['prog_name']
@ -295,7 +390,7 @@ class ServiceProcessor(object):
# Service Worker
try:
service_worker = db_api.service_worker_create(
context,
self.context,
# TODO(mrkanag) Fix the name, device driver proper !
dict(name='%s@%s' % (self.registration_info['pid'],
service_component.name),
@ -307,122 +402,13 @@ class ServiceProcessor(object):
))
LOG.info('Service Worker %s is created' % service_worker)
except exception.AlreadyExist:
service_worker = db_api.service_worker_get_all_by(
self.context,
pid=self.registration_info['identification'],
service_component_id=service_component.id
)[0]
LOG.info('Service Worker %s is existing' %
db_api.service_worker_get_all_by(
context,
pid=self.registration_info['identification'],
service_component_id=service_component.id
)[0])
# config file
conf_files = dict()
for cfg_f in self.registration_info['config_file_list']:
try:
config_file = db_api.config_file_create(
context,
dict(name=cfg_f,
file=self.registration_info[
'config_file_dict'][cfg_f],
service_component_id=service_component.id,
service_node_id=node.id))
LOG.info('Oslo config file %s is created' % config_file)
except exception.AlreadyExist:
config_files = \
db_api.config_file_get_by_name_for_service_node(
context,
service_node_id=node.id,
name=cfg_f
)
if len(config_files) == 1:
config_file = \
db_api.config_file_update(
context,
config_files[0].id,
dict(file=self.registration_info[
'config_file_dict'][cfg_f]))
LOG.info('Oslo config file %s is existing and is updated'
% config_file)
config_dict = self.file_to_configs(
self.registration_info['config_file_dict'][cfg_f]
)
conf_files[config_file.id] = config_dict
# Config
# TODO(mrkanag) Optimize the config like per service_component
# or per service_worker,
for cfg_name, cfg_obj in self.registration_info[
'config_dict'].iteritems():
cfg_schs = db_api.config_schema_get_by(
context=context,
group=cfg_obj['group'],
name=cfg_obj['name']
)
if len(cfg_schs) > 1:
cfg_sche = cfg_schs[0]
LOG.debug("Config Schema %s is existing and is updated" %
cfg_sche)
else:
try:
cfg_sche = db_api.config_schema_create(
context,
dict(
namespace='UNKNOWN-NAMOS',
default_value=cfg_obj['default_value'],
type=cfg_obj['type'],
help=cfg_obj['help'],
required=cfg_obj['required'],
secret=cfg_obj['secret'],
mutable=False,
group_name=cfg_obj['group'],
name=cfg_obj['name']
)
)
LOG.debug("Config Schema %s is created" % cfg_sche)
except exception.AlreadyExist:
cfg_schs = db_api.config_schema_get_by(
context=context,
group=cfg_obj['group'],
name=cfg_obj['name'],
namespace='UNKNOWN-NAMOS'
)
cfg_sche = cfg_schs[0]
LOG.debug("Config Schema %s is existing and is updated" %
cfg_sche)
# is it part of config file
cfg_name = "%s::%s" % (cfg_obj['group'], cfg_name)
file_id = None
for f_id, conf_keys in conf_files.items():
if cfg_name in conf_keys.keys():
file_id = f_id
break
cfg_obj_ = dict(
service_worker_id=service_worker.id,
name=cfg_name,
value=cfg_obj['value'],
oslo_config_schema_id=cfg_sche.id,
oslo_config_file_id=file_id
)
try:
config = db_api.config_create(context, cfg_obj_)
LOG.debug("Config %s is created" % config)
except exception.AlreadyExist:
configs = db_api.config_get_by_name_for_service_worker(
context,
service_worker_id=cfg_obj_['service_worker_id'],
name=cfg_obj_['name'])
if len(configs) == 1:
config = db_api.config_update(context,
configs[0].id,
cfg_obj_)
LOG.debug("Config %s is existing and is updated" % config)
service_worker)
return service_component.id, service_worker.id
@ -431,7 +417,7 @@ class ServiceProcessor(object):
# TODO(mrkanag) Make this into thread
service_workers = \
db_api.service_worker_get_all_by(
context,
self.context,
service_component_id=service_component_id
)
@ -440,7 +426,8 @@ class ServiceProcessor(object):
if srv_wkr.deleted_at is not None:
continue
if utils.find_status(srv_wkr):
# TODO(mrkanag) is this interval ok
if utils.find_status(srv_wkr, report_interval=60):
LOG.info('Service Worker %s is live'
% srv_wkr.id)
continue
@ -452,18 +439,278 @@ class ServiceProcessor(object):
for conf in confs:
db_api.config_delete(self.context, conf.id)
LOG.debug('Config %s is deleted'
% conf.id)
LOG.info('Config %s is deleted'
% conf.id)
db_api.service_worker_delete(self.context, srv_wkr.id)
LOG.info('Service Worker %s is deleted'
% srv_wkr.id)
class DriverProcessor(object):
def __init__(self, service_worker_id, config_dict):
self.config_dict = config_dict
class ConfigProcessor(object):
def __init__(self, context, manager, registration_info, service_worker_id):
self.context = context
self.manager = manager
self.registration_info = registration_info
self.service_worker_id = service_worker_id
self.service_component_id = db_api.service_worker_get(
self.context,
self.service_worker_id).service_component_id
sc = db_api.service_component_get(
self.context,
self.service_component_id
)
self.service_node_id = sc.node_id
self.project = db_api.service_get(self.context, sc.service_id).name
def file_to_configs(self, file_content):
import uuid
tmp_file_path = '/tmp/%s.conf' % str(uuid.uuid4())
with open(tmp_file_path, 'w') as file:
file.write(file_content)
conf_dict = utils.file_to_configs(tmp_file_path)
import os
os.remove(tmp_file_path)
return conf_dict
def _form_config_name(self, group, key):
return '%s.%s' % (group, key)
def process_config_files(self):
# config file
conf_name_to_file_id = dict()
for cfg_f in self.registration_info['config_file_dict'].keys():
try:
config_file = db_api.config_file_create(
self.context,
dict(name=cfg_f,
file=self.registration_info[
'config_file_dict'][cfg_f],
service_node_id=self.service_node_id))
LOG.info('Oslo config file %s is created' % config_file)
except exception.AlreadyExist:
config_files = \
db_api.config_file_get_by_name_for_service_node(
self.context,
service_node_id=self.service_node_id,
name=cfg_f
)
if len(config_files) == 1:
config_file = \
db_api.config_file_update(
self.context,
config_files[0].id,
dict(file=self.registration_info[
'config_file_dict'][cfg_f]))
LOG.info('Oslo config file %s is existing and is updated'
% config_file)
config_dict = self.file_to_configs(
config_file.file
)
# config file entry
for grp, keys in config_dict.items():
for key, value in keys.items():
# find config schema
cfg_schs = db_api.config_schema_get_by(
context=self.context,
group=grp,
name=key,
project=self.project
)
cfg_sche = None
if len(cfg_schs) == 0:
LOG.debug("[%s] No Config Schema is existing, so "
"no schema is associated for Config Entry "
"%s::%s" %
(self.service_component_id,
grp,
key))
elif len(cfg_schs) > 1:
LOG.debug("[%s] More than one Config Schema is "
"existing, so no schema is associated for "
"Config Entry %s::%s" %
(self.service_component_id,
grp,
key))
else:
cfg_sche = cfg_schs[0]
LOG.debug("[%s] Config Schema %s is existing and is "
"used to associated for Config Entry"
" %s::%s" %
(self.service_component_id,
cfg_sche.id,
grp,
key))
# config file entry
cfg_name = self._form_config_name(grp, key)
cfg_obj_ = dict(
service_component_id=self.service_component_id,
name=cfg_name,
value=value,
oslo_config_schema_id=cfg_sche.id if
cfg_sche else None,
oslo_config_file_id=config_file.id
)
try:
config = db_api.config_file_entry_create(
self.context,
cfg_obj_)
LOG.debug("Config Entry %s is created" % config)
except exception.AlreadyExist:
configs = db_api.config_file_entry_get_all_by(
self.context,
service_component_id=cfg_obj_[
'service_component_id'],
oslo_config_file_id=config_file.id,
name=cfg_obj_['name'])
if len(configs) == 1:
config = db_api.config_file_entry_update(
self.context,
configs[0].id,
cfg_obj_)
LOG.debug("Config Entry %s is existing and is "
"updated" % config)
conf_name_to_file_id[cfg_name] = config.id
return conf_name_to_file_id
def process_configs(self):
conf_name_to_file_id = self.process_config_files()
# Config
for cfg_obj in self.registration_info['config_list']:
# This format is used by DriverProcessor
cfg_name = self._form_config_name(cfg_obj['group'],
cfg_obj['name'])
if not conf_name_to_file_id.get(cfg_name):
cfg_schm_id = None
cfg_f_entry = None
# find config schema
# ignore the config file_name right now; conf is assumed unique
# across the service with the given group and name
cfg_schs = db_api.config_schema_get_by(
context=self.context,
group=cfg_obj['group'],
name=cfg_obj['name'],
project=self.project
)
if len(cfg_schs) == 0:
LOG.debug("[%s] No Config Schema is existing, so "
"no schema is associated for Config %s::%s" %
(self.service_worker_id,
cfg_obj['group'],
cfg_obj['name']))
elif len(cfg_schs) > 1:
LOG.debug("[%s] More than one Config Schema is existing, "
"so no schema is associated for Config %s::%s" %
(self.service_worker_id,
cfg_obj['group'],
cfg_obj['name']))
else:
# try:
# cfg_sche = db_api.config_schema_create(
# self.context,
# dict(
# namespace='UNKNOWN-tagged-by-NAMOS',
# default_value=cfg_obj['default_value'],
# type=cfg_obj['type'],
# help=cfg_obj['help'],
# required=cfg_obj['required'],
# secret=cfg_obj['secret'],
# mutable=False,
# group_name=cfg_obj['group'],
# name=cfg_obj['name']
# )
# )
# LOG.info("Config Schema %s is created" % cfg_sche)
# except exception.AlreadyExist:
# cfg_schs = db_api.config_schema_get_by(
# context=self.context,
# group=cfg_obj['group'],
# name=cfg_obj['name'],
# namespace='UNKNOWN-tagged-by-NAMOS'
# )
cfg_sche = cfg_schs[0]
LOG.debug("[%s] Config Schema %s is existing and is used "
"for Config %s::%s" %
(self.service_worker_id,
cfg_sche.id,
cfg_obj['group'],
cfg_obj['name']))
cfg_schm_id = cfg_sche.id
else:
cfg_schm_id = None
cfg_f_entry = conf_name_to_file_id[cfg_name]
# config_file_entry_id = None
# for f_id, conf_groups in conf_name_to_file_id.items():
# if cfg_obj['group'] in list(conf_groups):
# if cfg_obj['name'] in list(conf_groups[cfg_obj[
# 'group']]):
# config_entrys=db_api.config_file_entry_get_all_by(
# self.context,
# service_component_id=self.service_component_id,
# oslo_config_file_id=f_id,
# name=cfg_name)
# if len(config_entrys) == 1:
# config_file_entry_id = config_entrys[0].id
#
# break
cfg_obj_ = dict(
service_worker_id=self.service_worker_id,
name=cfg_name,
value=cfg_obj['value'] if cfg_obj['value'] else cfg_obj[
'default_value'],
oslo_config_schema_id=cfg_schm_id,
oslo_config_file_entry_id=cfg_f_entry
)
try:
config = db_api.config_create(self.context, cfg_obj_)
LOG.debug("Config %s is created" % config)
except exception.AlreadyExist:
configs = db_api.config_get_by_name_for_service_worker(
self.context,
service_worker_id=cfg_obj_['service_worker_id'],
name=cfg_obj_['name'])
if len(configs) == 1:
config = db_api.config_update(self.context,
configs[0].id,
cfg_obj_)
LOG.debug("Config %s is existing and is updated" % config)
class DriverProcessor(object):
def __init__(self, context, manager, service_worker_id, region_id):
self.context = context
self.manager = manager
self.service_worker_id = service_worker_id
self.region_id = region_id
self.config_dict = self._get_config_dict()
def _get_config_dict(self):
conf_dict = {}
for c in db_api.config_get_by_name_for_service_worker(
self.context,
self.service_worker_id
):
conf_dict[c.name] = c.to_dict()
return conf_dict
def _identify_drivers(self):
return (set(openstack_drivers.get_drivers_config().keys()) &
@ -477,8 +724,7 @@ class DriverProcessor(object):
# Constant naming
if name[0] == '#':
return name[1:]
return (self.config_dict[name].get('value') or
self.config_dict[name].get('default_value'))
return (self.config_dict[name].get('value'))
elif isinstance(name, tuple):
fn = name[0]
args = list()
@ -490,35 +736,13 @@ class DriverProcessor(object):
params = [self._get_value(param) for param in name[1:]]
return fmt_str % tuple(params)
@staticmethod
def _to_list(list_in_str):
def strip_out(s):
start_idx = 0
end_idx = len(s)
if s[start_idx] == '[' \
or s[start_idx] == '\'' \
or s[start_idx] == '"':
start_idx += 1
if s[end_idx - 1] == ']' \
or s[end_idx - 1] == '\'' \
or s[end_idx - 1] == '"':
end_idx -= 1
return s[start_idx:end_idx]
l = []
for s in strip_out(list_in_str.strip()).split(','):
s = str(strip_out(s.strip()))
l.append(s)
return l
def process_drivers(self, context):
def process_drivers(self):
for driver_key in self._identify_drivers():
try:
drivers = self._get_value(driver_key)
drivers = DriverProcessor._to_list(drivers)
drivers = utils._to_list(drivers)
for driver_name in drivers:
self.process_driver(context, driver_key, driver_name)
self.process_driver(driver_key, driver_name)
except KeyError: # noqa
# TODO(mrkanag) run namos-manager and restart nova-scheduler
# KeyError: 'libvirt.virt_type' is thrown, fix it
@ -526,7 +750,7 @@ class DriverProcessor(object):
(driver_key, self.service_worker_id))
continue
def process_driver(self, context, driver_key, driver_name):
def process_driver(self, driver_key, driver_name):
driver_config = \
openstack_drivers.get_drivers_config()[driver_key][driver_name]
@ -587,18 +811,17 @@ class DriverProcessor(object):
# Device
device_name = self._get_value(device_cfg['name'])
try:
# TODO(mrkanag) region_id is hard-coded, fix it !
# Set the right status as well
# TODO(mrkanag) Set the right status
device = db_api.device_create(
context,
self.context,
dict(name=device_name,
status='active',
region_id='f7dcd175-27ef-46b5-997f-e6e572f320b0'))
region_id=self.region_id))
LOG.info('Device %s is created' % device)
except exception.AlreadyExist:
device = db_api.device_get_by_name(
context,
self.context,
device_name)
LOG.info('Device %s is existing' % device)
@ -609,7 +832,7 @@ class DriverProcessor(object):
d_name = '%s-%s' % (base_name, d_name)
try:
device = db_api.device_get_by_name(
context,
self.context,
d_name)
LOG.info('Device %s is existing' % device)
except exception.DeviceNotFound:
@ -617,7 +840,7 @@ class DriverProcessor(object):
# Set the right status as well
r_id = 'f7dcd175-27ef-46b5-997f-e6e572f320b0'
device = db_api.device_create(
context,
self.context,
dict(name=d_name,
status='active',
parent_id=device.id,
@ -631,7 +854,7 @@ class DriverProcessor(object):
connection[k] = self._get_value(k)
device_endpoint = db_api.device_endpoint_create(
context,
self.context,
dict(name=device_endpoint_name,
connection=connection,
type=endpoint_type,
@ -639,7 +862,7 @@ class DriverProcessor(object):
LOG.info('Device Endpoint %s is created' % device_endpoint)
except exception.AlreadyExist:
device_endpoints = db_api.device_endpoint_get_by_device_type(
context,
self.context,
device_id=device.id,
type=endpoint_type,
name=device_endpoint_name)
@ -651,7 +874,7 @@ class DriverProcessor(object):
# Device Driver Class
try:
device_driver_class = db_api.device_driver_class_create(
context,
self.context,
dict(name=driver_name,
python_class=driver_name,
type=driver_def['type'],
@ -663,7 +886,7 @@ class DriverProcessor(object):
device_driver_class)
except exception.AlreadyExist:
device_driver_class = db_api.device_driver_class_get_by_name(
context,
self.context,
driver_name)
LOG.info('Device Driver Class %s is existing' %
device_driver_class)
@ -671,7 +894,7 @@ class DriverProcessor(object):
# Device Driver
try:
device_driver = db_api.device_driver_create(
context,
self.context,
dict(device_id=device.id,
name=driver_name,
endpoint_id=device_endpoint.id,
@ -683,7 +906,7 @@ class DriverProcessor(object):
except exception.AlreadyExist:
device_drivers = \
db_api.device_driver_get_by_device_endpoint_service_worker(
context,
self.context,
device_id=device.id,
endpoint_id=device_endpoint.id,
device_driver_class_id=device_driver_class.id,


@ -61,19 +61,73 @@ class ConductorAPI(object):
@wrapper_function
def add_region(self, context, region):
self.client.call(context, 'add_region', region=region)
return self.client.call(
context,
'region_create',
region=region)
@wrapper_function
def region_get(self, context, region_id):
return self.client.call(
context,
'region_get',
region_id=region_id)
@wrapper_function
def region_update(self, context, region_id, region):
return self.client.call(
context,
'region_update',
region_id=region_id,
region=region)
@wrapper_function
def region_get_all(self, context):
return self.client.call(context, 'region_get_all')
return self.client.call(
context,
'region_get_all')
@wrapper_function
def service_perspective_get(self, context, service_id,
include_details=False):
return self.client.call(context,
'service_perspective_get',
service_id=service_id,
include_details=include_details)
def region_delete(self, context, region_id):
return self.client.call(
context,
'region_delete',
region_id=region_id)
@wrapper_function
def add_service_node(self, context, service_node):
return self.client.call(
context,
'service_node_create',
service_node=service_node)
@wrapper_function
def service_node_get(self, context, service_node_id):
return self.client.call(
context,
'service_node_get',
service_node_id=service_node_id)
@wrapper_function
def service_node_update(self, context, service_node_id, service_node):
return self.client.call(
context,
'service_node_update',
service_node_id=service_node_id,
service_node=service_node)
@wrapper_function
def service_node_get_all(self, context):
return self.client.call(
context,
'service_node_get_all')
@wrapper_function
def service_node_delete(self, context, service_node_id):
return self.client.call(
context,
'service_node_delete',
service_node_id=service_node_id)
@wrapper_function
def device_perspective_get(self, context, device_id,
@ -97,9 +151,15 @@ class ConductorAPI(object):
'infra_perspective_get')
@wrapper_function
def view_360(self, context):
def view_360(self, context,
include_conf_file=False,
include_status=False,
include_file_entry=False):
return self.client.call(context,
'view_360')
'view_360',
include_conf_file=include_conf_file,
include_status=include_status,
include_file_entry=include_file_entry)
@wrapper_function
def config_get_by_name_for_service_worker(self,
@ -113,6 +173,31 @@ class ConductorAPI(object):
name=name,
only_configured=only_configured)
@wrapper_function
def get_status(self, context):
return self.client.call(context,
'get_status')
@wrapper_function
def ping(self, context, identification):
return self.client.call(context,
'ping',
identification=identification)
@wrapper_function
def config_file_get(self, context, config_file_id):
return self.client.call(context,
'config_file_get',
config_file_id=config_file_id)
@wrapper_function
def config_schema(self, context, project, with_file_link=False):
return self.client.call(context,
'config_schema',
project=project,
with_file_link=with_file_link)
if __name__ == '__main__':
# from namos.common import config
@ -134,13 +219,19 @@ if __name__ == '__main__':
print (json.dumps(c.infra_perspective_get(context.RequestContext())))
def print_view_360():
print (json.dumps(c.view_360(context.RequestContext())))
with open('/tmp/view_360.json', 'w') as file:
view = c.view_360(context.RequestContext(), True, True, True)
file.write(json.dumps(view))
def print_sample_conf():
for cf in c.config_get_by_name_for_service_worker(
context.RequestContext(),
service_worker_id='06e64e74-09b3-4721-8e5d-39ae40ed34f3'):
print ('%s %s' % (cf['name'], cf['value']))
def print_config_schema():
for s in ['nova', 'cinder', 'glance', 'neutron', 'heat', 'namos',
'keystone', 'ceilometer', 'tacker']:
with open('/tmp/config_schema_%s.json' % s, 'w') as file:
file.write(json.dumps(c.config_schema(
context.RequestContext(),
project=s,
with_file_link=True
)))
print_config_schema()
print_view_360()
# print_sample_conf()


@ -312,6 +312,34 @@ def service_worker_delete(context, _id):
return IMPL.service_worker_delete(context, _id)
def config_file_entry_create(context, values):
return IMPL.config_file_entry_create(context, values)
def config_file_entry_update(context, _id, values):
return IMPL.config_file_entry_update(context, _id, values)
def config_file_entry_get(context, _id):
return IMPL.config_file_entry_get(context, _id)
def config_file_entry_get_by_name(context, name):
return IMPL.config_file_entry_get_by_name(context, name)
def config_file_entry_get_all(context):
return IMPL.config_file_entry_get_all(context)
def config_file_entry_get_all_by(context, **kwargs):
return IMPL.config_file_entry_get_all_by(context, **kwargs)
def config_file_entry_delete(context, _id):
return IMPL.config_file_entry_delete(context, _id)
# config schema
def config_schema_create(context, values):
return IMPL.config_schema_create(context, values)
@ -332,14 +360,19 @@ def config_schema_get_by_name(context, name):
def config_schema_get_by(context,
namespace=None,
group=None,
name=None):
return IMPL.config_schema_get_by(context, namespace, group, name)
name=None,
project=None):
return IMPL.config_schema_get_by(context, namespace, group, name, project)
def config_schema_get_all(context):
return IMPL.config_schema_get_all(context)
def config_schema_get_all_by(context, **kwargs):
return IMPL.config_schema_get_all_by(context, **kwargs)
def config_schema_delete(context, _id):
return IMPL.config_schema_delete(context, _id)
@ -440,5 +473,11 @@ def infra_perspective_get(context):
return IMPL.infra_perspective_get(context)
def view_360(context):
return IMPL.view_360(context)
def view_360(context, include_conf_file=False, include_status=False):
return IMPL.view_360(context,
include_conf_file=include_conf_file,
include_status=include_status)
def get_status(context):
return IMPL.get_status(context)


@ -178,14 +178,21 @@ def upgrade():
def downgrade():
op.drop_table('oslo_config')
op.drop_table('oslo_config_file_entry')
op.drop_table('oslo_config_file')
op.drop_table('oslo_config_schema')
op.drop_table('os_capability')
op.drop_table('os_capability_schema')
op.drop_table('device_driver')
op.drop_table('device_endpoint')
op.drop_table('device_driver_class')
op.drop_table('device')
op.drop_table('service_worker')
op.drop_table('service_component')
op.drop_table('device_endpoint')
op.drop_table('service_node')
op.drop_table('device')
op.drop_table('region')
op.drop_table('device_driver_class')
op.drop_table('service')
op.drop_table('region')


@ -20,6 +20,7 @@ from oslo_db import exception as db_exception
from oslo_db.sqlalchemy import session as db_session
from namos.common import exception
from namos.common import utils
from namos.db.sqlalchemy import models
@ -439,6 +440,8 @@ def service_component_get_all_by_node_for_service(context,
query = query.filter_by(service_id=service_id)
if name is not None:
query = query.filter_by(name=name)
query = query.order_by(models.ServiceComponent.type)
return query.all()
@ -487,6 +490,7 @@ def service_worker_get_by_host_for_service_component(context,
filter_by(service_component_id=service_component_id)
if host is not None:
query = query.filter_by(host=host)
return query.all()
@ -532,10 +536,12 @@ def config_schema_get_by_name(context, name):
return config
# TODO(mrkanag) fix it to take **kwargs
def config_schema_get_by(context,
namespace=None,
group=None,
name=None):
name=None,
project=None):
query = _model_query(context, models.OsloConfigSchema)
if name is not None:
query = query.filter_by(name=name)
@ -543,6 +549,9 @@ def config_schema_get_by(context,
query = query.filter_by(group_name=group)
if namespace is not None:
query = query.filter_by(namespace=namespace)
if project is not None:
query = query.filter_by(project=project)
return query.all()
@ -550,7 +559,7 @@ def config_schema_get_all(context):
return _get_all(context, models.OsloConfigSchema)
def _config_schema_get_all_by(context, **kwargs):
def config_schema_get_all_by(context, **kwargs):
return _get_all_by(context, models.OsloConfigSchema, **kwargs)
@ -594,7 +603,7 @@ def config_get_by_name_for_service_worker(context,
query = query.filter_by(name=name)
elif only_configured:
query = query.filter(
models.OsloConfig.oslo_config_file_id is not None)
models.OsloConfig.oslo_config_file_entry_id is not None)
return query.all()
@ -610,6 +619,44 @@ def config_delete(context, _id):
return _delete(context, models.OsloConfig, _id)
# Config File Entry
def config_file_entry_create(context, values):
return _create(context, models.OsloConfigFileEntry(), values)
def config_file_entry_update(context, _id, values):
return _update(context, models.OsloConfigFileEntry, _id, values)
def config_file_entry_get(context, _id):
config_file_entry = _get(context, models.OsloConfigFileEntry, _id)
if config_file_entry is None:
raise exception.ConfigNotFound(config_file_entry_id=_id)
return config_file_entry
def config_file_entry_get_by_name(context, name):
config_file_entry = _get_by_name(context, models.OsloConfigFileEntry, name)
if config_file_entry is None:
raise exception.ConfigNotFound(config_file_entry_id=name)
return config_file_entry
def config_file_entry_get_all(context):
return _get_all(context, models.OsloConfigFileEntry)
def config_file_entry_get_all_by(context, **kwargs):
return _get_all_by(context, models.OsloConfigFileEntry, **kwargs)
def config_file_entry_delete(context, _id):
return _delete(context, models.OsloConfigFileEntry, _id)
# Config file
def config_file_create(context, values):
return _create(context, models.OsloConfigFile(), values)
@ -647,6 +694,18 @@ def config_file_get_by_name_for_service_node(
return query.all()
def _config_file_id_get_for_service_component(context, service_component_id):
entries = config_file_entry_get_all_by(
context,
service_component_id=service_component_id)
files = []
for e in entries:
if e.oslo_config_file_id not in files:
files.append(e.oslo_config_file_id)
return files
def config_file_get_all(context):
return _get_all(context, models.OsloConfigFile)
@ -889,7 +948,7 @@ def infra_perspective_get(context):
return infra_perspective
def view_360(context):
def view_360(context, include_conf_file=False, include_status=False):
view = dict()
view['region'] = dict()
@ -901,6 +960,8 @@ def view_360(context):
view['device_driver_class'] = dict()
view['device_endpoint'] = dict()
view['device'] = dict()
view['config_file'] = dict()
view['status'] = dict()
region_list = region_get_all(context)
for rg in region_list:
@ -940,15 +1001,23 @@ def view_360(context):
'service_component'][srv_cmp.id] = dict()
view['region'][rg.id]['service_node'][srv_nd.id][
'service_component'][srv_cmp.id]['config_file'] = dict()
cfg_fl_lst = config_file_get_by_name_for_service_node(
cfg_fl_lst = _config_file_id_get_for_service_component(
context,
service_node_id=srv_nd.id
service_component_id=srv_cmp.id
)
for cfg_fl in cfg_fl_lst:
for cfg_fl_id in cfg_fl_lst:
# config file
if include_conf_file:
view['config_file'][cfg_fl_id] = config_file_get(
context,
cfg_fl_id
)
else:
view['config_file'][cfg_fl_id] = dict()
view['region'][rg.id]['service_node'][srv_nd.id][
'service_component'][srv_cmp.id][
'config_file'][cfg_fl.name] = cfg_fl.file
'config_file'][cfg_fl_id] = dict()
view['region'][rg.id]['service_node'][srv_nd.id][
'service_component'][srv_cmp.id]['service'] = srv_id
@ -1025,8 +1094,39 @@ def view_360(context):
'service_worker'][srv_wkr.id]['device_driver'][
dvc_drv.id]['device'] = dvc_id
if include_status:
view['status'] = get_status(context)
return view
def get_status(context):
sr = {}
for sn in service_node_get_all(context):
for sc in service_component_get_all_by_node_for_service(
context,
node_id=sn.id
):
service = service_get(context, sc.service_id)
for sw in service_worker_get_by_host_for_service_component(
context,
service_component_id=sc.id
):
# TODO(mrkanag) Move this to db layer and query non deleted
# if sw.deleted_at is not None:
# continue
sr[sw.pid] = (
dict(node=sn.name,
type=sc.type,
service=service.name,
component=sw.name,
status=utils.find_status(sw),
is_launcher=sw.is_launcher))
return sr
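get_status keys the report by worker pid; a sketch of one entry, all values illustrative:

status_report = {
    '12345': dict(node='devstack-ctrl',            # service_node.name (fqdn)
                  type='compute',                  # service_component.type
                  service='nova',                  # service.name
                  component='12345@nova-compute',  # service_worker.name
                  status=True,                     # utils.find_status(sw)
                  is_launcher=False),
}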
if __name__ == '__main__':
from namos.common import config
@ -1053,4 +1153,4 @@ if __name__ == '__main__':
# print perp_json
import json
print (json.dumps(view_360(None)))
print (json.dumps(view_360(None, True, True)))


@ -257,6 +257,8 @@ class ServiceComponent(BASE,
Uuid,
sqlalchemy.ForeignKey('service.id'),
nullable=False)
type = sqlalchemy.Column(sqlalchemy.String(255),
nullable=False)
class ServiceWorker(BASE,
@ -302,7 +304,11 @@ class OsloConfigSchema(BASE,
# TODO(mrkanag) Check whether conf is unique across all services or only
# specific to namespace, otherwise uniqueconstraint is name, group_name
__table_args__ = (
UniqueConstraint("group_name", "name", "namespace"),
UniqueConstraint("group_name",
"name",
"namespace",
"project",
"file_name"),
)
name = sqlalchemy.Column(sqlalchemy.String(255),
@ -327,6 +333,15 @@ class OsloConfigSchema(BASE,
sqlalchemy.String(128),
nullable=False
)
# This column helps to keep schema for each service
project = sqlalchemy.Column(
sqlalchemy.String(128),
nullable=False
)
file_name = sqlalchemy.Column(
sqlalchemy.String(128),
nullable=False
)
# TODO(mrkanag) default value is sometimes overridden by services, which
# osloconfig allows, so this column should have values per given service
default_value = sqlalchemy.Column(
@ -346,14 +361,31 @@ class OsloConfigSchema(BASE,
)
class OsloConfig(BASE,
NamosBase,
SoftDelete,
Extra):
class OsloConfigBase(object):
name = sqlalchemy.Column(sqlalchemy.String(255),
# unique=True,
nullable=False,
default=lambda: str(uuid.uuid4()))
value = sqlalchemy.Column(
sqlalchemy.Text
)
oslo_config_schema_id = sqlalchemy.Column(
Uuid,
sqlalchemy.ForeignKey('oslo_config_schema.id')
)
class OsloConfig(
BASE,
NamosBase,
SoftDelete,
Extra):
__tablename__ = 'oslo_config'
__table_args__ = (
UniqueConstraint("oslo_config_schema_id", "service_worker_id"),
UniqueConstraint("name", "service_worker_id"),
)
name = sqlalchemy.Column(sqlalchemy.String(255),
@ -364,19 +396,54 @@ class OsloConfig(BASE,
value = sqlalchemy.Column(
sqlalchemy.Text
)
oslo_config_schema_id = sqlalchemy.Column(
Uuid,
sqlalchemy.ForeignKey('oslo_config_schema.id')
)
service_worker_id = sqlalchemy.Column(
Uuid,
sqlalchemy.ForeignKey('service_worker.id')
)
oslo_config_file_entry_id = sqlalchemy.Column(
Uuid,
sqlalchemy.ForeignKey('oslo_config_file_entry.id')
)
class OsloConfigFileEntry(
BASE,
NamosBase,
SoftDelete,
Extra):
__tablename__ = 'oslo_config_file_entry'
__table_args__ = (
UniqueConstraint("oslo_config_file_id",
"name",
"service_component_id", ),
)
name = sqlalchemy.Column(sqlalchemy.String(255),
# unique=True,
nullable=False,
default=lambda: str(uuid.uuid4()))
value = sqlalchemy.Column(
sqlalchemy.Text
)
oslo_config_schema_id = sqlalchemy.Column(
Uuid,
sqlalchemy.ForeignKey('oslo_config_schema.id')
)
oslo_config_file_id = sqlalchemy.Column(
Uuid,
sqlalchemy.ForeignKey('oslo_config_file.id')
)
oslo_config_schema_id = sqlalchemy.Column(
service_component_id = sqlalchemy.Column(
Uuid,
sqlalchemy.ForeignKey('oslo_config_schema.id'),
nullable=False
)
service_worker_id = sqlalchemy.Column(
Uuid,
sqlalchemy.ForeignKey('service_worker.id'),
nullable=False
sqlalchemy.ForeignKey('service_component.id')
)
@ -397,14 +464,67 @@ class OsloConfigFile(BASE,
file = sqlalchemy.Column(
LongText
)
# Always having last one updated the conf file
service_component_id = sqlalchemy.Column(
Uuid,
sqlalchemy.ForeignKey('service_component.id'),
nullable=False
)
service_node_id = sqlalchemy.Column(
Uuid,
sqlalchemy.ForeignKey('service_node.id'),
nullable=False
)
class CapabilitySchema(BASE,
NamosBase,
Description,
SoftDelete,
Extra):
__tablename__ = 'os_capability_schema'
# TODO(mrkanag) Check whether conf is unique across all services or only
# specific to namespace, otherwise uniqueconstraint is name, group_name
__table_args__ = (
UniqueConstraint("name",
"category"),
)
name = sqlalchemy.Column(sqlalchemy.String(255),
# unique=True,
nullable=False,
default=lambda: str(uuid.uuid4()))
type = sqlalchemy.Column(
sqlalchemy.String(128),
nullable=False
)
category = sqlalchemy.Column(
sqlalchemy.String(128),
nullable=False
)
class Capability(BASE,
NamosBase,
SoftDelete,
Extra):
__tablename__ = 'os_capability'
capability_schema_id = sqlalchemy.Column(
Uuid,
sqlalchemy.ForeignKey('os_capability_schema.id'),
nullable=False
)
value = sqlalchemy.Column(
sqlalchemy.Text
)
device_id = sqlalchemy.Column(
Uuid,
sqlalchemy.ForeignKey('device.id'),
nullable=False
)
class Quota(object):
pass
class Reservation(object):
pass