From e8f54799490e2853296e8e91459da6072d2d8040 Mon Sep 17 00:00:00 2001
From: Kanagaraj Manickam
Date: Mon, 18 Apr 2016 15:55:09 +0530
Subject: [PATCH] refactored the processor

Change-Id: I4d06b563b5170e72cb5f691fb569c4f77d74539c
---
 README.rst | 51 +
 namos/cmd/conductor.py | 2 +-
 namos/cmd/manage.py | 2 +-
 .../{generator.py.bkup => generator.py} | 2 +-
 namos/conductor/config_processor.py | 256 +++
 namos/conductor/manager.py | 707 +------
 namos/conductor/namespace_processor.py | 252 +++
 namos/conductor/region_processor.py | 55 +
 namos/conductor/service_processor.py | 124 ++
 namos/db/openstack_drivers.py | 1420 +----------------
 namos/db/sample.py | 586 +++----
 setup.cfg | 2 +
 12 files changed, 1091 insertions(+), 2368 deletions(-)
 rename namos/common/{generator.py.bkup => generator.py} (99%)
 create mode 100644 namos/conductor/config_processor.py
 create mode 100644 namos/conductor/namespace_processor.py
 create mode 100644 namos/conductor/region_processor.py
 create mode 100644 namos/conductor/service_processor.py

diff --git a/README.rst b/README.rst
index e93829e..2bc045e 100644
--- a/README.rst
+++ b/README.rst
@@ -12,3 +12,54 @@ Features
 --------
 
 * Automatic discovery of OpenStack deployment architecture
+
+How to set up the db
+--------------------
+* Create the 'namos' db using the below command
+
+  `create database namos`
+
+* Update database.connection in /etc/namos/namos.conf with the db username
+  and password
+
+* Run the below command to sync the namos schema
+
+  `namos-manage create_schema`
+
+How to set up namos
+-------------------
+* Assuming namos is cloned at /opt/stack/namos, run the below command from
+  that directory to install namos.
+
+  `sudo python setup.py install`
+
+How to run namos
+----------------
+* namos-api - Namos API, which listens on port 9999. It now has support for
+  keystone authentication
+
+  `namos-api`
+
+* namos-manager - Namos backend service. To configure the number of workers,
+  update os_manager->workers
+
+  `namos-manager --config-file=/etc/namos/namos.conf`
+
+NOTE: Before running namos-manager, please add the os-namos agent to the
+console scripts of the respective service components.
+
+To find the 360 degree view of the OpenStack deployment
+--------------------------------------------------------
+Open http://localhost:8888/v1/view_360
+
+It provides a 360 degree view under region->service_node in the response. In
+addition, it gives the current live status of each service component.
+
+To find the status of components
+--------------------------------
+Run the below command
+
+`namos-manage status`
+
+NOTE: This command supports querying status by a given node name, node type,
+service and component. To find more details, run this command with --help
\ No newline at end of file
diff --git a/namos/cmd/conductor.py b/namos/cmd/conductor.py
index fcfcb6b..891919f 100644
--- a/namos/cmd/conductor.py
+++ b/namos/cmd/conductor.py
@@ -16,7 +16,7 @@
 # under the License.
""" -The Namos Infra Management Service +The Namos Manager """ import eventlet diff --git a/namos/cmd/manage.py b/namos/cmd/manage.py index 5805f3d..0f46d25 100644 --- a/namos/cmd/manage.py +++ b/namos/cmd/manage.py @@ -21,7 +21,6 @@ from namos.common import exception from namos.common import utils from namos.db import api -from namos.db import sample from namos.db.sqlalchemy import migration @@ -154,6 +153,7 @@ class DBCommand(object): migration.history() def demo_data(self): + from namos.db import sample if CONF.command.purge: sample.purge_demo_data() else: diff --git a/namos/common/generator.py.bkup b/namos/common/generator.py similarity index 99% rename from namos/common/generator.py.bkup rename to namos/common/generator.py index 6ff4b92..560f740 100644 --- a/namos/common/generator.py.bkup +++ b/namos/common/generator.py @@ -405,7 +405,7 @@ def _append_opts_json(f, group, namespaces): f[group][namespace][opt.name]['deprecated'] = [] for d in opt.deprecated_opts: f[group][namespace][opt.name]['deprecated'].append( - (d.group or 'DEFAULT', d.name or opt.dest)) + (d.group or 'DEFAULT', d.name or opt.dest)) f[group][namespace][opt.name][ 'deprecated_for_removal'] = opt.deprecated_for_removal diff --git a/namos/conductor/config_processor.py b/namos/conductor/config_processor.py new file mode 100644 index 0000000..3a7d382 --- /dev/null +++ b/namos/conductor/config_processor.py @@ -0,0 +1,256 @@ +# -*- coding: utf-8 -*- + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_config import cfg +from oslo_log import log + +from namos.common import exception +from namos.common import utils +from namos.db import api as db_api + +LOG = log.getLogger(__name__) + +CONF = cfg.CONF + + +class ConfigProcessor(object): + def __init__(self, context, manager, registration_info, service_worker_id): + self.context = context + self.manager = manager + self.registration_info = registration_info + self.service_worker_id = service_worker_id + self.service_component_id = db_api.service_worker_get( + self.context, + self.service_worker_id).service_component_id + sc = db_api.service_component_get( + self.context, + self.service_component_id + ) + self.service_node_id = sc.node_id + self.project = db_api.service_get(self.context, sc.service_id).name + + def file_to_configs(self, file_content): + import uuid + tmp_file_path = '/tmp/%s.conf' % str(uuid.uuid4()) + with open(tmp_file_path, 'w') as file: + file.write(file_content) + + conf_dict = utils.file_to_configs(tmp_file_path) + + import os + os.remove(tmp_file_path) + + return conf_dict + + def _form_config_name(self, group, key): + return '%s.%s' % (group, key) + + def process_config_files(self): + # config file + conf_name_to_file_id = dict() + for cfg_f in self.registration_info['config_file_dict'].keys(): + try: + config_file = db_api.config_file_create( + self.context, + dict(name=cfg_f, + file=self.registration_info[ + 'config_file_dict'][cfg_f], + service_node_id=self.service_node_id)) + LOG.info('Oslo config file %s is created' % config_file) + except exception.AlreadyExist: + config_files = \ + db_api.config_file_get_by_name_for_service_node( + self.context, + service_node_id=self.service_node_id, + name=cfg_f + ) + if len(config_files) == 1: + config_file = \ + db_api.config_file_update( + self.context, + config_files[0].id, + dict(file=self.registration_info[ + 'config_file_dict'][cfg_f])) + LOG.info('Oslo config file %s is existing and is updated' + % config_file) + + config_dict = self.file_to_configs( + config_file.file + ) + + # config file entry + for grp, keys in config_dict.items(): + for key, value in keys.items(): + # find config schema + cfg_schs = db_api.config_schema_get_by( + context=self.context, + group=grp, + name=key, + project=self.project + ) + + cfg_sche = None + if len(cfg_schs) == 0: + LOG.debug("[%s] No Config Schema is existing, so " + "no schema is associated for Config Entry " + "%s::%s" % + (self.service_component_id, + grp, + key)) + elif len(cfg_schs) > 1: + LOG.debug("[%s] More than one Config Schema is " + "existing, so no schema is associated for " + "Config Entry %s::%s" % + (self.service_component_id, + grp, + key)) + else: + cfg_sche = cfg_schs[0] + LOG.debug("[%s] Config Schema %s is existing and is " + "used to associated for Config Entry" + " %s::%s" % + (self.service_component_id, + cfg_sche.id, + grp, + key)) + + # config file entry + cfg_name = self._form_config_name(grp, key) + + cfg_obj_ = dict( + service_component_id=self.service_component_id, + name=cfg_name, + value=value, + oslo_config_schema_id=cfg_sche.id if + cfg_sche else None, + oslo_config_file_id=config_file.id + ) + + try: + config = db_api.config_file_entry_create( + self.context, + cfg_obj_) + LOG.debug("Config Entry %s is created" % config) + except exception.AlreadyExist: + configs = db_api.config_file_entry_get_all_by( + self.context, + service_component_id=cfg_obj_[ + 'service_component_id'], + oslo_config_file_id=config_file.id, + name=cfg_obj_['name']) + if len(configs) == 1: + config = 
db_api.config_file_entry_update( + self.context, + configs[0].id, + cfg_obj_) + LOG.debug("Config Entry %s is existing and is " + "updated" % config) + + conf_name_to_file_id[cfg_name] = config.id + + return conf_name_to_file_id + + def process_configs(self): + conf_name_to_file_id = self.process_config_files() + # Config + for cfg_obj in self.registration_info['config_list']: + # This format is used by DriverProcessor + cfg_name = self._form_config_name(cfg_obj['group'], + cfg_obj['name']) + + if not conf_name_to_file_id.get(cfg_name): + cfg_schm_id = None + cfg_f_entry = None + + # find config schema + # ignore the config file_name right now !!, assumed conf unique + # across the service wth given group and name + cfg_schs = db_api.config_schema_get_by( + context=self.context, + group=cfg_obj['group'], + name=cfg_obj['name'], + project=self.project + ) + + if len(cfg_schs) == 0: + LOG.debug("[%s] No Config Schema is existing, so " + "no schema is associated for Config %s::%s" % + (self.service_worker_id, + cfg_obj['group'], + cfg_obj['name'])) + elif len(cfg_schs) > 1: + LOG.debug("[%s] More than one Config Schema is existing, " + "so no schema is associated for Config %s::%s" % + (self.service_worker_id, + cfg_obj['group'], + cfg_obj['name'])) + else: + # try: + # cfg_sche = db_api.config_schema_create( + # self.context, + # dict( + # namespace='UNKNOWN-tagged-by-NAMOS', + # default_value=cfg_obj['default_value'], + # type=cfg_obj['type'], + # help=cfg_obj['help'], + # required=cfg_obj['required'], + # secret=cfg_obj['secret'], + # mutable=False, + # group_name=cfg_obj['group'], + # name=cfg_obj['name'] + # ) + # ) + # LOG.info("Config Schema %s is created" % cfg_sche) + # except exception.AlreadyExist: + # cfg_schs = db_api.config_schema_get_by( + # context=self.context, + # group=cfg_obj['group'], + # name=cfg_obj['name'], + # namespace='UNKNOWN-tagged-by-NAMOS' + # ) + + cfg_sche = cfg_schs[0] + LOG.debug("[%s] Config Schema %s is existing and is used " + "for Config %s::%s" % + (self.service_worker_id, + cfg_sche.id, + cfg_obj['group'], + cfg_obj['name'])) + cfg_schm_id = cfg_sche.id + else: + cfg_schm_id = None + cfg_f_entry = conf_name_to_file_id[cfg_name] + + cfg_obj_ = dict( + service_worker_id=self.service_worker_id, + name=cfg_name, + value=cfg_obj['value'] if cfg_obj['value'] else cfg_obj[ + 'default_value'], + oslo_config_schema_id=cfg_schm_id, + oslo_config_file_entry_id=cfg_f_entry + ) + + try: + config = db_api.config_create(self.context, cfg_obj_) + LOG.debug("Config %s is created" % config) + except exception.AlreadyExist: + configs = db_api.config_get_by_name_for_service_worker( + self.context, + service_worker_id=cfg_obj_['service_worker_id'], + name=cfg_obj_['name']) + if len(configs) == 1: + config = db_api.config_update(self.context, + configs[0].id, + cfg_obj_) + LOG.debug("Config %s is existing and is updated" % config) diff --git a/namos/conductor/manager.py b/namos/conductor/manager.py index 07dab39..5865791 100644 --- a/namos/conductor/manager.py +++ b/namos/conductor/manager.py @@ -20,11 +20,13 @@ from oslo_log import log from oslo_utils import timeutils from namos.common import config as namos_config -from namos.common import exception from namos.common import messaging from namos.common import utils +from namos.conductor.config_processor import ConfigProcessor +from namos.conductor.namespace_processor import NamespaceProcessor +from namos.conductor.region_processor import RegionProcessor +from namos.conductor.service_processor import ServiceProcessor from 
namos.db import api as db_api -from namos.db import openstack_drivers LOG = log.getLogger(__name__) @@ -48,6 +50,49 @@ class ConductorManager(object): RPC_API_VERSION = '1.0' TOPIC = namos_config.MESSAGE_QUEUE_CONDUCTOR_TOPIC + def _regisgration_ackw(self, context, identification): + client = messaging.get_rpc_client( + topic=self._os_namos_listener_topic(identification), + version=self.RPC_API_VERSION, + exchange=namos_config.PROJECT_NAME) + client.cast(context, + 'regisgration_ackw', + identification=identification) + LOG.info("REGISTER [%s] ACK" % identification) + + def _os_namos_listener_topic(self, identification): + return 'namos.CONF.%s' % identification + + def _ping(self, context, identification): + client = messaging.get_rpc_client( + topic=self._os_namos_listener_topic(identification), + version=self.RPC_API_VERSION, + exchange=namos_config.PROJECT_NAME, + timeout=1) + try: + client.call(context, + 'ping_me', + identification=identification) + + LOG.info("PING [%s] SUCCESSFUL" % identification) + return True + except: # noqa + LOG.info("PING [%s] FAILED" % identification) + return False + + def _update_config_file(self, context, identification, name, content): + client = messaging.get_rpc_client( + topic=self._os_namos_listener_topic(identification), + version=self.RPC_API_VERSION, + exchange=namos_config.PROJECT_NAME, + timeout=2) + client.call(context, + 'update_config_file', + identification=identification, + name=name, + content=content) + LOG.info("CONF FILE [%s] UPDATE [%s] DONE" % (name, identification)) + @request_context def add_region(self, context, region): return db_api.region_create(context, region) @@ -120,10 +165,10 @@ class ConductorManager(object): cp.process_configs() # Device Driver processing # TODO(mrkanag) if this to be per service component?? 
- dp = DriverProcessor(context, - self, - service_worker_id, - region_id) + dp = NamespaceProcessor(context, + self, + service_worker_id, + region_id) dp.process_drivers() self._regisgration_ackw(context, @@ -140,49 +185,6 @@ class ConductorManager(object): sp.cleanup(service_component_id) return service_worker_id - def _regisgration_ackw(self, context, identification): - client = messaging.get_rpc_client( - topic=self._os_namos_listener_topic(identification), - version=self.RPC_API_VERSION, - exchange=namos_config.PROJECT_NAME) - client.cast(context, - 'regisgration_ackw', - identification=identification) - LOG.info("REGISTER [%s] ACK" % identification) - - def _os_namos_listener_topic(self, identification): - return 'namos.CONF.%s' % identification - - def _ping(self, context, identification): - client = messaging.get_rpc_client( - topic=self._os_namos_listener_topic(identification), - version=self.RPC_API_VERSION, - exchange=namos_config.PROJECT_NAME, - timeout=1) - try: - client.call(context, - 'ping_me', - identification=identification) - - LOG.info("PING [%s] SUCCESSFUL" % identification) - return True - except: # noqa - LOG.info("PING [%s] FAILED" % identification) - return False - - def _update_config_file(self, context, identification, name, content): - client = messaging.get_rpc_client( - topic=self._os_namos_listener_topic(identification), - version=self.RPC_API_VERSION, - exchange=namos_config.PROJECT_NAME, - timeout=2) - client.call(context, - 'update_config_file', - identification=identification, - name=name, - content=content) - LOG.info("CONF FILE [%s] UPDATE [%s] DONE" % (name, identification)) - @request_context def heart_beat(self, context, identification, dieing=False): try: @@ -339,610 +341,3 @@ class ConductorManager(object): cfg_s.name]['entries'] = cfg_es return file_schema - - -class RegionProcessor(object): - def __init__(self, - context, - manager, - registration_info): - self.registration_info = registration_info - self.manager = manager - self.context = context - - def process_region(self): - # region - # If region is not provided, make it as belongs to namos's region - if not self.registration_info.get('region_name'): - self.registration_info[ - 'region_name'] = cfg.CONF.os_namos.region_name - - try: - region = db_api.region_create( - self.context, - dict(name=self.registration_info.get('region_name')) - ) - LOG.info('Region %s is created' % region) - except exception.AlreadyExist: - region = db_api.region_get_by_name( - self.context, - name=self.registration_info.get('region_name') - ) - LOG.info('Region %s is existing' % region) - - return region.id - - -class ServiceProcessor(object): - def __init__(self, - context, - manager, - region_id, - registration_info): - self.registration_info = registration_info - self.manager = manager - self.context = context - self.region_id = region_id - - def process_service(self): - # Service Node - try: - # TODO(mrkanag) user proper node name instead of fqdn - node = db_api.service_node_create( - self.context, - dict(name=self.registration_info.get('fqdn'), - fqdn=self.registration_info.get('fqdn'), - region_id=self.region_id, - extra={'ips': self.registration_info.get('ips')})) - LOG.info('Service node %s is created' % node) - except exception.AlreadyExist: - # TODO(mrkanag) is this to be region specifc search - node = db_api.service_node_get_by_name( - self.context, - self.registration_info.get('fqdn')) - LOG.info('Service node %s is existing' % node) - - # Service - try: - s_id = 'b9c2549f-f685-4bc2-92e9-ba8af9c18591' - 
service = db_api.service_create( - self.context, - # TODO(mrkanag) use keystone python client and - # use real service id here - dict(name=self.registration_info.get('project_name'), - keystone_service_id=s_id)) - - LOG.info('Service %s is created' % service) - except exception.AlreadyExist: - service = db_api.service_get_by_name( - self.context, - self.registration_info.get('project_name')) - LOG.info('Service %s is existing' % service) - - # Service Component - try: - service_component = db_api.service_component_create( - self.context, - dict(name=self.registration_info['prog_name'], - node_id=node.id, - service_id=service.id, - type=namos_config.find_type(self.registration_info[ - 'prog_name']))) - LOG.info('Service Component %s is created' % service_component) - except exception.AlreadyExist: - service_components = \ - db_api.service_component_get_all_by_node_for_service( - self.context, - node_id=node.id, - service_id=service.id, - name=self.registration_info['prog_name'] - ) - if len(service_components) == 1: - service_component = service_components[0] - LOG.info('Service Component %s is existing' % - service_component) - # TODO(mrkanag) what to do when service_components size is > 1 - - # Service Worker - try: - service_worker = db_api.service_worker_create( - self.context, - # TODO(mrkanag) Fix the name, device driver proper ! - dict(name='%s@%s' % (service_component.name, - self.registration_info['pid']), - pid=self.registration_info['identification'], - host=self.registration_info['host'], - service_component_id=service_component.id, - deleted_at=None, - is_launcher=self.registration_info['i_am_launcher'] - )) - LOG.info('Service Worker %s is created' % service_worker) - except exception.AlreadyExist: - service_worker = db_api.service_worker_get_all_by( - self.context, - pid=self.registration_info['identification'], - service_component_id=service_component.id - )[0] - LOG.info('Service Worker %s is existing' % - service_worker) - - return service_component.id, service_worker.id - - def cleanup(self, service_component_id): - # clean up the dead service workers - db_api.cleanup(self.context, service_component_id) - - -class ConfigProcessor(object): - def __init__(self, context, manager, registration_info, service_worker_id): - self.context = context - self.manager = manager - self.registration_info = registration_info - self.service_worker_id = service_worker_id - self.service_component_id = db_api.service_worker_get( - self.context, - self.service_worker_id).service_component_id - sc = db_api.service_component_get( - self.context, - self.service_component_id - ) - self.service_node_id = sc.node_id - self.project = db_api.service_get(self.context, sc.service_id).name - - def file_to_configs(self, file_content): - import uuid - tmp_file_path = '/tmp/%s.conf' % str(uuid.uuid4()) - with open(tmp_file_path, 'w') as file: - file.write(file_content) - - conf_dict = utils.file_to_configs(tmp_file_path) - - import os - os.remove(tmp_file_path) - - return conf_dict - - def _form_config_name(self, group, key): - return '%s.%s' % (group, key) - - def process_config_files(self): - # config file - conf_name_to_file_id = dict() - for cfg_f in self.registration_info['config_file_dict'].keys(): - try: - config_file = db_api.config_file_create( - self.context, - dict(name=cfg_f, - file=self.registration_info[ - 'config_file_dict'][cfg_f], - service_node_id=self.service_node_id)) - LOG.info('Oslo config file %s is created' % config_file) - except exception.AlreadyExist: - config_files = \ - 
db_api.config_file_get_by_name_for_service_node( - self.context, - service_node_id=self.service_node_id, - name=cfg_f - ) - if len(config_files) == 1: - config_file = \ - db_api.config_file_update( - self.context, - config_files[0].id, - dict(file=self.registration_info[ - 'config_file_dict'][cfg_f])) - LOG.info('Oslo config file %s is existing and is updated' - % config_file) - - config_dict = self.file_to_configs( - config_file.file - ) - - # config file entry - for grp, keys in config_dict.items(): - for key, value in keys.items(): - # find config schema - cfg_schs = db_api.config_schema_get_by( - context=self.context, - group=grp, - name=key, - project=self.project - ) - - cfg_sche = None - if len(cfg_schs) == 0: - LOG.debug("[%s] No Config Schema is existing, so " - "no schema is associated for Config Entry " - "%s::%s" % - (self.service_component_id, - grp, - key)) - elif len(cfg_schs) > 1: - LOG.debug("[%s] More than one Config Schema is " - "existing, so no schema is associated for " - "Config Entry %s::%s" % - (self.service_component_id, - grp, - key)) - else: - cfg_sche = cfg_schs[0] - LOG.debug("[%s] Config Schema %s is existing and is " - "used to associated for Config Entry" - " %s::%s" % - (self.service_component_id, - cfg_sche.id, - grp, - key)) - - # config file entry - cfg_name = self._form_config_name(grp, key) - - cfg_obj_ = dict( - service_component_id=self.service_component_id, - name=cfg_name, - value=value, - oslo_config_schema_id=cfg_sche.id if - cfg_sche else None, - oslo_config_file_id=config_file.id - ) - - try: - config = db_api.config_file_entry_create( - self.context, - cfg_obj_) - LOG.debug("Config Entry %s is created" % config) - except exception.AlreadyExist: - configs = db_api.config_file_entry_get_all_by( - self.context, - service_component_id=cfg_obj_[ - 'service_component_id'], - oslo_config_file_id=config_file.id, - name=cfg_obj_['name']) - if len(configs) == 1: - config = db_api.config_file_entry_update( - self.context, - configs[0].id, - cfg_obj_) - LOG.debug("Config Entry %s is existing and is " - "updated" % config) - - conf_name_to_file_id[cfg_name] = config.id - - return conf_name_to_file_id - - def process_configs(self): - conf_name_to_file_id = self.process_config_files() - # Config - for cfg_obj in self.registration_info['config_list']: - # This format is used by DriverProcessor - cfg_name = self._form_config_name(cfg_obj['group'], - cfg_obj['name']) - - if not conf_name_to_file_id.get(cfg_name): - cfg_schm_id = None - cfg_f_entry = None - - # find config schema - # ignore the config file_name right now !!, assumed conf unique - # across the service wth given group and name - cfg_schs = db_api.config_schema_get_by( - context=self.context, - group=cfg_obj['group'], - name=cfg_obj['name'], - project=self.project - ) - - if len(cfg_schs) == 0: - LOG.debug("[%s] No Config Schema is existing, so " - "no schema is associated for Config %s::%s" % - (self.service_worker_id, - cfg_obj['group'], - cfg_obj['name'])) - elif len(cfg_schs) > 1: - LOG.debug("[%s] More than one Config Schema is existing, " - "so no schema is associated for Config %s::%s" % - (self.service_worker_id, - cfg_obj['group'], - cfg_obj['name'])) - else: - # try: - # cfg_sche = db_api.config_schema_create( - # self.context, - # dict( - # namespace='UNKNOWN-tagged-by-NAMOS', - # default_value=cfg_obj['default_value'], - # type=cfg_obj['type'], - # help=cfg_obj['help'], - # required=cfg_obj['required'], - # secret=cfg_obj['secret'], - # mutable=False, - # group_name=cfg_obj['group'], - 
# name=cfg_obj['name'] - # ) - # ) - # LOG.info("Config Schema %s is created" % cfg_sche) - # except exception.AlreadyExist: - # cfg_schs = db_api.config_schema_get_by( - # context=self.context, - # group=cfg_obj['group'], - # name=cfg_obj['name'], - # namespace='UNKNOWN-tagged-by-NAMOS' - # ) - - cfg_sche = cfg_schs[0] - LOG.debug("[%s] Config Schema %s is existing and is used " - "for Config %s::%s" % - (self.service_worker_id, - cfg_sche.id, - cfg_obj['group'], - cfg_obj['name'])) - cfg_schm_id = cfg_sche.id - else: - cfg_schm_id = None - cfg_f_entry = conf_name_to_file_id[cfg_name] - - # config_file_entry_id = None - # for f_id, conf_groups in conf_name_to_file_id.items(): - # if cfg_obj['group'] in list(conf_groups): - # if cfg_obj['name'] in list(conf_groups[cfg_obj[ - # 'group']]): - # config_entrys=db_api.config_file_entry_get_all_by( - # self.context, - # service_component_id=self.service_component_id, - # oslo_config_file_id=f_id, - # name=cfg_name) - # if len(config_entrys) == 1: - # config_file_entry_id = config_entrys[0].id - # - # break - - cfg_obj_ = dict( - service_worker_id=self.service_worker_id, - name=cfg_name, - value=cfg_obj['value'] if cfg_obj['value'] else cfg_obj[ - 'default_value'], - oslo_config_schema_id=cfg_schm_id, - oslo_config_file_entry_id=cfg_f_entry - ) - - try: - config = db_api.config_create(self.context, cfg_obj_) - LOG.debug("Config %s is created" % config) - except exception.AlreadyExist: - configs = db_api.config_get_by_name_for_service_worker( - self.context, - service_worker_id=cfg_obj_['service_worker_id'], - name=cfg_obj_['name']) - if len(configs) == 1: - config = db_api.config_update(self.context, - configs[0].id, - cfg_obj_) - LOG.debug("Config %s is existing and is updated" % config) - - -class DriverProcessor(object): - def __init__(self, context, manager, service_worker_id, region_id): - self.context = context - self.manager = manager - self.service_worker_id = service_worker_id - self.region_id = region_id - self.config_dict = self._get_config_dict() - - def _get_config_dict(self): - conf_dict = {} - for c in db_api.config_get_by_name_for_service_worker( - self.context, - self.service_worker_id - ): - conf_dict[c.name] = c.to_dict() - - return conf_dict - - def _identify_drivers(self): - return (set(openstack_drivers.get_drivers_config().keys()) & - set(self.config_dict.keys())) - - def _get_value(self, name): - if name is None: - return name - - if isinstance(name, str): - # Constant naming - if name[0] == '#': - return name[1:] - return (self.config_dict[name].get('value')) - elif isinstance(name, tuple): - fn = name[0] - args = list() - for var in name[1:]: - args.append(self._get_value(var)) - return fn(*args) - elif isinstance(name, list): - fmt_str = name[0] - params = [self._get_value(param) for param in name[1:]] - return fmt_str % tuple(params) - - def process_drivers(self): - for driver_key in self._identify_drivers(): - try: - drivers = self._get_value(driver_key) - drivers = utils._to_list(drivers) - for driver_name in drivers: - self.process_driver(driver_key, driver_name) - except KeyError: # noqa - # TODO(mrkanag) run namos-manager and restart nova-scheduler - # KeyError: 'libvirt.virt_type' is thrown, fix it - LOG.error('Failed to process driver %s in service worker %s' % - (driver_key, self.service_worker_id)) - continue - - def process_driver(self, driver_key, driver_name): - driver_config = \ - openstack_drivers.get_drivers_config()[driver_key][driver_name] - - if driver_config.get('alias') is not None: - alias = 
driver_config.get('alias') - driver_config = \ - openstack_drivers.get_drivers_config() - for key in alias.split(':'): - driver_config = driver_config[key] - driver_name = key - - driver_def = \ - openstack_drivers.get_drivers_def()[driver_name] - - connection = dict() - - endpoint_type = None - connection_cfg = None - device_endpoint_name = None - device_cfg = None - child_device_cfg = None - - if driver_config.get('device') is not None: - device_cfg = driver_config['device'] - - if driver_config['endpoint'].get('type') is not None: - endpoint_type = driver_config['endpoint']['type'] - if endpoint_type[0] != '#': - endpoint_type = self._get_value(endpoint_type) - - connection_cfg = driver_config['endpoint'][endpoint_type][ - 'connection'] - device_endpoint_name = self._get_value( - driver_config['endpoint'][endpoint_type]['name']) - # override the device name - if driver_config['endpoint'][endpoint_type].get( - 'device') is not None: - device_cfg = driver_config['endpoint'][endpoint_type][ - 'device'] - if driver_config['endpoint'][endpoint_type].get( - 'child_device') is not None: - child_device_cfg = driver_config['endpoint'][ - endpoint_type]['child_device'] - else: - endpoint_type = None - connection_cfg = driver_config['endpoint']['connection'] - device_endpoint_name = self._get_value( - driver_config['endpoint']['name'] - ) - # override the device name - if driver_config['endpoint'].get('device') is not None: - device_cfg = driver_config['endpoint']['device'] - - if driver_config['endpoint'].get('child_device') is not None: - child_device_cfg = driver_config['endpoint'][ - 'child_device'] - - # Device - device_name = self._get_value(device_cfg['name']) - try: - # TODO(mrkanag) Set the right status - device = db_api.device_create( - self.context, - dict(name=device_name, - status='active', - region_id=self.region_id)) - - LOG.info('Device %s is created' % device) - except exception.AlreadyExist: - device = db_api.device_get_by_name( - self.context, - device_name) - LOG.info('Device %s is existing' % device) - - # TODO(mrkanag) Poperly Handle child devices - if child_device_cfg is not None: - for d_name in self._get_value(child_device_cfg['key']): - base_name = self._get_value(child_device_cfg['base_name']) - d_name = '%s-%s' % (base_name, d_name) - try: - device = db_api.device_get_by_name( - self.context, - d_name) - LOG.info('Device %s is existing' % device) - except exception.DeviceNotFound: - # TODO(mrkanag) region_id is hard-coded, fix it ! 
- # Set the right status as well - r_id = 'f7dcd175-27ef-46b5-997f-e6e572f320b0' - device = db_api.device_create( - self.context, - dict(name=d_name, - status='active', - parent_id=device.id, - region_id=r_id)) - - LOG.info('Device %s is created' % device) - - # Device Endpoint - try: - for k, v in connection_cfg.iteritems(): - connection[k] = self._get_value(k) - - device_endpoint = db_api.device_endpoint_create( - self.context, - dict(name=device_endpoint_name, - connection=connection, - type=endpoint_type, - device_id=device.id)) - LOG.info('Device Endpoint %s is created' % device_endpoint) - except exception.AlreadyExist: - device_endpoints = db_api.device_endpoint_get_by_device_type( - self.context, - device_id=device.id, - type=endpoint_type, - name=device_endpoint_name) - if len(device_endpoints) >= 1: - device_endpoint = device_endpoints[0] - LOG.info('Device Endpoint %s is existing' % - device_endpoints[0]) - - # Device Driver Class - try: - device_driver_class = db_api.device_driver_class_create( - self.context, - dict(name=driver_name, - python_class=driver_name, - type=driver_def['type'], - device_id=device.id, - endpoint_id=device_endpoint.id, - service_worker_id=self.service_worker_id, - extra=driver_def.get('extra'))) - LOG.info('Device Driver Class %s is created' % - device_driver_class) - except exception.AlreadyExist: - device_driver_class = db_api.device_driver_class_get_by_name( - self.context, - driver_name) - LOG.info('Device Driver Class %s is existing' % - device_driver_class) - - # Device Driver - try: - device_driver = db_api.device_driver_create( - self.context, - dict(device_id=device.id, - name=driver_name, - endpoint_id=device_endpoint.id, - device_driver_class_id=device_driver_class.id, - service_worker_id=self.service_worker_id) - ) - LOG.info('Device Driver %s is created' % - device_driver) - except exception.AlreadyExist: - device_drivers = \ - db_api.device_driver_get_by_device_endpoint_service_worker( - self.context, - device_id=device.id, - endpoint_id=device_endpoint.id, - device_driver_class_id=device_driver_class.id, - service_worker_id=self.service_worker_id - ) - if len(device_drivers) >= 1: - device_driver = device_drivers[0] - LOG.info('Device Driver %s is existing' % - device_driver) - - -if __name__ == '__main__': - print (DriverProcessor(None, None)._to_list("[\"file\', \'http\']")) diff --git a/namos/conductor/namespace_processor.py b/namos/conductor/namespace_processor.py new file mode 100644 index 0000000..d9d9c30 --- /dev/null +++ b/namos/conductor/namespace_processor.py @@ -0,0 +1,252 @@ +# -*- coding: utf-8 -*- + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_config import cfg +from oslo_log import log + +from namos.common import exception +from namos.common import utils +from namos.db import api as db_api +from namos.db import openstack_drivers + +LOG = log.getLogger(__name__) + +CONF = cfg.CONF + + +class NamespaceProcessor(object): + # TODO(mrkanag) check Fuel driver at + # http://docs.openstack.org/mitaka/config-reference/content/ + # hpe-3par-driver.html + def __init__(self, context, manager, service_worker_id, region_id): + self.context = context + self.manager = manager + self.service_worker_id = service_worker_id + self.region_id = region_id + self.config_dict = self._get_config_dict() + + def _get_config_dict(self): + conf_dict = {} + for c in db_api.config_get_by_name_for_service_worker( + self.context, + self.service_worker_id + ): + conf_dict[c.name] = c.to_dict() + + return conf_dict + + def _identify_drivers(self): + return (set(openstack_drivers.get_drivers_config().keys()) & + set(self.config_dict.keys())) + + def _get_value(self, name): + if name is None: + return name + + if isinstance(name, str): + # Constant naming + if name[0] == '#': + return name[1:] + return (self.config_dict[name].get('value')) + elif isinstance(name, tuple): + fn = name[0] + args = list() + for var in name[1:]: + args.append(self._get_value(var)) + return fn(*args) + elif isinstance(name, list): + fmt_str = name[0] + params = [self._get_value(param) for param in name[1:]] + return fmt_str % tuple(params) + + def process_drivers(self): + for driver_key in self._identify_drivers(): + try: + drivers = self._get_value(driver_key) + drivers = utils._to_list(drivers) + for driver_name in drivers: + self.process_driver(driver_key, driver_name) + except KeyError: # noqa + # TODO(mrkanag) run namos-manager and restart nova-scheduler + # KeyError: 'libvirt.virt_type' is thrown, fix it + LOG.error('Failed to process driver %s in service worker %s' % + (driver_key, self.service_worker_id)) + continue + + def process_driver(self, driver_key, driver_name): + driver_config = \ + openstack_drivers.get_drivers_config()[driver_key][driver_name] + + if driver_config.get('alias') is not None: + alias = driver_config.get('alias') + driver_config = \ + openstack_drivers.get_drivers_config() + for key in alias.split(':'): + driver_config = driver_config[key] + driver_name = key + + driver_def = \ + openstack_drivers.get_drivers_def()[driver_name] + + connection = dict() + + endpoint_type = None + connection_cfg = None + device_endpoint_name = None + device_cfg = None + child_device_cfg = None + + if driver_config.get('device') is not None: + device_cfg = driver_config['device'] + + if driver_config['endpoint'].get('type') is not None: + endpoint_type = driver_config['endpoint']['type'] + if endpoint_type[0] != '#': + endpoint_type = self._get_value(endpoint_type) + + connection_cfg = driver_config['endpoint'][endpoint_type][ + 'connection'] + device_endpoint_name = self._get_value( + driver_config['endpoint'][endpoint_type]['name']) + # override the device name + if driver_config['endpoint'][endpoint_type].get( + 'device') is not None: + device_cfg = driver_config['endpoint'][endpoint_type][ + 'device'] + if driver_config['endpoint'][endpoint_type].get( + 'child_device') is not None: + child_device_cfg = driver_config['endpoint'][ + endpoint_type]['child_device'] + else: + endpoint_type = None + connection_cfg = driver_config['endpoint']['connection'] + device_endpoint_name = self._get_value( + driver_config['endpoint']['name'] + ) + # override the device name + 
if driver_config['endpoint'].get('device') is not None: + device_cfg = driver_config['endpoint']['device'] + + if driver_config['endpoint'].get('child_device') is not None: + child_device_cfg = driver_config['endpoint'][ + 'child_device'] + + # Device + device_name = self._get_value(device_cfg['name']) + try: + # TODO(mrkanag) Set the right status + device = db_api.device_create( + self.context, + dict(name=device_name, + status='active', + region_id=self.region_id)) + + LOG.info('Device %s is created' % device) + except exception.AlreadyExist: + device = db_api.device_get_by_name( + self.context, + device_name) + LOG.info('Device %s is existing' % device) + + # TODO(mrkanag) Poperly Handle child devices + if child_device_cfg is not None: + for d_name in self._get_value(child_device_cfg['key']): + base_name = self._get_value(child_device_cfg['base_name']) + d_name = '%s-%s' % (base_name, d_name) + try: + device = db_api.device_get_by_name( + self.context, + d_name) + LOG.info('Device %s is existing' % device) + except exception.DeviceNotFound: + # TODO(mrkanag) region_id is hard-coded, fix it ! + # Set the right status as well + r_id = 'f7dcd175-27ef-46b5-997f-e6e572f320b0' + device = db_api.device_create( + self.context, + dict(name=d_name, + status='active', + parent_id=device.id, + region_id=r_id)) + + LOG.info('Device %s is created' % device) + + # Device Endpoint + try: + for k, v in connection_cfg.iteritems(): + connection[k] = self._get_value(k) + + device_endpoint = db_api.device_endpoint_create( + self.context, + dict(name=device_endpoint_name, + connection=connection, + type=endpoint_type, + device_id=device.id)) + LOG.info('Device Endpoint %s is created' % device_endpoint) + except exception.AlreadyExist: + device_endpoints = db_api.device_endpoint_get_by_device_type( + self.context, + device_id=device.id, + type=endpoint_type, + name=device_endpoint_name) + if len(device_endpoints) >= 1: + device_endpoint = device_endpoints[0] + LOG.info('Device Endpoint %s is existing' % + device_endpoints[0]) + + # Device Driver Class + try: + device_driver_class = db_api.device_driver_class_create( + self.context, + dict(name=driver_name, + python_class=driver_name, + type=driver_def['type'], + device_id=device.id, + endpoint_id=device_endpoint.id, + service_worker_id=self.service_worker_id, + extra=driver_def.get('extra'))) + LOG.info('Device Driver Class %s is created' % + device_driver_class) + except exception.AlreadyExist: + device_driver_class = db_api.device_driver_class_get_by_name( + self.context, + driver_name) + LOG.info('Device Driver Class %s is existing' % + device_driver_class) + + # Device Driver + try: + device_driver = db_api.device_driver_create( + self.context, + dict(device_id=device.id, + name=driver_name, + endpoint_id=device_endpoint.id, + device_driver_class_id=device_driver_class.id, + service_worker_id=self.service_worker_id) + ) + LOG.info('Device Driver %s is created' % + device_driver) + except exception.AlreadyExist: + device_drivers = \ + db_api.device_driver_get_by_device_endpoint_service_worker( + self.context, + device_id=device.id, + endpoint_id=device_endpoint.id, + device_driver_class_id=device_driver_class.id, + service_worker_id=self.service_worker_id + ) + if len(device_drivers) >= 1: + device_driver = device_drivers[0] + LOG.info('Device Driver %s is existing' % + device_driver) diff --git a/namos/conductor/region_processor.py b/namos/conductor/region_processor.py new file mode 100644 index 0000000..0c09659 --- /dev/null +++ 
b/namos/conductor/region_processor.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg +from oslo_log import log + +from namos.common import exception +from namos.db import api as db_api + +LOG = log.getLogger(__name__) + +CONF = cfg.CONF + + +class RegionProcessor(object): + def __init__(self, + context, + manager, + registration_info): + self.registration_info = registration_info + self.manager = manager + self.context = context + + def process_region(self): + # region + # If region is not provided, make it as belongs to namos's region + if not self.registration_info.get('region_name'): + self.registration_info[ + 'region_name'] = cfg.CONF.os_namos.region_name + + try: + region = db_api.region_create( + self.context, + dict(name=self.registration_info.get('region_name')) + ) + LOG.info('Region %s is created' % region) + except exception.AlreadyExist: + region = db_api.region_get_by_name( + self.context, + name=self.registration_info.get('region_name') + ) + LOG.info('Region %s is existing' % region) + + return region.id diff --git a/namos/conductor/service_processor.py b/namos/conductor/service_processor.py new file mode 100644 index 0000000..a415377 --- /dev/null +++ b/namos/conductor/service_processor.py @@ -0,0 +1,124 @@ +# -*- coding: utf-8 -*- + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_config import cfg +from oslo_log import log + +from namos.common import config as namos_config +from namos.common import exception +from namos.db import api as db_api + +LOG = log.getLogger(__name__) + +CONF = cfg.CONF + + +class ServiceProcessor(object): + def __init__(self, + context, + manager, + region_id, + registration_info): + self.registration_info = registration_info + self.manager = manager + self.context = context + self.region_id = region_id + + def process_service(self): + # Service Node + try: + # TODO(mrkanag) user proper node name instead of fqdn + node = db_api.service_node_create( + self.context, + dict(name=self.registration_info.get('fqdn'), + fqdn=self.registration_info.get('fqdn'), + region_id=self.region_id, + extra={'ips': self.registration_info.get('ips')})) + LOG.info('Service node %s is created' % node) + except exception.AlreadyExist: + # TODO(mrkanag) is this to be region specifc search + node = db_api.service_node_get_by_name( + self.context, + self.registration_info.get('fqdn')) + LOG.info('Service node %s is existing' % node) + + # Service + try: + s_id = 'b9c2549f-f685-4bc2-92e9-ba8af9c18591' + service = db_api.service_create( + self.context, + # TODO(mrkanag) use keystone python client and + # use real service id here + dict(name=self.registration_info.get('project_name'), + keystone_service_id=s_id)) + + LOG.info('Service %s is created' % service) + except exception.AlreadyExist: + service = db_api.service_get_by_name( + self.context, + self.registration_info.get('project_name')) + LOG.info('Service %s is existing' % service) + + # Service Component + try: + service_component = db_api.service_component_create( + self.context, + dict(name=self.registration_info['prog_name'], + node_id=node.id, + service_id=service.id, + type=namos_config.find_type(self.registration_info[ + 'prog_name']))) + LOG.info('Service Component %s is created' % service_component) + except exception.AlreadyExist: + service_components = \ + db_api.service_component_get_all_by_node_for_service( + self.context, + node_id=node.id, + service_id=service.id, + name=self.registration_info['prog_name'] + ) + if len(service_components) == 1: + service_component = service_components[0] + LOG.info('Service Component %s is existing' % + service_component) + # TODO(mrkanag) what to do when service_components size is > 1 + + # Service Worker + try: + service_worker = db_api.service_worker_create( + self.context, + # TODO(mrkanag) Fix the name, device driver proper ! 
+ dict(name='%s@%s' % (service_component.name, + self.registration_info['pid']), + pid=self.registration_info['identification'], + host=self.registration_info['host'], + service_component_id=service_component.id, + deleted_at=None, + is_launcher=self.registration_info['i_am_launcher'] + )) + LOG.info('Service Worker %s is created' % service_worker) + except exception.AlreadyExist: + service_worker = db_api.service_worker_get_all_by( + self.context, + pid=self.registration_info['identification'], + service_component_id=service_component.id + )[0] + LOG.info('Service Worker %s is existing' % + service_worker) + + return service_component.id, service_worker.id + + def cleanup(self, service_component_id): + # clean up the dead service workers + db_api.cleanup(self.context, service_component_id) diff --git a/namos/db/openstack_drivers.py b/namos/db/openstack_drivers.py index 4904892..f2b0cc6 100644 --- a/namos/db/openstack_drivers.py +++ b/namos/db/openstack_drivers.py @@ -12,1424 +12,12 @@ # License for the specific language governing permissions and limitations # under the License. +_all__ = ['get_drivers_config', + 'get_drivers_def'] -import urlparse +_DRIVERS_CONFIG = {} -__all__ = ['get_drivers_config', - 'get_drivers_def'] - - -def _get_db_name(*args, **kwargs): - result = urlparse.urlparse(args[0]) - return '%s' % result.path.replace('/', '') - - -def _get_rpc_name(*args, **kwargs): - return '%s' % args[0] - - -_DRIVERS_CONFIG = { - # db - 'db_backend':{ - 'sqlalchemy': { #driver_class - # alias should be always end with driver class name. - 'alias':'database.backend:sqlalchemy', - } - }, - # TODO(mrkanag) sql_connectio is used in trove for db_backends - # just check it - 'database.backend': { - 'sqlalchemy': { #driver_class - 'endpoint': { - 'name': 'database.connection', - 'connection': { - # mysql://root:password@127.0.0.1/neutron?charset=utf8 - 'database.connection': 'database.connection' - } - }, - 'device': { - 'name': ['DB_%s', (_get_db_name, 'database.connection')] - } - } - }, - # rpc - 'rpc_backend':{ - 'rabbit': { - 'endpoint': { - 'name': 'oslo_messaging_rabbit.rabbit_hosts', - 'connection': { - 'oslo_messaging_rabbit.rabbit_hosts': 'oslo_messaging_rabbit.rabbit_hosts', - 'oslo_messaging_rabbit.rabbit_port': 'oslo_messaging_rabbit.rabbit_port', - 'oslo_messaging_rabbit.rabbit_userid': 'oslo_messaging_rabbit.rabbit_userid', - 'oslo_messaging_rabbit.rabbit_password': 'oslo_messaging_rabbit.rabbit_password', - } - }, - 'device': { - 'name': ['RPC_%s', (_get_rpc_name, 'oslo_messaging_rabbit.rabbit_hosts')] - } - }, - 'nova.openstack.common.rpc.impl_kombu': { - 'alias': 'rpc_backend:rabbit' - }, - 'cinder.openstack.common.rpc.impl_kombu': { - 'alias': 'rpc_backend:rabbit' - }, - 'neutron.openstack.common.rpc.impl_kombu': { - 'alias': 'rpc_backend:rabbit' - }, - 'glance.openstack.common.rpc.impl_kombu': { - 'alias': 'rpc_backend:rabbit' - }, - 'heat.openstack.common.rpc.impl_kombu': { - 'alias': 'rpc_backend:rabbit' - }, - 'namos.openstack.common.rpc.impl_kombu': { - 'alias': 'rpc_backend:rabbit' - } - }, - # nova - 'compute_driver' : { - 'libvirt.LibvirtDriver': { - 'endpoint': { - 'type': 'libvirt.virt_type', - 'kvm': { - 'name': 'host', - 'connection': { - 'libvirt.virt_type': 'libvirt.virt_type' - } - }, - 'qemu' : { - 'name': 'host', - 'connection': { - 'libvirt.virt_type': 'libvirt.virt_type' - } - }, - 'xen': { - 'name': 'xenapi_connection_url', - 'connection': { - 'xenapi_connection_url': 'xenapi_connection_url', - 'xenapi_connection_username': - 
'xenapi_connection_username', - 'xenapi_connection_password': - 'xenapi_connection_password' - }, - 'device': { - 'name': ['%s %s', 'libvirt.virt_type', - 'xenapi_connection_url'] - } - }, - 'lxc': { - 'name': 'host', - 'connection': { - 'libvirt.virt_type': 'libvirt.virt_type' - } - }, - }, - 'device': { - 'name': ['%s host %s', 'libvirt.virt_type', 'host'] - } - }, - 'vmwareapi.VMwareVCDriver':{ - 'endpoint': { - 'name': 'vmware.host_ip', - 'connection': { - 'vmware.host_ip': 'vmware.host_ip', - 'vmware.host_username': 'vmware.host_username', - 'vmware.host_password': 'vmware.host_password', - 'vmware.cluster_name': 'vmware.cluster_name', - 'vmware.datastore_regex': 'vmware.datastore_regex' - }, - # When one driver supports mutiple devices, parent-child - # relation will be formed and parent usually has the - # endpoint associated with it, which is used by children - # devices - 'child_device': { - # TODO(mrkanag) key should be comma separated or list, - # just check !! - 'key': 'vmware.cluster_name', - 'base_name': ['VMWare Cluster %s', 'vmware.host_ip'] - # This name will be postfixed with device name got from key - } - }, - 'device': { - 'name': 'vmware.host_ip' - } - }, - 'nova.virt.hyperv.driver.HyperVDriver': { - 'endpoint': { - 'name': 'host', - 'connection': { - 'libvirt.type': 'libvirt.type' - } - }, - 'device':{ - 'name': ['Hyper-V host %s', 'host'] - } - } - }, - # cinder - 'volume_driver': { - 'cinder.volume.drivers.lvm.LVMISCSIDriver': { - 'endpoint': { - 'name': 'volume_group', - 'connection': { - 'volume_group': 'volume_group', - 'lvm_mirrors': 'lvm_mirrors', - 'lvm_type': 'lvm_type' - } - }, - 'device': { - 'name': ['%s@%s', 'volume_group', 'host'] - } - }, - 'cinder.volume.drivers.lvm.LVMVolumeDriver': { - 'alias': 'volume_driver:cinder.volume.drivers.lvm.LVMISCSIDriver' - }, - 'cinder.volume.drivers.lvm.LVMISERDriver': { - 'alias': 'volume_driver:cinder.volume.drivers.lvm.LVMISCSIDriver' - }, - 'cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver': { - 'endpoint': { - 'name': 'hp3par_api_url', - 'connection': { - 'hp3par_api_url': 'hp3par_api_url', - 'hp3par_username': 'hp3par_username', - 'hp3par_password' : 'hp3par_password', - 'hp3par_cpg': 'hp3par_cpg', - 'san_ip': 'san_ip', - 'san_login':'san_login', - 'san_password': 'san_password', - 'hp3par_iscsi_ips': 'hp3par_iscsi_ips', - 'iscsi_ip_address': 'iscsi_ip_address' - }, - 'device': { - 'name': ['%s@%s', 'hp3par_cpg', 'san_ip'] - } - } - }, - 'cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver': { - 'alias': 'volume_driver:cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver' - }, - 'cinder.volume.drivers.san.hp.hp_lefthand_iscsi.HPLeftHandISCSIDriver': - { - 'endpoint': { - # TODO(mrkanag) type is not config param. what to do? 
- 'type': '#REST', - '#CLIQ': { - 'name': 'san_ip', - 'connection': { - 'san_ip': 'san_ip', - 'san_login': 'san_login', - 'san_password': 'san_password', - 'san_ssh_port': 'san_ssh_port', - 'san_clustername': 'san_clustername' - }, - 'device': { - 'name': ['%s@%s', 'san_clustername', 'san_ip'] - } - }, - '#REST': { - 'name': 'hplefthand_api_url', - 'connection': { - 'hplefthand_api_url': 'hplefthand_api_url', - 'hplefthand_username': 'hplefthand_username', - 'hplefthand_password': 'hplefthand_password', - 'hplefthand_clustername': 'hplefthand_clustername' - }, - 'device': { - 'name': ['%s@%s', 'hplefthand_clustername', - 'hplefthand_api_url'] - } - } - } - }, - 'cinder.volume.drivers.coraid.CoraidDriver': { - 'endpoint': { - 'name': 'coraid_esm_address', - 'connection': { - 'coraid_esm_address': 'coraid_esm_address', - 'coraid_user': 'coraid_user', - 'coraid_group': 'coraid_group', - 'coraid_password': 'coraid_password', - 'coraid_repository_key': 'coraid_repository_key' - } - }, - 'device': { - 'name': ['coraid %s', 'coraid_esm_address'] - } - }, - 'cinder.volume.drivers.eqlx.DellEQLSanISCSIDriver': { - 'endpoint': { - 'name': 'san_ip', - 'connection': { - 'san_ip': 'san_ip', - 'san_login': 'san_login', - 'san_password': 'san_password', - 'eqlx_group_name': 'eqlx_group_name', - 'eqlx_pool': 'eqlx_pool' - } - }, - 'device': { - 'name': ['%s@%s', 'eqlx_group_name', 'san_ip'] - } - }, - 'cinder.volume.drivers.emc.emc_vmax_iscsi.EMCVMAXISCSIDriver': { - 'endpoint': { - 'name': 'iscsi_ip_address', - 'connection': { - 'iscsi_ip_address': 'iscsi_ip_address', - # TODO(mrkanag) not sure what to do with config file - 'cinder_emc_config_file': 'cinder_emc_config_file' - } - }, - 'device': { - 'name': ['EMCVMAX %s', 'iscsi_ip_address'] - } - }, - 'cinder.volume.drivers.emc.emc_vmax_fc.EMCVMAXFCDriver': { - 'endpoint': { - 'name': '', - 'connection': { - 'cinder_emc_config_file': 'cinder_emc_config_file' - } - }, - 'device': { - # TODO(mrkanag) fill it - 'name': '' - } - }, - 'cinder.volume.drivers.emc.emc_cli_iscsi.EMCCLIISCSIDriver': { - 'endpoint': { - 'name': 'iscsi_ip_address', - 'connection': { - 'iscsi_ip_address': 'iscsi_ip_address', - 'san_ip': 'iscsi_ip_address', - 'san_login': 'san_login', - 'san_password': 'san_password', - 'naviseccli_path': 'naviseccli_path', - 'storage_vnx_pool_name': 'storage_vnx_pool_name', - 'default_timeout': 'default_timeout', - 'max_luns_per_storage_group': 'max_luns_per_storage_group' - } - }, - 'device': { - 'name': ['EMC %s@%s', 'storage_vnx_pool_name', 'san_ip'] - } - }, - 'cinder.volume.drivers.glusterfs.GlusterfsDriver': { - 'endpoint': { - 'name': 'glusterfs_mount_point_base', - 'connection': { - 'glusterfs_mount_point_base': 'glusterfs_mount_point_base', - 'glusterfs_shares_config': 'glusterfs_shares_config' - } - }, - 'device': { - 'name': ['Gfs %s', 'glusterfs_mount_point_base'] - } - }, - 'cinder.volume.drivers.hds.iscsi.HDSISCSIDriver': { - 'endpoint': { - 'name': 'HDS', - 'connection': { - 'hds_hnas_iscsi_config_file': 'hds_hnas_iscsi_config_file' - } - }, - 'device': { - 'name': 'HDS' - } - }, - 'cinder.volume.drivers.hds.nfs.HDSNFSDriver': { - 'endpoint': { - 'name': '', - 'connection': { - 'hds_hnas_nfs_config_file': 'hds_hnas_nfs_config_file' - } - }, - 'device': { - 'name': 'HDS' - } - }, - 'cinder.volume.drivers.hds.hds.HUSDriver': { - 'endpoint': { - 'name': '', - 'connection': { - 'hds_cinder_config_file': 'hds_cinder_config_file' - } - }, - 'device': { - 'name': 'HUS' - } - }, - 'cinder.volume.drivers.hitachi.hbsd_fc.HBSDFCDriver': { - 
'endpoint': { - 'name': '', - 'connection': { - - } - }, - 'device': { - 'name': '' - } - }, - 'cinder.volume.drivers.hitachi.hbsd_iscsi.HBSDISCSIDriver': { - 'endpoint': { - 'name': '', - 'connection': { - - } - }, - 'device': { - 'name': '' - } - }, - 'cinder.volume.drivers.san.hp.hp_msa_fc.HPMSAFCDriver': { - 'endpoint': { - 'name': '', - 'connection': { - - } - }, - 'device': { - 'name': '' - } - }, - 'cinder.volume.drivers.huawei.HuaweiVolumeDriver': { - 'endpoint': { - 'name': '', - 'connection': { - - } - }, - 'device': { - 'name': '' - } - }, - 'cinder.volume.drivers.ibm.gpfs.GPFSDriver': { - 'endpoint': { - 'name': '', - 'connection': { - } - }, - 'device': { - 'name': '' - } - }, - 'cinder.volume.drivers.ibm.storwize_svc.StorwizeSVCDriver': { - 'endpoint': { - 'name': '', - 'connection': { - } - }, - 'device': { - 'name': '' - } - }, - 'cinder.volume.drivers.xiv_ds8k.XIVDS8KDriver': { - 'endpoint': { - 'name': '', - 'connection': { - - } - }, - 'device': { - 'name': '' - } - }, - 'cinder.volume.drivers.netapp.common.NetAppDriver': { - 'endpoint': { - 'name': '', - 'connection': { - - } - }, - 'device': { - 'name': '' - } - }, - 'cinder.volume.drivers.netapp.iscsi.NetAppDirectCmodeISCSIDriver': { - 'endpoint': { - 'name': '', - 'connection': { - - } - }, - 'device': { - 'name': '' - } - }, - 'cinder.volume.drivers.netapp.nfs.NetAppDirectCmodeNfsDriver': { - 'endpoint': { - 'name': '', - 'connection': { - - } - }, - 'device': { - 'name': '' - } - }, - 'cinder.volume.drivers.netapp.iscsi.NetAppDirect7modeISCSIDriver': { - 'endpoint': { - 'name': '', - 'connection': { - - } - }, - 'device': { - 'name': '' - } - }, - 'cinder.volume.drivers.netapp.nfs.NetAppDirect7modeNfsDriver': { - 'endpoint': { - 'name': '', - 'connection': { - - } - }, - 'device': { - 'name': '' - } - }, - 'cinder.volume.drivers.nexenta.iscsi.NexentaISCSIDriver': { - 'endpoint': { - 'name': '', - 'connection': { - - } - }, - 'device': { - 'name': '' - } - }, - 'cinder.volume.drivers.nexenta.nfs.NexentaNfsDriver': { - 'endpoint': { - 'name': '', - 'connection': { - - } - }, - 'device': { - 'name': '' - } - }, - 'cinder.volume.drivers.nfs.NfsDriver': { - 'endpoint': { - 'name': '', - 'connection': { - - } - }, - 'device': { - 'name': '' - } - }, - 'cinder.volume.drivers.prophetstor.dpl_fc.DPLFCDriver': { - 'endpoint': { - 'name': '', - 'connection': { - - } - }, - 'device': { - 'name': '' - } - }, - 'cinder.volume.drivers.prophetstor.dpl_iscsi.DPLISCSIDriver': { - 'endpoint': { - 'name': '', - 'connection': { - - } - }, - 'device': { - 'name': '' - } - }, - 'cinder.volume.drivers.pure.PureISCSIDriver': { - 'endpoint': { - 'name': '', - 'connection': { - - } - }, - 'device': { - 'name': '' - } - }, - 'cinder.volume.drivers.sheepdog.SheepdogDriver': { - 'endpoint': { - 'name': '', - 'connection': { - - } - }, - 'device': { - 'name': '' - } - }, - 'cinder.volume.drivers.solidfire.SolidFireDriver': { - 'endpoint': { - 'name': '', - 'connection': { - - } - }, - 'device': { - 'name': '' - } - }, - 'cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver': { - 'endpoint': { - 'name': '', - 'connection': { - - } - }, - 'device': { - 'name': '' - } - }, - 'cinder.volume.drivers.windows.WindowsDriver': { - 'endpoint': { - 'name': '', - 'connection': { - - } - }, - 'device': { - 'name': '' - } - }, - 'cinder.volume.drivers.xenapi.sm.XenAPINFSDriver': { - 'endpoint': { - 'name': '', - 'connection': { - - } - }, - 'device': { - 'name': '' - } - }, - 'cinder.volume.drivers.zadara.ZadaraVPSAISCSIDriver': { - 'endpoint': { - 'name': 
'', - 'connection': { - - } - }, - 'device': { - 'name': '' - } - }, - 'cinder.volume.drivers.zfssa.zfssaiscsi.ZFSSAISCSIDriver': { - 'endpoint': { - 'name': '', - 'connection': { - - } - }, - 'device': { - 'name': '' - } - } - }, - 'backup_driver': { - 'cinder.backup.drivers.ceph': { - 'endpoint': { - 'name': '', - 'connection': { - - } - }, - 'device': { - 'name': '' - } - }, - 'cinder.backup.drivers.swift': { - 'endpoint': { - 'name': '', - 'connection': { - - } - }, - 'device': { - 'name': '' - } - }, - 'cinder.backup.drivers.tsm': { - 'endpoint': { - 'name': '', - 'connection': { - - } - }, - 'device': { - 'name': '' - } - } - }, - 'zone_driver': { - 'cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver.BrcdFCZoneDriver': { - 'endpoint': { - 'name': '', - 'connection': { - - } - }, - 'device': { - 'name': '' - } - }, - 'cinder.zonemanager.drivers.cisco.cisco_fc_zone_driver.CiscoFCZoneDriver': { - 'endpoint': { - 'name': '', - 'connection': { - - } - }, - 'device': { - 'name': '' - } - } - }, - # neutron - # 'core_plugin': { - # 'neutron.plugins.ml2.plugin.Ml2Plugin': { - # - # } - # }, - # 'service_plugins': { - # 'neutron.services.l3_router.l3_router_plugin.L3RouterPlugin': { - # - # } - # }, - # 'service_providers.service_provider': { - # - # }, - 'dhcp_driver': { - 'neutron.agent.linux.dhcp.Dnsmasq': { - 'endpoint': { - 'name': 'dhcp_domain', - 'connection': { - 'dhcp_domain': 'dhcp_domain' - } - }, - 'device': { - 'name': ['DHCP %s', 'dhcp_domain'] - } - } - }, - 'interface_driver': { - 'neutron.agent.linux.interface.OVSInterfaceDriver': { - 'endpoint': { - 'name': 'ovs_integration_bridge', - 'connection': { - 'ovs_integration_bridge': 'ovs_integration_bridge' - } - }, - 'device': { - 'name': ['OVS bridge %s', 'ovs_integration_bridge'] - } - }, - 'openvswitch': { - 'alias': 'interface_driver:neutron.agent.linux.interface.OVSInterfaceDriver' - }, - }, - # 'extension_drivers': { - # - # }, - 'ml2.mechanism_drivers': { - 'linuxbridge' : { - 'alias': 'ml2.mechanism_drivers:neutron.plugins.ml2.drivers.mech_linuxbridge.LinuxbridgeMechanismDriver' - }, - 'neutron.plugins.ml2.drivers.mech_linuxbridge.LinuxbridgeMechanismDriver': { - 'endpoint': { - 'name': '#Linux Bride', - 'connection': { - } - }, - 'device': { - 'name': '#Linux Bride' - } - }, - 'openvswitch': { - 'alias': 'ml2.mechanism_drivers:neutron.plugins.ml2.drivers.mech_openvswitch.OpenvswitchMechanismDriver' - }, - 'neutron.plugins.ml2.drivers.mech_openvswitch.OpenvswitchMechanismDriver': { - 'endpoint': { - 'name': '#OVS', - 'connection': { - } - }, - 'device': { - 'name': '#OVS' - } - } - }, - 'ml2.type_drivers': { - 'local': { - 'alias': 'ml2.type_drivers:neutron.plugins.ml2.drivers.type_local.LocalTypeDriver' - }, - 'neutron.plugins.ml2.drivers.type_local.LocalTypeDriver': { - 'endpoint': { - 'name': '#Local Type', - 'connection': { - } - }, - 'device': { - 'name': '#Local Type' - } - }, - 'flat': { - 'alias': 'ml2.type_drivers:neutron.plugins.ml2.drivers.type_flat.FlatTypeDriver' - }, - 'neutron.plugins.ml2.drivers.type_flat.FlatTypeDriver': { - 'endpoint': { - 'name': '#FLAT Type', - 'connection': { - } - }, - 'device': { - 'name': '#FLAT type' - } - }, - 'vlan': { - 'alias': 'ml2.type_drivers:neutron.plugins.ml2.drivers.type_vlan.VlanTypeDriver' - }, - 'neutron.plugins.ml2.drivers.type_vlan.VlanTypeDriver': { - 'endpoint': { - 'name': '#VLAN Type', - 'connection': { - } - }, - 'device': { - 'name': '#VLAN Type' - } - }, - 'gre': { - 'alias': 'ml2.type_drivers:neutron.plugins.ml2.drivers.type_gre.GreTypeDriver' - }, - 
'neutron.plugins.ml2.drivers.type_gre.GreTypeDriver': { - 'endpoint': { - 'name': '#GRE Type', - 'connection': { - } - }, - 'device': { - 'name': '#GRE Type' - } - }, - 'vxlan': { - 'alias': 'ml2.type_drivers:neutron.plugins.ml2.drivers.type_vxlan.VxlanTypeDriver' - }, - 'neutron.plugins.ml2.drivers.type_vxlan.VxlanTypeDriver': { - 'endpoint': { - 'name': '#VxLAN Type', - 'connection': { - } - }, - 'device': { - 'name': '#VxLAN Type' - } - }, - }, - 'firewall_driver': { - 'neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver': { - 'endpoint': { - 'name': '#Firewall', - 'connection': { - } - }, - 'device': { - 'name': '#Firewall' - } - }, - 'nova.virt.firewall.NoopFirewallDriver' : { - 'endpoint': { - 'name': '#NoopFirewall', - 'connection': { - } - }, - 'device': { - 'name': '#Firewall' - } - } - }, - 'SECURITY_GROUP.firewall_driver': { - 'neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver': { - 'alias': 'firewall_driver:neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver' - } - }, - - # 'dhcp_agent_manager': { - # - # }, - # 'l3_agent_manager': { - # - # }, - # Glance - 'glance_store.stores': { - 'file': { - 'alias': 'glance_store.stores:glance_store._drivers.filesystem.Store' - }, - 'filesystem': { - 'alias': 'glance_store.stores:glance_store._drivers.filesystem.Store' - }, - 'glance_store._drivers.filesystem.Store': { - 'endpoint': { - 'name': 'glance_store.filesystem_store_datadir', - 'connection': { - 'glance_store.filesystem_store_datadir': 'glance_store.filesystem_store_datadir', - 'glance_store.filesystem_store_datadirs': 'glance_store.filesystem_store_datadirs', - 'glance_store.filesystem_store_file_perm': 'glance_store.filesystem_store_file_perm', - 'glance_store.filesystem_store_metadata_file': - 'glance_store.filesystem_store_metadata_file' - } - }, - 'device': { - 'name': ['File System Store %s', 'glance_store.filesystem_store_datadir'] - } - }, - 'http': { - 'alias': 'glance_store.stores:glance_store._drivers.http.Store' - }, - 'https': { - 'alias': 'glance_store.stores:glance_store._drivers.http.Store' - }, - 'glance_store._drivers.http.Store': { - 'endpoint': { - 'name': '#http', - 'connection': { - } - }, - 'device': { - 'name': '#http Image beackend' - } - }, - 'cinder': { - 'alias': 'glance_store.stores:glance_store._drivers.cinder.Store' - }, - 'glance_store._drivers.cinder.Store': { - 'endpoint': { - 'name': '#Cinder Image Backend', - 'connection': { - 'glance_store.cinder_endpoint_template': 'glance_store.cinder_endpoint_template', - 'glance_store.cinder_api_insecure': 'glance_store.cinder_api_insecure', - 'glance_store.cinder_ca_certificates_file': - 'glance_store.cinder_ca_certificates_file', - 'glance_store.cinder_catalog_info': 'glance_store.cinder_catalog_info', - 'glance_store.cinder_http_retries': 'glance_store.cinder_http_retries' - } - }, - 'device': { - 'name': '#Cinder Image Backend' - } - }, - 'swift': { - 'alias': 'glance_store.stores:glance_store._drivers.swift.Store' - }, - 'glance_store._drivers.swift.Store': { - 'endpoint': { - 'name': 'glance_store.default_swift_reference', - 'connection': { - 'glance_store.default_swift_reference': 'glance_store.default_swift_reference', - 'glance_store.swift_enable_snet': 'glance_store.swift_enable_snet', - 'glance_store.swift_store_admin_tenants': 'glance_store.swift_store_admin_tenants', - 'glance_store.swift_store_auth_address': 'glance_store.swift_store_auth_address', - 'glance_store.swift_store_auth_insecure': 'glance_store.swift_store_auth_insecure', - 
'glance_store.swift_store_auth_version': 'glance_store.swift_store_auth_version', - 'glance_store.swift_store_config_file': 'glance_store.swift_store_config_file', - 'glance_store.swift_store_container': 'glance_store.swift_store_container', - 'glance_store.swift_store_create_container_on_put': 'glance_store.swift_store_create_container_on_put', - 'glance_store.swift_store_endpoint_type': 'glance_store.swift_store_endpoint_type', - 'glance_store.swift_store_key': 'glance_store.swift_store_key', - 'glance_store.swift_store_large_object_chunk_size': 'glance_store.swift_store_large_object_chunk_size', - 'glance_store.swift_store_large_object_size': 'glance_store.swift_store_large_object_size', - 'glance_store.swift_store_multi_tenant': 'glance_store.swift_store_multi_tenant', - 'glance_store.swift_store_region': 'glance_store.swift_store_region', - 'glance_store.swift_store_retry_get_count': 'glance_store.swift_store_retry_get_count', - 'glance_store.swift_store_service_type': 'glance_store.swift_store_service_type', - 'glance_store.swift_store_ssl_compression': 'glance_store.swift_store_ssl_compression', - 'glance_store.swift_store_user': 'glance_store.swift_store_user' - } - }, - 'device': { - 'name': ['Swift Image backend %s', 'glance_store.default_swift_reference'] - } - }, - 'rbd': { - 'alias': 'glance_store.stores:glance_store._drivers.rbd.Store' - }, - 'glance_store._drivers.rbd.Store': { - 'endpoint': { - 'name': 'glance_store.rbd_store_pool', - 'connection': { - 'glance_store.rbd_store_ceph_conf': 'glance_store.rbd_store_ceph_conf', - 'glance_store.rbd_store_chunk_size': 'glance_store.rbd_store_chunk_size', - 'glance_store.rbd_store_pool': 'glance_store.rbd_store_pool', - 'glance_store.rbd_store_user': 'glance_store.rbd_store_user' - } - }, - 'device': { - 'name': ['RBD Image backend %s', 'glance_store.rbd_store_pool'] - } - }, - 'sheepdog': { - 'alias': 'glance_store.stores:glance_store._drivers.sheepdog.Store' - }, - 'glance_store._drivers.sheepdog.Store': { - 'endpoint': { - 'name': 'glance_store.sheepdog_store_address', - 'connection': { - 'glance_store.sheepdog_store_address': 'glance_store.sheepdog_store_address', - 'glance_store.sheepdog_store_chunk_size': 'glance_store.sheepdog_store_chunk_size', - 'glance_store.sheepdog_store_port': 'glance_store.sheepdog_store_port' - } - }, - 'device': { - 'name': ['Sheepdog Image backend %s', 'glance_store.sheepdog_store_address'] - } - }, - 'gridfs': { - 'alias': 'glance_store.stores:glance_store._drivers.gridfs.Store' - }, - 'glance_store._drivers.gridfs.Store': { - 'endpoint': { - 'name': 'glance_store.mongodb_store_uri', - 'connection': { - 'glance_store.mongodb_store_db': 'glance_store.mongodb_store_db', - 'glance_store.mongodb_store_uri': 'glance_store.mongodb_store_uri' - } - }, - 'device': { - 'name': ['Gird FS Image backend %s', 'glance_store.mongodb_store_db'] - } - }, - 's3': { - 'alias': 'glance_store.stores:glance_store._drivers.s3.Store' - }, - 's3+http': { - 'alias': 'glance_store.stores:glance_store._drivers.s3.Store' - }, - 's3': { - 'alias': 'glance_store.stores:glance_store._drivers.s3.Store' - }, - 'glance_store._drivers.s3.Store': { - 'endpoint': { - 'name': ['%s/%s', 'glance_store.s3_store_host', 'glance_store.s3_store_bucket'], - 'connection': { - 'glance_store.s3_store_host': 'glance_store.s3_store_host', - 'glance_store.s3_store_bucket': 'glance_store.s3_store_bucket', - 'glance_store.s3_store_object_buffer_dir': 'glance_store.s3_store_object_buffer_dir', - 'glance_store.s3_store_secret_key': 
'glance_store.s3_store_secret_key', - 'glance_store.s3_store_create_bucket_on_put': 'glance_store.s3_store_create_bucket_on_put', - 'glance_store.s3_store_bucket_url_format': 'glance_store.s3_store_bucket_url_format', - 'glance_store.s3_store_access_key': 'glance_store.s3_store_access_key' - } - }, - 'device': { - 'name': ['S3 Image Backend %s/%s', - 'glance_store.s3_store_host', - 'glance_store.s3_store_bucket'] - } - }, - 'vsphere': { - 'alias': 'glance_store.stores:glance_store._drivers.vmware_datastore.Store' - }, - 'glance_store._drivers.vmware_datastore.Store': { - 'endpoint': { - 'name': ['%s/%s', - 'glance_store.vmware_server_host', - 'glance_store.vmware_datastore_name' - ], - 'connection': { - 'glance_store.vmware_api_insecure': 'glance_store.vmware_api_insecure', - 'glance_store.vmware_api_retry_count': 'glance_store.vmware_api_retry_count', - 'glance_store.vmware_datacenter_path': 'glance_store.vmware_datacenter_path', - 'glance_store.vmware_datastore_name': 'glance_store.vmware_datastore_name', - 'glance_store.vmware_server_host': 'glance_store.vmware_server_host', - 'glance_store.vmware_server_password': 'glance_store.vmware_server_password', - 'glance_store.vmware_server_username': 'glance_store.vmware_server_username', - 'glance_store.vmware_store_image_dir': 'glance_store.vmware_store_image_dir', - 'glance_store.vmware_task_poll_interval': 'glance_store.vmware_task_poll_interval' - } - }, - 'device': { - 'name': ['VMWare Image backend %s/%s', - 'glance_store.vmware_server_host', - 'glance_store.vmware_datastore_name' - ] - } - } - } -} - -_DRIVERS = { - 'sqlalchemy': { - 'type': 'database', - 'extra': { - 'url': 'https://pypi.python.org/pypi/SQLAlchemy', - 'python_class': 'sqlalchemy', - 'version': '0.9.8', - 'device_support': [ - { - 'vendor': 'MYSQL', - 'model': 'MYSQL', - 'version': ['5.6','5.7'] - } - ], - 'configuration_guide':'', - 'metadata': { - 'wiki': '' - } - } - }, - 'rabbit': { - 'type': 'message', - 'extra': { - 'url': 'https://github.com/openstack/oslo.messaging', - 'python_class': '%{service_type}s.openstack.common.rpc.impl_kombu', - 'version': '0.9.8', - 'device_support': [ - { - 'vendor': 'RabbitMQ', - 'model': 'RabbitMQ Server', - 'version': ['3.4','3.5'] - } - ], - 'configuration_guide':'', - 'metadata': { - 'wiki': '' - } - } - }, - 'vmwareapi.VMwareVCDriver': { - 'type': 'nova', - 'class': 'hypervisor', - 'extra': { - 'url': 'https://github.com/openstack/nova', - 'python_class': 'nova.virt.vmwareapi.VMwareVCDriver', - 'version': '2014.5', - 'device_support': [ - { - 'vendor': 'VMWare', - 'model': 'vSphere', - 'version': ['5.0','5.1'] - } - ], - 'configuration_guide':'', - 'metadata': { - 'wiki': '' - } - } - }, - 'libvirt.LibvirtDriver': { - 'type': 'nova', - 'class': ['hypervisor', 'container'], - 'extra' : { - 'url': 'https://github.com/openstack/nova', - 'python_class': 'nova.virt.libvirt.LibvirtDriver', - 'version': '2014.5', - 'device_support': [ - { - 'vendor': 'KVM, LXC, QEMU, UML, and XEN', - 'model': 'KVM', - 'version': ['5.0','5.1'] - } - ], - 'configuration_guide':'', - 'metadata': { - 'libvirt_supports': 'https://wiki.openstack.org/wiki/LibvirtDistroSupportMatrix' - } - } - }, - 'nova.virt.hyperv.driver.HyperVDriver': { - 'type': 'nova', - 'extra' : { - 'url': 'https://github.com/openstack/nova', - 'python_class': 'nova.virt.hyperv.driver.HyperVDriver', - 'version': '2014.5', - 'device_support': [ - { - 'vendor': 'Microsoft', - 'model': 'Hyper-V', - 'version': ['2014'] - } - ], - 'configuration_guide':'', - 'metadata': { - } - } - }, - 
'cinder.volume.drivers.lvm.LVMISCSIDriver': { - 'type': 'cinder', - 'class': 'volume' - }, - 'cinder.volume.drivers.lvm.LVMISERDriver': { - 'type': 'cinder', - 'class': 'volume' - }, - 'cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver': { - 'type': 'cinder', - 'class': 'volume' - }, - 'cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver': { - 'type': 'cinder', - 'class': 'volume', - 'requirements_txt': { - # TODO(mrkanag) Add installer reqs here, pip pkg or apt pkg or - # any other OS packages - }, - 'apt_get_list': { - - }, - 'deprecation': { - 'alternate': '', - 'since': '2012.1' - } - }, - 'cinder.volume.drivers.san.hp.hp_lefthand_iscsi.HPLeftHandISCSIDriver': { - 'type': 'cinder', - 'class': 'volume', - 'protocol': 'iSCSI' - }, - 'cinder.volume.drivers.coraid.CoraidDriver': { - 'type': 'cinder', - 'class': 'volume' - }, - 'cinder.volume.drivers.eqlx.DellEQLSanISCSIDriver': { - 'type': 'cinder', - 'class': 'volume', - 'protocol': 'iSCSI' - }, - 'cinder.volume.drivers.emc.emc_vmax_iscsi.EMCVMAXISCSIDriver': { - 'type': 'cinder', - 'class': 'volume', - 'protocol': 'iSCSI' - }, - 'cinder.volume.drivers.emc.emc_vmax_fc.EMCVMAXFCDriver': { - 'type': 'cinder', - 'class': 'volume' - }, - 'cinder.volume.drivers.emc.emc_cli_iscsi.EMCCLIISCSIDriver': { - 'type': 'cinder', - 'class': 'volume', - 'protocol': 'iSCSI' - }, - 'cinder.volume.drivers.glusterfs.GlusterfsDriver': { - 'type': 'cinder', - 'class': 'volume' - }, - 'cinder.volume.drivers.hds.iscsi.HDSISCSIDriver': { - 'type': 'cinder', - 'class': 'volume', - 'protocol': 'iSCSI' - }, - 'cinder.volume.drivers.hds.nfs.HDSNFSDriver': { - 'type': 'cinder', - 'class': 'volume' - }, - 'cinder.volume.drivers.hds.hds.HUSDriver': { - 'type': 'cinder', - 'class': 'volume' - }, - 'cinder.volume.drivers.hitachi.hbsd_fc.HBSDFCDriver': { - 'type': 'cinder', - 'class': 'volume' - }, - 'cinder.volume.drivers.hitachi.hbsd_iscsi.HBSDISCSIDriver': { - 'type': 'cinder', - 'class': 'volume', - 'protocol': 'iSCSI' - }, - 'cinder.volume.drivers.san.hp.hp_msa_fc.HPMSAFCDriver': { - 'type': 'cinder', - 'class': 'volume' - }, - 'cinder.volume.drivers.huawei.HuaweiVolumeDriver': { - 'type': 'cinder', - 'class': 'volume' - }, - 'cinder.volume.drivers.ibm.gpfs.GPFSDriver': { - 'type': 'cinder', - 'class': 'volume' - }, - 'cinder.volume.drivers.ibm.storwize_svc.StorwizeSVCDriver': { - 'type': 'cinder', - 'class': 'volume' - }, - 'cinder.volume.drivers.xiv_ds8k.XIVDS8KDriver': { - 'type': 'cinder', - 'class': 'volume' - }, - 'cinder.volume.drivers.netapp.common.NetAppDriver': { - 'type': 'cinder', - 'class': 'volume' - }, - 'cinder.volume.drivers.netapp.iscsi.NetAppDirectCmodeISCSIDriver': { - 'type': 'cinder', - 'class': 'volume', - 'protocol': 'iSCSI' - }, - 'cinder.volume.drivers.netapp.nfs.NetAppDirectCmodeNfsDriver': { - 'type': 'cinder', - 'class': 'volume' - }, - 'cinder.volume.drivers.netapp.iscsi.NetAppDirect7modeISCSIDriver': { - 'type': 'cinder', - 'class': 'volume', - 'protocol': 'iSCSI' - }, - 'cinder.volume.drivers.netapp.nfs.NetAppDirect7modeNfsDriver': { - 'type': 'cinder', - 'class': 'volume' - }, - 'cinder.volume.drivers.nexenta.iscsi.NexentaISCSIDriver': { - 'type': 'cinder', - 'class': 'volume', - 'protocol': 'iSCSI' - }, - 'cinder.volume.drivers.nexenta.nfs.NexentaNfsDriver': { - 'type': 'cinder', - 'class': 'volume' - }, - 'cinder.volume.drivers.nfs.NfsDriver': { - 'type': 'cinder', - 'class': 'volume' - }, - 'cinder.volume.drivers.prophetstor.dpl_fc.DPLFCDriver': { - 'type': 'cinder', - 'class': 'volume' - }, - 
'cinder.volume.drivers.prophetstor.dpl_iscsi.DPLISCSIDriver': { - 'type': 'cinder', - 'class': 'volume', - 'protocol': 'iSCSI' - }, - 'cinder.volume.drivers.pure.PureISCSIDriver': { - 'type': 'cinder', - 'class': 'volume', - 'protocol': 'iSCSI' - }, - 'cinder.volume.drivers.sheepdog.SheepdogDriver': { - 'type': 'cinder', - 'class': 'volume' - }, - 'cinder.volume.drivers.solidfire.SolidFireDriver': { - 'type': 'cinder', - 'class': 'volume' - }, - 'cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver': { - 'type': 'cinder', - 'class': 'volume', - 'compute_driver': 'vmwareapi.VMwareVCDriver' - }, - 'cinder.volume.drivers.windows.WindowsDriver': { - 'type': 'cinder', - 'class': 'volume' - }, - 'cinder.volume.drivers.xenapi.sm.XenAPINFSDriver': { - 'type': 'cinder', - 'class': 'volume' - }, - 'cinder.volume.drivers.zadara.ZadaraVPSAISCSIDriver': { - 'type': 'cinder', - 'class': 'volume', - 'protocol': 'iSCSI' - }, - 'cinder.volume.drivers.zfssa.zfssaiscsi.ZFSSAISCSIDriver': { - 'type': 'cinder', - 'class': 'volume', - 'protocol': 'iSCSI' - }, - 'cinder.backup.drivers.ceph': { - 'type': 'cinder', - 'class': 'backup' - }, - 'cinder.backup.drivers.tsm': { - 'type': 'cinder', - 'class': 'backup' - }, - 'cinder.backup.drivers.swift': { - 'type': 'cinder', - 'class': 'backup' - }, - 'cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver.BrcdFCZoneDriver': - { - 'type': 'cinder', - 'class': 'zone_manager' - }, - 'cinder.zonemanager.drivers.cisco.cisco_fc_zone_driver.CiscoFCZoneDriver': - { - 'type': 'cinder', - 'class': 'zone_manager' - }, - 'glance_store._drivers.filesystem.Store': { - 'type': 'glance' - }, - 'glance_store._drivers.http.Store': { - 'type': 'glance' - }, - 'glance_store._drivers.cinder.Store': { - 'type': 'glance' - }, - 'glance_store._drivers.swift.Store': { - 'type': 'glance' - }, - 'glance_store._drivers.rbd.Store': { - 'type': 'glance' - }, - 'glance_store._drivers.gridfs.Store': { - 'type': 'glance' - }, - 'glance_store._drivers.vmware_datstore.Store': { - 'type': 'glance' - }, - 'glance_store._drivers.filesystem.Store': { - 'type': 'glance' - }, - 'neutron.agent.linux.interface.OVSInterfaceDriver': { - 'type': 'neutron' - }, - 'neutron.agent.linux.dhcp.Dnsmasq': { - 'type': 'neutron' - }, - 'neutron.agent.linux.interface.OVSInterfaceDriver': { - 'type': 'neutron' - }, - 'neutron.plugins.ml2.drivers.mech_linuxbridge.LinuxbridgeMechanismDriver': { - 'type': 'neutron' - }, - 'neutron.plugins.ml2.drivers.mech_openvswitch.OpenvswitchMechanismDriver': { - 'type': 'neutron' - }, - 'neutron.plugins.ml2.drivers.type_local.LocalTypeDriver': { - 'type': 'neutron' - }, - 'neutron.plugins.ml2.drivers.type_flat.FlatTypeDriver': { - 'type': 'neutron' - }, - 'neutron.plugins.ml2.drivers.type_vlan.VlanTypeDriver': { - 'type': 'neutron' - }, - 'neutron.plugins.ml2.drivers.type_gre.GreTypeDriver': { - 'type': 'neutron' - }, - 'neutron.plugins.ml2.drivers.type_vxlan.VxlanTypeDriver': { - 'type': 'neutron' - }, - 'neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver': { - 'type': 'neutron' - }, - 'nova.virt.firewall.NoopFirewallDriver': { - 'type': 'neutron' - }, - 'neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver': { - 'type': 'neutron' - } -} +_DRIVERS = {} def get_drivers_config(): diff --git a/namos/db/sample.py b/namos/db/sample.py index 5b0155d..d2d2d37 100644 --- a/namos/db/sample.py +++ b/namos/db/sample.py @@ -15,329 +15,329 @@ from namos.db import api REGION_LIST = [ - {'f7dcd175-27ef-46b5-997f-e6e572f320af': - {'name': 'RegionOne', - 
'keystone_region_id': 'region_one', - 'extra': {'location': 'bangalore'}} - }, - {'f7dcd175-27ef-46b5-997f-e6e572f320b0': - {'name': 'RegionTwo', - 'keystone_region_id': 'region_two', - 'extra': {'location': 'chennai'}} - } + # {'f7dcd175-27ef-46b5-997f-e6e572f320af': + # {'name': 'RegionOne', + # 'keystone_region_id': 'region_one', + # 'extra': {'location': 'bangalore'}} + # }, + # {'f7dcd175-27ef-46b5-997f-e6e572f320b0': + # {'name': 'RegionTwo', + # 'keystone_region_id': 'region_two', + # 'extra': {'location': 'chennai'}} + # } ] DEVICE_LIST = [ # vCenter - {'91007d3c-9c95-40c5-8f94-c7b071f9b577': - { - 'name': 'Vmware_vCenter_1', - 'display_name': 'VMWare vCenter 1', - 'description': 'vCenter 5.0', - 'status': 'active', - 'extra': {'owner': 'mkr1481@namos.com'}, - 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'} - }, - # Clusters - {'d468ea2e-74f6-4a55-a7f4-a56d18e91c66': - { - 'name': 'vmware_vc_Cluster_1', - 'display_name': 'VMWare vCenter 1 Cluster 1', - 'description': 'Cluster 1 having 3 hosts', - 'status': 'active', - 'extra': {'owner': 'mkr1481@namos.com', - 'vcpus': 1000, - 'ram_in_gb': 1024}, - 'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577', - 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'} - }, - {'6c97f476-8e27-4e21-8528-a5ec236306f3': - {'name': 'vmware_vc_Cluster_2', - 'display_name': 'VMWare vCenter 1 Cluster 2', - 'description': 'Cluster 2 having 5 hosts', - 'status': 'active', - 'extra': {'owner': 'mkr1481@namos.com'}, - 'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577', - 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'} - }, - # Datastores - {'fdab6c51-38fb-4fb1-a76f-9c243a8b8296': - {'name': 'Vmware_vCenter_1_datastore_1', - 'display_name': 'VMWare vCenter 1 datastore 1', - 'description': 'vCenter 5.0 Datastore created from FC', - 'status': 'active', - 'extra': {'owner': 'mkr1481@namos.com', - 'size_in_gb': '102400'}, - 'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577', - 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'} - }, - {'05b935b3-942c-439c-a6a4-9c3c73285430': - {'name': 'Vmware_vCenter_1_datastore_2', - 'display_name': 'VMWare vCenter 1 datastore 2', - 'description': 'vCenter 5.0 Datastore created from FC', - 'status': 'active', - 'extra': {'owner': 'mkr1481@namos.com', - 'size_in_gb': '10240'}, - 'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577', - 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'} - }, - # Switch - {'f062556b-45c4-417d-80fa-4283b9c58da3': - {'name': 'Vmware_vCenter_1_switch_1', - 'display_name': 'VMWare vCenter 1 Dist. 
vSwitch 1', - 'description': 'vCenter 5.0 distributed virtual switch', - 'status': 'active', - 'extra': {'owner': 'mkr1481@namos.com'}, - 'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577', - 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'} - } + # {'91007d3c-9c95-40c5-8f94-c7b071f9b577': + # { + # 'name': 'Vmware_vCenter_1', + # 'display_name': 'VMWare vCenter 1', + # 'description': 'vCenter 5.0', + # 'status': 'active', + # 'extra': {'owner': 'mkr1481@namos.com'}, + # 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'} + # }, + # # Clusters + # {'d468ea2e-74f6-4a55-a7f4-a56d18e91c66': + # { + # 'name': 'vmware_vc_Cluster_1', + # 'display_name': 'VMWare vCenter 1 Cluster 1', + # 'description': 'Cluster 1 having 3 hosts', + # 'status': 'active', + # 'extra': {'owner': 'mkr1481@namos.com', + # 'vcpus': 1000, + # 'ram_in_gb': 1024}, + # 'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577', + # 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'} + # }, + # {'6c97f476-8e27-4e21-8528-a5ec236306f3': + # {'name': 'vmware_vc_Cluster_2', + # 'display_name': 'VMWare vCenter 1 Cluster 2', + # 'description': 'Cluster 2 having 5 hosts', + # 'status': 'active', + # 'extra': {'owner': 'mkr1481@namos.com'}, + # 'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577', + # 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'} + # }, + # # Datastores + # {'fdab6c51-38fb-4fb1-a76f-9c243a8b8296': + # {'name': 'Vmware_vCenter_1_datastore_1', + # 'display_name': 'VMWare vCenter 1 datastore 1', + # 'description': 'vCenter 5.0 Datastore created from FC', + # 'status': 'active', + # 'extra': {'owner': 'mkr1481@namos.com', + # 'size_in_gb': '102400'}, + # 'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577', + # 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'} + # }, + # {'05b935b3-942c-439c-a6a4-9c3c73285430': + # {'name': 'Vmware_vCenter_1_datastore_2', + # 'display_name': 'VMWare vCenter 1 datastore 2', + # 'description': 'vCenter 5.0 Datastore created from FC', + # 'status': 'active', + # 'extra': {'owner': 'mkr1481@namos.com', + # 'size_in_gb': '10240'}, + # 'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577', + # 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'} + # }, + # # Switch + # {'f062556b-45c4-417d-80fa-4283b9c58da3': + # {'name': 'Vmware_vCenter_1_switch_1', + # 'display_name': 'VMWare vCenter 1 Dist. 
vSwitch 1', + # 'description': 'vCenter 5.0 distributed virtual switch', + # 'status': 'active', + # 'extra': {'owner': 'mkr1481@namos.com'}, + # 'parent_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577', + # 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af'} + # } ] ENDPOINT_LIST = [ - {'7403bf80-9376-4081-89ee-d2501661ca84':{ - 'name': 'vcenter1_connection', - 'connection': {'host_ip': '10.1.1.3', - 'host_port': 443, - 'host_username': 'adminstrator', - 'host_password': 'password'}, - 'device_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577' - }} + # {'7403bf80-9376-4081-89ee-d2501661ca84':{ + # 'name': 'vcenter1_connection', + # 'connection': {'host_ip': '10.1.1.3', + # 'host_port': 443, + # 'host_username': 'adminstrator', + # 'host_password': 'password'}, + # 'device_id': '91007d3c-9c95-40c5-8f94-c7b071f9b577' + # }} ] DEVICE_DRIVER_CLASS_LIST = [ - {'0664e8c0-ff02-427e-8fa3-8788c017ad84': { - 'python_class': 'nova...vcdriver', - 'type': 'compute', - 'vendor': 'vmware-community' - }}, - {'11caf99c-f820-4266-a461-5a15437a8144': { - 'python_class': 'cinder...vmdkdriver', - 'type': 'volume', - 'vendor': 'vmware-community' - }}, - {'bb99ea96-fe6b-49e6-a761-faea92b79f75': { - 'python_class': 'neutron...nsxdriver', - 'type': 'network', - 'vendor': 'vmware-community' - }} + # {'0664e8c0-ff02-427e-8fa3-8788c017ad84': { + # 'python_class': 'nova...vcdriver', + # 'type': 'compute', + # 'vendor': 'vmware-community' + # }}, + # {'11caf99c-f820-4266-a461-5a15437a8144': { + # 'python_class': 'cinder...vmdkdriver', + # 'type': 'volume', + # 'vendor': 'vmware-community' + # }}, + # {'bb99ea96-fe6b-49e6-a761-faea92b79f75': { + # 'python_class': 'neutron...nsxdriver', + # 'type': 'network', + # 'vendor': 'vmware-community' + # }} ] DEVICE_DRIVER_LIST = [ - # nova - {'3c089cdb-e1d5-4182-9a8e-cef9899fd7e5':{ - 'endpoint_id': '7403bf80-9376-4081-89ee-d2501661ca84', - 'device_driver_class_id':'0664e8c0-ff02-427e-8fa3-8788c017ad84', - 'device_id': 'd468ea2e-74f6-4a55-a7f4-a56d18e91c66' - }}, - # nova - {'4e0360ae-0728-4bfd-a557-3ad867231787':{ - 'endpoint_id': '7403bf80-9376-4081-89ee-d2501661ca84', - 'device_driver_class_id':'0664e8c0-ff02-427e-8fa3-8788c017ad84', - 'device_id': '6c97f476-8e27-4e21-8528-a5ec236306f3' - }}, - # cinder - {'92d5e2c1-511b-4837-a57d-5e6ee723060c':{ - 'endpoint_id': '7403bf80-9376-4081-89ee-d2501661ca84', - 'device_driver_class_id':'11caf99c-f820-4266-a461-5a15437a8144', - 'device_id': 'fdab6c51-38fb-4fb1-a76f-9c243a8b8296' - }}, - # cinder - {'f3d807a0-eff0-4473-8ae5-594967136e05':{ - 'endpoint_id': '7403bf80-9376-4081-89ee-d2501661ca84', - 'python_class_id':'11caf99c-f820-4266-a461-5a15437a8144', - 'device_id': '05b935b3-942c-439c-a6a4-9c3c73285430' - }}, - # neutron - {'f27eb548-929c-45e2-a2a7-dc123e2a1bc7':{ - 'endpoint_id': '7403bf80-9376-4081-89ee-d2501661ca84', - 'python_class_id':'bb99ea96-fe6b-49e6-a761-faea92b79f75', - 'device_id': 'f062556b-45c4-417d-80fa-4283b9c58da3' - }} + # # nova + # {'3c089cdb-e1d5-4182-9a8e-cef9899fd7e5':{ + # 'endpoint_id': '7403bf80-9376-4081-89ee-d2501661ca84', + # 'device_driver_class_id': '0664e8c0-ff02-427e-8fa3-8788c017ad84', + # 'device_id': 'd468ea2e-74f6-4a55-a7f4-a56d18e91c66' + # }}, + # # nova + # {'4e0360ae-0728-4bfd-a557-3ad867231787':{ + # 'endpoint_id': '7403bf80-9376-4081-89ee-d2501661ca84', + # 'device_driver_class_id': '0664e8c0-ff02-427e-8fa3-8788c017ad84', + # 'device_id': '6c97f476-8e27-4e21-8528-a5ec236306f3' + # }}, + # # cinder + # {'92d5e2c1-511b-4837-a57d-5e6ee723060c':{ + # 'endpoint_id': 
'7403bf80-9376-4081-89ee-d2501661ca84', + # 'device_driver_class_id': '11caf99c-f820-4266-a461-5a15437a8144', + # 'device_id': 'fdab6c51-38fb-4fb1-a76f-9c243a8b8296' + # }}, + # # cinder + # {'f3d807a0-eff0-4473-8ae5-594967136e05':{ + # 'endpoint_id': '7403bf80-9376-4081-89ee-d2501661ca84', + # 'python_class_id': '11caf99c-f820-4266-a461-5a15437a8144', + # 'device_id': '05b935b3-942c-439c-a6a4-9c3c73285430' + # }}, + # # neutron + # {'f27eb548-929c-45e2-a2a7-dc123e2a1bc7':{ + # 'endpoint_id': '7403bf80-9376-4081-89ee-d2501661ca84', + # 'python_class_id': 'bb99ea96-fe6b-49e6-a761-faea92b79f75', + # 'device_id': 'f062556b-45c4-417d-80fa-4283b9c58da3' + # }} ] -SERVICE_LIST =[ - {'11367a37-976f-468a-b8dd-77b28ee63cf4': { - 'name': 'nova_service', - 'keystone_service_id': 'b9c2549f-f685-4bc2-92e9-ba8af9c18599' - }}, - {'809e04c1-2f3b-43af-9677-3428a0154216': { - 'name': 'cinder_service', - 'keystone_service_id': '9cc4c374-abb5-4bdc-9129-f0fa4bba0e0b' - }}, - {'3495fa07-39d9-4d87-9f97-0a582a3e25c3': { - 'name': 'neutron_service', - 'keystone_service_id': 'b24e2884-75bc-4876-81d1-5b4fb6e92afc' - }} +SERVICE_LIST = [ + # {'11367a37-976f-468a-b8dd-77b28ee63cf4': { + # 'name': 'nova_service', + # 'keystone_service_id': 'b9c2549f-f685-4bc2-92e9-ba8af9c18599' + # }}, + # {'809e04c1-2f3b-43af-9677-3428a0154216': { + # 'name': 'cinder_service', + # 'keystone_service_id': '9cc4c374-abb5-4bdc-9129-f0fa4bba0e0b' + # }}, + # {'3495fa07-39d9-4d87-9f97-0a582a3e25c3': { + # 'name': 'neutron_service', + # 'keystone_service_id': 'b24e2884-75bc-4876-81d1-5b4fb6e92afc' + # }} ] SERVICE_NODE_LIST = [ - { - 'a5073d58-2dbb-4146-b47c-4e5f7dc11fbe': { - 'name': 'd_network_node_1', - 'fqdn': 'network_node_1.devstack1.abc.com', - 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af' - } - }, - { - '4e99a641-dbe9-416e-8c0a-78015dc55a2a': { - 'name': 'd_compute_node_1', - 'fqdn': 'compute_node_1.devstack.abc.com', - 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af' - } - }, - { - 'b92f4811-7970-421b-a611-d51c62972388': { - 'name': 'd_cloud-controller-1', - 'fqdn': 'cloud_controller_1.devstack1.abc.com', - 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af' - } - }, - { - 'e5913cd3-a416-40e1-889f-1a1b1c53001c': { - 'name': 'd_storage_node_1', - 'fqdn': 'storage_node_1.devstack.abc.com', - 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af' - } - } + # { + # 'a5073d58-2dbb-4146-b47c-4e5f7dc11fbe': { + # 'name': 'd_network_node_1', + # 'fqdn': 'network_node_1.devstack1.abc.com', + # 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af' + # } + # }, + # { + # '4e99a641-dbe9-416e-8c0a-78015dc55a2a': { + # 'name': 'd_compute_node_1', + # 'fqdn': 'compute_node_1.devstack.abc.com', + # 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af' + # } + # }, + # { + # 'b92f4811-7970-421b-a611-d51c62972388': { + # 'name': 'd_cloud-controller-1', + # 'fqdn': 'cloud_controller_1.devstack1.abc.com', + # 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af' + # } + # }, + # { + # 'e5913cd3-a416-40e1-889f-1a1b1c53001c': { + # 'name': 'd_storage_node_1', + # 'fqdn': 'storage_node_1.devstack.abc.com', + # 'region_id': 'f7dcd175-27ef-46b5-997f-e6e572f320af' + # } + # } ] SERVICE_COMPONENT_LIST = [ - # nova - { - '7259a9ff-2e6f-4e8d-b2fb-a529188825dd': { - 'name': 'd_nova-compute', - 'node_id': '4e99a641-dbe9-416e-8c0a-78015dc55a2a', - 'service_id': '11367a37-976f-468a-b8dd-77b28ee63cf4' - } - }, - { - 'e5e366ea-9029-4ba0-8bbc-f658e642aa54': { - 'name': 'd_nova-scheduler', - 'node_id': 'b92f4811-7970-421b-a611-d51c62972388', - 'service_id': 
'11367a37-976f-468a-b8dd-77b28ee63cf4' - } - }, - { - 'f7813622-85ee-4588-871d-42c3128fa14f': { - 'name': 'd_nova-api', - 'node_id': 'b92f4811-7970-421b-a611-d51c62972388', - 'service_id': '11367a37-976f-468a-b8dd-77b28ee63cf4' - } - }, - # cinder - { - 'b0e9ac3f-5600-406c-95e4-f698b1eecfc6': { - 'name': 'd_cinder-volume', - 'node_id': 'e5913cd3-a416-40e1-889f-1a1b1c53001c', - 'service_id': '809e04c1-2f3b-43af-9677-3428a0154216' - } - }, - # neutron - { - '54f608bd-fb01-4614-9653-acbb803aeaf7':{ - 'name': 'd_neutron-agent', - 'node_id': 'a5073d58-2dbb-4146-b47c-4e5f7dc11fbe', - 'service_id': '3495fa07-39d9-4d87-9f97-0a582a3e25c3' - } - } + # # nova + # { + # '7259a9ff-2e6f-4e8d-b2fb-a529188825dd': { + # 'name': 'd_nova-compute', + # 'node_id': '4e99a641-dbe9-416e-8c0a-78015dc55a2a', + # 'service_id': '11367a37-976f-468a-b8dd-77b28ee63cf4' + # } + # }, + # { + # 'e5e366ea-9029-4ba0-8bbc-f658e642aa54': { + # 'name': 'd_nova-scheduler', + # 'node_id': 'b92f4811-7970-421b-a611-d51c62972388', + # 'service_id': '11367a37-976f-468a-b8dd-77b28ee63cf4' + # } + # }, + # { + # 'f7813622-85ee-4588-871d-42c3128fa14f': { + # 'name': 'd_nova-api', + # 'node_id': 'b92f4811-7970-421b-a611-d51c62972388', + # 'service_id': '11367a37-976f-468a-b8dd-77b28ee63cf4' + # } + # }, + # # cinder + # { + # 'b0e9ac3f-5600-406c-95e4-f698b1eecfc6': { + # 'name': 'd_cinder-volume', + # 'node_id': 'e5913cd3-a416-40e1-889f-1a1b1c53001c', + # 'service_id': '809e04c1-2f3b-43af-9677-3428a0154216' + # } + # }, + # # neutron + # { + # '54f608bd-fb01-4614-9653-acbb803aeaf7':{ + # 'name': 'd_neutron-agent', + # 'node_id': 'a5073d58-2dbb-4146-b47c-4e5f7dc11fbe', + # 'service_id': '3495fa07-39d9-4d87-9f97-0a582a3e25c3' + # } + # } ] SERVICE_WORKER_LIST = [ - # cluster-1 - { - '65dbd695-fa92-4950-b8b4-d46aa0408f6a': { - 'name': 'd_nova-compute-esx-cluster1', - 'pid': '1233454343', - 'host': 'd_nova-compute-esx-cluster1', - 'service_component_id': '7259a9ff-2e6f-4e8d-b2fb-a529188825dd', - 'device_driver_id': '3c089cdb-e1d5-4182-9a8e-cef9899fd7e5' - } - }, - # cluster-2 - { - '50d2c0c6-741d-4108-a3a2-2090eaa0be37': { - 'name': 'd_nova-compute-esx-cluster2', - 'pid': '1233454344', - 'host': 'd_nova-compute-esx-cluster2', - 'service_component_id': '7259a9ff-2e6f-4e8d-b2fb-a529188825dd', - 'device_driver_id': '4e0360ae-0728-4bfd-a557-3ad867231787' - } - }, - # datastore-1 - { - '77e3ee16-fa2b-4e12-ad1c-226971d1a482': { - 'name': 'd_cinder-volume-vmdk-1', - 'pid': '09878654', - 'host': 'd_cinder-volume-vmdk-1', - 'service_component_id': 'b0e9ac3f-5600-406c-95e4-f698b1eecfc6', - 'device_driver_id': '92d5e2c1-511b-4837-a57d-5e6ee723060c' - } - }, - # datastore-2 - { - '8633ce68-2b02-4efd-983c-49a460f6d7ef': { - 'name': 'd_cinder-volume-vmdk-2', - 'pid': '4353453', - 'host': 'd_cinder-volume-vmdk-2', - 'service_component_id': 'b0e9ac3f-5600-406c-95e4-f698b1eecfc6', - 'device_driver_id': 'f3d807a0-eff0-4473-8ae5-594967136e05' - } - }, - # vswitch - { - '5a3ac5b9-9186-45d8-928c-9e702368dfb4': { - 'name': 'd_neutron-agent', - 'pid': '2359234', - 'host': 'd_neutron-agent', - 'service_component_id': '54f608bd-fb01-4614-9653-acbb803aeaf7', - 'device_driver_id': 'f27eb548-929c-45e2-a2a7-dc123e2a1bc7' - } - }, + # # cluster-1 + # { + # '65dbd695-fa92-4950-b8b4-d46aa0408f6a': { + # 'name': 'd_nova-compute-esx-cluster1', + # 'pid': '1233454343', + # 'host': 'd_nova-compute-esx-cluster1', + # 'service_component_id': '7259a9ff-2e6f-4e8d-b2fb-a529188825dd', + # 'device_driver_id': '3c089cdb-e1d5-4182-9a8e-cef9899fd7e5' + # } + # }, + # # cluster-2 + # { + 
# '50d2c0c6-741d-4108-a3a2-2090eaa0be37': { + # 'name': 'd_nova-compute-esx-cluster2', + # 'pid': '1233454344', + # 'host': 'd_nova-compute-esx-cluster2', + # 'service_component_id': '7259a9ff-2e6f-4e8d-b2fb-a529188825dd', + # 'device_driver_id': '4e0360ae-0728-4bfd-a557-3ad867231787' + # } + # }, + # # datastore-1 + # { + # '77e3ee16-fa2b-4e12-ad1c-226971d1a482': { + # 'name': 'd_cinder-volume-vmdk-1', + # 'pid': '09878654', + # 'host': 'd_cinder-volume-vmdk-1', + # 'service_component_id': 'b0e9ac3f-5600-406c-95e4-f698b1eecfc6', + # 'device_driver_id': '92d5e2c1-511b-4837-a57d-5e6ee723060c' + # } + # }, + # # datastore-2 + # { + # '8633ce68-2b02-4efd-983c-49a460f6d7ef': { + # 'name': 'd_cinder-volume-vmdk-2', + # 'pid': '4353453', + # 'host': 'd_cinder-volume-vmdk-2', + # 'service_component_id': 'b0e9ac3f-5600-406c-95e4-f698b1eecfc6', + # 'device_driver_id': 'f3d807a0-eff0-4473-8ae5-594967136e05' + # } + # }, + # # vswitch + # { + # '5a3ac5b9-9186-45d8-928c-9e702368dfb4': { + # 'name': 'd_neutron-agent', + # 'pid': '2359234', + # 'host': 'd_neutron-agent', + # 'service_component_id': '54f608bd-fb01-4614-9653-acbb803aeaf7', + # 'device_driver_id': 'f27eb548-929c-45e2-a2a7-dc123e2a1bc7' + # } + # }, ] CONFIG_LIST = [ - { - 'dc6aa02f-ba70-4410-a59c-5e113e629fe5': { - 'name':'vmware.host_ip', - 'value':'10.1.0.1', - 'help': 'VMWare vcenter IP address', - 'default':'', - 'type':'String', - 'required':True, - 'secret': False, - 'config_file':'/etc/nova/nova.conf', - 'service_worker_id': '65dbd695-fa92-4950-b8b4-d46aa0408f6a' - } - }, - { - 'dc6aa02f-ba70-4410-a59c-5e113e629f10': { - 'name':'vmware.host_username', - 'value':'Administraotr', - 'help': 'VMWare vcenter Username', - 'default':'Administrator', - 'type':'String', - 'required':True, - 'secret': False, - 'file':'/etc/nova/nova.conf', - 'service_worker_id': '65dbd695-fa92-4950-b8b4-d46aa0408f6a' - } - }, - { - 'dc6aa02f-ba70-4410-a59c-5e113e629f11': { - 'name':'vmware.host_password', - 'value':'password', - 'help': 'VMWare vcenter password', - 'default':'', - 'type':'String', - 'required':True, - 'secret': True, - 'file':'/etc/nova/nova.conf', - 'service_worker_id': '65dbd695-fa92-4950-b8b4-d46aa0408f6a' - }, - } + # { + # 'dc6aa02f-ba70-4410-a59c-5e113e629fe5': { + # 'name': 'vmware.host_ip', + # 'value': '10.1.0.1', + # 'help': 'VMWare vcenter IP address', + # 'default': '', + # 'type': 'String', + # 'required':True, + # 'secret': False, + # 'config_file': '/etc/nova/nova.conf', + # 'service_worker_id': '65dbd695-fa92-4950-b8b4-d46aa0408f6a' + # } + # }, + # { + # 'dc6aa02f-ba70-4410-a59c-5e113e629f10': { + # 'name': 'vmware.host_username', + # 'value': 'Administraotr', + # 'help': 'VMWare vcenter Username', + # 'default': 'Administrator', + # 'type': 'String', + # 'required':True, + # 'secret': False, + # 'file': '/etc/nova/nova.conf', + # 'service_worker_id': '65dbd695-fa92-4950-b8b4-d46aa0408f6a' + # } + # }, + # { + # 'dc6aa02f-ba70-4410-a59c-5e113e629f11': { + # 'name': 'vmware.host_password', + # 'value': 'password', + # 'help': 'VMWare vcenter password', + # 'default': '', + # 'type': 'String', + # 'required':True, + # 'secret': True, + # 'file': '/etc/nova/nova.conf', + # 'service_worker_id': '65dbd695-fa92-4950-b8b4-d46aa0408f6a' + # }, + # } ] diff --git a/setup.cfg b/setup.cfg index f27cf8f..5dfa049 100644 --- a/setup.cfg +++ b/setup.cfg @@ -47,6 +47,8 @@ output_file = namos/locale/namos.pot [entry_points] console_scripts = namos-manage = namos.cmd.manage:main + namos-api = namos.cmd.api:main + namos-manager = 
namos.cmd.conductor:main
 oslo.config.opts =
     namos.common.config = namos.common.config:list_opts
\ No newline at end of file
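
With the `oslo.config.opts` entry point above left in place, the stock
oslo-config-generator can rebuild a sample namos configuration from the
options that `namos.common.config:list_opts` exposes. A minimal generator
config, as a sketch (the output path is illustrative, not part of this
patch):

    [DEFAULT]
    output_file = etc/namos/namos.conf.sample
    namespace = namos.common.config

Running `oslo-config-generator --config-file <that file>` would then emit
namos.conf.sample listing every registered option with its help text and
default.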