Make init-db and the adapters/metadata loading use lazy initialization

Change-Id: I7f32732a9a2a1bc9ec721dad6e0c43ceef8c5f11
This commit is contained in:
xiaodongwang 2014-09-26 12:29:25 -07:00
parent bbbdf1a533
commit 88eb112683
63 changed files with 1668 additions and 690 deletions

View File

@ -20,6 +20,14 @@ import os
import os.path import os.path
import sys import sys
current_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(current_dir)
import switch_virtualenv
from compass.utils import flags from compass.utils import flags
from compass.utils import logsetting from compass.utils import logsetting

View File

@ -20,6 +20,13 @@ import os
import os.path import os.path
import sys import sys
current_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(current_dir)
import switch_virtualenv
from compass.utils import flags from compass.utils import flags
from compass.utils import logsetting from compass.utils import logsetting

View File

@ -20,6 +20,13 @@ import os
import os.path import os.path
import sys import sys
current_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(current_dir)
import switch_virtualenv
from compass.utils import flags from compass.utils import flags
from compass.utils import logsetting from compass.utils import logsetting

View File

@ -18,14 +18,14 @@
import logging import logging
import os import os
import os.path import os.path
import site
import sys import sys
activate_this = '$PythonHome/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this)) current_dir = os.path.dirname(os.path.realpath(__file__))
site.addsitedir('$PythonHome/lib/python2.6/site-packages') sys.path.append(current_dir)
sys.path.append('$PythonHome')
os.environ['PYTHON_EGG_CACHE'] = '/tmp/.egg'
import switch_virtualenv
from compass.utils import flags from compass.utils import flags
from compass.utils import logsetting from compass.utils import logsetting

View File

@ -20,16 +20,16 @@ import netaddr
import os import os
import re import re
import requests import requests
import site
import socket import socket
import sys import sys
import time import time
activate_this = '$PythonHome/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this)) current_dir = os.path.dirname(os.path.realpath(__file__))
site.addsitedir('$PythonHome/lib/python2.6/site-packages') sys.path.append(current_dir)
sys.path.append('$PythonHome')
os.environ['PYTHON_EGG_CACHE'] = '/tmp/.egg'
import switch_virtualenv
from compass.apiclient.restful import Client from compass.apiclient.restful import Client
from compass.utils import flags from compass.utils import flags
@ -71,10 +71,10 @@ flags.add('adapter_name',
default='') default='')
flags.add('adapter_os_pattern', flags.add('adapter_os_pattern',
help='adapter os name', help='adapter os name',
default=r'(?i)centos.*') default=r'^(?i)centos.*')
flags.add('adapter_target_system_pattern', flags.add('adapter_target_system_pattern',
help='adapter target system name', help='adapter target system name',
default='openstack.*') default='^openstack$')
flags.add('adapter_flavor_pattern', flags.add('adapter_flavor_pattern',
help='adapter flavor name', help='adapter flavor name',
default='allinone') default='allinone')
@ -346,6 +346,7 @@ def _get_adapter(client):
flavor_re = None flavor_re = None
adapter_id = None adapter_id = None
os_id = None os_id = None
distributed_system_id = None
flavor_id = None flavor_id = None
adapter = None adapter = None
for item in resp: for item in resp:
@ -370,20 +371,30 @@ def _get_adapter(client):
if adapter_name and adapter['name'] == adapter_name: if adapter_name and adapter['name'] == adapter_name:
adapter_id = adapter['id'] adapter_id = adapter['id']
logging.info('adapter name %s match: %s', adapter_name, adapter) logging.info('adapter name %s matches: %s', adapter_name, adapter)
elif 'distributed_system_name' in item: elif (
'distributed_system_name' in item and
adapter['distributed_system_name']
):
if ( if (
not target_system_re or target_system_re and
target_system_re.match(adapter['distributed_system_name']) target_system_re.match(adapter['distributed_system_name'])
): ):
adapter_id = adapter['id'] adapter_id = adapter['id']
distributed_system_id = adapter['distributed_system_id']
logging.info( logging.info(
'distributed system name pattern %s match: %s', 'distributed system name pattern %s matches: %s',
target_system_pattern, adapter target_system_pattern, adapter
) )
else:
if not target_system_re:
adapter_id = adapter['id']
logging.info(
'os only adapter matches no target_system_pattern'
)
if adapter_id: if adapter_id:
logging.info('adadpter does not match: %s', adapter) logging.info('adadpter matches: %s', adapter)
break break
if not adapter_id: if not adapter_id:
@ -394,12 +405,16 @@ def _get_adapter(client):
msg = 'no os found for %s' % os_pattern msg = 'no os found for %s' % os_pattern
raise Exception(msg) raise Exception(msg)
if target_system_re and not distributed_system_id:
msg = 'no distributed system found for' % target_system_pattern
raise Exception(msg)
if flavor_re and not flavor_id: if flavor_re and not flavor_id:
msg = 'no flavor found for %s' % flavor_pattern msg = 'no flavor found for %s' % flavor_pattern
raise Exception(msg) raise Exception(msg)
logging.info('adpater for deploying a cluster: %s', adapter_id) logging.info('adpater for deploying a cluster: %s', adapter_id)
return (adapter_id, os_id, flavor_id) return (adapter_id, os_id, distributed_system_id, flavor_id)
def _add_subnets(client): def _add_subnets(client):
@ -686,6 +701,9 @@ def _set_cluster_package_config(client, cluster_id):
for service_credential in flags.OPTIONS.service_credentials.split(',') for service_credential in flags.OPTIONS.service_credentials.split(',')
if service_credential if service_credential
] ]
logging.debug(
'service credentials: %s', service_credentials
)
for service_credential in service_credentials: for service_credential in service_credentials:
if ':' not in service_credential: if ':' not in service_credential:
raise Exception( raise Exception(
@ -706,6 +724,9 @@ def _set_cluster_package_config(client, cluster_id):
for console_credential in flags.OPTIONS.console_credentials.split(',') for console_credential in flags.OPTIONS.console_credentials.split(',')
if console_credential if console_credential
] ]
logging.debug(
'console credentials: %s', console_credentials
)
for console_credential in console_credentials: for console_credential in console_credentials:
if ':' not in console_credential: if ':' not in console_credential:
raise Exception( raise Exception(
@ -717,7 +738,7 @@ def _set_cluster_package_config(client, cluster_id):
'there is no = in console %s security' % console_name 'there is no = in console %s security' % console_name
) )
username, password = console_pair.split('=', 1) username, password = console_pair.split('=', 1)
package_config['security']['console_credentials'][service_name] = { package_config['security']['console_credentials'][console_name] = {
'username': username, 'username': username,
'password': password 'password': password
} }
@ -952,14 +973,14 @@ def main():
else: else:
machines = _get_machines(client) machines = _get_machines(client)
subnet_mapping = _add_subnets(client) subnet_mapping = _add_subnets(client)
adapter_id, os_id, flavor_id = _get_adapter(client) adapter_id, os_id, distributed_system_id, flavor_id = _get_adapter(client)
cluster_id, host_mapping, role_mapping = _add_cluster( cluster_id, host_mapping, role_mapping = _add_cluster(
client, adapter_id, os_id, flavor_id, machines) client, adapter_id, os_id, flavor_id, machines)
host_ips = _set_host_networking( host_ips = _set_host_networking(
client, host_mapping, subnet_mapping client, host_mapping, subnet_mapping
) )
_set_cluster_os_config(client, cluster_id, host_ips) _set_cluster_os_config(client, cluster_id, host_ips)
if flavor_id: if distributed_system_id:
_set_cluster_package_config(client, cluster_id) _set_cluster_package_config(client, cluster_id)
if role_mapping: if role_mapping:
_set_hosts_roles(client, cluster_id, host_mapping, role_mapping) _set_hosts_roles(client, cluster_id, host_mapping, role_mapping)

View File

@ -1,14 +0,0 @@
#!/usr/bin/env python
import os
import site
import sys
activate_this='$PythonHome/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
site.addsitedir('$PythonHome/lib/python2.6/site-packages')
sys.path.append('$PythonHome')
os.environ['PYTHON_EGG_CACHE'] = '/tmp/.egg'
import compass.actions.cli as cli
sys.exit(cli.main())

18
bin/__init__.py → bin/compass_check.py Normal file → Executable file
View File

@ -1,3 +1,5 @@
#!/usr/bin/env python
#
# Copyright 2014 Huawei Technologies Co. Ltd # Copyright 2014 Huawei Technologies Co. Ltd
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
@ -11,3 +13,19 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""compass health check."""
import os
import os.path
import sys
current_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(current_dir)
import switch_virtualenv
import compass.actions.cli as cli
sys.exit(cli.main())

42
bin/compass_wsgi.py Executable file
View File

@ -0,0 +1,42 @@
#!/usr/bin/env python
#
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""compass wsgi module."""
import os
import sys
current_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(current_dir)
import switch_virtualenv
from compass.utils import flags
from compass.utils import logsetting
from compass.utils import setting_wrapper as setting
flags.init()
flags.OPTIONS.logfile = setting.WEB_LOGFILE
logsetting.init()
from compass.api import api as compass_api
compass_api.init()
application = compass_api.app

View File

@ -18,14 +18,15 @@
import logging import logging
import os import os
import os.path import os.path
import site
import sys import sys
activate_this = '$PythonHome/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this)) current_dir = os.path.dirname(os.path.realpath(__file__))
site.addsitedir('$PythonHome/lib/python2.6/site-packages') sys.path.append(current_dir)
sys.path.append('$PythonHome')
os.environ['PYTHON_EGG_CACHE'] = '/tmp/.egg'
import switch_virtualenv
from compass.db.api import cluster as cluster_api from compass.db.api import cluster as cluster_api
from compass.db.api import database from compass.db.api import database

View File

@ -17,14 +17,14 @@
"""utility binary to manage database.""" """utility binary to manage database."""
import os import os
import os.path import os.path
import site
import sys import sys
activate_this = '$PythonHome/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this)) current_dir = os.path.dirname(os.path.realpath(__file__))
site.addsitedir('$PythonHome/lib/python2.6/site-packages') sys.path.append(current_dir)
sys.path.append('$PythonHome')
os.environ['PYTHON_EGG_CACHE'] = '/tmp/.egg'
import switch_virtualenv
from flask.ext.script import Manager from flask.ext.script import Manager

View File

@ -19,14 +19,14 @@ import functools
import lockfile import lockfile
import logging import logging
import os import os
import site
import sys import sys
activate_this = '$PythonHome/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this)) current_dir = os.path.dirname(os.path.realpath(__file__))
site.addsitedir('$PythonHome/lib/python2.6/site-packages') sys.path.append(current_dir)
sys.path.append('$PythonHome')
os.environ['PYTHON_EGG_CACHE'] = '/tmp/.egg'
import switch_virtualenv
from multiprocessing import Pool from multiprocessing import Pool

View File

@ -19,14 +19,14 @@ import functools
import lockfile import lockfile
import logging import logging
import os import os
import site
import sys import sys
activate_this = '$PythonHome/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this)) current_dir = os.path.dirname(os.path.realpath(__file__))
site.addsitedir('$PythonHome/lib/python2.6/site-packages') sys.path.append(current_dir)
sys.path.append('$PythonHome')
os.environ['PYTHON_EGG_CACHE'] = '/tmp/.egg'
import switch_virtualenv
from compass.actions import update_progress from compass.actions import update_progress
from compass.db.api import database from compass.db.api import database

View File

@ -0,0 +1,30 @@
#!/usr/bin/env python
#
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""utility switch to virtual env."""
import os
import os.path
import site
import sys
virtual_env = '$PythonHome'
activate_this = '%s/bin/activate_this.py' % virtual_env
execfile(activate_this, dict(__file__=activate_this))
site.addsitedir('%s/lib/python2.6/site-packages' % virtual_env)
if virtual_env not in sys.path:
sys.path.append(virtual_env)
os.environ['PYTHON_EGG_CACHE'] = '/tmp/.egg'

View File

@ -25,7 +25,7 @@ from compass.utils import setting_wrapper as setting
from compass.utils import util from compass.utils import util
def _add_system(session, model, configs): def _add_system(session, model, configs, exception_when_existing=True):
parents = {} parents = {}
for config in configs: for config in configs:
logging.info( logging.info(
@ -34,7 +34,7 @@ def _add_system(session, model, configs):
) )
object = utils.add_db_object( object = utils.add_db_object(
session, model, session, model,
False, config['NAME'], exception_when_existing, config['NAME'],
deployable=config.get('DEPLOYABLE', False) deployable=config.get('DEPLOYABLE', False)
) )
parents[config['NAME']] = ( parents[config['NAME']] = (
@ -48,17 +48,23 @@ def _add_system(session, model, configs):
utils.update_db_object(session, object, parent=parent) utils.update_db_object(session, object, parent=parent)
def add_oses_internal(session): def add_oses_internal(session, exception_when_existing=True):
configs = util.load_configs(setting.OS_DIR) configs = util.load_configs(setting.OS_DIR)
_add_system(session, models.OperatingSystem, configs) _add_system(
session, models.OperatingSystem, configs,
exception_when_existing=exception_when_existing
)
def add_distributed_systems_internal(session): def add_distributed_systems_internal(session, exception_when_existing=True):
configs = util.load_configs(setting.DISTRIBUTED_SYSTEM_DIR) configs = util.load_configs(setting.DISTRIBUTED_SYSTEM_DIR)
_add_system(session, models.DistributedSystem, configs) _add_system(
session, models.DistributedSystem, configs,
exception_when_existing=exception_when_existing
)
def add_adapters_internal(session): def add_adapters_internal(session, exception_when_existing=True):
parents = {} parents = {}
configs = util.load_configs(setting.ADAPTER_DIR) configs = util.load_configs(setting.ADAPTER_DIR)
for config in configs: for config in configs:
@ -86,7 +92,7 @@ def add_adapters_internal(session):
package_installer = None package_installer = None
adapter = utils.add_db_object( adapter = utils.add_db_object(
session, models.Adapter, session, models.Adapter,
False, exception_when_existing,
config['NAME'], config['NAME'],
display_name=config.get('DISPLAY_NAME', None), display_name=config.get('DISPLAY_NAME', None),
distributed_system=distributed_system, distributed_system=distributed_system,
@ -109,7 +115,7 @@ def add_adapters_internal(session):
if supported_os_pattern.match(os_name): if supported_os_pattern.match(os_name):
utils.add_db_object( utils.add_db_object(
session, models.AdapterOS, session, models.AdapterOS,
True, exception_when_existing,
os.id, adapter.id os.id, adapter.id
) )
break break
@ -123,7 +129,7 @@ def add_adapters_internal(session):
utils.update_db_object(session, adapter, parent=parent) utils.update_db_object(session, adapter, parent=parent)
def add_roles_internal(session): def add_roles_internal(session, exception_when_existing=True):
configs = util.load_configs(setting.ADAPTER_ROLE_DIR) configs = util.load_configs(setting.ADAPTER_ROLE_DIR)
for config in configs: for config in configs:
logging.info( logging.info(
@ -136,14 +142,14 @@ def add_roles_internal(session):
for role_dict in config['ROLES']: for role_dict in config['ROLES']:
utils.add_db_object( utils.add_db_object(
session, models.AdapterRole, session, models.AdapterRole,
False, role_dict['role'], adapter.id, exception_when_existing, role_dict['role'], adapter.id,
display_name=role_dict.get('display_name', None), display_name=role_dict.get('display_name', None),
description=role_dict.get('description', None), description=role_dict.get('description', None),
optional=role_dict.get('optional', False) optional=role_dict.get('optional', False)
) )
def add_flavors_internal(session): def add_flavors_internal(session, exception_when_existing=True):
configs = util.load_configs(setting.ADAPTER_FLAVOR_DIR) configs = util.load_configs(setting.ADAPTER_FLAVOR_DIR)
for config in configs: for config in configs:
logging.info('add config %s to flavor', config) logging.info('add config %s to flavor', config)
@ -154,7 +160,7 @@ def add_flavors_internal(session):
for flavor_dict in config['FLAVORS']: for flavor_dict in config['FLAVORS']:
flavor = utils.add_db_object( flavor = utils.add_db_object(
session, models.AdapterFlavor, session, models.AdapterFlavor,
False, flavor_dict['flavor'], adapter.id, exception_when_existing, flavor_dict['flavor'], adapter.id,
display_name=flavor_dict.get('display_name', None), display_name=flavor_dict.get('display_name', None),
template=flavor_dict.get('template', None) template=flavor_dict.get('template', None)
) )
@ -166,7 +172,7 @@ def add_flavors_internal(session):
) )
utils.add_db_object( utils.add_db_object(
session, models.AdapterFlavorRole, session, models.AdapterFlavorRole,
False, flavor.id, role.id exception_when_existing, flavor.id, role.id
) )
utils.update_db_object( utils.update_db_object(
session, flavor, session, flavor,

View File

@ -47,6 +47,10 @@ RESP_FLAVORS_FIELDS = [
@database.run_in_session() @database.run_in_session()
def load_adapters(session): def load_adapters(session):
load_adapters_internal(session)
def load_adapters_internal(session):
global ADAPTER_MAPPING global ADAPTER_MAPPING
logging.info('load adapters into memory') logging.info('load adapters into memory')
ADAPTER_MAPPING = adapter_api.get_adapters_internal(session) ADAPTER_MAPPING = adapter_api.get_adapters_internal(session)
@ -93,11 +97,16 @@ def _filter_adapters(adapter_config, filter_name, filter_value):
) )
def list_adapters(session, lister, **filters): def list_adapters(session, lister, **filters):
"""list adapters.""" """list adapters."""
if not ADAPTER_MAPPING:
load_adapters_internal(session)
return ADAPTER_MAPPING.values() return ADAPTER_MAPPING.values()
def get_adapter_internal(adapter_id): def get_adapter_internal(session, adapter_id):
"""get adapter.""" """get adapter."""
if not ADAPTER_MAPPING:
load_adapters_internal(session)
if adapter_id not in ADAPTER_MAPPING: if adapter_id not in ADAPTER_MAPPING:
raise exception.RecordNotExists( raise exception.RecordNotExists(
'adpater %s does not exist' % adapter_id 'adpater %s does not exist' % adapter_id
@ -118,7 +127,7 @@ def get_adapter_internal(adapter_id):
) )
def get_adapter(session, getter, adapter_id, **kwargs): def get_adapter(session, getter, adapter_id, **kwargs):
"""get adapter.""" """get adapter."""
return get_adapter_internal(adapter_id) return get_adapter_internal(session, adapter_id)
@utils.supported_filters([]) @utils.supported_filters([])
@ -129,8 +138,5 @@ def get_adapter(session, getter, adapter_id, **kwargs):
@utils.wrap_to_dict(RESP_ROLES_FIELDS) @utils.wrap_to_dict(RESP_ROLES_FIELDS)
def get_adapter_roles(session, getter, adapter_id, **kwargs): def get_adapter_roles(session, getter, adapter_id, **kwargs):
"""get adapter roles.""" """get adapter roles."""
if adapter_id not in ADAPTER_MAPPING: adapter = get_adapter_internal(session, adapter_id)
raise exception.RecordNotExists( return adapter.get('roles', [])
'adpater %s does not exist' % adapter_id
)
return ADAPTER_MAPPING[adapter_id].get('roles', [])

View File

@ -14,6 +14,7 @@
# limitations under the License. # limitations under the License.
"""Cluster database operations.""" """Cluster database operations."""
import copy
import functools import functools
import logging import logging
@ -344,12 +345,14 @@ def get_cluster_metadata(session, getter, cluster_id, **kwargs):
os = cluster.os os = cluster.os
if os: if os:
metadatas['os_config'] = metadata_api.get_os_metadata_internal( metadatas['os_config'] = metadata_api.get_os_metadata_internal(
os.id session, os.id
) )
adapter = cluster.adapter adapter = cluster.adapter
if adapter: if adapter:
metadatas['package_config'] = ( metadatas['package_config'] = (
metadata_api.get_package_metadata_internal(adapter.id) metadata_api.get_package_metadata_internal(
session, adapter.id
)
) )
return metadatas return metadatas
@ -407,10 +410,16 @@ def update_cluster_config(session, updater, cluster_id, **kwargs):
cluster = utils.get_db_object( cluster = utils.get_db_object(
session, models.Cluster, id=cluster_id session, models.Cluster, id=cluster_id
) )
os_config_validates = functools.partial(
metadata_api.validate_os_config, os_id=cluster.os_id) def os_config_validates(config):
package_config_validates = functools.partial( metadata_api.validate_os_config(
metadata_api.validate_package_config, adapter_id=cluster.adapter_id) session, config, os_id=cluster.os_id
)
def package_config_validates(config):
metadata_api.validate_package_config(
session, config, adapter_id=cluster.adapter_id
)
@utils.input_validates( @utils.input_validates(
put_os_config=os_config_validates, put_os_config=os_config_validates,
@ -443,10 +452,15 @@ def patch_cluster_config(session, updater, cluster_id, **kwargs):
session, models.Cluster, id=cluster_id session, models.Cluster, id=cluster_id
) )
os_config_validates = functools.partial( def os_config_validates(config):
metadata_api.validate_os_config, os_id=cluster.os_id) metadata_api.validate_os_config(
package_config_validates = functools.partial( session, config, os_id=cluster.os_id
metadata_api.validate_package_config, adapter_id=cluster.adapter_id) )
def package_config_validates(config):
metadata_api.validate_package_config(
session, config, adapter_id=cluster.adapter_id
)
@utils.output_validates( @utils.output_validates(
os_config=os_config_validates, os_config=os_config_validates,
@ -896,15 +910,15 @@ def _update_clusterhost_config(session, updater, clusterhost, **kwargs):
ignore_keys.append('put_os_config') ignore_keys.append('put_os_config')
def os_config_validates(os_config): def os_config_validates(os_config):
from compass.db.api import host as host_api
host = clusterhost.host host = clusterhost.host
metadata_api.validate_os_config(os_config, host.os_id) metadata_api.validate_os_config(
session, os_config, host.os_id)
def package_config_validates(package_config): def package_config_validates(package_config):
cluster = clusterhost.cluster cluster = clusterhost.cluster
is_cluster_editable(session, cluster, updater) is_cluster_editable(session, cluster, updater)
metadata_api.validate_package_config( metadata_api.validate_package_config(
package_config, cluster.adapter_id session, package_config, cluster.adapter_id
) )
@utils.supported_filters( @utils.supported_filters(
@ -1052,13 +1066,13 @@ def _patch_clusterhost_config(session, updater, clusterhost, **kwargs):
def os_config_validates(os_config): def os_config_validates(os_config):
host = clusterhost.host host = clusterhost.host
metadata_api.validate_os_config(os_config, host.os_id) metadata_api.validate_os_config(session, os_config, host.os_id)
def package_config_validates(package_config): def package_config_validates(package_config):
cluster = clusterhost.cluster cluster = clusterhost.cluster
is_cluster_editable(session, cluster, updater) is_cluster_editable(session, cluster, updater)
metadata_api.validate_package_config( metadata_api.validate_package_config(
package_config, cluster.adapter_id session, package_config, cluster.adapter_id
) )
@utils.supported_filters( @utils.supported_filters(
@ -1240,10 +1254,16 @@ def validate_cluster(session, cluster):
role.name for role in cluster_roles if not role.optional role.name for role in cluster_roles if not role.optional
]) ])
clusterhost_roles = set([]) clusterhost_roles = set([])
interface_subnets = {}
for clusterhost in cluster.clusterhosts: for clusterhost in cluster.clusterhosts:
roles = clusterhost.roles roles = clusterhost.roles
for role in roles: for role in roles:
clusterhost_roles.add(role.name) clusterhost_roles.add(role.name)
host = clusterhost.host
for host_network in host.host_networks:
interface_subnets.setdefault(
host_network.interface, set([])
).add(host_network.subnet.subnet)
missing_roles = necessary_roles - clusterhost_roles missing_roles = necessary_roles - clusterhost_roles
if missing_roles: if missing_roles:
raise exception.InvalidParameter( raise exception.InvalidParameter(
@ -1251,6 +1271,13 @@ def validate_cluster(session, cluster):
list(missing_roles), cluster.name list(missing_roles), cluster.name
) )
) )
for interface, subnets in interface_subnets.items():
if len(subnets) > 1:
raise exception.InvalidParameter(
'multi subnets %s in interface %s' % (
list(subnets), interface
)
)
@utils.supported_filters(optional_support_keys=['review']) @utils.supported_filters(optional_support_keys=['review'])
@ -1279,10 +1306,14 @@ def review_cluster(session, reviewer, cluster_id, review={}, **kwargs):
clusterhost.host_id in host_ids clusterhost.host_id in host_ids
): ):
clusterhosts.append(clusterhost) clusterhosts.append(clusterhost)
os_config = cluster.os_config os_config = copy.deepcopy(cluster.os_config)
os_config = metadata_api.autofill_os_config(
session, os_config, cluster.os_id,
cluster=cluster
)
if os_config: if os_config:
metadata_api.validate_os_config( metadata_api.validate_os_config(
os_config, cluster.os_id, True session, os_config, cluster.os_id, True
) )
for clusterhost in clusterhosts: for clusterhost in clusterhosts:
host = clusterhost.host host = clusterhost.host
@ -1294,33 +1325,56 @@ def review_cluster(session, reviewer, cluster_id, review={}, **kwargs):
'since it is not editable' % host.name 'since it is not editable' % host.name
) )
continue continue
host_os_config = host.os_config host_os_config = copy.deepcopy(host.os_config)
host_os_config = metadata_api.autofill_os_config(
session, host_os_config, host.os_id,
host=host
)
deployed_os_config = util.merge_dict( deployed_os_config = util.merge_dict(
os_config, host_os_config os_config, host_os_config
) )
metadata_api.validate_os_config( metadata_api.validate_os_config(
deployed_os_config, host.os_id, True session, deployed_os_config, host.os_id, True
) )
host_api.validate_host(session, host) host_api.validate_host(session, host)
utils.update_db_object(session, host, config_validated=True) utils.update_db_object(
package_config = cluster.package_config session, host, os_config=host_os_config, config_validated=True
)
package_config = copy.deepcopy(cluster.package_config)
package_config = metadata_api.autofill_package_config(
session, package_config, cluster.adapter_id,
cluster=cluster
)
if package_config: if package_config:
metadata_api.validate_package_config( metadata_api.validate_package_config(
package_config, cluster.adapter_id, True session, package_config, cluster.adapter_id, True
) )
for clusterhost in clusterhosts: for clusterhost in clusterhosts:
clusterhost_package_config = clusterhost.package_config clusterhost_package_config = copy.deepcopy(
clusterhost.package_config
)
clusterhost_package_config = metadata_api.autofill_package_config(
session, clusterhost_package_config,
cluster.adapter_id, clusterhost=clusterhost
)
deployed_package_config = util.merge_dict( deployed_package_config = util.merge_dict(
package_config, clusterhost_package_config package_config, clusterhost_package_config
) )
metadata_api.validate_package_config( metadata_api.validate_package_config(
deployed_package_config, session, deployed_package_config,
cluster.adapter_id, True cluster.adapter_id, True
) )
validate_clusterhost(session, clusterhost) validate_clusterhost(session, clusterhost)
utils.update_db_object(session, clusterhost, config_validated=True) utils.update_db_object(
session, clusterhost,
package_config=clusterhost_package_config,
config_validated=True
)
validate_cluster(session, cluster) validate_cluster(session, cluster)
utils.update_db_object(session, cluster, config_validated=True) utils.update_db_object(
session, cluster, os_config=os_config, package_config=package_config,
config_validated=True
)
return { return {
'cluster': cluster, 'cluster': cluster,
'hosts': clusterhosts 'hosts': clusterhosts

View File

@ -93,6 +93,9 @@ def session():
.. note:: .. note::
To operate database, it should be called in database session. To operate database, it should be called in database session.
""" """
if not ENGINE:
init()
if hasattr(SESSION_HOLDER, 'session'): if hasattr(SESSION_HOLDER, 'session'):
logging.error('we are already in session') logging.error('we are already in session')
raise exception.DatabaseException('session already exist') raise exception.DatabaseException('session already exist')
@ -174,12 +177,7 @@ def _setup_switch_table(switch_session):
from compass.db.api import switch from compass.db.api import switch
switch.add_switch_internal( switch.add_switch_internal(
switch_session, long(netaddr.IPAddress(setting.DEFAULT_SWITCH_IP)), switch_session, long(netaddr.IPAddress(setting.DEFAULT_SWITCH_IP)),
True, filters=[{ True, filters=['deny ports all']
'filter_name': 'deny-all',
'filter_type': 'deny',
'port_prefix': '.*',
'port_suffix': '.*'
}]
) )

View File

@ -397,8 +397,10 @@ def update_host_config(session, updater, host_id, **kwargs):
session, models.Host, id=host_id session, models.Host, id=host_id
) )
os_config_validates = functools.partial( def os_config_validates(config):
metadata_api.validate_os_config, os_id=host.os_id) metadata_api.validate_os_config(
session, config, os_id=host.os_id
)
@utils.input_validates( @utils.input_validates(
put_os_config=os_config_validates, put_os_config=os_config_validates,
@ -426,8 +428,10 @@ def patch_host_config(session, updater, host_id, **kwargs):
session, models.Host, id=host_id session, models.Host, id=host_id
) )
os_config_validates = functools.partial( def os_config_validates(config):
metadata_api.validate_os_config, os_id=host.os_id) metadata_api.validate_os_config(
session, config, os_id=host.os_id
)
@utils.output_validates( @utils.output_validates(
os_config=os_config_validates, os_config=os_config_validates,

View File

@ -25,23 +25,29 @@ from compass.utils import setting_wrapper as setting
from compass.utils import util from compass.utils import util
def _add_installers(session, model, configs): def _add_installers(session, model, configs, exception_when_existing=True):
installers = [] installers = []
for config in configs: for config in configs:
installers.append(utils.add_db_object( installers.append(utils.add_db_object(
session, model, session, model,
True, config['INSTANCE_NAME'], exception_when_existing, config['INSTANCE_NAME'],
name=config['NAME'], name=config['NAME'],
settings=config.get('SETTINGS', {}) settings=config.get('SETTINGS', {})
)) ))
return installers return installers
def add_os_installers_internal(session): def add_os_installers_internal(session, exception_when_existing=True):
configs = util.load_configs(setting.OS_INSTALLER_DIR) configs = util.load_configs(setting.OS_INSTALLER_DIR)
return _add_installers(session, models.OSInstaller, configs) return _add_installers(
session, models.OSInstaller, configs,
exception_when_existing=exception_when_existing
)
def add_package_installers_internal(session): def add_package_installers_internal(session, exception_when_existing=True):
configs = util.load_configs(setting.PACKAGE_INSTALLER_DIR) configs = util.load_configs(setting.PACKAGE_INSTALLER_DIR)
return _add_installers(session, models.PackageInstaller, configs) return _add_installers(
session, models.PackageInstaller, configs,
exception_when_existing=exception_when_existing
)

View File

@ -13,13 +13,17 @@
# limitations under the License. # limitations under the License.
"""Metadata related database operations.""" """Metadata related database operations."""
import copy
import logging import logging
import string
from compass.db.api import database from compass.db.api import database
from compass.db.api import utils from compass.db.api import utils
from compass.db import callback as metadata_callback
from compass.db import exception from compass.db import exception
from compass.db import models from compass.db import models
from compass.db import validator from compass.db import validator as metadata_validator
from compass.utils import setting_wrapper as setting from compass.utils import setting_wrapper as setting
from compass.utils import util from compass.utils import util
@ -28,6 +32,10 @@ from compass.utils import util
def _add_field_internal(session, model, configs): def _add_field_internal(session, model, configs):
fields = [] fields = []
for config in configs: for config in configs:
if not isinstance(config, dict):
raise exception.InvalidParameter(
'config %s is not dict' % config
)
fields.append(utils.add_db_object( fields.append(utils.add_db_object(
session, model, False, session, model, False,
config['NAME'], config['NAME'],
@ -41,9 +49,12 @@ def _add_field_internal(session, model, configs):
def add_os_field_internal(session): def add_os_field_internal(session):
env_locals = {}
env_locals.update(metadata_validator.VALIDATOR_LOCALS)
env_locals.update(metadata_callback.CALLBACK_LOCALS)
configs = util.load_configs( configs = util.load_configs(
setting.OS_FIELD_DIR, setting.OS_FIELD_DIR,
env_locals=validator.VALIDATOR_LOCALS env_locals=env_locals
) )
return _add_field_internal( return _add_field_internal(
session, models.OSConfigField, configs session, models.OSConfigField, configs
@ -51,9 +62,12 @@ def add_os_field_internal(session):
def add_package_field_internal(session): def add_package_field_internal(session):
env_locals = {}
env_locals.update(metadata_validator.VALIDATOR_LOCALS)
env_locals.update(metadata_callback.CALLBACK_LOCALS)
configs = util.load_configs( configs = util.load_configs(
setting.PACKAGE_FIELD_DIR, setting.PACKAGE_FIELD_DIR,
env_locals=validator.VALIDATOR_LOCALS env_locals=env_locals
) )
return _add_field_internal( return _add_field_internal(
session, models.PackageConfigField, configs session, models.PackageConfigField, configs
@ -61,9 +75,13 @@ def add_package_field_internal(session):
def _add_metadata( def _add_metadata(
session, field_model, metadata_model, path, name, config, session, field_model, metadata_model, id, path, name, config,
parent=None, **kwargs exception_when_existing=True, parent=None, **kwargs
): ):
if not isinstance(config, dict):
raise exception.InvalidParameter(
'%s config %s is not dict' % (path, config)
)
metadata_self = config.get('_self', {}) metadata_self = config.get('_self', {})
if 'field' in metadata_self: if 'field' in metadata_self:
field = utils.get_db_object( field = utils.get_db_object(
@ -71,38 +89,96 @@ def _add_metadata(
) )
else: else:
field = None field = None
mapping_to_template = metadata_self.get('mapping_to', None)
if mapping_to_template:
mapping_to = string.Template(
mapping_to_template
).safe_substitute(
**kwargs
)
else:
mapping_to = None
metadata = utils.add_db_object( metadata = utils.add_db_object(
session, metadata_model, False, session, metadata_model, exception_when_existing,
path, name=name, parent=parent, field=field, id, path, name=name, parent=parent, field=field,
display_name=metadata_self.get('display_name', name), display_name=metadata_self.get('display_name', name),
description=metadata_self.get('description', None), description=metadata_self.get('description', None),
is_required=metadata_self.get('is_required', False), is_required=metadata_self.get('is_required', False),
required_in_whole_config=metadata_self.get( required_in_whole_config=metadata_self.get(
'required_in_whole_config', False 'required_in_whole_config', False),
), mapping_to=mapping_to,
mapping_to=metadata_self.get('mapping_to', None),
validator=metadata_self.get('validator', None), validator=metadata_self.get('validator', None),
js_validator=metadata_self.get('js_validator', None), js_validator=metadata_self.get('js_validator', None),
default_value=metadata_self.get('default_value', None), default_value=metadata_self.get('default_value', None),
options=metadata_self.get('options', []), default_callback=metadata_self.get('default_callback', None),
required_in_options=metadata_self.get('required_in_options', False), default_callback_params=metadata_self.get(
'default_callback_params', {}),
options=metadata_self.get('options', None),
options_callback=metadata_self.get('options_callback', None),
options_callback_params=metadata_self.get(
'options_callback_params', {}),
autofill_callback=metadata_self.get(
'autofill_callback', None),
autofill_callback_params=metadata_self.get(
'autofill_callback_params', {}),
required_in_options=metadata_self.get(
'required_in_options', False),
**kwargs **kwargs
) )
key_extensions = metadata_self.get('key_extensions', {})
general_keys = []
for key, value in config.items(): for key, value in config.items():
if key not in '_self': if key.startswith('_'):
continue
if key in key_extensions:
if not key.startswith('$'):
raise exception.InvalidParameter(
'%s subkey %s should start with $' % (
path, key
)
)
extended_keys = key_extensions[key]
for extended_key in extended_keys:
if extended_key.startswith('$'):
raise exception.InvalidParameter(
'%s extended key %s should not start with $' % (
path, extended_key
)
)
sub_kwargs = dict(kwargs)
sub_kwargs[key[1:]] = extended_key
_add_metadata(
session, field_model, metadata_model,
id, '%s/%s' % (path, extended_key), extended_key, value,
exception_when_existing=exception_when_existing,
parent=metadata, **sub_kwargs
)
else:
if key.startswith('$'):
general_keys.append(key)
_add_metadata( _add_metadata(
session, field_model, metadata_model, session, field_model, metadata_model,
'%s/%s' % (path, key), key, value, id, '%s/%s' % (path, key), key, value,
exception_when_existing=exception_when_existing,
parent=metadata, **kwargs parent=metadata, **kwargs
) )
if len(general_keys) > 1:
raise exception.InvalidParameter(
'found multiple general keys in %s: %s' % (
path, general_keys
)
)
return metadata return metadata
def add_os_metadata_internal(session): def add_os_metadata_internal(session, exception_when_existing=True):
os_metadatas = [] os_metadatas = []
env_locals = {}
env_locals.update(metadata_validator.VALIDATOR_LOCALS)
env_locals.update(metadata_callback.CALLBACK_LOCALS)
configs = util.load_configs( configs = util.load_configs(
setting.OS_METADATA_DIR, setting.OS_METADATA_DIR,
env_locals=validator.VALIDATOR_LOCALS env_locals=env_locals
) )
for config in configs: for config in configs:
os = utils.get_db_object( os = utils.get_db_object(
@ -112,17 +188,21 @@ def add_os_metadata_internal(session):
os_metadatas.append(_add_metadata( os_metadatas.append(_add_metadata(
session, models.OSConfigField, session, models.OSConfigField,
models.OSConfigMetadata, models.OSConfigMetadata,
key, key, value, parent=None, os.id, key, key, value,
os=os exception_when_existing=exception_when_existing,
parent=None
)) ))
return os_metadatas return os_metadatas
def add_package_metadata_internal(session): def add_package_metadata_internal(session, exception_when_existing=True):
package_metadatas = [] package_metadatas = []
env_locals = {}
env_locals.update(metadata_validator.VALIDATOR_LOCALS)
env_locals.update(metadata_callback.CALLBACK_LOCALS)
configs = util.load_configs( configs = util.load_configs(
setting.PACKAGE_METADATA_DIR, setting.PACKAGE_METADATA_DIR,
env_locals=validator.VALIDATOR_LOCALS env_locals=env_locals
) )
for config in configs: for config in configs:
adapter = utils.get_db_object( adapter = utils.get_db_object(
@ -132,8 +212,9 @@ def add_package_metadata_internal(session):
package_metadatas.append(_add_metadata( package_metadatas.append(_add_metadata(
session, models.PackageConfigField, session, models.PackageConfigField,
models.PackageConfigMetadata, models.PackageConfigMetadata,
key, key, value, parent=None, adapter.id, key, key, value,
adapter=adapter exception_when_existing=exception_when_existing,
parent=None
)) ))
return package_metadatas return package_metadatas
@ -173,9 +254,15 @@ def get_os_metadatas_internal(session):
def _validate_self( def _validate_self(
config_path, config_key, config, metadata, whole_check config_path, config_key, config,
metadata, whole_check,
**kwargs
): ):
if '_self' not in metadata: if '_self' not in metadata:
if isinstance(config, dict):
_validate_config(
config_path, config, metadata, whole_check, **kwargs
)
return return
field_type = metadata['_self'].get('field_type', 'basestring') field_type = metadata['_self'].get('field_type', 'basestring')
if not isinstance(config, field_type): if not isinstance(config, field_type):
@ -185,34 +272,39 @@ def _validate_self(
required_in_options = metadata['_self'].get( required_in_options = metadata['_self'].get(
'required_in_options', False 'required_in_options', False
) )
options = metadata['_self'].get('options', []) options = metadata['_self'].get('options', None)
if required_in_options: if required_in_options:
if field_type in [int, basestring, float, bool]: if field_type in [int, basestring, float, bool]:
if config not in options: if options and config not in options:
raise exception.InvalidParameter( raise exception.InvalidParameter(
'%s config is not in %s' % (config_path, options) '%s config is not in %s' % (config_path, options)
) )
elif field_type in [list, tuple]: elif field_type in [list, tuple]:
if not set(config).issubset(set(options)): if options and not set(config).issubset(set(options)):
raise exception.InvalidParameter( raise exception.InvalidParameter(
'%s config is not in %s' % (config_path, options) '%s config is not in %s' % (config_path, options)
) )
elif field_type == dict: elif field_type == dict:
if not set(config.keys()).issubset(set(options)): if options and not set(config.keys()).issubset(set(options)):
raise exception.InvalidParameter( raise exception.InvalidParameter(
'%s config is not in %s' % (config_path, options) '%s config is not in %s' % (config_path, options)
) )
validator = metadata['_self'].get('validator', None) validator = metadata['_self'].get('validator', None)
if validator: if validator:
if not validator(config_key, config): if not validator(config_key, config, **kwargs):
raise exception.InvalidParameter( raise exception.InvalidParameter(
'%s config is invalid' % config_path '%s config is invalid' % config_path
) )
if issubclass(field_type, dict): if isinstance(config, dict):
_validate_config(config_path, config, metadata, whole_check) _validate_config(
config_path, config, metadata, whole_check, **kwargs
)
def _validate_config(config_path, config, metadata, whole_check): def _validate_config(
config_path, config, metadata, whole_check,
**kwargs
):
generals = {} generals = {}
specified = {} specified = {}
for key, value in metadata.items(): for key, value in metadata.items():
@ -250,15 +342,118 @@ def _validate_config(config_path, config, metadata, whole_check):
for key in intersect_keys: for key in intersect_keys:
_validate_self( _validate_self(
'%s/%s' % (config_path, key), '%s/%s' % (config_path, key),
key, config[key], specified[key], whole_check key, config[key], specified[key], whole_check,
**kwargs
) )
for key in not_found_keys: for key in not_found_keys:
if not generals:
raise exception.InvalidParameter(
'key %s missing in metadata %s' % (
key, config_path
)
)
for general_key, general_value in generals.items(): for general_key, general_value in generals.items():
_validate_self( _validate_self(
'%s/%s' % (config_path, key), '%s/%s' % (config_path, key),
key, config[key], general_value, whole_check key, config[key], general_value, whole_check,
**kwargs
) )
def validate_config_internal(config, metadata, whole_check): def _autofill_self_config(
_validate_config('', config, metadata, whole_check) config_path, config_key, config,
metadata,
**kwargs
):
if '_self' not in metadata:
if isinstance(config, dict):
_autofill_config(
config_path, config, metadata, **kwargs
)
return config
autofill_callback = metadata['_self'].get(
'autofill_callback', None
)
autofill_callback_params = metadata['_self'].get(
'autofill_callback_params', {}
)
callback_params = dict(kwargs)
if autofill_callback_params:
callback_params.update(autofill_callback_params)
default_value = metadata['_self'].get(
'default_value', None
)
if default_value is not None:
callback_params['default_value'] = default_value
options = metadata['_self'].get(
'options', None
)
if options is not None:
callback_params['options'] = options
if autofill_callback:
config = autofill_callback(
config_key, config, **callback_params
)
if config is None:
new_config = {}
else:
new_config = config
if isinstance(new_config, dict):
_autofill_config(
config_path, new_config, metadata, **kwargs
)
if new_config:
config = new_config
return config
def _autofill_config(
    config_path, config, metadata, **kwargs
):
    """Autofill missing values in ``config`` according to ``metadata``.

    Walks one level of the metadata tree: metadata keys starting with
    '$' are general (wildcard) entries, keys starting with '_' are
    metadata attributes and are skipped, and all other keys are
    specified entries matched literally against config keys.
    Mutates ``config`` in place and returns it.
    """
    generals = {}
    specified = {}
    for key, value in metadata.items():
        if key.startswith('$'):
            generals[key] = value
        elif key.startswith('_'):
            pass
        else:
            specified[key] = value
    config_keys = set(config.keys())
    specified_keys = set(specified.keys())
    intersect_keys = config_keys & specified_keys
    not_found_keys = config_keys - specified_keys
    redundant_keys = specified_keys - config_keys
    # Metadata entries with no matching config key: let the metadata
    # produce a value (default / autofill callback) from scratch.
    for key in redundant_keys:
        self_config = _autofill_self_config(
            '%s/%s' % (config_path, key),
            key, None, specified[key], **kwargs
        )
        if self_config is not None:
            config[key] = self_config
    # Config keys that have literally-matching metadata entries.
    for key in intersect_keys:
        config[key] = _autofill_self_config(
            '%s/%s' % (config_path, key),
            key, config[key], specified[key],
            **kwargs
        )
    # Config keys without specified metadata fall back to the general
    # ('$...') entries. NOTE(review): if more than one general entry
    # exists, the last iteration wins — presumably at most one is
    # expected here (validation elsewhere rejects multiple generals).
    for key in not_found_keys:
        for general_key, general_value in generals.items():
            config[key] = _autofill_self_config(
                '%s/%s' % (config_path, key),
                key, config[key], general_value,
                **kwargs
            )
    return config
def validate_config_internal(
config, metadata, whole_check, **kwargs
):
_validate_config('', config, metadata, whole_check, **kwargs)
def autofill_config_internal(
config, metadata, **kwargs
):
return _autofill_config('', config, metadata, **kwargs)

View File

@ -30,10 +30,19 @@ RESP_METADATA_FIELDS = [
@database.run_in_session() @database.run_in_session()
def load_metadatas(session): def load_metadatas(session):
load_os_metadatas_internal(session)
load_package_metadatas_internal(session)
def load_os_metadatas_internal(session):
global OS_METADATA_MAPPING global OS_METADATA_MAPPING
global PACKAGE_METADATA_MAPPING logging.info('load os metadatas into memory')
logging.info('load metadatas into memory')
OS_METADATA_MAPPING = metadata_api.get_os_metadatas_internal(session) OS_METADATA_MAPPING = metadata_api.get_os_metadatas_internal(session)
def load_package_metadatas_internal(session):
global PACKAGE_METADATA_MAPPING
logging.info('load package metadatas into memory')
PACKAGE_METADATA_MAPPING = ( PACKAGE_METADATA_MAPPING = (
metadata_api.get_package_metadatas_internal(session) metadata_api.get_package_metadatas_internal(session)
) )
@ -44,48 +53,80 @@ PACKAGE_METADATA_MAPPING = {}
def _validate_config( def _validate_config(
config, id, metadata_mapping, whole_check config, id, id_name, metadata_mapping, whole_check, **kwargs
): ):
if id not in metadata_mapping: if id not in metadata_mapping:
raise exception.InvalidParameter( raise exception.InvalidParameter(
'adapter id %s is not found in metadata mapping' % id '%s id %s is not found in metadata mapping' % (id_name, id)
) )
metadatas = metadata_mapping[id] metadatas = metadata_mapping[id]
metadata_api.validate_config_internal( metadata_api.validate_config_internal(
config, metadatas, whole_check config, metadatas, whole_check, **kwargs
) )
def validate_os_config(config, os_id, whole_check=False): def validate_os_config(
session, config, os_id, whole_check=False, **kwargs
):
if not OS_METADATA_MAPPING:
load_os_metadatas_internal(session)
_validate_config( _validate_config(
config, os_id, OS_METADATA_MAPPING, config, os_id, 'os', OS_METADATA_MAPPING,
whole_check whole_check, session=session, **kwargs
) )
def validate_package_config(config, adapter_id, whole_check=False): def validate_package_config(
session, config, adapter_id, whole_check=False, **kwargs
):
if not PACKAGE_METADATA_MAPPING:
load_package_metadatas_internal(session)
_validate_config( _validate_config(
config, adapter_id, PACKAGE_METADATA_MAPPING, config, adapter_id, 'adapter', PACKAGE_METADATA_MAPPING,
whole_check whole_check, session=session, **kwargs
) )
def _filter_metadata(metadata): def _filter_metadata(metadata, **kwargs):
if not isinstance(metadata, dict): if not isinstance(metadata, dict):
return metadata return metadata
filtered_metadata = {} filtered_metadata = {}
for key, value in metadata.items(): for key, value in metadata.items():
if key == '_self': if key == '_self':
filtered_metadata['_self'] = {}
default_value = value.get('default_value', None)
if default_value is None:
default_callback_params = value.get(
'default_callback_params', {}
)
callback_params = dict(kwargs)
if default_callback_params:
callback_params.update(default_callback_params)
default_callback = value.get('default_callback', None)
if default_callback:
default_value = default_callback(key, **callback_params)
options = value.get('options', None)
if options is None:
options_callback_params = value.get(
'options_callback_params', {}
)
callback_params = dict(kwargs)
if options_callback_params:
callback_params.update(options_callback_params)
options_callback = value.get('options_callback', None)
if options_callback:
options = options_callback(key, **callback_params)
filtered_metadata[key] = { filtered_metadata[key] = {
'name': value['name'], 'name': value['name'],
'description': value.get('description', None), 'description': value.get('description', None),
'default_value': value.get('default_value', None), 'default_value': default_value,
'is_required': value.get( 'is_required': value.get(
'is_required', False), 'is_required', False),
'required_in_whole_config': value.get( 'required_in_whole_config': value.get(
'required_in_whole_config', False), 'required_in_whole_config', False),
'js_validator': value.get('js_validator', None), 'js_validator': value.get('js_validator', None),
'options': value.get('options', []), 'options': options,
'required_in_options': value.get( 'required_in_options': value.get(
'required_in_options', False), 'required_in_options', False),
'field_type': value.get( 'field_type': value.get(
@ -98,13 +139,17 @@ def _filter_metadata(metadata):
return filtered_metadata return filtered_metadata
def get_package_metadata_internal(adapter_id): def get_package_metadata_internal(session, adapter_id):
"""get package metadata internal.""" """get package metadata internal."""
if not PACKAGE_METADATA_MAPPING:
load_package_metadatas_internal(session)
if adapter_id not in PACKAGE_METADATA_MAPPING: if adapter_id not in PACKAGE_METADATA_MAPPING:
raise exception.RecordNotExists( raise exception.RecordNotExists(
'adapter %s does not exist' % adapter_id 'adapter %s does not exist' % adapter_id
) )
return _filter_metadata(PACKAGE_METADATA_MAPPING[adapter_id]) return _filter_metadata(
PACKAGE_METADATA_MAPPING[adapter_id], session=session
)
@utils.supported_filters([]) @utils.supported_filters([])
@ -114,16 +159,22 @@ def get_package_metadata_internal(adapter_id):
) )
@utils.wrap_to_dict(RESP_METADATA_FIELDS) @utils.wrap_to_dict(RESP_METADATA_FIELDS)
def get_package_metadata(session, getter, adapter_id, **kwargs): def get_package_metadata(session, getter, adapter_id, **kwargs):
return {'package_config': get_package_metadata_internal(adapter_id)} return {
'package_config': get_package_metadata_internal(session, adapter_id)
}
def get_os_metadata_internal(os_id): def get_os_metadata_internal(session, os_id):
"""get os metadata internal.""" """get os metadata internal."""
if not OS_METADATA_MAPPING:
load_os_metadatas_internal(session)
if os_id not in OS_METADATA_MAPPING: if os_id not in OS_METADATA_MAPPING:
raise exception.RecordNotExists( raise exception.RecordNotExists(
'os %s does not exist' % os_id 'os %s does not exist' % os_id
) )
return _filter_metadata(OS_METADATA_MAPPING[os_id]) return _filter_metadata(
OS_METADATA_MAPPING[os_id], session=session
)
@utils.supported_filters([]) @utils.supported_filters([])
@ -134,7 +185,7 @@ def get_os_metadata_internal(os_id):
@utils.wrap_to_dict(RESP_METADATA_FIELDS) @utils.wrap_to_dict(RESP_METADATA_FIELDS)
def get_os_metadata(session, getter, os_id, **kwargs): def get_os_metadata(session, getter, os_id, **kwargs):
"""get os metadatas.""" """get os metadatas."""
return {'os_config': get_os_metadata_internal(os_id)} return {'os_config': get_os_metadata_internal(session, os_id)}
@utils.supported_filters([]) @utils.supported_filters([])
@ -145,7 +196,7 @@ def get_os_metadata(session, getter, os_id, **kwargs):
@utils.wrap_to_dict(RESP_METADATA_FIELDS) @utils.wrap_to_dict(RESP_METADATA_FIELDS)
def get_package_os_metadata(session, getter, adapter_id, os_id, **kwargs): def get_package_os_metadata(session, getter, adapter_id, os_id, **kwargs):
from compass.db.api import adapter_holder as adapter_api from compass.db.api import adapter_holder as adapter_api
adapter = adapter_api.get_adapter_internal(adapter_id) adapter = adapter_api.get_adapter_internal(session, adapter_id)
os_ids = [os['os_id'] for os in adapter['supported_oses']] os_ids = [os['os_id'] for os in adapter['supported_oses']]
if os_id not in os_ids: if os_id not in os_ids:
raise exception.InvalidParameter( raise exception.InvalidParameter(
@ -155,9 +206,47 @@ def get_package_os_metadata(session, getter, adapter_id, os_id, **kwargs):
) )
metadatas = {} metadatas = {}
metadatas['os_config'] = get_os_metadata_internal( metadatas['os_config'] = get_os_metadata_internal(
os_id session, os_id
) )
metadatas['package_config'] = get_package_metadata_internal( metadatas['package_config'] = get_package_metadata_internal(
adapter_id session, adapter_id
) )
return metadatas return metadatas
def _autofill_config(
config, id, id_name, metadata_mapping, **kwargs
):
if id not in metadata_mapping:
raise exception.InvalidParameter(
'%s id %s is not found in metadata mapping' % (id_name, id)
)
metadatas = metadata_mapping[id]
logging.debug(
'auto fill %s config %s by metadata %s',
id_name, config, metadatas
)
return metadata_api.autofill_config_internal(
config, metadatas, **kwargs
)
def autofill_os_config(
session, config, os_id, **kwargs
):
if not OS_METADATA_MAPPING:
load_os_metadatas_internal(session)
return _autofill_config(
config, os_id, 'os', OS_METADATA_MAPPING, session=session, **kwargs
)
def autofill_package_config(
session, config, adapter_id, **kwargs
):
if not PACKAGE_METADATA_MAPPING:
load_package_metadatas_internal(session)
return _autofill_config(
config, adapter_id, 'adapter', PACKAGE_METADATA_MAPPING,
session=session, **kwargs
)

View File

@ -434,11 +434,15 @@ def output_validates(**kwargs_validators):
def get_db_object(session, table, exception_when_missing=True, **kwargs): def get_db_object(session, table, exception_when_missing=True, **kwargs):
"""Get db object.""" """Get db object."""
with session.begin(subtransactions=True): with session.begin(subtransactions=True):
logging.debug('get db object %s from table %s', logging.debug(
kwargs, table.__name__) 'session %s get db object %s from table %s',
session, kwargs, table.__name__)
db_object = model_filter( db_object = model_filter(
model_query(session, table), table, **kwargs model_query(session, table), table, **kwargs
).first() ).first()
logging.debug(
'session %s db object %s added', session, db_object
)
if db_object: if db_object:
return db_object return db_object
@ -456,8 +460,9 @@ def add_db_object(session, table, exception_when_existing=True,
*args, **kwargs): *args, **kwargs):
"""Create db object.""" """Create db object."""
with session.begin(subtransactions=True): with session.begin(subtransactions=True):
logging.debug('add object %s attributes %s to table %s', logging.debug(
args, kwargs, table.__name__) 'session %s add object %s attributes %s to table %s',
session, args, kwargs, table.__name__)
argspec = inspect.getargspec(table.__init__) argspec = inspect.getargspec(table.__init__)
arg_names = argspec.args[1:] arg_names = argspec.args[1:]
arg_defaults = argspec.defaults arg_defaults = argspec.defaults
@ -494,66 +499,97 @@ def add_db_object(session, table, exception_when_existing=True,
session.flush() session.flush()
db_object.initialize() db_object.initialize()
db_object.validate() db_object.validate()
logging.debug(
'session %s db object %s added', session, db_object
)
return db_object return db_object
def list_db_objects(session, table, **filters): def list_db_objects(session, table, **filters):
"""List db objects.""" """List db objects."""
with session.begin(subtransactions=True): with session.begin(subtransactions=True):
logging.debug('list db objects by filters %s in table %s', logging.debug(
filters, table.__name__) 'session %s list db objects by filters %s in table %s',
return model_filter( session, filters, table.__name__
)
db_objects = model_filter(
model_query(session, table), table, **filters model_query(session, table), table, **filters
).all() ).all()
logging.debug(
'session %s got listed db objects: %s',
session, db_objects
)
return db_objects
def del_db_objects(session, table, **filters): def del_db_objects(session, table, **filters):
"""delete db objects.""" """delete db objects."""
with session.begin(subtransactions=True): with session.begin(subtransactions=True):
logging.debug('delete db objects by filters %s in table %s', logging.debug(
filters, table.__name__) 'session %s delete db objects by filters %s in table %s',
session, filters, table.__name__
)
query = model_filter( query = model_filter(
model_query(session, table), table, **filters model_query(session, table), table, **filters
) )
db_objects = query.all() db_objects = query.all()
query.delete(synchronize_session=False) query.delete(synchronize_session=False)
logging.debug(
'session %s db objects %s deleted', session, db_objects
)
return db_objects return db_objects
def update_db_objects(session, table, **filters): def update_db_objects(session, table, **filters):
"""Update db objects.""" """Update db objects."""
with session.begin(subtransactions=True): with session.begin(subtransactions=True):
logging.debug('update db objects by filters %s in table %s', logging.debug(
filters, table.__name__) 'session %s update db objects by filters %s in table %s',
query = model_filter( session, filters, table.__name__)
db_objects = model_filter(
model_query(session, table), table, **filters model_query(session, table), table, **filters
) ).all()
db_objects = query.all()
for db_object in db_objects: for db_object in db_objects:
logging.debug('update db object %s', db_object) logging.debug('update db object %s', db_object)
session.flush()
db_object.update() db_object.update()
db_object.validate() db_object.validate()
logging.debug(
'session %s db objects %s updated', session, db_objects
)
return db_objects return db_objects
def update_db_object(session, db_object, **kwargs): def update_db_object(session, db_object, **kwargs):
"""Update db object.""" """Update db object."""
with session.begin(subtransactions=True): with session.begin(subtransactions=True):
logging.debug('update db object %s by value %s', logging.debug(
db_object, kwargs) 'session %s update db object %s by value %s',
session, db_object, kwargs
)
for key, value in kwargs.items(): for key, value in kwargs.items():
setattr(db_object, key, value) setattr(db_object, key, value)
session.flush() session.flush()
db_object.update() db_object.update()
db_object.validate() db_object.validate()
logging.debug(
'session %s db object %s updated', session, db_object
)
return db_object return db_object
def del_db_object(session, db_object): def del_db_object(session, db_object):
"""Delete db object.""" """Delete db object."""
with session.begin(subtransactions=True): with session.begin(subtransactions=True):
logging.debug('delete db object %s', db_object) logging.debug(
'session %s delete db object %s',
session, db_object
)
session.delete(db_object) session.delete(db_object)
logging.debug(
'session %s db object %s deleted',
session, db_object
)
return db_object return db_object

171
compass/db/callback.py Normal file
View File

@ -0,0 +1,171 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metadata Callback methods."""
import netaddr
import random
import re
import socket
from compass.db import exception
from compass.utils import setting_wrapper as setting
from compass.utils import util
CALLBACK_GLOBALS = globals()
CALLBACK_LOCALS = locals()
# Load extra callback plugins from the configured callback directory,
# evaluating them with this module's namespace as their environment.
CALLBACK_CONFIGS = util.load_configs(
    setting.CALLBACK_DIR,
    config_name_suffix='.py',
    env_globals=CALLBACK_GLOBALS,
    env_locals=CALLBACK_LOCALS
)
# Merge every name the plugin configs define into this module's
# namespace so metadata configs can reference them by name.
for callback_config in CALLBACK_CONFIGS:
    CALLBACK_LOCALS.update(callback_config)
def default_proxy(name, **kwargs):
    # Default proxy URL taken from the global compass settings.
    return setting.COMPASS_SUPPORTED_PROXY
def proxy_options(name, **kwargs):
    # Only the configured proxy is offered as an option.
    return [setting.COMPASS_SUPPORTED_PROXY]
def default_noproxy(name, **kwargs):
    # Default no-proxy hosts from the global compass settings.
    return setting.COMPASS_SUPPORTED_DEFAULT_NOPROXY
def noproxy_options(name, **kwargs):
    # Returned as-is (presumably already a list) — unlike proxy_options,
    # no list wrapping; TODO confirm the setting's type.
    return setting.COMPASS_SUPPORTED_DEFAULT_NOPROXY
def default_ntp_server(name, **kwargs):
    # Default NTP server from the global compass settings.
    return setting.COMPASS_SUPPORTED_NTP_SERVER
def ntp_server_options(name, **kwargs):
    # Returned as-is, not wrapped in a list.
    return setting.COMPASS_SUPPORTED_NTP_SERVER
def default_dns_servers(name, **kwargs):
    # Default DNS servers from the global compass settings.
    return setting.COMPASS_SUPPORTED_DNS_SERVERS
def dns_servers_options(name, **kwargs):
    # The configured DNS servers are the only options.
    return setting.COMPASS_SUPPORTED_DNS_SERVERS
def default_domain(name, **kwargs):
    # First configured domain is the default; None when none configured.
    if setting.COMPASS_SUPPORTED_DOMAINS:
        return setting.COMPASS_SUPPORTED_DOMAINS[0]
    else:
        return None
def domain_options(name, **kwargs):
    # All configured domains are selectable.
    return setting.COMPASS_SUPPORTED_DOMAINS
def default_search_path(name, **kwargs):
    # DNS search path defaults to the configured domains.
    return setting.COMPASS_SUPPORTED_DOMAINS
def search_path_options(name, **kwargs):
    # Search-path options mirror the configured domains.
    return setting.COMPASS_SUPPORTED_DOMAINS
def default_gateway(name, **kwargs):
    """Default callback: the default gateway Compass supports."""
    gateway = setting.COMPASS_SUPPORTED_DEFAULT_GATEWAY
    return gateway
def default_gateway_options(name, **kwargs):
    """Options callback: the supported default gateway as a one-element list."""
    gateway = setting.COMPASS_SUPPORTED_DEFAULT_GATEWAY
    return [gateway]
def default_localrepo(name, **kwargs):
    """Default callback: the local package repository URL."""
    repo_url = setting.COMPASS_SUPPORTED_LOCAL_REPO
    return repo_url
def default_localrepo_options(name, **kwargs):
    """Options callback: the local repository URL as a one-element list."""
    repo_url = setting.COMPASS_SUPPORTED_LOCAL_REPO
    return [repo_url]
def autofill_callback_default(name, config, **kwargs):
    """Autofill callback: fill an unset config with ``default_value``.

    A value already present in ``config`` is returned untouched. An unset
    config is only filled when at least one of the declared
    ``autofill_types`` appears among the callback kwargs and a
    ``default_value`` kwarg was supplied; otherwise None is returned.
    """
    if config is not None:
        return config
    autofill_types = kwargs.get('autofill_types')
    if not autofill_types or not (set(autofill_types) & set(kwargs)):
        return None
    return kwargs.get('default_value')
def autofill_callback_random_option(name, config, **kwargs):
    """Autofill callback: fill an unset config with a random option.

    A value already present in ``config`` is returned untouched. An unset
    config is only filled when at least one of the declared
    ``autofill_types`` appears among the callback kwargs and a non-empty
    ``options`` kwarg was supplied; otherwise None is returned.
    """
    if config is not None:
        return config
    autofill_types = kwargs.get('autofill_types')
    if not autofill_types or not (set(autofill_types) & set(kwargs)):
        return None
    candidates = kwargs.get('options')
    if not candidates:
        return None
    return random.choice(candidates)
def autofill_network_mapping(name, config, **kwargs):
    """Autofill callback: resolve the subnet of a network-mapping config.

    A bare interface-name string is first normalized to a dict with
    ``interface``/``subnet`` keys. The subnet is then looked up from the
    host networks of the cluster (``kwargs['cluster']``) or of a single
    clusterhost (``kwargs['clusterhost']``) by matching the interface
    name, and filled into the config when the config does not carry one.

    Raises exception.InvalidParameter when the interface is not found on
    any host, or when the config's subnet disagrees with the hosts'.
    """
    if not config:
        return config
    # Shorthand form: a plain interface name expands to a mapping dict
    # with the subnet left to be resolved below.
    if isinstance(config, basestring):
        config = {
            'interface': config,
            'subnet': None
        }
    # Anything other than a dict (or a dict without 'interface') is
    # passed through untouched.
    if not isinstance(config, dict):
        return config
    if 'interface' not in config:
        return config
    subnet = None
    interface = config['interface']
    if 'cluster' in kwargs:
        # Scan every host in the cluster; NOTE(review): no break here, so
        # with multiple matching hosts the last one's subnet wins —
        # presumably the hosts are expected to agree; verify against callers.
        cluster = kwargs['cluster']
        for clusterhost in cluster.clusterhosts:
            host = clusterhost.host
            for host_network in host.host_networks:
                if host_network.interface == interface:
                    subnet = host_network.subnet.subnet
    elif 'clusterhost' in kwargs:
        # Single-host case: scan only that host's networks.
        clusterhost = kwargs['clusterhost']
        host = clusterhost.host
        for host_network in host.host_networks:
            if host_network.interface == interface:
                subnet = host_network.subnet.subnet
    if not subnet:
        raise exception.InvalidParameter(
            'interface %s not found in host(s)' % interface
        )
    if 'subnet' not in config or not config['subnet']:
        # Fill in the subnet discovered from the hosts.
        config['subnet'] = subnet
    else:
        # A subnet explicitly set in the config must match the hosts'.
        if config['subnet'] != subnet:
            raise exception.InvalidParameter(
                'subnet %s in config is not equal to subnet %s in hosts' % (
                    config['subnet'], subnet
                )
            )
    return config

View File

@ -37,8 +37,9 @@ from sqlalchemy import Text
from sqlalchemy.types import TypeDecorator from sqlalchemy.types import TypeDecorator
from sqlalchemy import UniqueConstraint from sqlalchemy import UniqueConstraint
from compass.db import callback as metadata_callback
from compass.db import exception from compass.db import exception
from compass.db import validator from compass.db import validator as metadata_validator
from compass.utils import util from compass.utils import util
@ -126,10 +127,22 @@ class MetadataMixin(HelperMixin):
is_required = Column(Boolean, default=False) is_required = Column(Boolean, default=False)
required_in_whole_config = Column(Boolean, default=False) required_in_whole_config = Column(Boolean, default=False)
mapping_to = Column(String(80), default='') mapping_to = Column(String(80), default='')
validator_data = Column('validator', Text) _validator = Column('validator', Text)
js_validator = Column(Text) js_validator = Column(Text)
default_value = Column(JSONEncoded) default_value = Column(JSONEncoded)
options = Column(JSONEncoded, default=[]) _default_callback = Column('default_callback', Text)
default_callback_params = Column(
'default_callback_params', JSONEncoded, default={}
)
options = Column(JSONEncoded)
_options_callback = Column('options_callback', Text)
options_callback_params = Column(
'options_callback_params', JSONEncoded, default={}
)
_autofill_callback = Column('autofill_callback', Text)
autofill_callback_params = Column(
'autofill_callback_params', JSONEncoded, default={}
)
required_in_options = Column(Boolean, default=False) required_in_options = Column(Boolean, default=False)
def initialize(self): def initialize(self):
@ -138,36 +151,125 @@ class MetadataMixin(HelperMixin):
self.display_name = self.name self.display_name = self.name
super(MetadataMixin, self).initialize() super(MetadataMixin, self).initialize()
@property def validate(self):
def validator(self): super(MetadataMixin, self).validate()
if not self.name: if not self.name:
raise exception.InvalidParamter( raise exception.InvalidParamter(
'name is not set in os metadata %s' % self.id 'name is not set in os metadata %s' % self.id
) )
if not self.validator_data:
@property
def validator(self):
if not self._validator:
return None return None
func = eval( func = eval(
self.validator_data, self._validator,
validator.VALIDATOR_GLOBALS, metadata_validator.VALIDATOR_GLOBALS,
validator.VALIDATOR_LOCALS metadata_validator.VALIDATOR_LOCALS
) )
if not callable(func): if not callable(func):
raise Exception( raise Exception(
'%s is not callable' % self.validator_data 'validator %s is not callable' % self._validator
) )
return func return func
@validator.setter @validator.setter
def validator(self, value): def validator(self, value):
if not value: if not value:
self.validator_data = None self._validator = None
elif isinstance(value, basestring): elif isinstance(value, basestring):
self.validator_data = value self._validator = value
elif callable(value): elif callable(value):
self.validator_data = value.func_name self._validator = value.func_name
else: else:
raise Exception( raise Exception(
'%s is not callable' % value 'validator %s is not callable' % value
)
@property
def default_callback(self):
if not self._default_callback:
return None
func = eval(
self._default_callback,
metadata_callback.CALLBACK_GLOBALS,
metadata_callback.CALLBACK_LOCALS
)
if not callable(func):
raise Exception(
'default callback %s is not callable' % self._default_callback
)
return func
@default_callback.setter
def default_callback(self, value):
if not value:
self._default_callback = None
elif isinstance(value, basestring):
self._default_callback = value
elif callable(value):
self._default_callback = value.func_name
else:
raise Exception(
'default callback %s is not callable' % value
)
@property
def options_callback(self):
if not self._options_callback:
return None
func = eval(
self._options_callback,
metadata_callback.CALLBACK_GLOBALS,
metadata_callback.CALLBACK_LOCALS
)
if not callable(func):
raise Exception(
'options callback %s is not callable' % self._options_callback
)
return func
@options_callback.setter
def options_callback(self, value):
if not value:
self._options_callback = None
elif isinstance(value, basestring):
self._options_callback = value
elif callable(value):
self._options_callback = value.func_name
else:
raise Exception(
'options callback %s is not callable' % value
)
@property
def autofill_callback(self):
if not self._autofill_callback:
return None
func = eval(
self._autofill_callback,
metadata_callback.CALLBACK_GLOBALS,
metadata_callback.CALLBACK_LOCALS
)
if not callable(func):
raise Exception(
'autofill callback %s is not callable' % (
self._autofill_callback
)
)
return func
@autofill_callback.setter
def autofill_callback(self, value):
if not value:
self._autofill_callback = None
elif isinstance(value, basestring):
self._autofill_callback = value
elif callable(value):
self._autofill_callback = value.func_name
else:
raise Exception(
'autofill callback %s is not callable' % value
) )
def to_dict(self): def to_dict(self):
@ -180,8 +282,16 @@ class MetadataMixin(HelperMixin):
self_dict_info.update(super(MetadataMixin, self).to_dict()) self_dict_info.update(super(MetadataMixin, self).to_dict())
validator = self.validator validator = self.validator
if validator: if validator:
self_dict_info['validator_data'] = self.validator_data
self_dict_info['validator'] = validator self_dict_info['validator'] = validator
default_callback = self.default_callback
if default_callback:
self_dict_info['default_callback'] = default_callback
options_callback = self.options_callback
if options_callback:
self_dict_info['options_callback'] = options_callback
autofill_callback = self.autofill_callback
if autofill_callback:
self_dict_info['autofill_callback'] = autofill_callback
js_validator = self.js_validator js_validator = self.js_validator
if js_validator: if js_validator:
self_dict_info['js_validator'] = js_validator self_dict_info['js_validator'] = js_validator
@ -201,7 +311,10 @@ class FieldMixin(HelperMixin):
field = Column(String(80), unique=True) field = Column(String(80), unique=True)
field_type_data = Column( field_type_data = Column(
'field_type', 'field_type',
Enum('basestring', 'int', 'float', 'list', 'bool'), Enum(
'basestring', 'int', 'float', 'list', 'bool',
'dict', 'object'
),
ColumnDefault('basestring') ColumnDefault('basestring')
) )
display_type = Column( display_type = Column(
@ -212,7 +325,7 @@ class FieldMixin(HelperMixin):
), ),
ColumnDefault('text') ColumnDefault('text')
) )
validator_data = Column('validator', Text) _validator = Column('validator', Text)
js_validator = Column(Text) js_validator = Column(Text)
description = Column(Text) description = Column(Text)
@ -242,27 +355,27 @@ class FieldMixin(HelperMixin):
@property @property
def validator(self): def validator(self):
if not self.validator_data: if not self._validator:
return None return None
func = eval( func = eval(
self.validator_data, self._validator,
validator.VALIDATOR_GLOBALS, metadata_validator.VALIDATOR_GLOBALS,
validator.VALIDATOR_LOCALS metadata_validator.VALIDATOR_LOCALS
) )
if not callable(func): if not callable(func):
raise Exception( raise Exception(
'%s is not callable' % self.validator_data '%s is not callable' % self._validator
) )
return func return func
@validator.setter @validator.setter
def validator(self, value): def validator(self, value):
if not value: if not value:
self.validator_data = None self._validator = None
elif isinstance(value, basestring): elif isinstance(value, basestring):
self.validator_data = value self._validator = value
elif callable(value): elif callable(value):
self.validator_data = value.func_name self._validator = value.func_name
else: else:
raise Exception( raise Exception(
'%s is not callable' % value '%s is not callable' % value
@ -561,9 +674,6 @@ class ClusterHost(BASE, TimestampMixin, HelperMixin):
@patched_package_config.setter @patched_package_config.setter
def patched_package_config(self, value): def patched_package_config(self, value):
package_config = util.merge_dict(dict(self.package_config), value) package_config = util.merge_dict(dict(self.package_config), value)
if 'roles' in package_config:
self.patched_roles = package_config['roles']
del package_config['roles']
self.package_config = package_config self.package_config = package_config
self.config_validated = False self.config_validated = False
@ -575,9 +685,6 @@ class ClusterHost(BASE, TimestampMixin, HelperMixin):
def put_package_config(self, value): def put_package_config(self, value):
package_config = dict(self.package_config) package_config = dict(self.package_config)
package_config.update(value) package_config.update(value)
if 'roles' in package_config:
self.roles = package_config['roles']
del package_config['roles']
self.package_config = package_config self.package_config = package_config
self.config_validated = False self.config_validated = False
@ -922,10 +1029,15 @@ class Host(BASE, TimestampMixin, HelperMixin):
dict_info = self.machine.to_dict() dict_info = self.machine.to_dict()
dict_info.update(super(Host, self).to_dict()) dict_info.update(super(Host, self).to_dict())
state_dict = self.state_dict() state_dict = self.state_dict()
ip = None
for host_network in self.host_networks:
if host_network.is_mgmt:
ip = host_network.ip
dict_info.update({ dict_info.update({
'machine_id': self.machine.id, 'machine_id': self.machine.id,
'os_installed': self.os_installed, 'os_installed': self.os_installed,
'hostname': self.name, 'hostname': self.name,
'ip': ip,
'networks': [ 'networks': [
host_network.to_dict() host_network.to_dict()
for host_network in self.host_networks for host_network in self.host_networks
@ -1164,14 +1276,6 @@ class Cluster(BASE, TimestampMixin, HelperMixin):
else: else:
flavor_adapter_id = flavor.adapter_id flavor_adapter_id = flavor.adapter_id
adapter_id = self.adapter_id adapter_id = self.adapter_id
logging.info(
'flavor adapter type %s value %s',
type(flavor_adapter_id), flavor_adapter_id
)
logging.info(
'adapter type %s value %s',
type(adapter_id), adapter_id
)
if flavor_adapter_id != adapter_id: if flavor_adapter_id != adapter_id:
raise exception.InvalidParameter( raise exception.InvalidParameter(
'flavor adapter id %s does not match adapter id %s' % ( 'flavor adapter id %s does not match adapter id %s' % (
@ -1525,6 +1629,8 @@ class SwitchMachine(BASE, HelperMixin, TimestampMixin):
denied = filter_type != 'allow' denied = filter_type != 'allow'
unmatched_allowed = denied unmatched_allowed = denied
if 'ports' in port_filter: if 'ports' in port_filter:
if 'all' in port_filter['ports']:
return denied
if port in port_filter['ports']: if port in port_filter['ports']:
return denied return denied
if port_match: if port_match:
@ -1870,7 +1976,8 @@ class OSConfigMetadata(BASE, MetadataMixin):
UniqueConstraint('path', 'os_id', name='constraint'), UniqueConstraint('path', 'os_id', name='constraint'),
) )
def __init__(self, path, **kwargs): def __init__(self, os_id, path, **kwargs):
self.os_id = os_id
self.path = path self.path = path
super(OSConfigMetadata, self).__init__(**kwargs) super(OSConfigMetadata, self).__init__(**kwargs)
@ -1982,7 +2089,7 @@ class OperatingSystem(BASE, HelperMixin):
if self.parent: if self.parent:
dict_info.update(self.parent.metadata_dict()) dict_info.update(self.parent.metadata_dict())
for metadata in self.root_metadatas: for metadata in self.root_metadatas:
dict_info.update(metadata.to_dict()) util.merge_dict(dict_info, metadata.to_dict())
return dict_info return dict_info
@property @property
@ -2206,8 +2313,9 @@ class PackageConfigMetadata(BASE, MetadataMixin):
) )
def __init__( def __init__(
self, path, **kwargs self, adapter_id, path, **kwargs
): ):
self.adapter_id = adapter_id
self.path = path self.path = path
super(PackageConfigMetadata, self).__init__(**kwargs) super(PackageConfigMetadata, self).__init__(**kwargs)
@ -2342,7 +2450,7 @@ class Adapter(BASE, HelperMixin):
if self.parent: if self.parent:
dict_info.update(self.parent.metadata_dict()) dict_info.update(self.parent.metadata_dict())
for metadata in self.root_metadatas: for metadata in self.root_metadatas:
dict_info.update(metadata.to_dict()) util.merge_dict(dict_info, metadata.to_dict())
return dict_info return dict_info
@property @property

View File

@ -21,7 +21,7 @@ from compass.utils import setting_wrapper as setting
from compass.utils import util from compass.utils import util
def is_valid_ip(name, ip_addr): def is_valid_ip(name, ip_addr, **kwargs):
"""Valid the format of an IP address.""" """Valid the format of an IP address."""
try: try:
netaddr.IPAddress(ip_addr) netaddr.IPAddress(ip_addr)
@ -30,7 +30,7 @@ def is_valid_ip(name, ip_addr):
return True return True
def is_valid_network(name, ip_network): def is_valid_network(name, ip_network, **kwargs):
"""Valid the format of an Ip network.""" """Valid the format of an Ip network."""
try: try:
netaddr.IPNetwork(ip_network) netaddr.IPNetwork(ip_network)
@ -39,7 +39,7 @@ def is_valid_network(name, ip_network):
return False return False
def is_valid_netmask(name, ip_addr): def is_valid_netmask(name, ip_addr, **kwargs):
"""Valid the format of a netmask.""" """Valid the format of a netmask."""
if not is_valid_ip(ip_addr): if not is_valid_ip(ip_addr):
return False return False
@ -50,7 +50,7 @@ def is_valid_netmask(name, ip_addr):
return False return False
def is_valid_gateway(name, ip_addr): def is_valid_gateway(name, ip_addr, **kwargs):
"""Valid the format of gateway.""" """Valid the format of gateway."""
if not is_valid_ip(ip_addr): if not is_valid_ip(ip_addr):
return False return False
@ -61,7 +61,7 @@ def is_valid_gateway(name, ip_addr):
return False return False
def is_valid_dns(name, dns): def is_valid_dns(name, dns, **kwargs):
"""Valid the format of DNS.""" """Valid the format of DNS."""
if is_valid_ip(dns): if is_valid_ip(dns):
return True return True
@ -72,17 +72,17 @@ def is_valid_dns(name, dns):
return True return True
def is_valid_username(name, username): def is_valid_username(name, username, **kwargs):
"""Valid the format of username.""" """Valid the format of username."""
return bool(username) return bool(username)
def is_valid_password(name, password): def is_valid_password(name, password, **kwargs):
"""Valid the format of password.""" """Valid the format of password."""
return bool(password) return bool(password)
def is_valid_partition(name, partition): def is_valid_partition(name, partition, **kwargs):
"""Valid the format of partition name.""" """Valid the format of partition name."""
if name != 'swap' and not name.startswith('/'): if name != 'swap' and not name.startswith('/'):
return False return False
@ -91,17 +91,17 @@ def is_valid_partition(name, partition):
return True return True
def is_valid_percentage(name, percentage): def is_valid_percentage(name, percentage, **kwargs):
"""Valid the percentage.""" """Valid the percentage."""
return 0 <= percentage <= 100 return 0 <= percentage <= 100
def is_valid_port(name, port): def is_valid_port(name, port, **kwargs):
"""Valid the format of port.""" """Valid the format of port."""
return 0 < port < 65536 return 0 < port < 65536
def is_valid_size(name, size): def is_valid_size(name, size, **kwargs):
if re.match(r'(\d+)(K|M|G|T)?', size): if re.match(r'(\d+)(K|M|G|T)?', size):
return True return True
return False return False

View File

@ -0,0 +1,2 @@
NAME = 'anytype'
FIELD_TYPE = object

View File

@ -46,12 +46,25 @@ METADATA = {
}, },
'network_mapping': { 'network_mapping': {
'_self': { '_self': {
'required_in_whole_config': True 'required_in_whole_config': True,
}, },
'$interface_type': { '$interface_type': {
'_self': { '_self': {
'is_required': True, 'is_required': True,
'field': 'general' 'field': 'anytype',
'autofill_callback': autofill_network_mapping,
},
'interface': {
'_self': {
'is_required': True,
'field': 'general',
}
},
'subnet': {
'_self': {
'is_required': False,
'field': 'general'
}
} }
} }
} }

View File

@ -122,7 +122,10 @@ class ClusterTestCase(unittest2.TestCase):
} }
}, },
'network_mapping': { 'network_mapping': {
'$interface_type': 'eth0' '$interface_type': {
'interface': 'eth0',
'subnet': '10.145.88.0/23'
}
} }
} }

View File

@ -53,6 +53,7 @@ class MetadataTestCase(unittest2.TestCase):
database.init('sqlite://') database.init('sqlite://')
database.create_db() database.create_db()
adapter.load_adapters() adapter.load_adapters()
metadata.load_metadatas()
#Get a os_id and adapter_id #Get a os_id and adapter_id
self.user_object = ( self.user_object = (
@ -82,53 +83,56 @@ class MetadataTestCase(unittest2.TestCase):
class TestGetPackageMetadata(MetadataTestCase): class TestGetPackageMetadata(MetadataTestCase):
def setUp(self): def setUp(self):
super(TestGetPackageMetadata, self).setUp() self.backup_load_configs = util.load_configs
mock_config = mock.Mock()
self.backup_package_configs = util.load_configs def mock_load_configs(config_dir, *args, **kwargs):
util.load_configs = mock_config if config_dir != setting.PACKAGE_METADATA_DIR:
configs = [{ return self.backup_load_configs(
'ADAPTER': 'openstack', config_dir, *args, **kwargs
'METADATA': { )
'security': { config = {
'_self': { 'ADAPTER': 'openstack',
'required_in_whole_config': True 'METADATA': {
}, 'security': {
'service_credentials': {
'_self': { '_self': {
'mapping_to': 'service_credentials' 'required_in_whole_config': True
}, },
'$service': { 'service_credentials': {
'username': { '_self': {
'_self': { 'mapping_to': 'service_credentials'
'is_required': True,
'field': 'username',
'mapping_to': 'username'
}
}, },
'password': { '$service': {
'_self': { 'username': {
'is_required': True, '_self': {
'field': 'password', 'is_required': True,
'mapping_to': 'password' 'field': 'username',
'mapping_to': 'username'
}
},
'password': {
'_self': {
'is_required': True,
'field': 'password',
'mapping_to': 'password'
}
} }
} }
} }
} },
}, 'test_package_metadata': {
'test_package_metadata': { '_self': {
'_self': { 'dummy': 'fake'
'dummy': 'fake' }
} }
} }
} }
}] return [config]
util.load_configs.return_value = configs
with database.session() as session: util.load_configs = mock.Mock(side_effect=mock_load_configs)
metadata_api.add_package_metadata_internal(session) super(TestGetPackageMetadata, self).setUp()
metadata.load_metadatas()
def tearDown(self): def tearDown(self):
util.load_configs = self.backup_package_configs util.load_configs = self.backup_load_configs
super(TestGetPackageMetadata, self).tearDown() super(TestGetPackageMetadata, self).tearDown()
def test_get_package_metadata(self): def test_get_package_metadata(self):
@ -155,56 +159,59 @@ class TestGetPackageMetadata(MetadataTestCase):
class TestGetOsMetadata(MetadataTestCase): class TestGetOsMetadata(MetadataTestCase):
def setUp(self): def setUp(self):
super(TestGetOsMetadata, self).setUp() self.backup_load_configs = util.load_configs
mock_config = mock.Mock()
self.backup_os_configs = util.load_configs def mock_load_configs(config_dir, *args, **kwargs):
util.load_configs = mock_config if config_dir != setting.OS_METADATA_DIR:
configs = [{ return self.backup_load_configs(
'OS': 'general', config_dir, *args, **kwargs
'METADATA': { )
'general': { config = {
'_self': { 'OS': 'general',
'required_in_whole_config': True 'METADATA': {
}, 'general': {
'language': {
'_self': { '_self': {
'field': 'general', 'required_in_whole_config': True
'default_value': 'EN', },
'options': ['EN', 'CN'], 'language': {
'mapping_to': 'language' '_self': {
'field': 'general',
'default_value': 'EN',
'options': ['EN', 'CN'],
'mapping_to': 'language'
}
},
'timezone': {
'_self': {
'field': 'general',
'default_value': 'UTC',
'options': [
'America/New_York', 'America/Chicago',
'America/Los_Angeles', 'Asia/Shanghai',
'Asia/Tokyo', 'Europe/Paris',
'Europe/London', 'Europe/Moscow',
'Europe/Rome', 'Europe/Madrid',
'Europe/Berlin', 'UTC'
],
'mapping_to': 'timezone'
}
} }
}, },
'timezone': { 'test_os_metadata': {
'_self': { '_self': {
'field': 'general', 'test': 'dummy'
'default_value': 'UTC',
'options': [
'America/New_York', 'America/Chicago',
'America/Los_Angeles', 'Asia/Shanghai',
'Asia/Tokyo', 'Europe/Paris',
'Europe/London', 'Europe/Moscow',
'Europe/Rome', 'Europe/Madrid',
'Europe/Berlin', 'UTC'
],
'mapping_to': 'timezone'
} }
} }
},
'test_os_metadata': {
'_self': {
'test': 'dummy'
}
} }
} }
}] return [config]
util.load_configs.return_value = configs
with database.session() as session: util.load_configs = mock.Mock(side_effect=mock_load_configs)
metadata_api.add_os_metadata_internal(session) super(TestGetOsMetadata, self).setUp()
metadata.load_metadatas()
def tearDown(self): def tearDown(self):
util.load_configs = self.backup_load_configs
super(TestGetOsMetadata, self).tearDown() super(TestGetOsMetadata, self).tearDown()
util.load_configs = self.backup_os_configs
def test_get_os_metadata(self): def test_get_os_metadata(self):
"""Test get os metadata.""" """Test get os metadata."""

View File

@ -52,6 +52,9 @@ LOGLEVEL_MAPPING = {
'critical': logging.CRITICAL, 'critical': logging.CRITICAL,
} }
logging.addLevelName(LOGLEVEL_MAPPING['fine'], 'fine')
logging.addLevelName(LOGLEVEL_MAPPING['finest'], 'finest')
# disable logging when logsetting.init not called # disable logging when logsetting.init not called
logging.getLogger().setLevel(logging.CRITICAL) logging.getLogger().setLevel(logging.CRITICAL)

View File

@ -59,6 +59,14 @@ SWITCHES_DEFAULT_FILTERS = []
DEFAULT_SWITCH_IP = '0.0.0.0' DEFAULT_SWITCH_IP = '0.0.0.0'
DEFAULT_SWITCH_PORT = 0 DEFAULT_SWITCH_PORT = 0
COMPASS_SUPPORTED_PROXY = 'http://127.0.0.1:3128'
COMPASS_SUPPORTED_DEFAULT_NOPROXY = ['127.0.0.1']
COMPASS_SUPPORTED_NTP_SERVER = '127.0.0.1'
COMPASS_SUPPORTED_DNS_SERVERS = ['127.0.0.1']
COMPASS_SUPPORTED_DOMAINS = []
COMPASS_SUPPORTED_DEFAULT_GATEWAY = '127.0.0.1'
COMPASS_SUPPORTED_LOCAL_REPO = 'http://127.0.0.1'
# For test chef server. please replace these config info with your own. # For test chef server. please replace these config info with your own.
TEST_CHEF_URL = "https://api.opscode.com/organizations/compasscheftest" TEST_CHEF_URL = "https://api.opscode.com/organizations/compasscheftest"
TEST_CLIENT_KEY_PATH = "/etc/compass/client.pem" TEST_CLIENT_KEY_PATH = "/etc/compass/client.pem"
@ -102,6 +110,9 @@ ADAPTER_FLAVOR_DIR = lazypy.delay(
VALIDATOR_DIR = lazypy.delay( VALIDATOR_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'validator') lambda: os.path.join(CONFIG_DIR, 'validator')
) )
CALLBACK_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'callback')
)
TMPL_DIR = lazypy.delay( TMPL_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'templates') lambda: os.path.join(CONFIG_DIR, 'templates')
) )

View File

@ -1,6 +1,7 @@
NAME = 'ceph_openstack_icehouse' NAME = 'ceph_openstack_icehouse'
DISPLAY_NAME = 'Ceph + OpenStack Icehouse' DISPLAY_NAME = 'Ceph + OpenStack Icehouse'
PARENT = 'openstack' PARENT = 'openstack'
DISTRIBUTED_SYSTEM = 'openstack_ceph'
PACKAGE_INSTALLER = 'chef_installer' PACKAGE_INSTALLER = 'chef_installer'
OS_INSTALLER = 'cobbler' OS_INSTALLER = 'cobbler'
SUPPORTED_OS_PATTERNS = ['(?i)centos.*', '(?i)ubuntu.*'] SUPPORTED_OS_PATTERNS = ['(?i)centos.*', '(?i)ubuntu.*']

View File

@ -0,0 +1,3 @@
NAME ='openstack_ceph'
PARENT = 'general'
DEPLOYABLE = True

View File

@ -30,36 +30,24 @@ METADATA = {
'http_proxy': { 'http_proxy': {
'_self': { '_self': {
'field': 'general', 'field': 'general',
'default_value': 'http://10.145.89.126:3128', 'default_callback': default_proxy,
'options': [ 'options_callback': proxy_options,
'http://10.145.89.126:3128'
],
'mapping_to': 'http_proxy' 'mapping_to': 'http_proxy'
} }
}, },
'https_proxy': { 'https_proxy': {
'_self': { '_self': {
'field': 'general', 'field': 'general',
'default_value': 'http://10.145.89.126:3128', 'default_callback': default_proxy,
'options': [ 'options_callback': proxy_options,
'http://10.145.89.126:3128'
],
'mapping_to': 'https_proxy' 'mapping_to': 'https_proxy'
} }
}, },
'no_proxy': { 'no_proxy': {
'_self': { '_self': {
'field': 'general_list', 'field': 'general_list',
'default_value': [ 'default_callback': default_noproxy,
'127.0.0.1', 'options_callback': noproxy_options,
'xicheng-126',
'10.145.89.126'
],
'options': [
'127.0.0.1',
'xicheng-126',
'10.145.89.126'
],
'mapping_to': 'no_proxy' 'mapping_to': 'no_proxy'
} }
}, },
@ -67,10 +55,8 @@ METADATA = {
'_self': { '_self': {
'is_required': True, 'is_required': True,
'field': 'general', 'field': 'general',
'default_value': '10.145.89.126', 'default_callback': default_ntp_server,
'options': [ 'options_callback': ntp_server_options,
'10.145.89.126'
],
'mapping_to': 'ntp_server' 'mapping_to': 'ntp_server'
} }
}, },
@ -78,12 +64,8 @@ METADATA = {
'_self': { '_self': {
'is_required': True, 'is_required': True,
'field': 'general_list', 'field': 'general_list',
'default_value': [ 'default_callback': default_dns_servers,
'10.145.89.126', 'options_callback': dns_servers_options,
],
'options': [
'10.145.89.126'
],
'mapping_to': 'nameservers' 'mapping_to': 'nameservers'
} }
}, },
@ -91,17 +73,15 @@ METADATA = {
'_self': { '_self': {
'field': 'general', 'field': 'general',
'is_required' : True, 'is_required' : True,
'default_value': ['ods.com'][0], 'default_callback': default_domain,
'options': ['ods.com'], 'options_callback': domain_options,
} }
}, },
'search_path': { 'search_path': {
'_self': { '_self': {
'field': 'general_list', 'field': 'general_list',
'default_value': [ 'default_callback': default_search_path,
'ods.com' 'options_callback': search_path_options,
],
'options': ['ods.com'],
'mapping_to': 'search_path' 'mapping_to': 'search_path'
} }
}, },
@ -109,14 +89,14 @@ METADATA = {
'_self': { '_self': {
'is_required': True, 'is_required': True,
'field': 'ip', 'field': 'ip',
'default_value': '10.145.88.1', 'default_callback': default_gateway,
'mapping_to': 'gateway' 'mapping_to': 'gateway'
} }
}, },
'local_repo': { 'local_repo': {
'_self': { '_self': {
'field': 'general', 'field': 'general',
'default_value': 'http://10.145.89.126/', 'default_callback': default_localrepo,
'mapping_to': 'local_repo' 'mapping_to': 'local_repo'
} }
} }
@ -151,7 +131,8 @@ METADATA = {
}, },
'$partition': { '$partition': {
'_self': { '_self': {
'validator': is_valid_partition 'validator': is_valid_partition,
'mapping_to': '$partition'
}, },
'max_size': { 'max_size': {
'_self': { '_self': {

View File

@ -0,0 +1,2 @@
NAME = 'anytype'
FIELD_TYPE = object

View File

@ -0,0 +1,2 @@
NAME = 'integer'
FIELD_TYPE = int

View File

@ -33,7 +33,7 @@ METADATA = {
'osd_config': { 'osd_config': {
'_self': { '_self': {
'mapping_to': 'osd_config' 'mapping_to': 'osd_config'
} },
'journal_size': { 'journal_size': {
'_self': { '_self': {
'field': 'general', 'field': 'general',
@ -43,7 +43,7 @@ METADATA = {
}, },
'op_threads': { 'op_threads': {
'_self': { '_self': {
'field': 'general', 'field': 'integer',
'default_value': 10, 'default_value': 10,
'mapping_to': 'op_threads' 'mapping_to': 'op_threads'
} }
@ -52,7 +52,7 @@ METADATA = {
"osd_devices": { "osd_devices": {
'_self': { '_self': {
'mapping_to': 'osd_devices' 'mapping_to': 'osd_devices'
} },
'$device': { '$device': {
'_self': { '_self': {
'validator': is_valid_partition 'validator': is_valid_partition

View File

@ -31,7 +31,7 @@ METADATA = {
}, },
'osd_config': { 'osd_config': {
'_self': { '_self': {
} },
'journal_size': { 'journal_size': {
'_self': { '_self': {
'field': 'general', 'field': 'general',
@ -41,7 +41,7 @@ METADATA = {
}, },
'op_threads': { 'op_threads': {
'_self': { '_self': {
'field': 'general', 'field': 'integer',
'default_value': 10, 'default_value': 10,
'mapping_to': 'op_threads' 'mapping_to': 'op_threads'
} }
@ -50,7 +50,7 @@ METADATA = {
"osd_devices": { "osd_devices": {
'_self': { '_self': {
'mapping_to': 'osd_devices' 'mapping_to': 'osd_devices'
} },
'$device': { '$device': {
'_self': { '_self': {
'validator': is_valid_partition 'validator': is_valid_partition

View File

@ -6,9 +6,17 @@ METADATA = {
}, },
'service_credentials': { 'service_credentials': {
'_self': { '_self': {
'mapping_to': 'service_credentials' 'required_in_whole_config': True,
'key_extensions': {
'$service': ['image', 'compute', 'dashboard', 'identity', 'metering', 'rabbitmq', 'volume', 'mysql']
},
'mapping_to': 'service_credentials'
}, },
'$service': { '$service': {
'_self': {
'required_in_whole_config': True,
'mapping_to': '$service'
},
'username': { 'username': {
'_self': { '_self': {
'is_required': True, 'is_required': True,
@ -26,7 +34,17 @@ METADATA = {
} }
}, },
'console_credentials': { 'console_credentials': {
'_self': {
'required_in_whole_config': True,
'key_extensions': {
'$console': ['admin', 'compute', 'dashboard', 'image', 'metering', 'network', 'object-store', 'volume']
}
},
'$console': { '$console': {
'_self': {
'required_in_whole_config': True,
'mapping_to': '$console'
},
'username': { 'username': {
'_self': { '_self': {
'is_required': True, 'is_required': True,
@ -46,12 +64,29 @@ METADATA = {
}, },
'network_mapping': { 'network_mapping': {
'_self': { '_self': {
'required_in_whole_config': True 'required_in_whole_config': True,
'key_extensions': {
'$interface_type': ['management', 'public', 'storage', 'tenant']
}
}, },
'$interface_type': { '$interface_type': {
'_self': { '_self': {
'is_required': True, 'required_in_whole_config': True,
'field': 'general' 'field': 'anytype',
'autofill_callback': autofill_network_mapping,
'mapping_to': '$interface_type'
},
'interface': {
'_self': {
'is_required': True,
'field': 'general',
}
},
'subnet': {
'_self': {
'is_required': False,
'field': 'general'
}
} }
} }
} }

View File

@ -3,16 +3,16 @@ DATABASE_TYPE = 'mysql'
DATABASE_USER = 'root' DATABASE_USER = 'root'
DATABASE_PASSWORD = 'root' DATABASE_PASSWORD = 'root'
DATABASE_SERVER = '127.0.0.1:3306' DATABASE_SERVER = '127.0.0.1:3306'
DATABASE_NAME = 'db' DATABASE_NAME = 'compass'
SQLALCHEMY_DATABASE_URI = '%s://%s:%s@%s/%s' % (DATABASE_TYPE, DATABASE_USER, DATABASE_PASSWORD, DATABASE_SERVER, DATABASE_NAME) SQLALCHEMY_DATABASE_URI = '%s://%s:%s@%s/%s' % (DATABASE_TYPE, DATABASE_USER, DATABASE_PASSWORD, DATABASE_SERVER, DATABASE_NAME)
SQLALCHEMY_DATABASE_POOL_TYPE = 'instant' SQLALCHEMY_DATABASE_POOL_TYPE = 'instant'
INSTALLATION_LOGDIR = { INSTALLATION_LOGDIR = {
'CobblerInstaller': '/var/log/cobbler/anamon', 'CobblerInstaller': '/var/log/cobbler/anamon',
'ChefInstaller': '/var/log/chef' 'ChefInstaller': '/var/log/chef'
} }
DEFAULT_LOGLEVEL = 'debug' DEFAULT_LOGLEVEL = 'info'
DEFAULT_LOGDIR = '/var/log/compass' DEFAULT_LOGDIR = '/var/log/compass'
DEFAULT_LOGINTERVAL = 1 DEFAULT_LOGINTERVAL = 6
DEFAULT_LOGINTERVAL_UNIT = 'h' DEFAULT_LOGINTERVAL_UNIT = 'h'
DEFAULT_LOGFORMAT = '%(asctime)s - %(filename)s - %(lineno)d - %(levelname)s - %(message)s' DEFAULT_LOGFORMAT = '%(asctime)s - %(filename)s - %(lineno)d - %(levelname)s - %(message)s'
WEB_LOGFILE = 'compass.log' WEB_LOGFILE = 'compass.log'
@ -24,3 +24,10 @@ POLLSWITCH_INTERVAL=60
SWITCHES = [ SWITCHES = [
] ]
TMPL_DIR = '/etc/compass/templates' TMPL_DIR = '/etc/compass/templates'
COMPASS_SUPPORTED_PROXY = 'http://$ipaddr:3128'
COMPASS_SUPPORTED_DEFAULT_NOPROXY = ['127.0.0.1','$ipaddr','$hostname']
COMPASS_SUPPORTED_NTP_SERVER = '$ipaddr'
COMPASS_SUPPORTED_DNS_SERVERS = ['$ipaddr']
COMPASS_SUPPORTED_DOMAINS = ['$domains']
COMPASS_SUPPORTED_DEFAULT_GATEWAY = '$gateway'
COMPASS_SUPPORTED_LOCAL_REPO = 'http://$ipaddr'

View File

@ -1,6 +1,19 @@
#!/bin/bash #!/bin/bash
# #
echo "Installing chef"
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
source $DIR/install.conf
if [ -f $DIR/env.conf ]; then
source $DIR/env.conf
else
echo "failed to load environment"
exit 1
fi
source $DIR/install_func.sh
echo "Installing chef related packages"
# create backup dir # create backup dir
sudo mkdir -p /root/backup/chef sudo mkdir -p /root/backup/chef
@ -11,6 +24,8 @@ else
echo "chef-server has already installed" echo "chef-server has already installed"
fi fi
echo "reconfigure chef server"
# configure chef-server # configure chef-server
sudo chef-server-ctl cleanse sudo chef-server-ctl cleanse
mkdir -p /etc/chef-server mkdir -p /etc/chef-server
@ -25,26 +40,7 @@ if [[ "$?" != "0" ]]; then
exit 1 exit 1
fi fi
sudo rm -rf /var/chef echo "configure chef client and knife"
sudo mkdir -p /var/chef/cookbooks/
sudo cp -r $ADAPTERS_HOME/chef/cookbooks/* /var/chef/cookbooks/
if [ $? -ne 0 ]; then
echo "failed to copy cookbooks to /var/chef/cookbooks/"
exit 1
fi
sudo mkdir -p /var/chef/databags/
sudo cp -r $ADAPTERS_HOME/chef/databags/* /var/chef/databags/
if [ $? -ne 0 ]; then
echo "failed to copy databags to /var/chef/databags/"
exit 1
fi
sudo mkdir -p /var/chef/roles/
sudo cp -r $ADAPTERS_HOME/chef/roles/* /var/chef/roles/
if [ $? -ne 0 ]; then
echo "failed to copy roles to /var/chef/roles/"
exit 1
fi
# configure chef client and knife # configure chef client and knife
rpm -q chef rpm -q chef
if [[ "$?" != "0" ]]; then if [[ "$?" != "0" ]]; then
@ -55,7 +51,7 @@ fi
sudo mkdir -p ~/.chef sudo mkdir -p ~/.chef
sudo knife configure -y -i --defaults -r ~/chef-repo -s https://localhost:443 -u $USER --admin-client-name admin --admin-client-key /etc/chef-server/admin.pem --validation-client-name chef-validator --validation-key /etc/chef-server/chef-validator.pem <<EOF sudo knife configure -y -i --defaults -r ~/chef-repo -s https://$IPADDR:443 -u $USER --admin-client-name admin --admin-client-key /etc/chef-server/admin.pem --validation-client-name chef-validator --validation-key /etc/chef-server/chef-validator.pem <<EOF
$CHEF_PASSWORD $CHEF_PASSWORD
EOF EOF
sudo sed -i "/node_name/c\node_name \'admin\'" /$USER/.chef/knife.rb sudo sed -i "/node_name/c\node_name \'admin\'" /$USER/.chef/knife.rb

View File

@ -1,6 +1,17 @@
#!/bin/bash #!/bin/bash
# #
echo "Installing cobbler"
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
source $DIR/install.conf
if [ -f $DIR/env.conf ]; then
source $DIR/env.conf
else
echo "failed to load environment"
exit 1
fi
source $DIR/install_func.sh
echo "Installing cobbler related packages" echo "Installing cobbler related packages"
sudo yum -y install cobbler cobbler-web createrepo mkisofs python-cheetah python-simplejson python-urlgrabber PyYAML Django cman debmirror pykickstart reprepro sudo yum -y install cobbler cobbler-web createrepo mkisofs python-cheetah python-simplejson python-urlgrabber PyYAML Django cman debmirror pykickstart reprepro
if [[ "$?" != "0" ]]; then if [[ "$?" != "0" ]]; then
@ -19,7 +30,6 @@ sudo mkdir -p /root/backup/cobbler
# update httpd conf # update httpd conf
sudo cp -rn /etc/httpd/conf.d /root/backup/cobbler/ sudo cp -rn /etc/httpd/conf.d /root/backup/cobbler/
sudo rm -f /etc/httpd/conf.d/cobbler_web.conf sudo rm -f /etc/httpd/conf.d/cobbler_web.conf
sudo rm -f /etc/httpd/conf.d/ods-server.conf
sudo cp -rf $COMPASSDIR/misc/apache/cobbler_web.conf /etc/httpd/conf.d/cobbler_web.conf sudo cp -rf $COMPASSDIR/misc/apache/cobbler_web.conf /etc/httpd/conf.d/cobbler_web.conf
chmod 644 /etc/httpd/conf.d/cobbler_web.conf chmod 644 /etc/httpd/conf.d/cobbler_web.conf
sudo rm -rf /etc/httpd/conf.d/ssl.conf sudo rm -rf /etc/httpd/conf.d/ssl.conf
@ -36,8 +46,8 @@ sudo cp -rn /etc/cobbler/settings /root/backup/cobbler/
sudo rm -f /etc/cobbler/settings sudo rm -f /etc/cobbler/settings
sudo cp -rf $ADAPTERS_HOME/cobbler/conf/settings /etc/cobbler/settings sudo cp -rf $ADAPTERS_HOME/cobbler/conf/settings /etc/cobbler/settings
sudo sed -i "s/next_server:[ \t]*\$next_server/next_server: $NEXTSERVER/g" /etc/cobbler/settings sudo sed -i "s/next_server:[ \t]*\$next_server/next_server: $NEXTSERVER/g" /etc/cobbler/settings
sudo sed -i "s/server:[ \t]*\$ipaddr/server: $ipaddr/g" /etc/cobbler/settings sudo sed -i "s/server:[ \t]*\$ipaddr/server: $IPADDR/g" /etc/cobbler/settings
sudo sed -i "s/default_name_servers:[ \t]*\['\$ipaddr'\]/default_name_servers: \['$ipaddr'\]/g" /etc/cobbler/settings sudo sed -i "s/default_name_servers:[ \t]*\['\$ipaddr'\]/default_name_servers: \['$IPADDR'\]/g" /etc/cobbler/settings
domains=$(echo $NAMESERVER_DOMAINS | sed "s/,/','/g") domains=$(echo $NAMESERVER_DOMAINS | sed "s/,/','/g")
sudo sed -i "s/manage_forward_zones:[ \t]*\[\]/manage_forward_zones: \['$domains'\]/g" /etc/cobbler/settings sudo sed -i "s/manage_forward_zones:[ \t]*\[\]/manage_forward_zones: \['$domains'\]/g" /etc/cobbler/settings
export cobbler_passwd=$(openssl passwd -1 -salt 'huawei' '123456') export cobbler_passwd=$(openssl passwd -1 -salt 'huawei' '123456')
@ -48,13 +58,15 @@ sudo chmod 644 /etc/cobbler/settings
sudo cp -rn /etc/cobbler/dhcp.template /root/backup/cobbler/ sudo cp -rn /etc/cobbler/dhcp.template /root/backup/cobbler/
sudo rm -f /etc/cobbler/dhcp.template sudo rm -f /etc/cobbler/dhcp.template
sudo cp -rf $ADAPTERS_HOME/cobbler/conf/dhcp.template /etc/cobbler/dhcp.template sudo cp -rf $ADAPTERS_HOME/cobbler/conf/dhcp.template /etc/cobbler/dhcp.template
subnet=$(ipcalc $SUBNET -n |cut -f 2 -d '=') export netaddr=$(ipcalc $IPADDR $NETMASK -n |cut -f 2 -d '=')
sudo sed -i "s/subnet \$subnet netmask \$netmask/subnet $subnet netmask $netmask/g" /etc/cobbler/dhcp.template export netprefix=$(ipcalc $IPADDR $NETMASK -p |cut -f 2 -d '=')
export subnet=${netaddr}/${netprefix}
sudo sed -i "s/subnet \$subnet netmask \$netmask/subnet $netaddr netmask $NETMASK/g" /etc/cobbler/dhcp.template
sudo sed -i "s/option routers \$gateway/option routers $OPTION_ROUTER/g" /etc/cobbler/dhcp.template sudo sed -i "s/option routers \$gateway/option routers $OPTION_ROUTER/g" /etc/cobbler/dhcp.template
sudo sed -i "s/option subnet-mask \$netmask/option subnet-mask $netmask/g" /etc/cobbler/dhcp.template sudo sed -i "s/option subnet-mask \$netmask/option subnet-mask $NETMASK/g" /etc/cobbler/dhcp.template
sudo sed -i "s/option domain-name-servers \$ipaddr/option domain-name-servers $ipaddr/g" /etc/cobbler/dhcp.template sudo sed -i "s/option domain-name-servers \$ipaddr/option domain-name-servers $IPADDR/g" /etc/cobbler/dhcp.template
sudo sed -i "s/range dynamic-bootp \$ip_range/range dynamic-bootp $IP_START $IP_END/g" /etc/cobbler/dhcp.template sudo sed -i "s/range dynamic-bootp \$ip_range/range dynamic-bootp $IP_START $IP_END/g" /etc/cobbler/dhcp.template
sudo sed -i "s/local-address \$ipaddr/local-address $ipaddr/g" /etc/cobbler/dhcp.template sudo sed -i "s/local-address \$ipaddr/local-address $IPADDR/g" /etc/cobbler/dhcp.template
sudo chmod 644 /etc/cobbler/dhcp.template sudo chmod 644 /etc/cobbler/dhcp.template
# update tftpd.template # update tftpd.template
@ -67,8 +79,8 @@ sudo chmod 644 /etc/cobbler/tftpd.template
sudo cp -rn /etc/cobbler/named.template /root/backup/cobbler/ sudo cp -rn /etc/cobbler/named.template /root/backup/cobbler/
sudo rm -f /etc/cobbler/named.template sudo rm -f /etc/cobbler/named.template
sudo cp -rf $ADAPTERS_HOME/cobbler/conf/named.template /etc/cobbler/named.template sudo cp -rf $ADAPTERS_HOME/cobbler/conf/named.template /etc/cobbler/named.template
sudo sed -i "s/listen-on port 53 { \$ipaddr; }/listen-on port 53 \{ $ipaddr; \}/g" /etc/cobbler/named.template sudo sed -i "s/listen-on port 53 { \$ipaddr; }/listen-on port 53 \{ $IPADDR; \}/g" /etc/cobbler/named.template
subnet_escaped=$(echo $SUBNET | sed -e 's/[\/&]/\\&/g') subnet_escaped=$(echo $subnet | sed -e 's/[\/&]/\\&/g')
sudo sed -i "s/allow-query { 127.0.0.0\/8; \$subnet; }/allow-query \{ 127.0.0.0\/8; $subnet_escaped; \}/g" /etc/cobbler/named.template sudo sed -i "s/allow-query { 127.0.0.0\/8; \$subnet; }/allow-query \{ 127.0.0.0\/8; $subnet_escaped; \}/g" /etc/cobbler/named.template
sudo chmod 644 /etc/cobbler/named.template sudo chmod 644 /etc/cobbler/named.template
@ -76,8 +88,8 @@ sudo chmod 644 /etc/cobbler/named.template
sudo cp -rn /etc/cobbler/zone.template /root/backup/cobbler/ sudo cp -rn /etc/cobbler/zone.template /root/backup/cobbler/
sudo rm -f /etc/cobbler/zone.template sudo rm -f /etc/cobbler/zone.template
sudo cp -rf $ADAPTERS_HOME/cobbler/conf/zone.template /etc/cobbler/zone.template sudo cp -rf $ADAPTERS_HOME/cobbler/conf/zone.template /etc/cobbler/zone.template
sudo sed -i "s/\$hostname IN A \$ipaddr/$HOSTNAME IN A $ipaddr/g" /etc/cobbler/zone.template sudo sed -i "s/\$hostname IN A \$ipaddr/$HOSTNAME IN A $IPADDR/g" /etc/cobbler/zone.template
sudo sed -i "s/metrics IN A \$ipaddr/metrics IN A $ipaddr/g" /etc/cobbler/zone.template sudo sed -i "s/metrics IN A \$ipaddr/metrics IN A $IPADDR/g" /etc/cobbler/zone.template
sudo chmod 644 /etc/cobbler/zone.template sudo chmod 644 /etc/cobbler/zone.template
# update modules.conf # update modules.conf
@ -390,7 +402,7 @@ for profile in $(cobbler profile list); do
done done
if [ "$centos_found_profile" == "0" ]; then if [ "$centos_found_profile" == "0" ]; then
sudo cobbler profile add --name="${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}" --repo=centos_ppa_repo --distro="${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}" --ksmeta="tree=http://$ipaddr/cobbler/ks_mirror/${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}" --kickstart=/var/lib/cobbler/kickstarts/default.ks sudo cobbler profile add --name="${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}" --repo=centos_ppa_repo --distro="${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}" --ksmeta="tree=http://$IPADDR/cobbler/ks_mirror/${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}" --kickstart=/var/lib/cobbler/kickstarts/default.ks
if [[ "$?" != "0" ]]; then if [[ "$?" != "0" ]]; then
echo "failed to add profile ${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}" echo "failed to add profile ${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}"
exit 1 exit 1
@ -399,7 +411,7 @@ if [ "$centos_found_profile" == "0" ]; then
fi fi
else else
echo "profile ${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH} has already existed." echo "profile ${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH} has already existed."
sudo cobbler profile edit --name="${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}" --repo=centos_ppa_repo --distro="${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}" --ksmeta="tree=http://$ipaddr/cobbler/ks_mirror/${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}" --kickstart=/var/lib/cobbler/kickstarts/default.ks sudo cobbler profile edit --name="${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}" --repo=centos_ppa_repo --distro="${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}" --ksmeta="tree=http://$IPADDR/cobbler/ks_mirror/${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}" --kickstart=/var/lib/cobbler/kickstarts/default.ks
if [[ "$?" != "0" ]]; then if [[ "$?" != "0" ]]; then
echo "failed to edit profile ${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}" echo "failed to edit profile ${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}"
exit 1 exit 1
@ -416,7 +428,7 @@ for profile in $(cobbler profile list); do
done done
if [ "$ubuntu_found_profile" == "0" ]; then if [ "$ubuntu_found_profile" == "0" ]; then
sudo cobbler profile add --name="${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}" --repo=ubuntu_ppa_repo --distro="${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}" --ksmeta="tree=http://$ipaddr/cobbler/ks_mirror/${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}" --kickstart=/var/lib/cobbler/kickstarts/default.seed sudo cobbler profile add --name="${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}" --repo=ubuntu_ppa_repo --distro="${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}" --ksmeta="tree=http://$IPADDR/cobbler/ks_mirror/${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}" --kickstart=/var/lib/cobbler/kickstarts/default.seed
if [[ "$?" != "0" ]]; then if [[ "$?" != "0" ]]; then
echo "failed to add profile ${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}" echo "failed to add profile ${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}"
exit 1 exit 1
@ -425,7 +437,7 @@ if [ "$ubuntu_found_profile" == "0" ]; then
fi fi
else else
echo "profile ${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH} has already existed." echo "profile ${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH} has already existed."
sudo cobbler profile edit --name="${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}" --repo=ubuntu_ppa_repo --distro="${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}" --ksmeta="tree=http://$ipaddr/cobbler/ks_mirror/${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}" --kickstart=/var/lib/cobbler/kickstarts/default.seed sudo cobbler profile edit --name="${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}" --repo=ubuntu_ppa_repo --distro="${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}" --ksmeta="tree=http://$IPADDR/cobbler/ks_mirror/${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}" --kickstart=/var/lib/cobbler/kickstarts/default.seed
if [[ "$?" != "0" ]]; then if [[ "$?" != "0" ]]; then
echo "failed to edit profile ${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}" echo "failed to edit profile ${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}"
exit 1 exit 1

View File

@ -1,34 +1,85 @@
#!/bin/bash #!/bin/bash
# Move files to their respective locations # Move files to their respective locations
sudo mkdir -p /etc/compass
sudo mkdir -p /opt/compass/bin ### BEGIN OF SCRIPT ###
sudo mkdir -p /var/www/compass_web echo "setup compass configuration"
sudo mkdir -p /var/log/compass DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
source $DIR/install.conf
if [ -f $DIR/env.conf ]; then
source $DIR/env.conf
else
echo "failed to load environment"
exit 1
fi
source $DIR/install_func.sh
cd $SCRIPT_DIR
if [ -z $WEB_SOURCE ]; then
echo "web source $WEB_SOURCE is not set"
exit 1
fi
copy2dir "$WEB_SOURCE" "$WEB_HOME" "stackforge/compass-web" || exit $?
if [ -z $ADAPTERS_SOURCE ]; then
echo "adpaters source $ADAPTERS_SOURCE is not set"
exit 1
fi
copy2dir "$ADAPTERS_SOURCE" "$ADAPTERS_HOME" "stackforge/compass-adapters" dev/experimental || exit $?
mkdir -p /etc/compass
rm -rf /etc/compass/*
mkdir -p /opt/compass/bin
rm -rf /opt/compass/bin/*
mkdir -p /var/www/compass_web
rm -rf /var/www/compass_web/*
mkdir -p /var/log/compass
rm -rf /var/log/compass/*
sudo mkdir -p /var/log/chef sudo mkdir -p /var/log/chef
sudo mkdir -p /opt/compass/db rm -rf /var/log/chef/*
sudo mkdir -p /var/www/compass mkdir -p /var/www/compass
rm -rf /var/www/compass/*
sudo cp -rf $COMPASSDIR/misc/apache/ods-server.conf /etc/httpd/conf.d/ods-server.conf sudo cp -rf $COMPASSDIR/misc/apache/ods-server.conf /etc/httpd/conf.d/ods-server.conf
sudo cp -rf $COMPASSDIR/misc/apache/compass.wsgi /var/www/compass/compass.wsgi
sudo cp -rf $COMPASSDIR/conf/* /etc/compass/ sudo cp -rf $COMPASSDIR/conf/* /etc/compass/
sudo cp -rf $COMPASSDIR/service/* /etc/init.d/ sudo cp -rf $COMPASSDIR/service/* /etc/init.d/
sudo cp -rf $COMPASSDIR/bin/*.py /opt/compass/bin/ sudo cp -rf $COMPASSDIR/bin/*.py /opt/compass/bin/
sudo cp -rf $COMPASSDIR/bin/*.sh /opt/compass/bin/ sudo cp -rf $COMPASSDIR/bin/*.sh /opt/compass/bin/
sudo cp -rf $COMPASSDIR/bin/compassd /usr/bin/ sudo cp -rf $COMPASSDIR/bin/compassd /usr/bin/
sudo cp -rf $COMPASSDIR/bin/compass /usr/bin/ sudo cp -rf $COMPASSDIR/bin/switch_virtualenv.py.template /opt/compass/bin/switch_virtualenv.py
sudo ln -s -f /opt/compass/bin/compass_check.py /usr/bin/compass
sudo ln -s -f /opt/compass/bin/compass_wsgi.py /var/www/compass/compass.wsgi
sudo cp -rf $COMPASSDIR/bin/chef/* /opt/compass/bin/ sudo cp -rf $COMPASSDIR/bin/chef/* /opt/compass/bin/
sudo cp -rf $COMPASSDIR/bin/cobbler/* /opt/compass/bin/ sudo cp -rf $COMPASSDIR/bin/cobbler/* /opt/compass/bin/
sudo cp -rf $WEB_HOME/public/* /var/www/compass_web/ sudo cp -rf $WEB_HOME/public/* /var/www/compass_web/
sudo cp -rf $WEB_HOME/v2 /var/www/compass_web/ sudo cp -rf $WEB_HOME/v2 /var/www/compass_web/
sudo cp -rf $COMPASSDIR/templates /etc/compass/
sudo rm -rf /var/chef
sudo mkdir -p /var/chef/cookbooks/
sudo cp -r $ADAPTERS_HOME/chef/cookbooks/* /var/chef/cookbooks/
if [ $? -ne 0 ]; then
echo "failed to copy cookbooks to /var/chef/cookbooks/"
exit 1
fi
sudo mkdir -p /var/chef/databags/
sudo cp -r $ADAPTERS_HOME/chef/databags/* /var/chef/databags/
if [ $? -ne 0 ]; then
echo "failed to copy databags to /var/chef/databags/"
exit 1
fi
sudo mkdir -p /var/chef/roles/
sudo cp -r $ADAPTERS_HOME/chef/roles/* /var/chef/roles/
if [ $? -ne 0 ]; then
echo "failed to copy roles to /var/chef/roles/"
exit 1
fi
# add apache user to the group of virtualenv user # add apache user to the group of virtualenv user
sudo usermod -a -G `groups $USER|awk '{print$3}'` apache sudo usermod -a -G `groups $USER|awk '{print$3}'` apache
sudo chkconfig compass-progress-updated on
sudo chkconfig compass-celeryd on
# setup ods server # setup ods server
if [ ! -f /usr/lib64/libcrypto.so ]; then if [ ! -f /usr/lib64/libcrypto.so ]; then
sudo cp -rf /usr/lib64/libcrypto.so.6 /usr/lib64/libcrypto.so sudo cp -rf /usr/lib64/libcrypto.so.6 /usr/lib64/libcrypto.so
fi fi
sudo chmod -R 777 /opt/compass/db sudo chmod -R 777 /opt/compass/db
@ -36,42 +87,38 @@ sudo chmod -R 777 /var/log/compass
sudo chmod -R 777 /var/log/chef sudo chmod -R 777 /var/log/chef
sudo echo "export C_FORCE_ROOT=1" > /etc/profile.d/celery_env.sh sudo echo "export C_FORCE_ROOT=1" > /etc/profile.d/celery_env.sh
sudo chmod +x /etc/profile.d/celery_env.sh sudo chmod +x /etc/profile.d/celery_env.sh
source `which virtualenvwrapper.sh`
if ! lsvirtualenv |grep compass-core>/dev/null; then
mkvirtualenv compass-core
fi
cd $COMPASSDIR cd $COMPASSDIR
workon compass-core workon compass-core
function compass_cleanup {
echo "deactive"
deactivate
}
trap compass_cleanup EXIT
python setup.py install python setup.py install
if [[ "$?" != "0" ]]; then if [[ "$?" != "0" ]]; then
echo "failed to install compass package" echo "failed to install compass package"
deactivate
exit 1 exit 1
else else
echo "compass package is installed in virtualenv under current dir" echo "compass package is installed in virtualenv under current dir"
fi fi
sudo sed -i "/COBBLER_INSTALLER_URL/c\COBBLER_INSTALLER_URL = 'http:\/\/$ipaddr/cobbler_api'" /etc/compass/setting sudo sed -i "s/\$ipaddr/$IPADDR/g" /etc/compass/setting
sudo sed -i "s/\$cobbler_ip/$ipaddr/g" /etc/compass/os_installer/cobbler.conf sudo sed -i "s/\$hostname/$HOSTNAME/g" /etc/compass/setting
sudo sed -i "/CHEF_INSTALLER_URL/c\CHEF_INSTALLER_URL = 'https:\/\/$ipaddr/'" /etc/compass/setting sed -i "s/\$gateway/$OPTION_ROUTER/g" /etc/compass/setting
sudo sed -i "s/\$chef_ip/$ipaddr/g" /etc/compass/package_installer/chef-icehouse.conf
sudo sed -i "s/\$chef_hostname/$HOSTNAME/g" /etc/compass/package_installer/chef-icehouse.conf
sudo sed -e 's|$PythonHome|'$VIRTUAL_ENV'|' -i /var/www/compass/compass.wsgi
sudo sed -e 's|$PythonHome|'$VIRTUAL_ENV'|' -i /usr/bin/compass
sudo sed -e 's|$PythonHome|'$VIRTUAL_ENV'|' -i /opt/compass/bin/poll_switch.py
sudo sed -e 's|$PythonHome|'$VIRTUAL_ENV'|' -i /opt/compass/bin/progress_update.py
sudo sed -e 's|$PythonHome|'$VIRTUAL_ENV'|' -i /opt/compass/bin/manage_db.py
sudo sed -e 's|$PythonHome|'$VIRTUAL_ENV'|' -i /opt/compass/bin/client.py
sudo sed -e 's|$PythonHome|'$VIRTUAL_ENV'|' -i /opt/compass/bin/clean_installation_logs.py
sudo sed -e 's|$PythonHome|'$VIRTUAL_ENV'|' -i /opt/compass/bin/delete_clusters.py
sudo sed -e 's|$Python|'$VIRTUAL_ENV/bin/python'|' -i /etc/init.d/compass-progress-updated
sudo sed -e 's|$CeleryPath|'$VIRTUAL_ENV/bin/celery'|' -i /etc/init.d/compass-celeryd
sudo sed -i "s/\$ipaddr/$ipaddr/g" /etc/compass/os_metadata/general.conf
sudo sed -i "s/\$hostname/$HOSTNAME/g" /etc/compass/os_metadata/general.conf
sed -i "s/\$gateway/$OPTION_ROUTER/g" /etc/compass/os_metadata/general.conf
domains=$(echo $NAMESERVER_DOMAINS | sed "s/,/','/g") domains=$(echo $NAMESERVER_DOMAINS | sed "s/,/','/g")
sudo sed -i "s/\$domain/$domains/g" /etc/compass/os_metadata/general.conf sudo sed -i "s/\$domains/$domains/g" /etc/compass/setting
# add cookbooks, databags and roles sudo sed -i "s/\$cobbler_ip/$IPADDR/g" /etc/compass/os_installer/cobbler.conf
sudo chmod +x /opt/compass/bin/addcookbooks.py sudo sed -i "s/\$chef_ip/$IPADDR/g" /etc/compass/package_installer/chef-icehouse.conf
sudo chmod +x /opt/compass/bin/adddatabags.py sudo sed -i "s/\$chef_hostname/$HOSTNAME/g" /etc/compass/package_installer/chef-icehouse.conf
sudo chmod +x /opt/compass/bin/addroles.py sudo sed -i "s|\$PythonHome|$VIRTUAL_ENV|g" /opt/compass/bin/switch_virtualenv.py
sudo ln -s -f $VIRTUAL_ENV/bin/celery /opt/compass/bin/celery
/opt/compass/bin/addcookbooks.py /opt/compass/bin/addcookbooks.py
if [[ "$?" != "0" ]]; then if [[ "$?" != "0" ]]; then
@ -111,6 +158,9 @@ else
exit 1 exit 1
fi fi
sudo chkconfig compass-progress-updated on
sudo chkconfig compass-celeryd on
/opt/compass/bin/refresh.sh /opt/compass/bin/refresh.sh
if [[ "$?" != "0" ]]; then if [[ "$?" != "0" ]]; then
echo "failed to refresh compassd service" echo "failed to refresh compassd service"
@ -131,13 +181,6 @@ else
echo "httpd has already started" echo "httpd has already started"
fi fi
sudo mkdir -p /var/log/redis
sudo chown -R redis:root /var/log/redis
sudo mkdir -p /var/lib/redis/
sudo chown -R redis:root /var/lib/redis
sudo mkdir -p /var/run/redis
sudo chown -R redis:root /var/run/redis
sudo service redis status |grep running sudo service redis status |grep running
if [[ "$?" != "0" ]]; then if [[ "$?" != "0" ]]; then
echo "redis is not started" echo "redis is not started"
@ -152,7 +195,6 @@ if [[ "$?" != "0" ]]; then
exit 1 exit 1
fi fi
killall -9 celeryd
killall -9 celery killall -9 celery
service compass-celeryd restart service compass-celeryd restart
service compass-celeryd status |grep running service compass-celeryd status |grep running
@ -170,10 +212,9 @@ if [[ "$?" != "0" ]]; then
else else
echo "compass-progress-updated has already started" echo "compass-progress-updated has already started"
fi fi
#compass check #compass check
#if [[ "$?" != "0" ]]; then #if [[ "$?" != "0" ]]; then
# echo "compass check failed" # echo "compass check failed"
# exit 1 # exit 1
#fi #fi
deactivate

10
install/env.conf Executable file
View File

@ -0,0 +1,10 @@
NIC=${NIC:-eth0}
IPADDR=${IPADDR:-10.145.89.100}
NETMASK=${NETMASK:-255.255.254.0}
WEB_SOURCE=${WEB_SOURCE:-http://git.openstack.org/stackforge/compass-web}
ADAPTERS_SOURCE=${ADAPTERS_SOURCE:-http://git.openstack.org/stackforge/compass-adapters}
OPTION_ROUTER=${OPTION_ROUTER:-10.145.88.1}
NAMESERVER_DOMAINS=${NAMESERVER_DOMAINS:-ods.com}
NEXTSERVER=${NEXTSERVER:-10.145.89.100}
IP_START=${IP_START:-10.145.89.100}
IP_END=${IP_END:-10.145.89.250}

View File

@ -10,10 +10,10 @@ export PACKAGE_INSTALLER=${PACKAGE_INSTALLER:-chef}
# service NIC # service NIC
export NIC=${NIC:-} export NIC=${NIC:-}
export IPADDR=${IPADDR:-}
export NETMASK=${NETMASK:-}
# DHCP config # DHCP config
# SUBNET variable specifies the subnet for DHCP server. Example: 192.168.0.0/16
export SUBNET=${SUBNET:-}
# DHCP option router address(Default is your management interface IP address )" # DHCP option router address(Default is your management interface IP address )"
export OPTION_ROUTER=${OPTION_ROUTER:-} export OPTION_ROUTER=${OPTION_ROUTER:-}
# The IP range for DHCP clients (Default: local subnet start from 100 to 254) # The IP range for DHCP clients (Default: local subnet start from 100 to 254)

View File

@ -14,15 +14,13 @@ export PACKAGE_INSTALLER=chef
export NIC=installation export NIC=installation
# DHCP config # DHCP config
# SUBNET variable specifies the subnet for DHCP server. Example: 192.168.0.0/16 export IPADDR=`ifconfig $NIC | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'`
export netmask=$(ifconfig $NIC |grep Mask | cut -f 4 -d ':') export NETMASK=$(ifconfig $NIC |grep Mask | cut -f 4 -d ':')
export ipaddr=`ifconfig $NIC | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'`
export SUBNET=$(ipcalc $ipaddr $netmask -n |cut -f 2 -d '=')/$(ipcalc $ipaddr $netmask -p |cut -f 2 -d '=')
# DHCP option router address(Default is your management interface IP address )" # DHCP option router address(Default is your management interface IP address )"
export OPTION_ROUTER=`ifconfig $NIC | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'` export OPTION_ROUTER=`ifconfig $NIC | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'`
# The IP range for DHCP clients (Default: local subnet start from 100 to 254) # The IP range for DHCP clients (Default: local subnet start from 100 to 254)
export IP_START=`echo $ipaddr |cut -d. -f'1 2 3'`.128 export IP_START=`echo $IPADDR |cut -d. -f'1 2 3'`.128
export IP_END=`echo $ipaddr |cut -d. -f'1 2 3'`.254 export IP_END=`echo $IPADDR |cut -d. -f'1 2 3'`.254
# TFTP server's IP address(Default: Management Interface/eth0 IP) # TFTP server's IP address(Default: Management Interface/eth0 IP)
export NEXTSERVER=`ifconfig $NIC | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'` export NEXTSERVER=`ifconfig $NIC | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'`
# the domains covered by nameserver # the domains covered by nameserver
@ -83,8 +81,8 @@ export SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
export COMPASSDIR=${SCRIPT_DIR}/.. export COMPASSDIR=${SCRIPT_DIR}/..
# Set test script variables # Set test script variables
export NAMESERVERS=$ipaddr export NAMESERVERS=$IPADDR
export NTP_SERVER=$ipaddr export NTP_SERVER=$IPADDR
export GATEWAY=$ipaddr export GATEWAY=$IPADDR
export PROXY=http://$ipaddr:3128 export PROXY=http://$IPADDR:3128
export TESTMODE=${TESTMODE:-"True"} export TESTMODE=${TESTMODE:-"True"}

View File

@ -9,18 +9,55 @@ exec 2>&1
LOCKFILE="/tmp/`basename $0`" LOCKFILE="/tmp/`basename $0`"
LOCKFD=99 LOCKFD=99
if [ -f $LOCKFILE ]; then
LOCKED_PID=$(cat $LOCKFILE | head -n 1)
ps -p $LOCKED_PID &> /dev/null
if [[ "$?" != "0" ]]; then
echo "the progress of pid $LOCKED_PID does not exist"
rm -f $LOCKFILE
else
echo "the progress of pid $LOCKED_PID is running"
exit 1
fi
else
echo "$LOCKFILE not exist"
fi
# PRIVATE # PRIVATE
_lock() { flock -$1 $LOCKFD; } _lock()
_no_more_locking() { _lock u; _lock xn && rm -f $LOCKFILE; } {
_prepare_locking() { eval "exec $LOCKFD>\"$LOCKFILE\""; trap _no_more_locking EXIT; } echo "lock $LOCKFILE"
flock -$1 $LOCKFD
pid=$$
echo $pid 1>& $LOCKFD
}
_no_more_locking()
{
_lock u
_lock xn && rm -f $LOCKFILE
}
_prepare_locking()
{
eval "exec $LOCKFD>\"$LOCKFILE\""
trap _no_more_locking EXIT
}
# ON START # ON START
_prepare_locking _prepare_locking
# PUBLIC # PUBLIC
exlock_now() { _lock xn; } # obtain an exclusive lock immediately or fail exlock_now()
{
_lock xn || exit 1
} # obtain an exclusive lock immediately or fail
exlock_now || exit 1 exlock_now
if [[ "$?" != "0" ]]; then
echo "failed to acquire lock $LOCKFILE"
exit 1
fi
### BEGIN OF SCRIPT ### ### BEGIN OF SCRIPT ###
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
@ -134,22 +171,21 @@ if [ $? -ne 0 ]; then
exit 1 exit 1
fi fi
export netmask=$(ifconfig $NIC |grep Mask | cut -f 4 -d ':')
export ipaddr=$(ifconfig $NIC | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}') export ipaddr=$(ifconfig $NIC | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}')
export netaddr=$(ipcalc $ipaddr $netmask -n |cut -f 2 -d '=') loadvars IPADDR ${ipaddr}
export netprefix=$(ipcalc $ipaddr $netmask -p |cut -f 2 -d '=') ipcalc $IPADDR -c
loadvars SUBNET ${netaddr}/${netprefix}
ipcalc $SUBNET -c
if [ $? -ne 0 ]; then if [ $? -ne 0 ]; then
echo "subnet $SUBNET format should be x.x.x.x/x" echo "ip addr $IPADDR format should be x.x.x.x"
exit 1 exit 1
fi fi
export netaddr=$(ipcalc $SUBNET -n |cut -f 2 -d '=') export netmask=$(ifconfig $NIC |grep Mask | cut -f 4 -d ':')
export netprefix=$(ipcalc $SUBNET -p |cut -f 2 -d '=') loadvars NETMASK ${netmask}
export netmask=$(ipcalc $SUBNET -m |cut -f 2 -d '=') export netaddr=$(ipcalc $IPADDR $NETMASK -n |cut -f 2 -d '=')
export expected_subnet=${netaddr}/${netprefix} export netprefix=$(ipcalc $IPADDR $NETMASK -p |cut -f 2 -d '=')
if [[ "$SUBNET" != "$expected_subnet" ]]; then subnet=${netaddr}/${netprefix}
echo "expected subnet should be $expected_subnet" ipcalc $subnet -c
if [ $? -ne 0 ]; then
echo "subnet $subnet format should be x.x.x.x/x"
exit 1 exit 1
fi fi
loadvars OPTION_ROUTER $(route -n | grep '^0.0.0.0' | xargs | cut -d ' ' -f 2) loadvars OPTION_ROUTER $(route -n | grep '^0.0.0.0' | xargs | cut -d ' ' -f 2)
@ -158,8 +194,8 @@ if [ $? -ne 0 ]; then
echo "router $OPTION_ROUTER format should be x.x.x.x" echo "router $OPTION_ROUTER format should be x.x.x.x"
exit 1 exit 1
fi fi
export ip_start=$(echo "$ipaddr"|cut -f 1,2,3 -d '.')."100" export ip_start=$(echo "$IPADDR"|cut -f 1,2,3 -d '.')."100"
export ip_end=$(echo "$ipaddr"|cut -f 1,2,3 -d '.')."250" export ip_end=$(echo "$IPADDR"|cut -f 1,2,3 -d '.')."250"
loadvars IP_START "$ip_start" loadvars IP_START "$ip_start"
ipcalc $IP_START -c ipcalc $IP_START -c
if [ $? -ne 0 ]; then if [ $? -ne 0 ]; then
@ -168,9 +204,9 @@ if [ $? -ne 0 ]; then
else else
echo "ip start address is $IP_START" echo "ip start address is $IP_START"
fi fi
ip_start_net=$(ipcalc $IP_START $netmask -n |cut -f 2 -d '=') ip_start_net=$(ipcalc $IP_START $NETMASK -n |cut -f 2 -d '=')
if [[ "$ip_start_net" != "$netaddr" ]]; then if [[ "$ip_start_net" != "$netaddr" ]]; then
echo "ip start $IP_START is not in $SUBNET" echo "ip start $IP_START is not in $subnet"
exit 1 exit 1
fi fi
loadvars IP_END "$ip_end" loadvars IP_END "$ip_end"
@ -179,9 +215,9 @@ if [ $? -ne 0 ]; then
echo "ip end $IP_END format should be x.x.x.x" echo "ip end $IP_END format should be x.x.x.x"
exit 1 exit 1
fi fi
ip_end_net=$(ipcalc $IP_END $netmask -n |cut -f 2 -d '=') ip_end_net=$(ipcalc $IP_END $NETMASK -n |cut -f 2 -d '=')
if [[ "$ip_end_net" != "$netaddr" ]]; then if [[ "$ip_end_net" != "$netaddr" ]]; then
echo "ip end $IP_END is not in $SUBNET" echo "ip end $IP_END is not in $subnet"
exit 1 exit 1
fi fi
ip_start_int=$(ipaddr_convert $IP_START) ip_start_int=$(ipaddr_convert $IP_START)
@ -192,7 +228,7 @@ if [ $ip_range -le 0 ]; then
exit 1 exit 1
fi fi
echo "there will be at most $ip_range hosts deployed." echo "there will be at most $ip_range hosts deployed."
loadvars NEXTSERVER $ipaddr loadvars NEXTSERVER $IPADDR
ipcalc $NEXTSERVER -c ipcalc $NEXTSERVER -c
if [ $? -ne 0 ]; then if [ $? -ne 0 ]; then
echo "next server $NEXTSERVER format should be x.x.x.x" echo "next server $NEXTSERVER format should be x.x.x.x"
@ -205,6 +241,9 @@ loadvars ADAPTERS_SOURCE 'http://git.openstack.org/stackforge/compass-adapters'
echo "script dir: $SCRIPT_DIR" echo "script dir: $SCRIPT_DIR"
echo "compass dir is $COMPASSDIR" echo "compass dir is $COMPASSDIR"
echo "generate env.conf"
source ${COMPASSDIR}/install/setup_env.sh || exit $?
echo "Install the Dependencies" echo "Install the Dependencies"
source ${COMPASSDIR}/install/dependency.sh || exit $? source ${COMPASSDIR}/install/dependency.sh || exit $?

145
install/install_func.sh Executable file
View File

@ -0,0 +1,145 @@
#!/bin/bash
#
# copy2dir: populate $destdir with the contents of $repo.
#
# Arguments:
#   $1 repo        - source: a git URL (git://, http://, https://, ftp://)
#                    or a local directory path
#   $2 destdir     - destination directory (created/recreated as needed)
#   $3 git_project - project name compared against $ZUUL_PROJECT to decide
#                    whether to overlay a Zuul-proposed change
#   $4 git_branch  - optional branch to check out; defaults to master
#
# For git URLs an existing destdir is reused as a checkout when possible,
# otherwise recreated via clone; when the ZUUL_* environment matches this
# project and branch, the Zuul ref is fetched on top.  Local paths are
# simply copied.  Exits the process on any unrecoverable failure.
copy2dir()
{
    repo=$1
    destdir=$2
    git_project=$3
    git_branch=master
    if [ -n "$4" ]; then
        git_branch=$4
    fi
    echo "copy $repo branch $git_branch to $destdir"
    if [[ "$repo" =~ (git|http|https|ftp):// ]]; then
        # Drop a pre-existing destdir that is not a usable git repo.
        if [[ -d $destdir || -L $destdir ]]; then
            cd $destdir
            git status &> /dev/null
            if [ $? -ne 0 ]; then
                echo "$destdir is not git repo"
                cd -
                rm -rf $destdir
            else
                echo "$destdir is git repo"
                cd -
            fi
        fi
        if [[ -d $destdir || -L $destdir ]]; then
            # Reuse the existing checkout: repoint origin and hard-reset
            # onto the requested branch.
            echo "$destdir exists"
            cd $destdir
            git remote set-url origin $repo
            git remote update
            if [ $? -ne 0 ]; then
                echo "failed to git remote update $repo in $destdir"
                cd -
                exit 1
            else
                echo "git remote update $repo in $destdir succeeded"
            fi
            git reset --hard
            git clean -x -f
            git checkout $git_branch
            git reset --hard remotes/origin/$git_branch
            cd -
        else
            # Fresh clone.
            echo "create $destdir"
            mkdir -p $destdir
            git clone $repo $destdir
            if [ $? -ne 0 ]; then
                echo "failed to git clone $repo $destdir"
                exit 1
            else
                # Typo fix: was "suceeded" in the original message.
                echo "git clone $repo $destdir succeeded"
            fi
            cd $destdir
            git checkout $git_branch
            git reset --hard remotes/origin/$git_branch
            cd -
        fi
        # Overlay the Zuul-proposed ref only when this CI run targets
        # exactly this project and branch.
        cd $destdir
        if [[ -z $ZUUL_PROJECT ]]; then
            echo "ZUUL_PROJECT is not set"
        elif [[ -z $ZUUL_BRANCH ]]; then
            echo "ZUUL_BRANCH is not set"
        elif [[ -z $ZUUL_REF ]]; then
            echo "ZUUL_REF is not set"
        elif [[ "$ZUUL_PROJECT" != "$git_project" ]]; then
            echo "ZUUL_PROJECT $ZUUL_PROJECT is not equal to git_project $git_project"
        elif [[ "$ZUUL_BRANCH" != "$git_branch" ]]; then
            echo "ZUUL_BRANCH $ZUUL_BRANCH is not equal git_branch $git_branch"
        else
            git_repo=$ZUUL_URL/$ZUUL_PROJECT
            git_ref=$ZUUL_REF
            git reset --hard remotes/origin/$git_branch
            # FETCH_HEAD points at the just-fetched Zuul ref.
            git fetch $git_repo $git_ref && git checkout FETCH_HEAD
            if [ $? -ne 0 ]; then
                echo "failed to git fetch $git_repo $git_ref"
                cd -
                exit 1
            fi
            git clean -x -f
        fi
        cd -
    else
        # Local path: replace destdir with a plain copy.
        sudo rm -rf $destdir
        sudo cp -rf $repo $destdir
        if [ $? -ne 0 ]; then
            echo "failed to copy $repo to $destdir"
            exit 1
        else
            echo "copy $repo to $destdir succeeded"
        fi
    fi
    # Final sanity check: destdir must exist whichever path was taken.
    if [[ ! -d $destdir && ! -L $destdir ]]; then
        echo "$destdir does not exist"
        exit 1
    else
        echo "$destdir is ready"
    fi
}
# download: fetch a package into /tmp and optionally act on it.
#
# Arguments:
#   $1 url     - http/https/ftp URL, or a local path to copy from
#   $2 package - target file name under /tmp (default: basename of $1)
#   $3 action  - "" (just download), "install" (rpm -Uvh it), or
#                "copy" (copy it into $4)
#   $4 destdir - destination directory, used only when action == "copy"
#
# An already-present /tmp/$package is reused as-is.  Exits the process
# when the download or install fails.
download()
{
    #download params: <download url> [<package name>] [<action after package downloaded>]
    url=$1
    package=${2:-$(basename $url)}
    action=${3:-""}
    echo "download $package from $url and run $action"
    if [[ -f /tmp/${package} || -L /tmp/${package} ]]; then
        echo "$package already exists"
    else
        if [[ "$url" =~ (http|https|ftp):// ]]; then
            echo "downloading $url to /tmp/${package}"
            # Fetch to a .tmp name first so a partial download never
            # masquerades as a complete package on the next run.
            curl -L -o /tmp/${package}.tmp $url
            if [[ "$?" != "0" ]]; then
                echo "failed to download $package"
                exit 1
            else
                echo "successfully download $package"
                mv -f /tmp/${package}.tmp /tmp/${package}
            fi
        else
            cp -rf $url /tmp/${package}
        fi
        if [[ ! -f /tmp/${package} && ! -L /tmp/${package} ]]; then
            echo "/tmp/$package is not created"
            exit 1
        fi
    fi
    if [[ "$action" == "install" ]]; then
        echo "install /tmp/$package"
        sudo rpm -Uvh /tmp/$package
        if [[ "$?" != "0" ]]; then
            echo "failed to install $package"
            exit 1
        else
            echo "$package is installed"
        fi
    elif [[ "$action" == "copy" ]]; then
        # Bug fix: assign destdir from $4 BEFORE logging it.  The original
        # echoed $destdir while it was still unset (or stale from a
        # previous call), printing a misleading destination.
        destdir=$4
        echo "copy /tmp/$package to $destdir"
        sudo cp /tmp/$package $destdir
    fi
}

View File

@ -1,113 +1,31 @@
#!/bin/bash #!/bin/bash
# prepare the installation # prepare the installation
copy2dir() ### BEGIN OF SCRIPT ###
{ echo "prepare installation"
repo=$1 DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
destdir=$2 source $DIR/install.conf
git_branch=master if [ -f $DIR/env.conf ]; then
source $DIR/env.conf
if [ -n "$4" ]; then else
git_branch=$4 echo "failed to load environment"
fi exit 1
fi
if [[ "$repo" =~ (git|http|https|ftp):// ]]; then source $DIR/install_func.sh
if [[ -d $destdir || -L $destdir ]]; then
cd $destdir
git status &> /dev/null
if [ $? -ne 0 ]; then
echo "$destdir is not git repo"
rm -rf $destdir
else
echo "$destdir is git repo"
fi
cd -
fi
if [[ -d $destdir || -L $destdir ]]; then
echo "$destdir exists"
cd $destdir
git remote set-url origin $repo
git remote update
if [ $? -ne 0 ]; then
echo "failed to git remote update $repo in $destdir"
exit 1
else
echo "git remote update $repo in $destdir succeeded"
fi
git reset --hard
git clean -x -f
git checkout $git_branch
git reset --hard remotes/origin/$git_branch
else
echo "create $destdir"
mkdir -p $destdir
git clone $repo $destdir
if [ $? -ne 0 ]; then
echo "failed to git clone $repo $destdir"
exit 1
else
echo "git clone $repo $destdir suceeded"
fi
cd $destdir
git checkout $git_branch
git reset --hard remotes/origin/$git_branch
fi
if [[ ! -z $ZUUL_REF || ! -z $GERRIT_REFSPEC ]]; then
if [[ ! -z $ZUUL_REF ]]; then
git_repo=$ZUUL_URL/$3
git_ref=$ZUUL_REF
if git branch -a|grep ${ZUUL_BRANCH}; then
git_branch=$ZUUL_BRANCH
else
git_branch=master
fi
elif [[ ! -z $GERRIT_REFSPEC ]]; then
git_repo=https://$GERRIT_HOST/$3
git_ref=$GERRIT_REFSPEC
if git branch -a|grep $GERRIT_BRANCH; then
git_branch=$GERRIT_BRANCH
else
git_branch=master
fi
fi
git reset --hard remotes/origin/$git_branch
git fetch $git_repo $git_ref && git checkout FETCH_HEAD
if [ $? -ne 0 ]; then
echo "failed to git fetch $git_repo $git_ref"
fi
git clean -x -f
fi
else
sudo rm -rf $destdir
sudo cp -rf $repo $destdir
if [ $? -ne 0 ]; then
echo "failed to copy $repo to $destdir"
exit 1
else
echo "copy $repo to $destdir succeeded"
fi
fi
if [[ ! -d $destdir && ! -L $destdir ]]; then
echo "$destdir does not exist"
exit 1
else
echo "$destdir is ready"
fi
cd $SCRIPT_DIR
}
# Create backup dir # Create backup dir
sudo mkdir -p /root/backup sudo mkdir -p /root/backup
# update /etc/hosts # update /etc/hosts
echo "update /etc/hosts"
sudo cp -rn /etc/hosts /root/backup/hosts sudo cp -rn /etc/hosts /root/backup/hosts
sudo rm -f /etc/hosts sudo rm -f /etc/hosts
sudo cp -rf $COMPASSDIR/misc/hosts /etc/hosts sudo cp -rf $COMPASSDIR/misc/hosts /etc/hosts
sudo sed -i "s/\$ipaddr \$hostname/$ipaddr $HOSTNAME/g" /etc/hosts sudo sed -i "s/\$ipaddr \$hostname/$IPADDR $HOSTNAME/g" /etc/hosts
sudo chmod 644 /etc/hosts sudo chmod 644 /etc/hosts
# update rsyslog # update rsyslog
echo "update rsyslog"
sudo cp -rn /etc/rsyslog.conf /root/backup/ sudo cp -rn /etc/rsyslog.conf /root/backup/
sudo rm -f /etc/rsyslog.conf sudo rm -f /etc/rsyslog.conf
sudo cp -rf $COMPASSDIR/misc/rsyslog/rsyslog.conf /etc/rsyslog.conf sudo cp -rf $COMPASSDIR/misc/rsyslog/rsyslog.conf /etc/rsyslog.conf
@ -122,12 +40,14 @@ else
fi fi
# update logrotate.d # update logrotate.d
echo "update logrotate config"
sudo cp -rn /etc/logrotate.d /root/backup/ sudo cp -rn /etc/logrotate.d /root/backup/
rm -f /etc/logrotate.d/* rm -f /etc/logrotate.d/*
sudo cp -rf $COMPASSDIR/misc/logrotate.d/* /etc/logrotate.d/ sudo cp -rf $COMPASSDIR/misc/logrotate.d/* /etc/logrotate.d/
sudo chmod 644 /etc/logrotate.d/* sudo chmod 644 /etc/logrotate.d/*
# update ntp conf # update ntp conf
echo "update ntp config"
sudo cp -rn /etc/ntp.conf /root/backup/ sudo cp -rn /etc/ntp.conf /root/backup/
sudo rm -f /etc/ntp.conf sudo rm -f /etc/ntp.conf
sudo cp -rf $COMPASSDIR/misc/ntp/ntp.conf /etc/ntp.conf sudo cp -rf $COMPASSDIR/misc/ntp/ntp.conf /etc/ntp.conf
@ -144,10 +64,14 @@ else
fi fi
# update squid conf # update squid conf
echo "update squid config"
sudo cp -rn /etc/squid/squid.conf /root/backup/ sudo cp -rn /etc/squid/squid.conf /root/backup/
sudo rm -f /etc/squid/squid.conf sudo rm -f /etc/squid/squid.conf
sudo cp $COMPASSDIR/misc/squid/squid.conf /etc/squid/ sudo cp $COMPASSDIR/misc/squid/squid.conf /etc/squid/
subnet_escaped=$(echo $SUBNET | sed -e 's/[\/&]/\\&/g') export netaddr=$(ipcalc $IPADDR $NETMASK -n |cut -f 2 -d '=')
export netprefix=$(ipcalc $IPADDR $NETMASK -p |cut -f 2 -d '=')
subnet=${netaddr}/${netprefix}
subnet_escaped=$(echo $subnet | sed -e 's/[\/&]/\\&/g')
sudo sed -i "s/acl localnet src \$subnet/acl localnet src $subnet_escaped/g" /etc/squid/squid.conf sudo sed -i "s/acl localnet src \$subnet/acl localnet src $subnet_escaped/g" /etc/squid/squid.conf
sudo chmod 644 /etc/squid/squid.conf sudo chmod 644 /etc/squid/squid.conf
sudo mkdir -p /var/squid/cache sudo mkdir -p /var/squid/cache
@ -162,13 +86,14 @@ else
fi fi
#update mysqld #update mysqld
echo "update mysqld"
sudo service mysqld restart sudo service mysqld restart
MYSQL_USER=${MYSQL_USER:-root} MYSQL_USER=${MYSQL_USER:-root}
MYSQL_OLD_PASSWORD=${MYSQL_OLD_PASSWORD:-root} MYSQL_OLD_PASSWORD=${MYSQL_OLD_PASSWORD:-root}
MYSQL_PASSWORD=${MYSQL_PASSWORD:-root} MYSQL_PASSWORD=${MYSQL_PASSWORD:-root}
MYSQL_SERVER=${MYSQL_SERVER:-127.0.0.1} MYSQL_SERVER=${MYSQL_SERVER:-127.0.0.1}
MYSQL_PORT=${MYSQL_PORT:-3306} MYSQL_PORT=${MYSQL_PORT:-3306}
MYSQL_DATABASE=${MYSQL_DATABASE:-db} MYSQL_DATABASE=${MYSQL_DATABASE:-compass}
# first time set mysql password # first time set mysql password
sudo mysqladmin -h${MYSQL_SERVER} --port=${MYSQL_PORT} -u ${MYSQL_USER} -p"${MYSQL_OLD_PASSWORD}" password ${MYSQL_PASSWORD} sudo mysqladmin -h${MYSQL_SERVER} --port=${MYSQL_PORT} -u ${MYSQL_USER} -p"${MYSQL_OLD_PASSWORD}" password ${MYSQL_PASSWORD}
if [[ "$?" != "0" ]]; then if [[ "$?" != "0" ]]; then
@ -213,6 +138,7 @@ fi
copy2dir "$ADAPTERS_SOURCE" "$ADAPTERS_HOME" "stackforge/compass-adapters" dev/experimental || exit $? copy2dir "$ADAPTERS_SOURCE" "$ADAPTERS_HOME" "stackforge/compass-adapters" dev/experimental || exit $?
if [ "$tempest" == "true" ]; then if [ "$tempest" == "true" ]; then
echo "download tempest packages"
if [[ ! -e /tmp/tempest ]]; then if [[ ! -e /tmp/tempest ]]; then
git clone http://git.openstack.org/openstack/tempest /tmp/tempest git clone http://git.openstack.org/openstack/tempest /tmp/tempest
if [[ "$?" != "0" ]]; then if [[ "$?" != "0" ]]; then
@ -259,8 +185,9 @@ source `which virtualenvwrapper.sh`
if ! lsvirtualenv |grep compass-core>/dev/null; then if ! lsvirtualenv |grep compass-core>/dev/null; then
mkvirtualenv compass-core mkvirtualenv compass-core
fi fi
workon compass-core
cd $COMPASSDIR cd $COMPASSDIR
workon compass-core
echo "install compass requirements"
pip install -U -r requirements.txt pip install -U -r requirements.txt
if [[ "$?" != "0" ]]; then if [[ "$?" != "0" ]]; then
echo "failed to install compass requiremnts" echo "failed to install compass requiremnts"
@ -277,50 +204,6 @@ else
deactivate deactivate
fi fi
# TODO(xicheng): Please add comments to ths function. e.g, arg list
download()
{
url=$1
package=${2:-$(basename $url)}
action=${3:-""}
if [[ -f /tmp/${package} || -L /tmp/${package} ]]; then
echo "$package already exists"
else
if [[ "$url" =~ (http|https|ftp):// ]]; then
echo "downloading $url to /tmp/${package}"
curl -L -o /tmp/${package}.tmp $url
if [[ "$?" != "0" ]]; then
echo "failed to download $package"
exit 1
else
echo "successfully download $package"
mv -f /tmp/${package}.tmp /tmp/${package}
fi
else
cp -rf $url /tmp/${package}
fi
if [[ ! -f /tmp/${package} && ! -L /tmp/${package} ]]; then
echo "/tmp/$package is not created"
exit 1
fi
fi
if [[ "$action" == "install" ]]; then
echo "install /tmp/$package"
sudo rpm -Uvh /tmp/$package
if [[ "$?" != "0" ]]; then
echo "failed to install $package"
exit 1
else
echo "$package is installed"
fi
elif [[ "$action" == "copy" ]]; then
echo "copy /tmp/$package to $destdir"
destdir=$4
sudo cp /tmp/$package $destdir
fi
}
# download cobbler related packages # download cobbler related packages
centos_ppa_repo_packages=" centos_ppa_repo_packages="
ntp-4.2.6p5-1.${CENTOS_IMAGE_TYPE_OTHER}${CENTOS_IMAGE_VERSION_MAJOR}.${CENTOS_IMAGE_TYPE,,}.${CENTOS_IMAGE_ARCH}.rpm ntp-4.2.6p5-1.${CENTOS_IMAGE_TYPE_OTHER}${CENTOS_IMAGE_VERSION_MAJOR}.${CENTOS_IMAGE_TYPE,,}.${CENTOS_IMAGE_ARCH}.rpm
@ -356,6 +239,7 @@ download "$CENTOS_IMAGE_SOURCE" ${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}.iso ||
download "$UBUNTU_IMAGE_SOURCE" ${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}.iso || exit $? download "$UBUNTU_IMAGE_SOURCE" ${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}.iso || exit $?
# Install net-snmp # Install net-snmp
echo "install snmp config"
if [[ ! -e /etc/snmp ]]; then if [[ ! -e /etc/snmp ]]; then
sudo mkdir -p /etc/snmp sudo mkdir -p /etc/snmp
fi fi
@ -371,6 +255,7 @@ sudo mkdir -p /var/lib/net-snmp/mib_indexes
sudo chmod 755 /var/lib/net-snmp/mib_indexes sudo chmod 755 /var/lib/net-snmp/mib_indexes
# generate ssh key # generate ssh key
echo "generate ssh key"
if [[ ! -e $HOME/.ssh ]]; then if [[ ! -e $HOME/.ssh ]]; then
sudo mkdir -p $HOME/.ssh sudo mkdir -p $HOME/.ssh
fi fi

13
install/setup_env.sh Executable file
View File

@ -0,0 +1,13 @@
# Write the chosen installer settings to $SCRIPT_DIR/env.conf so later
# install stages can source them instead of prompting again.  Each line is
# emitted as VAR=${VAR:-<current value>}: the backslash keeps the ${...}
# expansion literal in env.conf, so a variable already exported when the
# file is sourced still takes precedence over the recorded value.
cat << EOF > $SCRIPT_DIR/env.conf
NIC=\${NIC:-$NIC}
IPADDR=\${IPADDR:-$IPADDR}
NETMASK=\${NETMASK:-$NETMASK}
WEB_SOURCE=\${WEB_SOURCE:-$WEB_SOURCE}
ADAPTERS_SOURCE=\${ADAPTERS_SOURCE:-$ADAPTERS_SOURCE}
OPTION_ROUTER=\${OPTION_ROUTER:-$OPTION_ROUTER}
NAMESERVER_DOMAINS=\${NAMESERVER_DOMAINS:-$NAMESERVER_DOMAINS}
NEXTSERVER=\${NEXTSERVER:-$NEXTSERVER}
IP_START=\${IP_START:-$IP_START}
IP_END=\${IP_END:-$IP_END}
EOF
# NOTE(review): env.conf is sourced, not executed, elsewhere in the
# installer, so the exec bit looks unnecessary — confirm before changing
# this mode to 644.
chmod ugo+x $SCRIPT_DIR/env.conf

View File

@ -1,23 +0,0 @@
#!/usr/bin/env python
"""WSGI entry script for the Compass API.

The ``$PythonHome`` tokens are template placeholders; presumably the
installer substitutes the real virtualenv path before deploying this
file -- confirm against the install scripts.
"""
import site
import sys
import os
# Activate the virtualenv in-process so subsequent imports resolve
# against its site-packages (Python 2 execfile-based activation).
activate_this='$PythonHome/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
site.addsitedir('$PythonHome/lib/python2.6/site-packages')
sys.path.append('$PythonHome')
# Point egg extraction at a location writable by the web-server user.
os.environ['PYTHON_EGG_CACHE'] = '/tmp/.egg'
from compass.api import app as application
from compass.utils import flags
from compass.utils import logsetting
from compass.utils import setting_wrapper as setting
# Initialize flags and logging before the API is constructed.
flags.init()
flags.OPTIONS.logfile = setting.WEB_LOGFILE
logsetting.init()
from compass.api import api as compass_api
compass_api.init()
# NOTE(review): this rebinding shadows the `application` imported above;
# presumably the earlier import exists only for its side effects -- confirm.
application = compass_api.app

View File

@ -5,7 +5,7 @@ export VIRT_MEM=${VIRT_MEM:-'8192'}
export VIRT_DISK=${VIRT_DISK:-'30G'} export VIRT_DISK=${VIRT_DISK:-'30G'}
export CLEAN_OLD_DATA=${CLEAN_OLD_DATA:-true} export CLEAN_OLD_DATA=${CLEAN_OLD_DATA:-true}
export COMPASS_SERVER_URL=${COMPASS_SERVER_URL:-"http://$ipaddr/api"} export COMPASS_SERVER_URL=${COMPASS_SERVER_URL:-"http://$IPADDR/api"}
export COMPASS_USER_EMAIL=${COMPASS_USER_EMAIL:-'admin@huawei.com'} export COMPASS_USER_EMAIL=${COMPASS_USER_EMAIL:-'admin@huawei.com'}
export COMPASS_USER_PASSWORD=${COMPASS_USER_PASSWORD:-'admin'} export COMPASS_USER_PASSWORD=${COMPASS_USER_PASSWORD:-'admin'}
export CLUSTER_NAME=${CLUSTER_NAME:-'allinone'} export CLUSTER_NAME=${CLUSTER_NAME:-'allinone'}
@ -20,16 +20,16 @@ export TIMEZONE=${TIMEZONE:-'America/Los_Angeles'}
export HOSTNAMES=${HOSTNAMES:-'allinone'} export HOSTNAMES=${HOSTNAMES:-'allinone'}
export ADAPTER_OS_PATTERN=${ADAPTER_OS_PATTERN:-'(?i)centos.*'} export ADAPTER_OS_PATTERN=${ADAPTER_OS_PATTERN:-'(?i)centos.*'}
export ADAPTER_NAME=${ADAPTER_NAME:=''} export ADAPTER_NAME=${ADAPTER_NAME:=''}
export ADAPTER_TARGET_SYSTEM_PATTERN=${ADAPTER_TARGET_SYSTEM_PATTERN:-'openstack.*'} export ADAPTER_TARGET_SYSTEM_PATTERN=${ADAPTER_TARGET_SYSTEM_PATTERN:-'^openstack$'}
export ADAPTER_FLAVOR_PATTERN=${ADAPTER_FLAVOR_PATTERN:-'allinone'} export ADAPTER_FLAVOR_PATTERN=${ADAPTER_FLAVOR_PATTERN:-'allinone'}
export HOST_ROLES=${HOST_ROLES:-'allinone=allinone-compute'} export HOST_ROLES=${HOST_ROLES:-'allinone=allinone-compute'}
export DEFAULT_ROLES=${DEFAULT_ROLES:-'allinone-compute'} export DEFAULT_ROLES=${DEFAULT_ROLES:-'allinone-compute'}
export NAMESERVERS=${NAMESERVERS:-$ipaddr} export NAMESERVERS=${NAMESERVERS:-$IPADDR}
export NTP_SERVER=${NTP_SERVER:-$ipaddr} export NTP_SERVER=${NTP_SERVER:-$IPADDR}
export GATEWAY=${GATEWAY:-$ipaddr} export GATEWAY=${GATEWAY:-$IPADDR}
export PROXY=${PROXY:-http://$ipaddr:3128} export PROXY=${PROXY:-http://${IPADDR}:3128}
export IGNORE_PROXY=${IGNORE_PROXY:-"127.0.0.1,localhost,$ipaddr,$HOSTNAME"} export IGNORE_PROXY=${IGNORE_PROXY:-"127.0.0.1,localhost,${IPADDR},$HOSTNAME"}
export DOMAIN=${DOMAIN:-'ods.com'} export DOMAIN=${DOMAIN:-'ods.com'}
export SEARCH_PATH=${SEARCH_PATH:-${DOMAIN}} export SEARCH_PATH=${SEARCH_PATH:-${DOMAIN}}
@ -45,7 +45,7 @@ function ip_subnet {
} }
if [ -z "$MANAGEMENT_SUBNET" ]; then if [ -z "$MANAGEMENT_SUBNET" ]; then
export MANAGEMENT_SUBNET=$(ip_subnet $ipaddr) export MANAGEMENT_SUBNET=$(ip_subnet ${IPADDR})
fi fi
export TENANT_SUBNET=${TENANT_SUBNET:-'172.16.2.0/24'} export TENANT_SUBNET=${TENANT_SUBNET:-'172.16.2.0/24'}
export PUBLIC_SUBNET=${PUBLIC_SUBNET:-'172.16.3.0/24'} export PUBLIC_SUBNET=${PUBLIC_SUBNET:-'172.16.3.0/24'}
@ -78,7 +78,7 @@ export CONSOLE_OBJECT_STORE_CREDENTIAL=${CONSOLE_OBJECT_STORE_CREDENTIAL:-"objec
export CONSOLE_VOLUME_CREDENTIAL=${CONSOLE_VOLUME_CREDENTIAL:-"volume:${CONSOLE_USERNAME}=${CONSOLE_PASSWORD}"} export CONSOLE_VOLUME_CREDENTIAL=${CONSOLE_VOLUME_CREDENTIAL:-"volume:${CONSOLE_USERNAME}=${CONSOLE_PASSWORD}"}
export CONSOLE_CREDENTIALS=${CONSOLE_CREDENTIALS:-"${CONSOLE_ADMIN_CREDENTIAL},${CONSOLE_COMPUTE_CREDENTIAL},${CONSOLE_DASHBOARD_CREDENTIAL},${CONSOLE_IMAGE_CREDENTIAL},${CONSOLE_METERING_CREDENTIAL},${CONSOLE_NETWORK_CREDENTIAL},${CONSOLE_OBJECT_STORE_CREDENTIAL},${CONSOLE_VOLUME_CREDENTIAL}"} export CONSOLE_CREDENTIALS=${CONSOLE_CREDENTIALS:-"${CONSOLE_ADMIN_CREDENTIAL},${CONSOLE_COMPUTE_CREDENTIAL},${CONSOLE_DASHBOARD_CREDENTIAL},${CONSOLE_IMAGE_CREDENTIAL},${CONSOLE_METERING_CREDENTIAL},${CONSOLE_NETWORK_CREDENTIAL},${CONSOLE_OBJECT_STORE_CREDENTIAL},${CONSOLE_VOLUME_CREDENTIAL}"}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $ipaddr |cut -d. -f'1 2 3'`.50} export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $IPADDR |cut -d. -f'1 2 3'`.50}
export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.50'} export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.50'}
export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.50'} export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.50'}
export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.50'} export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.50'}

View File

@ -9,7 +9,7 @@ export HOSTNAMES=${HOSTNAMES:-'database,messaging,identity,compute-controller,co
export HOST_ROLES=${HOST_ROLES:-'database=os-ops-database;messaging=os-ops-messaging;identity=os-identity;compute-controller=os-compute-controller;network-server=os-network-server;network-worker=os-network-worker;block-storage-volume=os-block-storage-volume;block-storage-controller=os-block-storage-controller;image=os-image;dashboard=os-dashboard'} export HOST_ROLES=${HOST_ROLES:-'database=os-ops-database;messaging=os-ops-messaging;identity=os-identity;compute-controller=os-compute-controller;network-server=os-network-server;network-worker=os-network-worker;block-storage-volume=os-block-storage-volume;block-storage-controller=os-block-storage-controller;image=os-image;dashboard=os-dashboard'}
export DEFAULT_ROLES=${DEFAULT_ROLES:-'os-compute-worker'} export DEFAULT_ROLES=${DEFAULT_ROLES:-'os-compute-worker'}
export ADAPTER_FLAVOR_PATTERN=${ADAPTER_FLAVOR_PATTERN:-'multinodes'} export ADAPTER_FLAVOR_PATTERN=${ADAPTER_FLAVOR_PATTERN:-'multinodes'}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $ipaddr |cut -d. -f'1 2 3'`.90} export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $IPADDR |cut -d. -f'1 2 3'`.90}
export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.90'} export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.90'}
export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.90'} export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.90'}
export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.90'} export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.90'}

View File

@ -8,7 +8,7 @@ export HOSTNAMES=${HOSTNAMES:-'allinone'}
export HOST_ROLES=${HOST_ROLES:-'allinone=os-controller,os-compute-worker,os-network,os-block-storage-volume'} export HOST_ROLES=${HOST_ROLES:-'allinone=os-controller,os-compute-worker,os-network,os-block-storage-volume'}
export DEFAULT_ROLES=${DEFAULT_ROLES:-'os-compute-worker'} export DEFAULT_ROLES=${DEFAULT_ROLES:-'os-compute-worker'}
export ADAPTER_FLAVOR_PATTERN=${ADAPTER_FLAVOR_PATTERN:-'single-contoller-multi-compute'} export ADAPTER_FLAVOR_PATTERN=${ADAPTER_FLAVOR_PATTERN:-'single-contoller-multi-compute'}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $ipaddr |cut -d. -f'1 2 3'`.52} export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $IPADDR |cut -d. -f'1 2 3'`.52}
export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.52'} export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.52'}
export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.52'} export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.52'}
export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.52'} export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.52'}

View File

@ -8,7 +8,7 @@ export HOSTNAMES=${HOSTNAMES:-'allinone'}
export HOST_ROLES=${HOST_ROLES:-'allinone=os-ops-database,os-ops-messaging,os-identity,os-compute-controller,os-compute-worker,os-network-server,os-network-worker,os-block-storage-volume,os-block-storage-controller,os-image,os-dashboard'} export HOST_ROLES=${HOST_ROLES:-'allinone=os-ops-database,os-ops-messaging,os-identity,os-compute-controller,os-compute-worker,os-network-server,os-network-worker,os-block-storage-volume,os-block-storage-controller,os-image,os-dashboard'}
export DEFAULT_ROLES=${DEFAULT_ROLES:-'os-compute-worker'} export DEFAULT_ROLES=${DEFAULT_ROLES:-'os-compute-worker'}
export ADAPTER_FLAVOR_PATTERN=${ADAPTER_FLAVOR_PATTERN:-'multinodes'} export ADAPTER_FLAVOR_PATTERN=${ADAPTER_FLAVOR_PATTERN:-'multinodes'}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $ipaddr |cut -d. -f'1 2 3'`.54} export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $IPADDR |cut -d. -f'1 2 3'`.54}
export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.54'} export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.54'}
export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.54'} export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.54'}
export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.54'} export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.54'}

View File

@ -5,7 +5,7 @@ export VIRT_MEM=${VIRT_MEM:-'8192'}
export VIRT_DISK=${VIRT_DISK:-'30G'} export VIRT_DISK=${VIRT_DISK:-'30G'}
export HOSTNAMES=${HOSTNAMES:-'controller,network,compute'} export HOSTNAMES=${HOSTNAMES:-'controller,network,compute'}
export HOST_ROLES=${HOST_ROLES:-'controller=os-controller;network=os-network,os-block-storage-volume'} export HOST_ROLES=${HOST_ROLES:-'controller=os-controller;network=os-network,os-block-storage-volume'}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $ipaddr |cut -d. -f'1 2 3'`.56} export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $IPADDR |cut -d. -f'1 2 3'`.56}
export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.56'} export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.56'}
export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.56'} export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.56'}
export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.56'} export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.56'}

View File

@ -5,7 +5,7 @@ export VIRT_MEM=${VIRT_MEM:-'8192'}
export VIRT_DISK=${VIRT_DISK:-'20G'} export VIRT_DISK=${VIRT_DISK:-'20G'}
export HOSTNAMES=${HOSTNAMES:-'database,messaging,identity,compute-controller,compute-worker1,compute-worker2,network-server,network-worker,block-storage-volume,block-storage-controller,image,dashboard'} export HOSTNAMES=${HOSTNAMES:-'database,messaging,identity,compute-controller,compute-worker1,compute-worker2,network-server,network-worker,block-storage-volume,block-storage-controller,image,dashboard'}
export HOST_ROLES=${HOST_ROLES:-'database=os-ops-database;messaging=os-ops-messaging;identity=os-identity;compute-controller=os-compute-controller;network-server=os-network-server;network-worker=os-network-worker;block-storage-volume=os-block-storage-volume;block-storage-controller=os-block-storage-controller;image=os-image;dashboard=os-dashboard'} export HOST_ROLES=${HOST_ROLES:-'database=os-ops-database;messaging=os-ops-messaging;identity=os-identity;compute-controller=os-compute-controller;network-server=os-network-server;network-worker=os-network-worker;block-storage-volume=os-block-storage-volume;block-storage-controller=os-block-storage-controller;image=os-image;dashboard=os-dashboard'}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $ipaddr |cut -d. -f'1 2 3'`.60} export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $IPADDR |cut -d. -f'1 2 3'`.60}
export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.60'} export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.60'}
export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.60'} export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.60'}
export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.60'} export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.60'}

View File

@ -5,7 +5,7 @@ export VIRT_MEM=${VIRT_MEM:-'8192'}
export VIRT_DISK=${VIRT_DISK:-'30G'} export VIRT_DISK=${VIRT_DISK:-'30G'}
export CLUSTER_NAME=${CLUSTER_NAME:-'osonly'} export CLUSTER_NAME=${CLUSTER_NAME:-'osonly'}
export HOSTNAMES=${HOSTNAMES:-'centos'} export HOSTNAMES=${HOSTNAMES:-'centos'}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $ipaddr |cut -d. -f'1 2 3'`.80} export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $IPADDR |cut -d. -f'1 2 3'`.80}
export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.80'} export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.80'}
export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.80'} export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.80'}
export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.80'} export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.80'}

View File

@ -5,7 +5,7 @@ export VIRT_MEM=${VIRT_MEM:-'8192'}
export VIRT_DISK=${VIRT_DISK:-'30G'} export VIRT_DISK=${VIRT_DISK:-'30G'}
export HOSTNAMES=${HOSTNAMES:-'ubuntu'} export HOSTNAMES=${HOSTNAMES:-'ubuntu'}
export ADAPTER_OS_PATTERN=${ADAPTER_OS_PATTERN:-'(?i)ubuntu.*'} export ADAPTER_OS_PATTERN=${ADAPTER_OS_PATTERN:-'(?i)ubuntu.*'}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $ipaddr |cut -d. -f'1 2 3'`.82} export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $IPADDR |cut -d. -f'1 2 3'`.82}
export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.82'} export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.82'}
export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.82'} export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.82'}
export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.82'} export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.82'}

View File

@ -5,7 +5,7 @@ export VIRT_MEM=${VIRT_MEM:-'8192'}
export VIRT_DISK=${VIRT_DISK:-'30G'} export VIRT_DISK=${VIRT_DISK:-'30G'}
export CLUSTER_NAME=${CLUSTER_NAME:-'allinone-ubuntu'} export CLUSTER_NAME=${CLUSTER_NAME:-'allinone-ubuntu'}
export ADAPTER_OS_PATTERN=${ADAPTER_OS_PATTERN:-'(?i)ubuntu.*'} export ADAPTER_OS_PATTERN=${ADAPTER_OS_PATTERN:-'(?i)ubuntu.*'}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $ipaddr |cut -d. -f'1 2 3'`.84} export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $IPADDR |cut -d. -f'1 2 3'`.84}
export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.84'} export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.84'}
export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.84'} export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.84'}
export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.84'} export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.84'}

View File

@ -9,7 +9,7 @@ export HOSTNAMES=${HOSTNAMES:-'controller,network,compute'}
export HOST_ROLES=${HOST_ROLES:-'controller=os-controller;network=os-network,os-block-storage-volume'} export HOST_ROLES=${HOST_ROLES:-'controller=os-controller;network=os-network,os-block-storage-volume'}
export DEFAULT_ROLES=${DEFAULT_ROLES:-'os-compute-worker'} export DEFAULT_ROLES=${DEFAULT_ROLES:-'os-compute-worker'}
export ADAPTER_FLAVOR_PATTERN=${ADAPTER_FLAVOR_PATTERN:-'single-contoller-multi-compute'} export ADAPTER_FLAVOR_PATTERN=${ADAPTER_FLAVOR_PATTERN:-'single-contoller-multi-compute'}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $ipaddr |cut -d. -f'1 2 3'`.86} export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $IPADDR |cut -d. -f'1 2 3'`.86}
export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.86'} export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.86'}
export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.86'} export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.86'}
export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.86'} export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.86'}

View File

@ -23,7 +23,7 @@
# Checking Sanity # Checking Sanity
DEBIAN=/etc/debian_version DEBIAN=/etc/debian_version
SUSE=/etc/SuSE-release SUSE=/etc/SuSE-release
CELERY=$CeleryPath CELERY=/opt/compass/bin/celery
if [ -f $DEBIAN ]; then if [ -f $DEBIAN ]; then
. /lib/lsb/init-functions . /lib/lsb/init-functions

View File

@ -23,7 +23,6 @@
# Checking Sanity # Checking Sanity
DEBIAN=/etc/debian_version DEBIAN=/etc/debian_version
SUSE=/etc/SuSE-release SUSE=/etc/SuSE-release
PYTHON=$Python
if [ -f $DEBIAN ]; then if [ -f $DEBIAN ]; then
. /lib/lsb/init-functions . /lib/lsb/init-functions
@ -37,14 +36,14 @@ RETVAL=0
start() { start() {
echo -n "Starting Compass progress updated: " echo -n "Starting Compass progress updated: "
if [ -f $SUSE ]; then if [ -f $SUSE ]; then
startproc -f -p /var/run/progress_update.pid -l /tmp/progress_update.log $PYTHON /opt/compass/bin/progress_update.py startproc -f -p /var/run/progress_update.pid -l /tmp/progress_update.log /opt/compass/bin/progress_update.py
rc_status -v rc_status -v
RETVAL=$? RETVAL=$?
elif [ -f $DEBIAN ]; then elif [ -f $DEBIAN ]; then
start_daemon -p /var/run/progress_update.pid "$PYTHON /opt/compass/bin/progress_update.py &>/tmp/progress_update.log & echo \$! > /var/run/progress_update.pid" start_daemon -p /var/run/progress_update.pid "/opt/compass/bin/progress_update.py &>/tmp/progress_update.log & echo \$! > /var/run/progress_update.pid"
RETVAL=$? RETVAL=$?
else else
daemon --pidfile /var/run/progress_update.pid "$PYTHON /opt/compass/bin/progress_update.py &>/tmp/progress_update.log & echo \$! > /var/run/progress_update.pid" daemon --pidfile /var/run/progress_update.pid "/opt/compass/bin/progress_update.py &>/tmp/progress_update.log & echo \$! > /var/run/progress_update.pid"
RETVAL=$? RETVAL=$?
fi fi
echo echo