make init db and adapter/metadata loading lazily initialized

Change-Id: I7f32732a9a2a1bc9ec721dad6e0c43ceef8c5f11
xiaodongwang 2014-09-26 12:29:25 -07:00
parent bbbdf1a533
commit 88eb112683
63 changed files with 1668 additions and 690 deletions
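The diffs below implement the lazy initialization named in the subject: the module-level adapter and metadata caches start empty and are only populated the first time they are needed, rather than when the database is initialized. A minimal, self-contained sketch of the pattern (ADAPTER_MAPPING and load_adapters_internal mirror compass.db.api.adapter_holder below; the query helper and session handling are simplified stand-ins so the sketch runs on its own):

ADAPTER_MAPPING = {}


def _query_adapters(session):
    # Stand-in for adapter_api.get_adapters_internal(session).
    return {1: {'name': 'openstack_icehouse', 'roles': []}}


def load_adapters_internal(session):
    # Populate the module-level cache; called at most once per process
    # unless the cache is reloaded explicitly.
    global ADAPTER_MAPPING
    ADAPTER_MAPPING = _query_adapters(session)


def list_adapters(session):
    # Lazy initialization: only hit the database the first time the
    # cache is needed, instead of at import/init time.
    if not ADAPTER_MAPPING:
        load_adapters_internal(session)
    return list(ADAPTER_MAPPING.values())


if __name__ == '__main__':
    print(list_adapters(session=None))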

View File

@ -20,6 +20,14 @@ import os
import os.path
import sys
current_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(current_dir)
import switch_virtualenv
from compass.utils import flags
from compass.utils import logsetting

View File

@ -20,6 +20,13 @@ import os
import os.path
import sys
current_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(current_dir)
import switch_virtualenv
from compass.utils import flags
from compass.utils import logsetting

View File

@ -20,6 +20,13 @@ import os
import os.path
import sys
current_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(current_dir)
import switch_virtualenv
from compass.utils import flags
from compass.utils import logsetting

View File

@ -18,14 +18,14 @@
import logging
import os
import os.path
import site
import sys
activate_this = '$PythonHome/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
site.addsitedir('$PythonHome/lib/python2.6/site-packages')
sys.path.append('$PythonHome')
os.environ['PYTHON_EGG_CACHE'] = '/tmp/.egg'
current_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(current_dir)
import switch_virtualenv
from compass.utils import flags
from compass.utils import logsetting

View File

@ -20,16 +20,16 @@ import netaddr
import os
import re
import requests
import site
import socket
import sys
import time
activate_this = '$PythonHome/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
site.addsitedir('$PythonHome/lib/python2.6/site-packages')
sys.path.append('$PythonHome')
os.environ['PYTHON_EGG_CACHE'] = '/tmp/.egg'
current_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(current_dir)
import switch_virtualenv
from compass.apiclient.restful import Client
from compass.utils import flags
@ -71,10 +71,10 @@ flags.add('adapter_name',
default='')
flags.add('adapter_os_pattern',
help='adapter os name',
default=r'(?i)centos.*')
default=r'^(?i)centos.*')
flags.add('adapter_target_system_pattern',
help='adapter target system name',
default='openstack.*')
default='^openstack$')
flags.add('adapter_flavor_pattern',
help='adapter flavor name',
default='allinone')
@ -346,6 +346,7 @@ def _get_adapter(client):
flavor_re = None
adapter_id = None
os_id = None
distributed_system_id = None
flavor_id = None
adapter = None
for item in resp:
@ -370,20 +371,30 @@ def _get_adapter(client):
if adapter_name and adapter['name'] == adapter_name:
adapter_id = adapter['id']
logging.info('adapter name %s match: %s', adapter_name, adapter)
elif 'distributed_system_name' in item:
logging.info('adapter name %s matches: %s', adapter_name, adapter)
elif (
'distributed_system_name' in item and
adapter['distributed_system_name']
):
if (
not target_system_re or
target_system_re and
target_system_re.match(adapter['distributed_system_name'])
):
adapter_id = adapter['id']
distributed_system_id = adapter['distributed_system_id']
logging.info(
'distributed system name pattern %s match: %s',
'distributed system name pattern %s matches: %s',
target_system_pattern, adapter
)
else:
if not target_system_re:
adapter_id = adapter['id']
logging.info(
'os-only adapter matches since no target_system_pattern is set'
)
if adapter_id:
logging.info('adapter does not match: %s', adapter)
logging.info('adapter matches: %s', adapter)
break
if not adapter_id:
@ -394,12 +405,16 @@ def _get_adapter(client):
msg = 'no os found for %s' % os_pattern
raise Exception(msg)
if target_system_re and not distributed_system_id:
msg = 'no distributed system found for %s' % target_system_pattern
raise Exception(msg)
if flavor_re and not flavor_id:
msg = 'no flavor found for %s' % flavor_pattern
raise Exception(msg)
logging.info('adapter for deploying a cluster: %s', adapter_id)
return (adapter_id, os_id, flavor_id)
return (adapter_id, os_id, distributed_system_id, flavor_id)
def _add_subnets(client):
@ -686,6 +701,9 @@ def _set_cluster_package_config(client, cluster_id):
for service_credential in flags.OPTIONS.service_credentials.split(',')
if service_credential
]
logging.debug(
'service credentials: %s', service_credentials
)
for service_credential in service_credentials:
if ':' not in service_credential:
raise Exception(
@ -706,6 +724,9 @@ def _set_cluster_package_config(client, cluster_id):
for console_credential in flags.OPTIONS.console_credentials.split(',')
if console_credential
]
logging.debug(
'console credentials: %s', console_credentials
)
for console_credential in console_credentials:
if ':' not in console_credential:
raise Exception(
@ -717,7 +738,7 @@ def _set_cluster_package_config(client, cluster_id):
'there is no = in console %s security' % console_name
)
username, password = console_pair.split('=', 1)
package_config['security']['console_credentials'][service_name] = {
package_config['security']['console_credentials'][console_name] = {
'username': username,
'password': password
}
@ -952,14 +973,14 @@ def main():
else:
machines = _get_machines(client)
subnet_mapping = _add_subnets(client)
adapter_id, os_id, flavor_id = _get_adapter(client)
adapter_id, os_id, distributed_system_id, flavor_id = _get_adapter(client)
cluster_id, host_mapping, role_mapping = _add_cluster(
client, adapter_id, os_id, flavor_id, machines)
host_ips = _set_host_networking(
client, host_mapping, subnet_mapping
)
_set_cluster_os_config(client, cluster_id, host_ips)
if flavor_id:
if distributed_system_id:
_set_cluster_package_config(client, cluster_id)
if role_mapping:
_set_hosts_roles(client, cluster_id, host_mapping, role_mapping)

View File

@ -1,14 +0,0 @@
#!/usr/bin/env python
import os
import site
import sys
activate_this='$PythonHome/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
site.addsitedir('$PythonHome/lib/python2.6/site-packages')
sys.path.append('$PythonHome')
os.environ['PYTHON_EGG_CACHE'] = '/tmp/.egg'
import compass.actions.cli as cli
sys.exit(cli.main())

bin/__init__.py → bin/compass_check.py (+18 lines) Normal file → Executable file
View File

@ -1,3 +1,5 @@
#!/usr/bin/env python
#
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -11,3 +13,19 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""compass health check."""
import os
import os.path
import sys
current_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(current_dir)
import switch_virtualenv
import compass.actions.cli as cli
sys.exit(cli.main())

bin/compass_wsgi.py (+42 lines) Executable file
View File

@ -0,0 +1,42 @@
#!/usr/bin/env python
#
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""compass wsgi module."""
import os
import sys
current_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(current_dir)
import switch_virtualenv
from compass.utils import flags
from compass.utils import logsetting
from compass.utils import setting_wrapper as setting
flags.init()
flags.OPTIONS.logfile = setting.WEB_LOGFILE
logsetting.init()
from compass.api import api as compass_api
compass_api.init()
application = compass_api.app

View File

@ -18,14 +18,15 @@
import logging
import os
import os.path
import site
import sys
activate_this = '$PythonHome/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
site.addsitedir('$PythonHome/lib/python2.6/site-packages')
sys.path.append('$PythonHome')
os.environ['PYTHON_EGG_CACHE'] = '/tmp/.egg'
current_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(current_dir)
import switch_virtualenv
from compass.db.api import cluster as cluster_api
from compass.db.api import database

View File

@ -17,14 +17,14 @@
"""utility binary to manage database."""
import os
import os.path
import site
import sys
activate_this = '$PythonHome/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
site.addsitedir('$PythonHome/lib/python2.6/site-packages')
sys.path.append('$PythonHome')
os.environ['PYTHON_EGG_CACHE'] = '/tmp/.egg'
current_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(current_dir)
import switch_virtualenv
from flask.ext.script import Manager

View File

@ -19,14 +19,14 @@ import functools
import lockfile
import logging
import os
import site
import sys
activate_this = '$PythonHome/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
site.addsitedir('$PythonHome/lib/python2.6/site-packages')
sys.path.append('$PythonHome')
os.environ['PYTHON_EGG_CACHE'] = '/tmp/.egg'
current_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(current_dir)
import switch_virtualenv
from multiprocessing import Pool

View File

@ -19,14 +19,14 @@ import functools
import lockfile
import logging
import os
import site
import sys
activate_this = '$PythonHome/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
site.addsitedir('$PythonHome/lib/python2.6/site-packages')
sys.path.append('$PythonHome')
os.environ['PYTHON_EGG_CACHE'] = '/tmp/.egg'
current_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(current_dir)
import switch_virtualenv
from compass.actions import update_progress
from compass.db.api import database

View File

@ -0,0 +1,30 @@
#!/usr/bin/env python
#
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""utility switch to virtual env."""
import os
import os.path
import site
import sys
virtual_env = '$PythonHome'
activate_this = '%s/bin/activate_this.py' % virtual_env
execfile(activate_this, dict(__file__=activate_this))
site.addsitedir('%s/lib/python2.6/site-packages' % virtual_env)
if virtual_env not in sys.path:
sys.path.append(virtual_env)
os.environ['PYTHON_EGG_CACHE'] = '/tmp/.egg'

View File

@ -25,7 +25,7 @@ from compass.utils import setting_wrapper as setting
from compass.utils import util
def _add_system(session, model, configs):
def _add_system(session, model, configs, exception_when_existing=True):
parents = {}
for config in configs:
logging.info(
@ -34,7 +34,7 @@ def _add_system(session, model, configs):
)
object = utils.add_db_object(
session, model,
False, config['NAME'],
exception_when_existing, config['NAME'],
deployable=config.get('DEPLOYABLE', False)
)
parents[config['NAME']] = (
@ -48,17 +48,23 @@ def _add_system(session, model, configs):
utils.update_db_object(session, object, parent=parent)
def add_oses_internal(session):
def add_oses_internal(session, exception_when_existing=True):
configs = util.load_configs(setting.OS_DIR)
_add_system(session, models.OperatingSystem, configs)
_add_system(
session, models.OperatingSystem, configs,
exception_when_existing=exception_when_existing
)
def add_distributed_systems_internal(session):
def add_distributed_systems_internal(session, exception_when_existing=True):
configs = util.load_configs(setting.DISTRIBUTED_SYSTEM_DIR)
_add_system(session, models.DistributedSystem, configs)
_add_system(
session, models.DistributedSystem, configs,
exception_when_existing=exception_when_existing
)
def add_adapters_internal(session):
def add_adapters_internal(session, exception_when_existing=True):
parents = {}
configs = util.load_configs(setting.ADAPTER_DIR)
for config in configs:
@ -86,7 +92,7 @@ def add_adapters_internal(session):
package_installer = None
adapter = utils.add_db_object(
session, models.Adapter,
False,
exception_when_existing,
config['NAME'],
display_name=config.get('DISPLAY_NAME', None),
distributed_system=distributed_system,
@ -109,7 +115,7 @@ def add_adapters_internal(session):
if supported_os_pattern.match(os_name):
utils.add_db_object(
session, models.AdapterOS,
True,
exception_when_existing,
os.id, adapter.id
)
break
@ -123,7 +129,7 @@ def add_adapters_internal(session):
utils.update_db_object(session, adapter, parent=parent)
def add_roles_internal(session):
def add_roles_internal(session, exception_when_existing=True):
configs = util.load_configs(setting.ADAPTER_ROLE_DIR)
for config in configs:
logging.info(
@ -136,14 +142,14 @@ def add_roles_internal(session):
for role_dict in config['ROLES']:
utils.add_db_object(
session, models.AdapterRole,
False, role_dict['role'], adapter.id,
exception_when_existing, role_dict['role'], adapter.id,
display_name=role_dict.get('display_name', None),
description=role_dict.get('description', None),
optional=role_dict.get('optional', False)
)
def add_flavors_internal(session):
def add_flavors_internal(session, exception_when_existing=True):
configs = util.load_configs(setting.ADAPTER_FLAVOR_DIR)
for config in configs:
logging.info('add config %s to flavor', config)
@ -154,7 +160,7 @@ def add_flavors_internal(session):
for flavor_dict in config['FLAVORS']:
flavor = utils.add_db_object(
session, models.AdapterFlavor,
False, flavor_dict['flavor'], adapter.id,
exception_when_existing, flavor_dict['flavor'], adapter.id,
display_name=flavor_dict.get('display_name', None),
template=flavor_dict.get('template', None)
)
@ -166,7 +172,7 @@ def add_flavors_internal(session):
)
utils.add_db_object(
session, models.AdapterFlavorRole,
False, flavor.id, role.id
exception_when_existing, flavor.id, role.id
)
utils.update_db_object(
session, flavor,

View File

@ -47,6 +47,10 @@ RESP_FLAVORS_FIELDS = [
@database.run_in_session()
def load_adapters(session):
load_adapters_internal(session)
def load_adapters_internal(session):
global ADAPTER_MAPPING
logging.info('load adapters into memory')
ADAPTER_MAPPING = adapter_api.get_adapters_internal(session)
@ -93,11 +97,16 @@ def _filter_adapters(adapter_config, filter_name, filter_value):
)
def list_adapters(session, lister, **filters):
"""list adapters."""
if not ADAPTER_MAPPING:
load_adapters_internal(session)
return ADAPTER_MAPPING.values()
def get_adapter_internal(adapter_id):
def get_adapter_internal(session, adapter_id):
"""get adapter."""
if not ADAPTER_MAPPING:
load_adapters_internal(session)
if adapter_id not in ADAPTER_MAPPING:
raise exception.RecordNotExists(
'adapter %s does not exist' % adapter_id
@ -118,7 +127,7 @@ def get_adapter_internal(adapter_id):
)
def get_adapter(session, getter, adapter_id, **kwargs):
"""get adapter."""
return get_adapter_internal(adapter_id)
return get_adapter_internal(session, adapter_id)
@utils.supported_filters([])
@ -129,8 +138,5 @@ def get_adapter(session, getter, adapter_id, **kwargs):
@utils.wrap_to_dict(RESP_ROLES_FIELDS)
def get_adapter_roles(session, getter, adapter_id, **kwargs):
"""get adapter roles."""
if adapter_id not in ADAPTER_MAPPING:
raise exception.RecordNotExists(
'adpater %s does not exist' % adapter_id
)
return ADAPTER_MAPPING[adapter_id].get('roles', [])
adapter = get_adapter_internal(session, adapter_id)
return adapter.get('roles', [])

View File

@ -14,6 +14,7 @@
# limitations under the License.
"""Cluster database operations."""
import copy
import functools
import logging
@ -344,12 +345,14 @@ def get_cluster_metadata(session, getter, cluster_id, **kwargs):
os = cluster.os
if os:
metadatas['os_config'] = metadata_api.get_os_metadata_internal(
os.id
session, os.id
)
adapter = cluster.adapter
if adapter:
metadatas['package_config'] = (
metadata_api.get_package_metadata_internal(adapter.id)
metadata_api.get_package_metadata_internal(
session, adapter.id
)
)
return metadatas
@ -407,10 +410,16 @@ def update_cluster_config(session, updater, cluster_id, **kwargs):
cluster = utils.get_db_object(
session, models.Cluster, id=cluster_id
)
os_config_validates = functools.partial(
metadata_api.validate_os_config, os_id=cluster.os_id)
package_config_validates = functools.partial(
metadata_api.validate_package_config, adapter_id=cluster.adapter_id)
def os_config_validates(config):
metadata_api.validate_os_config(
session, config, os_id=cluster.os_id
)
def package_config_validates(config):
metadata_api.validate_package_config(
session, config, adapter_id=cluster.adapter_id
)
@utils.input_validates(
put_os_config=os_config_validates,
@ -443,10 +452,15 @@ def patch_cluster_config(session, updater, cluster_id, **kwargs):
session, models.Cluster, id=cluster_id
)
os_config_validates = functools.partial(
metadata_api.validate_os_config, os_id=cluster.os_id)
package_config_validates = functools.partial(
metadata_api.validate_package_config, adapter_id=cluster.adapter_id)
def os_config_validates(config):
metadata_api.validate_os_config(
session, config, os_id=cluster.os_id
)
def package_config_validates(config):
metadata_api.validate_package_config(
session, config, adapter_id=cluster.adapter_id
)
@utils.output_validates(
os_config=os_config_validates,
@ -896,15 +910,15 @@ def _update_clusterhost_config(session, updater, clusterhost, **kwargs):
ignore_keys.append('put_os_config')
def os_config_validates(os_config):
from compass.db.api import host as host_api
host = clusterhost.host
metadata_api.validate_os_config(os_config, host.os_id)
metadata_api.validate_os_config(
session, os_config, host.os_id)
def package_config_validates(package_config):
cluster = clusterhost.cluster
is_cluster_editable(session, cluster, updater)
metadata_api.validate_package_config(
package_config, cluster.adapter_id
session, package_config, cluster.adapter_id
)
@utils.supported_filters(
@ -1052,13 +1066,13 @@ def _patch_clusterhost_config(session, updater, clusterhost, **kwargs):
def os_config_validates(os_config):
host = clusterhost.host
metadata_api.validate_os_config(os_config, host.os_id)
metadata_api.validate_os_config(session, os_config, host.os_id)
def package_config_validates(package_config):
cluster = clusterhost.cluster
is_cluster_editable(session, cluster, updater)
metadata_api.validate_package_config(
package_config, cluster.adapter_id
session, package_config, cluster.adapter_id
)
@utils.supported_filters(
@ -1240,10 +1254,16 @@ def validate_cluster(session, cluster):
role.name for role in cluster_roles if not role.optional
])
clusterhost_roles = set([])
interface_subnets = {}
for clusterhost in cluster.clusterhosts:
roles = clusterhost.roles
for role in roles:
clusterhost_roles.add(role.name)
host = clusterhost.host
for host_network in host.host_networks:
interface_subnets.setdefault(
host_network.interface, set([])
).add(host_network.subnet.subnet)
missing_roles = necessary_roles - clusterhost_roles
if missing_roles:
raise exception.InvalidParameter(
@ -1251,6 +1271,13 @@ def validate_cluster(session, cluster):
list(missing_roles), cluster.name
)
)
for interface, subnets in interface_subnets.items():
if len(subnets) > 1:
raise exception.InvalidParameter(
'multi subnets %s in interface %s' % (
list(subnets), interface
)
)
@utils.supported_filters(optional_support_keys=['review'])
@ -1279,10 +1306,14 @@ def review_cluster(session, reviewer, cluster_id, review={}, **kwargs):
clusterhost.host_id in host_ids
):
clusterhosts.append(clusterhost)
os_config = cluster.os_config
os_config = copy.deepcopy(cluster.os_config)
os_config = metadata_api.autofill_os_config(
session, os_config, cluster.os_id,
cluster=cluster
)
if os_config:
metadata_api.validate_os_config(
os_config, cluster.os_id, True
session, os_config, cluster.os_id, True
)
for clusterhost in clusterhosts:
host = clusterhost.host
@ -1294,33 +1325,56 @@ def review_cluster(session, reviewer, cluster_id, review={}, **kwargs):
'since it is not editable' % host.name
)
continue
host_os_config = host.os_config
host_os_config = copy.deepcopy(host.os_config)
host_os_config = metadata_api.autofill_os_config(
session, host_os_config, host.os_id,
host=host
)
deployed_os_config = util.merge_dict(
os_config, host_os_config
)
metadata_api.validate_os_config(
deployed_os_config, host.os_id, True
session, deployed_os_config, host.os_id, True
)
host_api.validate_host(session, host)
utils.update_db_object(session, host, config_validated=True)
package_config = cluster.package_config
utils.update_db_object(
session, host, os_config=host_os_config, config_validated=True
)
package_config = copy.deepcopy(cluster.package_config)
package_config = metadata_api.autofill_package_config(
session, package_config, cluster.adapter_id,
cluster=cluster
)
if package_config:
metadata_api.validate_package_config(
package_config, cluster.adapter_id, True
session, package_config, cluster.adapter_id, True
)
for clusterhost in clusterhosts:
clusterhost_package_config = clusterhost.package_config
clusterhost_package_config = copy.deepcopy(
clusterhost.package_config
)
clusterhost_package_config = metadata_api.autofill_package_config(
session, clusterhost_package_config,
cluster.adapter_id, clusterhost=clusterhost
)
deployed_package_config = util.merge_dict(
package_config, clusterhost_package_config
)
metadata_api.validate_package_config(
deployed_package_config,
session, deployed_package_config,
cluster.adapter_id, True
)
validate_clusterhost(session, clusterhost)
utils.update_db_object(session, clusterhost, config_validated=True)
utils.update_db_object(
session, clusterhost,
package_config=clusterhost_package_config,
config_validated=True
)
validate_cluster(session, cluster)
utils.update_db_object(session, cluster, config_validated=True)
utils.update_db_object(
session, cluster, os_config=os_config, package_config=package_config,
config_validated=True
)
return {
'cluster': cluster,
'hosts': clusterhosts

View File

@ -93,6 +93,9 @@ def session():
.. note::
To operate database, it should be called in database session.
"""
if not ENGINE:
init()
if hasattr(SESSION_HOLDER, 'session'):
logging.error('we are already in session')
raise exception.DatabaseException('session already exist')
@ -174,12 +177,7 @@ def _setup_switch_table(switch_session):
from compass.db.api import switch
switch.add_switch_internal(
switch_session, long(netaddr.IPAddress(setting.DEFAULT_SWITCH_IP)),
True, filters=[{
'filter_name': 'deny-all',
'filter_type': 'deny',
'port_prefix': '.*',
'port_suffix': '.*'
}]
True, filters=['deny ports all']
)
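The session() change above makes the database engine itself lazily initialized: entering a session falls back to init() when no engine has been configured yet. A rough, self-contained sketch of that guard using plain SQLAlchemy (the real module is compass.db.api.database; the URL, factory names and error handling here are illustrative):

from contextlib import contextmanager

import sqlalchemy
from sqlalchemy.orm import sessionmaker

ENGINE = None
SESSION_MAKER = None


def init(database_url='sqlite://'):
    # Create the engine and session factory once.
    global ENGINE, SESSION_MAKER
    ENGINE = sqlalchemy.create_engine(database_url)
    SESSION_MAKER = sessionmaker(bind=ENGINE)


@contextmanager
def session():
    # Lazy initialization: fall back to a default init() if the caller
    # never configured the engine explicitly.
    if not ENGINE:
        init()
    db_session = SESSION_MAKER()
    try:
        yield db_session
        db_session.commit()
    except Exception:
        db_session.rollback()
        raise
    finally:
        db_session.close()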

View File

@ -397,8 +397,10 @@ def update_host_config(session, updater, host_id, **kwargs):
session, models.Host, id=host_id
)
os_config_validates = functools.partial(
metadata_api.validate_os_config, os_id=host.os_id)
def os_config_validates(config):
metadata_api.validate_os_config(
session, config, os_id=host.os_id
)
@utils.input_validates(
put_os_config=os_config_validates,
@ -426,8 +428,10 @@ def patch_host_config(session, updater, host_id, **kwargs):
session, models.Host, id=host_id
)
os_config_validates = functools.partial(
metadata_api.validate_os_config, os_id=host.os_id)
def os_config_validates(config):
metadata_api.validate_os_config(
session, config, os_id=host.os_id
)
@utils.output_validates(
os_config=os_config_validates,

View File

@ -25,23 +25,29 @@ from compass.utils import setting_wrapper as setting
from compass.utils import util
def _add_installers(session, model, configs):
def _add_installers(session, model, configs, exception_when_existing=True):
installers = []
for config in configs:
installers.append(utils.add_db_object(
session, model,
True, config['INSTANCE_NAME'],
exception_when_existing, config['INSTANCE_NAME'],
name=config['NAME'],
settings=config.get('SETTINGS', {})
))
return installers
def add_os_installers_internal(session):
def add_os_installers_internal(session, exception_when_existing=True):
configs = util.load_configs(setting.OS_INSTALLER_DIR)
return _add_installers(session, models.OSInstaller, configs)
return _add_installers(
session, models.OSInstaller, configs,
exception_when_existing=exception_when_existing
)
def add_package_installers_internal(session):
def add_package_installers_internal(session, exception_when_existing=True):
configs = util.load_configs(setting.PACKAGE_INSTALLER_DIR)
return _add_installers(session, models.PackageInstaller, configs)
return _add_installers(
session, models.PackageInstaller, configs,
exception_when_existing=exception_when_existing
)

View File

@ -13,13 +13,17 @@
# limitations under the License.
"""Metadata related database operations."""
import copy
import logging
import string
from compass.db.api import database
from compass.db.api import utils
from compass.db import callback as metadata_callback
from compass.db import exception
from compass.db import models
from compass.db import validator
from compass.db import validator as metadata_validator
from compass.utils import setting_wrapper as setting
from compass.utils import util
@ -28,6 +32,10 @@ from compass.utils import util
def _add_field_internal(session, model, configs):
fields = []
for config in configs:
if not isinstance(config, dict):
raise exception.InvalidParameter(
'config %s is not dict' % config
)
fields.append(utils.add_db_object(
session, model, False,
config['NAME'],
@ -41,9 +49,12 @@ def _add_field_internal(session, model, configs):
def add_os_field_internal(session):
env_locals = {}
env_locals.update(metadata_validator.VALIDATOR_LOCALS)
env_locals.update(metadata_callback.CALLBACK_LOCALS)
configs = util.load_configs(
setting.OS_FIELD_DIR,
env_locals=validator.VALIDATOR_LOCALS
env_locals=env_locals
)
return _add_field_internal(
session, models.OSConfigField, configs
@ -51,9 +62,12 @@ def add_os_field_internal(session):
def add_package_field_internal(session):
env_locals = {}
env_locals.update(metadata_validator.VALIDATOR_LOCALS)
env_locals.update(metadata_callback.CALLBACK_LOCALS)
configs = util.load_configs(
setting.PACKAGE_FIELD_DIR,
env_locals=validator.VALIDATOR_LOCALS
env_locals=env_locals
)
return _add_field_internal(
session, models.PackageConfigField, configs
@ -61,9 +75,13 @@ def add_package_field_internal(session):
def _add_metadata(
session, field_model, metadata_model, path, name, config,
parent=None, **kwargs
session, field_model, metadata_model, id, path, name, config,
exception_when_existing=True, parent=None, **kwargs
):
if not isinstance(config, dict):
raise exception.InvalidParameter(
'%s config %s is not dict' % (path, config)
)
metadata_self = config.get('_self', {})
if 'field' in metadata_self:
field = utils.get_db_object(
@ -71,38 +89,96 @@ def _add_metadata(
)
else:
field = None
mapping_to_template = metadata_self.get('mapping_to', None)
if mapping_to_template:
mapping_to = string.Template(
mapping_to_template
).safe_substitute(
**kwargs
)
else:
mapping_to = None
metadata = utils.add_db_object(
session, metadata_model, False,
path, name=name, parent=parent, field=field,
session, metadata_model, exception_when_existing,
id, path, name=name, parent=parent, field=field,
display_name=metadata_self.get('display_name', name),
description=metadata_self.get('description', None),
is_required=metadata_self.get('is_required', False),
required_in_whole_config=metadata_self.get(
'required_in_whole_config', False
),
mapping_to=metadata_self.get('mapping_to', None),
'required_in_whole_config', False),
mapping_to=mapping_to,
validator=metadata_self.get('validator', None),
js_validator=metadata_self.get('js_validator', None),
default_value=metadata_self.get('default_value', None),
options=metadata_self.get('options', []),
required_in_options=metadata_self.get('required_in_options', False),
default_callback=metadata_self.get('default_callback', None),
default_callback_params=metadata_self.get(
'default_callback_params', {}),
options=metadata_self.get('options', None),
options_callback=metadata_self.get('options_callback', None),
options_callback_params=metadata_self.get(
'options_callback_params', {}),
autofill_callback=metadata_self.get(
'autofill_callback', None),
autofill_callback_params=metadata_self.get(
'autofill_callback_params', {}),
required_in_options=metadata_self.get(
'required_in_options', False),
**kwargs
)
key_extensions = metadata_self.get('key_extensions', {})
general_keys = []
for key, value in config.items():
if key not in '_self':
if key.startswith('_'):
continue
if key in key_extensions:
if not key.startswith('$'):
raise exception.InvalidParameter(
'%s subkey %s should start with $' % (
path, key
)
)
extended_keys = key_extensions[key]
for extended_key in extended_keys:
if extended_key.startswith('$'):
raise exception.InvalidParameter(
'%s extended key %s should not start with $' % (
path, extended_key
)
)
sub_kwargs = dict(kwargs)
sub_kwargs[key[1:]] = extended_key
_add_metadata(
session, field_model, metadata_model,
'%s/%s' % (path, key), key, value,
id, '%s/%s' % (path, extended_key), extended_key, value,
exception_when_existing=exception_when_existing,
parent=metadata, **sub_kwargs
)
else:
if key.startswith('$'):
general_keys.append(key)
_add_metadata(
session, field_model, metadata_model,
id, '%s/%s' % (path, key), key, value,
exception_when_existing=exception_when_existing,
parent=metadata, **kwargs
)
if len(general_keys) > 1:
raise exception.InvalidParameter(
'found multiple general keys in %s: %s' % (
path, general_keys
)
)
return metadata
def add_os_metadata_internal(session):
def add_os_metadata_internal(session, exception_when_existing=True):
os_metadatas = []
env_locals = {}
env_locals.update(metadata_validator.VALIDATOR_LOCALS)
env_locals.update(metadata_callback.CALLBACK_LOCALS)
configs = util.load_configs(
setting.OS_METADATA_DIR,
env_locals=validator.VALIDATOR_LOCALS
env_locals=env_locals
)
for config in configs:
os = utils.get_db_object(
@ -112,17 +188,21 @@ def add_os_metadata_internal(session):
os_metadatas.append(_add_metadata(
session, models.OSConfigField,
models.OSConfigMetadata,
key, key, value, parent=None,
os=os
os.id, key, key, value,
exception_when_existing=exception_when_existing,
parent=None
))
return os_metadatas
def add_package_metadata_internal(session):
def add_package_metadata_internal(session, exception_when_existing=True):
package_metadatas = []
env_locals = {}
env_locals.update(metadata_validator.VALIDATOR_LOCALS)
env_locals.update(metadata_callback.CALLBACK_LOCALS)
configs = util.load_configs(
setting.PACKAGE_METADATA_DIR,
env_locals=validator.VALIDATOR_LOCALS
env_locals=env_locals
)
for config in configs:
adapter = utils.get_db_object(
@ -132,8 +212,9 @@ def add_package_metadata_internal(session):
package_metadatas.append(_add_metadata(
session, models.PackageConfigField,
models.PackageConfigMetadata,
key, key, value, parent=None,
adapter=adapter
adapter.id, key, key, value,
exception_when_existing=exception_when_existing,
parent=None
))
return package_metadatas
@ -173,9 +254,15 @@ def get_os_metadatas_internal(session):
def _validate_self(
config_path, config_key, config, metadata, whole_check
config_path, config_key, config,
metadata, whole_check,
**kwargs
):
if '_self' not in metadata:
if isinstance(config, dict):
_validate_config(
config_path, config, metadata, whole_check, **kwargs
)
return
field_type = metadata['_self'].get('field_type', 'basestring')
if not isinstance(config, field_type):
@ -185,34 +272,39 @@ def _validate_self(
required_in_options = metadata['_self'].get(
'required_in_options', False
)
options = metadata['_self'].get('options', [])
options = metadata['_self'].get('options', None)
if required_in_options:
if field_type in [int, basestring, float, bool]:
if config not in options:
if options and config not in options:
raise exception.InvalidParameter(
'%s config is not in %s' % (config_path, options)
)
elif field_type in [list, tuple]:
if not set(config).issubset(set(options)):
if options and not set(config).issubset(set(options)):
raise exception.InvalidParameter(
'%s config is not in %s' % (config_path, options)
)
elif field_type == dict:
if not set(config.keys()).issubset(set(options)):
if options and not set(config.keys()).issubset(set(options)):
raise exception.InvalidParameter(
'%s config is not in %s' % (config_path, options)
)
validator = metadata['_self'].get('validator', None)
if validator:
if not validator(config_key, config):
if not validator(config_key, config, **kwargs):
raise exception.InvalidParameter(
'%s config is invalid' % config_path
)
if issubclass(field_type, dict):
_validate_config(config_path, config, metadata, whole_check)
if isinstance(config, dict):
_validate_config(
config_path, config, metadata, whole_check, **kwargs
)
def _validate_config(config_path, config, metadata, whole_check):
def _validate_config(
config_path, config, metadata, whole_check,
**kwargs
):
generals = {}
specified = {}
for key, value in metadata.items():
@ -250,15 +342,118 @@ def _validate_config(config_path, config, metadata, whole_check):
for key in intersect_keys:
_validate_self(
'%s/%s' % (config_path, key),
key, config[key], specified[key], whole_check
key, config[key], specified[key], whole_check,
**kwargs
)
for key in not_found_keys:
if not generals:
raise exception.InvalidParameter(
'key %s missing in metadata %s' % (
key, config_path
)
)
for general_key, general_value in generals.items():
_validate_self(
'%s/%s' % (config_path, key),
key, config[key], general_value, whole_check
key, config[key], general_value, whole_check,
**kwargs
)
def validate_config_internal(config, metadata, whole_check):
_validate_config('', config, metadata, whole_check)
def _autofill_self_config(
config_path, config_key, config,
metadata,
**kwargs
):
if '_self' not in metadata:
if isinstance(config, dict):
_autofill_config(
config_path, config, metadata, **kwargs
)
return config
autofill_callback = metadata['_self'].get(
'autofill_callback', None
)
autofill_callback_params = metadata['_self'].get(
'autofill_callback_params', {}
)
callback_params = dict(kwargs)
if autofill_callback_params:
callback_params.update(autofill_callback_params)
default_value = metadata['_self'].get(
'default_value', None
)
if default_value is not None:
callback_params['default_value'] = default_value
options = metadata['_self'].get(
'options', None
)
if options is not None:
callback_params['options'] = options
if autofill_callback:
config = autofill_callback(
config_key, config, **callback_params
)
if config is None:
new_config = {}
else:
new_config = config
if isinstance(new_config, dict):
_autofill_config(
config_path, new_config, metadata, **kwargs
)
if new_config:
config = new_config
return config
def _autofill_config(
config_path, config, metadata, **kwargs
):
generals = {}
specified = {}
for key, value in metadata.items():
if key.startswith('$'):
generals[key] = value
elif key.startswith('_'):
pass
else:
specified[key] = value
config_keys = set(config.keys())
specified_keys = set(specified.keys())
intersect_keys = config_keys & specified_keys
not_found_keys = config_keys - specified_keys
redundant_keys = specified_keys - config_keys
for key in redundant_keys:
self_config = _autofill_self_config(
'%s/%s' % (config_path, key),
key, None, specified[key], **kwargs
)
if self_config is not None:
config[key] = self_config
for key in intersect_keys:
config[key] = _autofill_self_config(
'%s/%s' % (config_path, key),
key, config[key], specified[key],
**kwargs
)
for key in not_found_keys:
for general_key, general_value in generals.items():
config[key] = _autofill_self_config(
'%s/%s' % (config_path, key),
key, config[key], general_value,
**kwargs
)
return config
def validate_config_internal(
config, metadata, whole_check, **kwargs
):
_validate_config('', config, metadata, whole_check, **kwargs)
def autofill_config_internal(
config, metadata, **kwargs
):
return _autofill_config('', config, metadata, **kwargs)
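The autofill helpers added above walk the metadata tree alongside the config and fill in missing values from each node's '_self' section (default_value, autofill_callback, options). A simplified, self-contained illustration of that idea (it mirrors the intent of _autofill_config, not its exact signature; the metadata sample is made up):

def autofill(config, metadata):
    # Fill missing config keys from the metadata defaults, recursing into
    # nested dict sections.
    for key, sub_metadata in metadata.items():
        if key.startswith('_'):
            continue
        self_meta = sub_metadata.get('_self', {})
        if key not in config or config[key] is None:
            callback = self_meta.get('autofill_callback')
            if callback:
                config[key] = callback(key, config.get(key))
            elif 'default_value' in self_meta:
                config[key] = self_meta['default_value']
        if isinstance(config.get(key), dict):
            autofill(config[key], sub_metadata)
    return config


sample_metadata = {
    'server_credentials': {
        '_self': {},
        'username': {'_self': {'default_value': 'root'}},
        'password': {'_self': {'default_value': 'root'}},
    }
}
print(autofill({'server_credentials': {'username': 'admin'}}, sample_metadata))
# {'server_credentials': {'username': 'admin', 'password': 'root'}}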

View File

@ -30,10 +30,19 @@ RESP_METADATA_FIELDS = [
@database.run_in_session()
def load_metadatas(session):
load_os_metadatas_internal(session)
load_package_metadatas_internal(session)
def load_os_metadatas_internal(session):
global OS_METADATA_MAPPING
global PACKAGE_METADATA_MAPPING
logging.info('load metadatas into memory')
logging.info('load os metadatas into memory')
OS_METADATA_MAPPING = metadata_api.get_os_metadatas_internal(session)
def load_package_metadatas_internal(session):
global PACKAGE_METADATA_MAPPING
logging.info('load package metadatas into memory')
PACKAGE_METADATA_MAPPING = (
metadata_api.get_package_metadatas_internal(session)
)
@ -44,48 +53,80 @@ PACKAGE_METADATA_MAPPING = {}
def _validate_config(
config, id, metadata_mapping, whole_check
config, id, id_name, metadata_mapping, whole_check, **kwargs
):
if id not in metadata_mapping:
raise exception.InvalidParameter(
'adapter id %s is not found in metadata mapping' % id
'%s id %s is not found in metadata mapping' % (id_name, id)
)
metadatas = metadata_mapping[id]
metadata_api.validate_config_internal(
config, metadatas, whole_check
config, metadatas, whole_check, **kwargs
)
def validate_os_config(config, os_id, whole_check=False):
def validate_os_config(
session, config, os_id, whole_check=False, **kwargs
):
if not OS_METADATA_MAPPING:
load_os_metadatas_internal(session)
_validate_config(
config, os_id, OS_METADATA_MAPPING,
whole_check
config, os_id, 'os', OS_METADATA_MAPPING,
whole_check, session=session, **kwargs
)
def validate_package_config(config, adapter_id, whole_check=False):
def validate_package_config(
session, config, adapter_id, whole_check=False, **kwargs
):
if not PACKAGE_METADATA_MAPPING:
load_package_metadatas_internal(session)
_validate_config(
config, adapter_id, PACKAGE_METADATA_MAPPING,
whole_check
config, adapter_id, 'adapter', PACKAGE_METADATA_MAPPING,
whole_check, session=session, **kwargs
)
def _filter_metadata(metadata):
def _filter_metadata(metadata, **kwargs):
if not isinstance(metadata, dict):
return metadata
filtered_metadata = {}
for key, value in metadata.items():
if key == '_self':
filtered_metadata['_self'] = {}
default_value = value.get('default_value', None)
if default_value is None:
default_callback_params = value.get(
'default_callback_params', {}
)
callback_params = dict(kwargs)
if default_callback_params:
callback_params.update(default_callback_params)
default_callback = value.get('default_callback', None)
if default_callback:
default_value = default_callback(key, **callback_params)
options = value.get('options', None)
if options is None:
options_callback_params = value.get(
'options_callback_params', {}
)
callback_params = dict(kwargs)
if options_callback_params:
callback_params.update(options_callback_params)
options_callback = value.get('options_callback', None)
if options_callback:
options = options_callback(key, **callback_params)
filtered_metadata[key] = {
'name': value['name'],
'description': value.get('description', None),
'default_value': value.get('default_value', None),
'default_value': default_value,
'is_required': value.get(
'is_required', False),
'required_in_whole_config': value.get(
'required_in_whole_config', False),
'js_validator': value.get('js_validator', None),
'options': value.get('options', []),
'options': options,
'required_in_options': value.get(
'required_in_options', False),
'field_type': value.get(
@ -98,13 +139,17 @@ def _filter_metadata(metadata):
return filtered_metadata
def get_package_metadata_internal(adapter_id):
def get_package_metadata_internal(session, adapter_id):
"""get package metadata internal."""
if not PACKAGE_METADATA_MAPPING:
load_package_metadatas_internal(session)
if adapter_id not in PACKAGE_METADATA_MAPPING:
raise exception.RecordNotExists(
'adapter %s does not exist' % adapter_id
)
return _filter_metadata(PACKAGE_METADATA_MAPPING[adapter_id])
return _filter_metadata(
PACKAGE_METADATA_MAPPING[adapter_id], session=session
)
@utils.supported_filters([])
@ -114,16 +159,22 @@ def get_package_metadata_internal(adapter_id):
)
@utils.wrap_to_dict(RESP_METADATA_FIELDS)
def get_package_metadata(session, getter, adapter_id, **kwargs):
return {'package_config': get_package_metadata_internal(adapter_id)}
return {
'package_config': get_package_metadata_internal(session, adapter_id)
}
def get_os_metadata_internal(os_id):
def get_os_metadata_internal(session, os_id):
"""get os metadata internal."""
if not OS_METADATA_MAPPING:
load_os_metadatas_internal(session)
if os_id not in OS_METADATA_MAPPING:
raise exception.RecordNotExists(
'os %s does not exist' % os_id
)
return _filter_metadata(OS_METADATA_MAPPING[os_id])
return _filter_metadata(
OS_METADATA_MAPPING[os_id], session=session
)
@utils.supported_filters([])
@ -134,7 +185,7 @@ def get_os_metadata_internal(os_id):
@utils.wrap_to_dict(RESP_METADATA_FIELDS)
def get_os_metadata(session, getter, os_id, **kwargs):
"""get os metadatas."""
return {'os_config': get_os_metadata_internal(os_id)}
return {'os_config': get_os_metadata_internal(session, os_id)}
@utils.supported_filters([])
@ -145,7 +196,7 @@ def get_os_metadata(session, getter, os_id, **kwargs):
@utils.wrap_to_dict(RESP_METADATA_FIELDS)
def get_package_os_metadata(session, getter, adapter_id, os_id, **kwargs):
from compass.db.api import adapter_holder as adapter_api
adapter = adapter_api.get_adapter_internal(adapter_id)
adapter = adapter_api.get_adapter_internal(session, adapter_id)
os_ids = [os['os_id'] for os in adapter['supported_oses']]
if os_id not in os_ids:
raise exception.InvalidParameter(
@ -155,9 +206,47 @@ def get_package_os_metadata(session, getter, adapter_id, os_id, **kwargs):
)
metadatas = {}
metadatas['os_config'] = get_os_metadata_internal(
os_id
session, os_id
)
metadatas['package_config'] = get_package_metadata_internal(
adapter_id
session, adapter_id
)
return metadatas
def _autofill_config(
config, id, id_name, metadata_mapping, **kwargs
):
if id not in metadata_mapping:
raise exception.InvalidParameter(
'%s id %s is not found in metadata mapping' % (id_name, id)
)
metadatas = metadata_mapping[id]
logging.debug(
'auto fill %s config %s by metadata %s',
id_name, config, metadatas
)
return metadata_api.autofill_config_internal(
config, metadatas, **kwargs
)
def autofill_os_config(
session, config, os_id, **kwargs
):
if not OS_METADATA_MAPPING:
load_os_metadatas_internal(session)
return _autofill_config(
config, os_id, 'os', OS_METADATA_MAPPING, session=session, **kwargs
)
def autofill_package_config(
session, config, adapter_id, **kwargs
):
if not PACKAGE_METADATA_MAPPING:
load_package_metadatas_internal(session)
return _autofill_config(
config, adapter_id, 'adapter', PACKAGE_METADATA_MAPPING,
session=session, **kwargs
)

View File

@ -434,11 +434,15 @@ def output_validates(**kwargs_validators):
def get_db_object(session, table, exception_when_missing=True, **kwargs):
"""Get db object."""
with session.begin(subtransactions=True):
logging.debug('get db object %s from table %s',
kwargs, table.__name__)
logging.debug(
'session %s get db object %s from table %s',
session, kwargs, table.__name__)
db_object = model_filter(
model_query(session, table), table, **kwargs
).first()
logging.debug(
'session %s got db object %s', session, db_object
)
if db_object:
return db_object
@ -456,8 +460,9 @@ def add_db_object(session, table, exception_when_existing=True,
*args, **kwargs):
"""Create db object."""
with session.begin(subtransactions=True):
logging.debug('add object %s attributes %s to table %s',
args, kwargs, table.__name__)
logging.debug(
'session %s add object %s attributes %s to table %s',
session, args, kwargs, table.__name__)
argspec = inspect.getargspec(table.__init__)
arg_names = argspec.args[1:]
arg_defaults = argspec.defaults
@ -494,66 +499,97 @@ def add_db_object(session, table, exception_when_existing=True,
session.flush()
db_object.initialize()
db_object.validate()
logging.debug(
'session %s db object %s added', session, db_object
)
return db_object
def list_db_objects(session, table, **filters):
"""List db objects."""
with session.begin(subtransactions=True):
logging.debug('list db objects by filters %s in table %s',
filters, table.__name__)
return model_filter(
logging.debug(
'session %s list db objects by filters %s in table %s',
session, filters, table.__name__
)
db_objects = model_filter(
model_query(session, table), table, **filters
).all()
logging.debug(
'session %s got listed db objects: %s',
session, db_objects
)
return db_objects
def del_db_objects(session, table, **filters):
"""delete db objects."""
with session.begin(subtransactions=True):
logging.debug('delete db objects by filters %s in table %s',
filters, table.__name__)
logging.debug(
'session %s delete db objects by filters %s in table %s',
session, filters, table.__name__
)
query = model_filter(
model_query(session, table), table, **filters
)
db_objects = query.all()
query.delete(synchronize_session=False)
logging.debug(
'session %s db objects %s deleted', session, db_objects
)
return db_objects
def update_db_objects(session, table, **filters):
"""Update db objects."""
with session.begin(subtransactions=True):
logging.debug('update db objects by filters %s in table %s',
filters, table.__name__)
query = model_filter(
logging.debug(
'session %s update db objects by filters %s in table %s',
session, filters, table.__name__)
db_objects = model_filter(
model_query(session, table), table, **filters
)
db_objects = query.all()
).all()
for db_object in db_objects:
logging.debug('update db object %s', db_object)
session.flush()
db_object.update()
db_object.validate()
logging.debug(
'session %s db objects %s updated', session, db_objects
)
return db_objects
def update_db_object(session, db_object, **kwargs):
"""Update db object."""
with session.begin(subtransactions=True):
logging.debug('update db object %s by value %s',
db_object, kwargs)
logging.debug(
'session %s update db object %s by value %s',
session, db_object, kwargs
)
for key, value in kwargs.items():
setattr(db_object, key, value)
session.flush()
db_object.update()
db_object.validate()
logging.debug(
'session %s db object %s updated', session, db_object
)
return db_object
def del_db_object(session, db_object):
"""Delete db object."""
with session.begin(subtransactions=True):
logging.debug('delete db object %s', db_object)
logging.debug(
'session %s delete db object %s',
session, db_object
)
session.delete(db_object)
logging.debug(
'session %s db object %s deleted',
session, db_object
)
return db_object

compass/db/callback.py (+171 lines) Normal file
View File

@ -0,0 +1,171 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metadata Callback methods."""
import netaddr
import random
import re
import socket
from compass.db import exception
from compass.utils import setting_wrapper as setting
from compass.utils import util
CALLBACK_GLOBALS = globals()
CALLBACK_LOCALS = locals()
CALLBACK_CONFIGS = util.load_configs(
setting.CALLBACK_DIR,
config_name_suffix='.py',
env_globals=CALLBACK_GLOBALS,
env_locals=CALLBACK_LOCALS
)
for callback_config in CALLBACK_CONFIGS:
CALLBACK_LOCALS.update(callback_config)
def default_proxy(name, **kwargs):
return setting.COMPASS_SUPPORTED_PROXY
def proxy_options(name, **kwargs):
return [setting.COMPASS_SUPPORTED_PROXY]
def default_noproxy(name, **kwargs):
return setting.COMPASS_SUPPORTED_DEFAULT_NOPROXY
def noproxy_options(name, **kwargs):
return setting.COMPASS_SUPPORTED_DEFAULT_NOPROXY
def default_ntp_server(name, **kwargs):
return setting.COMPASS_SUPPORTED_NTP_SERVER
def ntp_server_options(name, **kwargs):
return setting.COMPASS_SUPPORTED_NTP_SERVER
def default_dns_servers(name, **kwargs):
return setting.COMPASS_SUPPORTED_DNS_SERVERS
def dns_servers_options(name, **kwargs):
return setting.COMPASS_SUPPORTED_DNS_SERVERS
def default_domain(name, **kwargs):
if setting.COMPASS_SUPPORTED_DOMAINS:
return setting.COMPASS_SUPPORTED_DOMAINS[0]
else:
return None
def domain_options(name, **kwargs):
return setting.COMPASS_SUPPORTED_DOMAINS
def default_search_path(name, **kwargs):
return setting.COMPASS_SUPPORTED_DOMAINS
def search_path_options(name, **kwargs):
return setting.COMPASS_SUPPORTED_DOMAINS
def default_gateway(name, **kwargs):
return setting.COMPASS_SUPPORTED_DEFAULT_GATEWAY
def default_gateway_options(name, **kwargs):
return [setting.COMPASS_SUPPORTED_DEFAULT_GATEWAY]
def default_localrepo(name, **kwargs):
return setting.COMPASS_SUPPORTED_LOCAL_REPO
def default_localrepo_options(name, **kwargs):
return [setting.COMPASS_SUPPORTED_LOCAL_REPO]
def autofill_callback_default(name, config, **kwargs):
if config is None:
if (
'autofill_types' not in kwargs or
not (set(kwargs['autofill_types']) & set(kwargs))
):
return None
if 'default_value' not in kwargs:
return None
return kwargs['default_value']
return config
def autofill_callback_random_option(name, config, **kwargs):
if config is None:
if (
'autofill_types' not in kwargs or
not (set(kwargs['autofill_types']) & set(kwargs))
):
return None
if 'options' not in kwargs or not kwargs['options']:
return None
return random.choice(kwargs['options'])
return config
def autofill_network_mapping(name, config, **kwargs):
if not config:
return config
if isinstance(config, basestring):
config = {
'interface': config,
'subnet': None
}
if not isinstance(config, dict):
return config
if 'interface' not in config:
return config
subnet = None
interface = config['interface']
if 'cluster' in kwargs:
cluster = kwargs['cluster']
for clusterhost in cluster.clusterhosts:
host = clusterhost.host
for host_network in host.host_networks:
if host_network.interface == interface:
subnet = host_network.subnet.subnet
elif 'clusterhost' in kwargs:
clusterhost = kwargs['clusterhost']
host = clusterhost.host
for host_network in host.host_networks:
if host_network.interface == interface:
subnet = host_network.subnet.subnet
if not subnet:
raise exception.InvalidParameter(
'interface %s not found in host(s)' % interface
)
if 'subnet' not in config or not config['subnet']:
config['subnet'] = subnet
else:
if config['subnet'] != subnet:
raise exception.InvalidParameter(
'subnet %s in config is not equal to subnet %s in hosts' % (
config['subnet'], subnet
)
)
return config
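As a usage example of autofill_network_mapping above (assuming it is importable from the new compass/db/callback.py), the ORM objects can be replaced by minimal stand-in classes to show how a bare interface name is expanded into an interface/subnet mapping taken from the host's networks; the subnet value is illustrative:

class _Subnet(object):
    def __init__(self, subnet):
        self.subnet = subnet


class _HostNetwork(object):
    def __init__(self, interface, subnet):
        self.interface = interface
        self.subnet = _Subnet(subnet)


class _Host(object):
    def __init__(self, host_networks):
        self.host_networks = host_networks


class _ClusterHost(object):
    def __init__(self, host):
        self.host = host


clusterhost = _ClusterHost(_Host([_HostNetwork('eth0', '10.1.0.0/24')]))
print(autofill_network_mapping('management', 'eth0', clusterhost=clusterhost))
# {'interface': 'eth0', 'subnet': '10.1.0.0/24'}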

View File

@ -37,8 +37,9 @@ from sqlalchemy import Text
from sqlalchemy.types import TypeDecorator
from sqlalchemy import UniqueConstraint
from compass.db import callback as metadata_callback
from compass.db import exception
from compass.db import validator
from compass.db import validator as metadata_validator
from compass.utils import util
@ -126,10 +127,22 @@ class MetadataMixin(HelperMixin):
is_required = Column(Boolean, default=False)
required_in_whole_config = Column(Boolean, default=False)
mapping_to = Column(String(80), default='')
validator_data = Column('validator', Text)
_validator = Column('validator', Text)
js_validator = Column(Text)
default_value = Column(JSONEncoded)
options = Column(JSONEncoded, default=[])
_default_callback = Column('default_callback', Text)
default_callback_params = Column(
'default_callback_params', JSONEncoded, default={}
)
options = Column(JSONEncoded)
_options_callback = Column('options_callback', Text)
options_callback_params = Column(
'options_callback_params', JSONEncoded, default={}
)
_autofill_callback = Column('autofill_callback', Text)
autofill_callback_params = Column(
'autofill_callback_params', JSONEncoded, default={}
)
required_in_options = Column(Boolean, default=False)
def initialize(self):
@ -138,36 +151,125 @@ class MetadataMixin(HelperMixin):
self.display_name = self.name
super(MetadataMixin, self).initialize()
@property
def validator(self):
def validate(self):
super(MetadataMixin, self).validate()
if not self.name:
raise exception.InvalidParameter(
'name is not set in os metadata %s' % self.id
)
if not self.validator_data:
@property
def validator(self):
if not self._validator:
return None
func = eval(
self.validator_data,
validator.VALIDATOR_GLOBALS,
validator.VALIDATOR_LOCALS
self._validator,
metadata_validator.VALIDATOR_GLOBALS,
metadata_validator.VALIDATOR_LOCALS
)
if not callable(func):
raise Exception(
'%s is not callable' % self.validator_data
'validator %s is not callable' % self._validator
)
return func
@validator.setter
def validator(self, value):
if not value:
self.validator_data = None
self._validator = None
elif isinstance(value, basestring):
self.validator_data = value
self._validator = value
elif callable(value):
self.validator_data = value.func_name
self._validator = value.func_name
else:
raise Exception(
'%s is not callable' % value
'validator %s is not callable' % value
)
@property
def default_callback(self):
if not self._default_callback:
return None
func = eval(
self._default_callback,
metadata_callback.CALLBACK_GLOBALS,
metadata_callback.CALLBACK_LOCALS
)
if not callable(func):
raise Exception(
'default callback %s is not callable' % self._default_callback
)
return func
@default_callback.setter
def default_callback(self, value):
if not value:
self._default_callback = None
elif isinstance(value, basestring):
self._default_callback = value
elif callable(value):
self._default_callback = value.func_name
else:
raise Exception(
'default callback %s is not callable' % value
)
@property
def options_callback(self):
if not self._options_callback:
return None
func = eval(
self._options_callback,
metadata_callback.CALLBACK_GLOBALS,
metadata_callback.CALLBACK_LOCALS
)
if not callable(func):
raise Exception(
'options callback %s is not callable' % self._options_callback
)
return func
@options_callback.setter
def options_callback(self, value):
if not value:
self._options_callback = None
elif isinstance(value, basestring):
self._options_callback = value
elif callable(value):
self._options_callback = value.func_name
else:
raise Exception(
'options callback %s is not callable' % value
)
@property
def autofill_callback(self):
if not self._autofill_callback:
return None
func = eval(
self._autofill_callback,
metadata_callback.CALLBACK_GLOBALS,
metadata_callback.CALLBACK_LOCALS
)
if not callable(func):
raise Exception(
'autofill callback %s is not callable' % (
self._autofill_callback
)
)
return func
@autofill_callback.setter
def autofill_callback(self, value):
if not value:
self._autofill_callback = None
elif isinstance(value, basestring):
self._autofill_callback = value
elif callable(value):
self._autofill_callback = value.func_name
else:
raise Exception(
'autofill callback %s is not callable' % value
)
def to_dict(self):
@ -180,8 +282,16 @@ class MetadataMixin(HelperMixin):
self_dict_info.update(super(MetadataMixin, self).to_dict())
validator = self.validator
if validator:
self_dict_info['validator_data'] = self.validator_data
self_dict_info['validator'] = validator
default_callback = self.default_callback
if default_callback:
self_dict_info['default_callback'] = default_callback
options_callback = self.options_callback
if options_callback:
self_dict_info['options_callback'] = options_callback
autofill_callback = self.autofill_callback
if autofill_callback:
self_dict_info['autofill_callback'] = autofill_callback
js_validator = self.js_validator
if js_validator:
self_dict_info['js_validator'] = js_validator
@ -201,7 +311,10 @@ class FieldMixin(HelperMixin):
field = Column(String(80), unique=True)
field_type_data = Column(
'field_type',
Enum('basestring', 'int', 'float', 'list', 'bool'),
Enum(
'basestring', 'int', 'float', 'list', 'bool',
'dict', 'object'
),
ColumnDefault('basestring')
)
display_type = Column(
@ -212,7 +325,7 @@ class FieldMixin(HelperMixin):
),
ColumnDefault('text')
)
validator_data = Column('validator', Text)
_validator = Column('validator', Text)
js_validator = Column(Text)
description = Column(Text)
@ -242,27 +355,27 @@ class FieldMixin(HelperMixin):
@property
def validator(self):
if not self.validator_data:
if not self._validator:
return None
func = eval(
self.validator_data,
validator.VALIDATOR_GLOBALS,
validator.VALIDATOR_LOCALS
self._validator,
metadata_validator.VALIDATOR_GLOBALS,
metadata_validator.VALIDATOR_LOCALS
)
if not callable(func):
raise Exception(
'%s is not callable' % self.validator_data
'%s is not callable' % self._validator
)
return func
@validator.setter
def validator(self, value):
if not value:
self.validator_data = None
self._validator = None
elif isinstance(value, basestring):
self.validator_data = value
self._validator = value
elif callable(value):
self.validator_data = value.func_name
self._validator = value.func_name
else:
raise Exception(
'%s is not callable' % value
@ -561,9 +674,6 @@ class ClusterHost(BASE, TimestampMixin, HelperMixin):
@patched_package_config.setter
def patched_package_config(self, value):
package_config = util.merge_dict(dict(self.package_config), value)
if 'roles' in package_config:
self.patched_roles = package_config['roles']
del package_config['roles']
self.package_config = package_config
self.config_validated = False
@ -575,9 +685,6 @@ class ClusterHost(BASE, TimestampMixin, HelperMixin):
def put_package_config(self, value):
package_config = dict(self.package_config)
package_config.update(value)
if 'roles' in package_config:
self.roles = package_config['roles']
del package_config['roles']
self.package_config = package_config
self.config_validated = False
@ -922,10 +1029,15 @@ class Host(BASE, TimestampMixin, HelperMixin):
dict_info = self.machine.to_dict()
dict_info.update(super(Host, self).to_dict())
state_dict = self.state_dict()
ip = None
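# expose the management network address, if one is configured, as the host ip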
for host_network in self.host_networks:
if host_network.is_mgmt:
ip = host_network.ip
dict_info.update({
'machine_id': self.machine.id,
'os_installed': self.os_installed,
'hostname': self.name,
'ip': ip,
'networks': [
host_network.to_dict()
for host_network in self.host_networks
@ -1164,14 +1276,6 @@ class Cluster(BASE, TimestampMixin, HelperMixin):
else:
flavor_adapter_id = flavor.adapter_id
adapter_id = self.adapter_id
logging.info(
'flavor adapter type %s value %s',
type(flavor_adapter_id), flavor_adapter_id
)
logging.info(
'adapter type %s value %s',
type(adapter_id), adapter_id
)
if flavor_adapter_id != adapter_id:
raise exception.InvalidParameter(
'flavor adapter id %s does not match adapter id %s' % (
@ -1525,6 +1629,8 @@ class SwitchMachine(BASE, HelperMixin, TimestampMixin):
denied = filter_type != 'allow'
unmatched_allowed = denied
if 'ports' in port_filter:
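# a literal 'all' entry in the port list matches every port on the switch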
if 'all' in port_filter['ports']:
return denied
if port in port_filter['ports']:
return denied
if port_match:
@ -1870,7 +1976,8 @@ class OSConfigMetadata(BASE, MetadataMixin):
UniqueConstraint('path', 'os_id', name='constraint'),
)
def __init__(self, path, **kwargs):
def __init__(self, os_id, path, **kwargs):
self.os_id = os_id
self.path = path
super(OSConfigMetadata, self).__init__(**kwargs)
@ -1982,7 +2089,7 @@ class OperatingSystem(BASE, HelperMixin):
if self.parent:
dict_info.update(self.parent.metadata_dict())
for metadata in self.root_metadatas:
dict_info.update(metadata.to_dict())
util.merge_dict(dict_info, metadata.to_dict())
return dict_info
@property
@ -2206,8 +2313,9 @@ class PackageConfigMetadata(BASE, MetadataMixin):
)
def __init__(
self, path, **kwargs
self, adapter_id, path, **kwargs
):
self.adapter_id = adapter_id
self.path = path
super(PackageConfigMetadata, self).__init__(**kwargs)
@ -2342,7 +2450,7 @@ class Adapter(BASE, HelperMixin):
if self.parent:
dict_info.update(self.parent.metadata_dict())
for metadata in self.root_metadatas:
dict_info.update(metadata.to_dict())
util.merge_dict(dict_info, metadata.to_dict())
return dict_info
@property

View File

@ -21,7 +21,7 @@ from compass.utils import setting_wrapper as setting
from compass.utils import util
def is_valid_ip(name, ip_addr):
def is_valid_ip(name, ip_addr, **kwargs):
"""Valid the format of an IP address."""
try:
netaddr.IPAddress(ip_addr)
@ -30,7 +30,7 @@ def is_valid_ip(name, ip_addr):
return True
def is_valid_network(name, ip_network):
def is_valid_network(name, ip_network, **kwargs):
"""Valid the format of an Ip network."""
try:
netaddr.IPNetwork(ip_network)
@ -39,7 +39,7 @@ def is_valid_network(name, ip_network):
return False
def is_valid_netmask(name, ip_addr):
def is_valid_netmask(name, ip_addr, **kwargs):
"""Valid the format of a netmask."""
if not is_valid_ip(ip_addr):
return False
@ -50,7 +50,7 @@ def is_valid_netmask(name, ip_addr):
return False
def is_valid_gateway(name, ip_addr):
def is_valid_gateway(name, ip_addr, **kwargs):
"""Valid the format of gateway."""
if not is_valid_ip(ip_addr):
return False
@ -61,7 +61,7 @@ def is_valid_gateway(name, ip_addr):
return False
def is_valid_dns(name, dns):
def is_valid_dns(name, dns, **kwargs):
"""Valid the format of DNS."""
if is_valid_ip(dns):
return True
@ -72,17 +72,17 @@ def is_valid_dns(name, dns):
return True
def is_valid_username(name, username):
def is_valid_username(name, username, **kwargs):
"""Valid the format of username."""
return bool(username)
def is_valid_password(name, password):
def is_valid_password(name, password, **kwargs):
"""Valid the format of password."""
return bool(password)
def is_valid_partition(name, partition):
def is_valid_partition(name, partition, **kwargs):
"""Valid the format of partition name."""
if name != 'swap' and not name.startswith('/'):
return False
@ -91,17 +91,17 @@ def is_valid_partition(name, partition):
return True
def is_valid_percentage(name, percentage):
def is_valid_percentage(name, percentage, **kwargs):
"""Valid the percentage."""
return 0 <= percentage <= 100
def is_valid_port(name, port):
def is_valid_port(name, port, **kwargs):
"""Valid the format of port."""
return 0 < port < 65536
def is_valid_size(name, size):
def is_valid_size(name, size, **kwargs):
"""Validate the format of a size string such as 10G."""
if re.match(r'(\d+)(K|M|G|T)?', size):
return True
return False

View File

@ -0,0 +1,2 @@
NAME = 'anytype'
FIELD_TYPE = object

View File

@ -46,13 +46,26 @@ METADATA = {
},
'network_mapping': {
'_self': {
'required_in_whole_config': True
'required_in_whole_config': True,
},
'$interface_type': {
'_self': {
'is_required': True,
'field': 'anytype',
'autofill_callback': autofill_network_mapping,
},
'interface': {
'_self': {
'is_required': True,
'field': 'general',
}
},
'subnet': {
'_self': {
'is_required': False,
'field': 'general'
}
}
}
}
}

View File

@ -122,7 +122,10 @@ class ClusterTestCase(unittest2.TestCase):
}
},
'network_mapping': {
'$interface_type': 'eth0'
'$interface_type': {
'interface': 'eth0',
'subnet': '10.145.88.0/23'
}
}
}

View File

@ -53,6 +53,7 @@ class MetadataTestCase(unittest2.TestCase):
database.init('sqlite://')
database.create_db()
adapter.load_adapters()
metadata.load_metadatas()
# Get an os_id and adapter_id
self.user_object = (
@ -82,11 +83,14 @@ class MetadataTestCase(unittest2.TestCase):
class TestGetPackageMetadata(MetadataTestCase):
def setUp(self):
super(TestGetPackageMetadata, self).setUp()
mock_config = mock.Mock()
self.backup_package_configs = util.load_configs
util.load_configs = mock_config
configs = [{
self.backup_load_configs = util.load_configs
def mock_load_configs(config_dir, *args, **kwargs):
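# intercept only loads from the package metadata dir; everything else
# falls through to the real loader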
if config_dir != setting.PACKAGE_METADATA_DIR:
return self.backup_load_configs(
config_dir, *args, **kwargs
)
config = {
'ADAPTER': 'openstack',
'METADATA': {
'security': {
@ -121,14 +125,14 @@ class TestGetPackageMetadata(MetadataTestCase):
}
}
}
}]
util.load_configs.return_value = configs
with database.session() as session:
metadata_api.add_package_metadata_internal(session)
metadata.load_metadatas()
}
return [config]
util.load_configs = mock.Mock(side_effect=mock_load_configs)
super(TestGetPackageMetadata, self).setUp()
def tearDown(self):
util.load_configs = self.backup_package_configs
util.load_configs = self.backup_load_configs
super(TestGetPackageMetadata, self).tearDown()
def test_get_package_metadata(self):
@ -155,11 +159,14 @@ class TestGetPackageMetadata(MetadataTestCase):
class TestGetOsMetadata(MetadataTestCase):
def setUp(self):
super(TestGetOsMetadata, self).setUp()
mock_config = mock.Mock()
self.backup_os_configs = util.load_configs
util.load_configs = mock_config
configs = [{
self.backup_load_configs = util.load_configs
def mock_load_configs(config_dir, *args, **kwargs):
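# intercept only loads from the os metadata dir; everything else
# falls through to the real loader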
if config_dir != setting.OS_METADATA_DIR:
return self.backup_load_configs(
config_dir, *args, **kwargs
)
config = {
'OS': 'general',
'METADATA': {
'general': {
@ -196,15 +203,15 @@ class TestGetOsMetadata(MetadataTestCase):
}
}
}
}]
util.load_configs.return_value = configs
with database.session() as session:
metadata_api.add_os_metadata_internal(session)
metadata.load_metadatas()
}
return [config]
util.load_configs = mock.Mock(side_effect=mock_load_configs)
super(TestGetOsMetadata, self).setUp()
def tearDown(self):
util.load_configs = self.backup_load_configs
super(TestGetOsMetadata, self).tearDown()
util.load_configs = self.backup_os_configs
def test_get_os_metadata(self):
"""Test get os metadata."""

View File

@ -52,6 +52,9 @@ LOGLEVEL_MAPPING = {
'critical': logging.CRITICAL,
}
logging.addLevelName(LOGLEVEL_MAPPING['fine'], 'fine')
logging.addLevelName(LOGLEVEL_MAPPING['finest'], 'finest')
# disable logging when logsetting.init not called
logging.getLogger().setLevel(logging.CRITICAL)

View File

@ -59,6 +59,14 @@ SWITCHES_DEFAULT_FILTERS = []
DEFAULT_SWITCH_IP = '0.0.0.0'
DEFAULT_SWITCH_PORT = 0
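# fallback values, presumably read by the default_*/*_options metadata callbacks;
# the conf/setting hunk below templates in the real deployment values at install time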
COMPASS_SUPPORTED_PROXY = 'http://127.0.0.1:3128'
COMPASS_SUPPORTED_DEFAULT_NOPROXY = ['127.0.0.1']
COMPASS_SUPPORTED_NTP_SERVER = '127.0.0.1'
COMPASS_SUPPORTED_DNS_SERVERS = ['127.0.0.1']
COMPASS_SUPPORTED_DOMAINS = []
COMPASS_SUPPORTED_DEFAULT_GATEWAY = '127.0.0.1'
COMPASS_SUPPORTED_LOCAL_REPO = 'http://127.0.0.1'
# For test chef server. please replace these config info with your own.
TEST_CHEF_URL = "https://api.opscode.com/organizations/compasscheftest"
TEST_CLIENT_KEY_PATH = "/etc/compass/client.pem"
@ -102,6 +110,9 @@ ADAPTER_FLAVOR_DIR = lazypy.delay(
VALIDATOR_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'validator')
)
CALLBACK_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'callback')
)
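# callback modules referenced by the *_callback metadata fields are expected to live here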
TMPL_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'templates')
)

View File

@ -1,6 +1,7 @@
NAME = 'ceph_openstack_icehouse'
DISPLAY_NAME = 'Ceph + OpenStack Icehouse'
PARENT = 'openstack'
DISTRIBUTED_SYSTEM = 'openstack_ceph'
PACKAGE_INSTALLER = 'chef_installer'
OS_INSTALLER = 'cobbler'
SUPPORTED_OS_PATTERNS = ['(?i)centos.*', '(?i)ubuntu.*']

View File

@ -0,0 +1,3 @@
NAME = 'openstack_ceph'
PARENT = 'general'
DEPLOYABLE = True

View File

@ -30,36 +30,24 @@ METADATA = {
'http_proxy': {
'_self': {
'field': 'general',
'default_value': 'http://10.145.89.126:3128',
'options': [
'http://10.145.89.126:3128'
],
'default_callback': default_proxy,
'options_callback': proxy_options,
'mapping_to': 'http_proxy'
}
},
'https_proxy': {
'_self': {
'field': 'general',
'default_value': 'http://10.145.89.126:3128',
'options': [
'http://10.145.89.126:3128'
],
'default_callback': default_proxy,
'options_callback': proxy_options,
'mapping_to': 'https_proxy'
}
},
'no_proxy': {
'_self': {
'field': 'general_list',
'default_value': [
'127.0.0.1',
'xicheng-126',
'10.145.89.126'
],
'options': [
'127.0.0.1',
'xicheng-126',
'10.145.89.126'
],
'default_callback': default_noproxy,
'options_callback': noproxy_options,
'mapping_to': 'no_proxy'
}
},
@ -67,10 +55,8 @@ METADATA = {
'_self': {
'is_required': True,
'field': 'general',
'default_value': '10.145.89.126',
'options': [
'10.145.89.126'
],
'default_callback': default_ntp_server,
'options_callback': ntp_server_options,
'mapping_to': 'ntp_server'
}
},
@ -78,12 +64,8 @@ METADATA = {
'_self': {
'is_required': True,
'field': 'general_list',
'default_value': [
'10.145.89.126',
],
'options': [
'10.145.89.126'
],
'default_callback': default_dns_servers,
'options_callback': dns_servers_options,
'mapping_to': 'nameservers'
}
},
@ -91,17 +73,15 @@ METADATA = {
'_self': {
'field': 'general',
'is_required': True,
'default_value': ['ods.com'][0],
'options': ['ods.com'],
'default_callback': default_domain,
'options_callback': domain_options,
}
},
'search_path': {
'_self': {
'field': 'general_list',
'default_value': [
'ods.com'
],
'options': ['ods.com'],
'default_callback': default_search_path,
'options_callback': search_path_options,
'mapping_to': 'search_path'
}
},
@ -109,14 +89,14 @@ METADATA = {
'_self': {
'is_required': True,
'field': 'ip',
'default_value': '10.145.88.1',
'default_callback': default_gateway,
'mapping_to': 'gateway'
}
},
'local_repo': {
'_self': {
'field': 'general',
'default_value': 'http://10.145.89.126/',
'default_callback': default_localrepo,
'mapping_to': 'local_repo'
}
}
@ -151,7 +131,8 @@ METADATA = {
},
'$partition': {
'_self': {
'validator': is_valid_partition
'validator': is_valid_partition,
'mapping_to': '$partition'
},
'max_size': {
'_self': {

View File

@ -0,0 +1,2 @@
NAME = 'anytype'
FIELD_TYPE = object

View File

@ -0,0 +1,2 @@
NAME = 'integer'
FIELD_TYPE = int

View File

@ -33,7 +33,7 @@ METADATA = {
'osd_config': {
'_self': {
'mapping_to': 'osd_config'
}
},
'journal_size': {
'_self': {
'field': 'general',
@ -43,7 +43,7 @@ METADATA = {
},
'op_threads': {
'_self': {
'field': 'general',
'field': 'integer',
'default_value': 10,
'mapping_to': 'op_threads'
}
@ -52,7 +52,7 @@ METADATA = {
"osd_devices": {
'_self': {
'mapping_to': 'osd_devices'
}
},
'$device': {
'_self': {
'validator': is_valid_partition

View File

@ -31,7 +31,7 @@ METADATA = {
},
'osd_config': {
'_self': {
}
},
'journal_size': {
'_self': {
'field': 'general',
@ -41,7 +41,7 @@ METADATA = {
},
'op_threads': {
'_self': {
'field': 'general',
'field': 'integer',
'default_value': 10,
'mapping_to': 'op_threads'
}
@ -50,7 +50,7 @@ METADATA = {
"osd_devices": {
'_self': {
'mapping_to': 'osd_devices'
}
},
'$device': {
'_self': {
'validator': is_valid_partition

View File

@ -6,9 +6,17 @@ METADATA = {
},
'service_credentials': {
'_self': {
'required_in_whole_config': True,
'key_extensions': {
'$service': ['image', 'compute', 'dashboard', 'identity', 'metering', 'rabbitmq', 'volume', 'mysql']
},
'mapping_to': 'service_credentials'
},
'$service': {
'_self': {
'required_in_whole_config': True,
'mapping_to': '$service'
},
'username': {
'_self': {
'is_required': True,
@ -26,7 +34,17 @@ METADATA = {
}
},
'console_credentials': {
'_self': {
'required_in_whole_config': True,
'key_extensions': {
'$console': ['admin', 'compute', 'dashboard', 'image', 'metering', 'network', 'object-store', 'volume']
}
},
'$console': {
'_self': {
'required_in_whole_config': True,
'mapping_to': '$console'
},
'username': {
'_self': {
'is_required': True,
@ -46,13 +64,30 @@ METADATA = {
},
'network_mapping': {
'_self': {
'required_in_whole_config': True
'required_in_whole_config': True,
'key_extensions': {
'$interface_type': ['management', 'public', 'storage', 'tenant']
}
},
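# '$interface_type' below is a template key; key_extensions above expands it into
# one section per listed interface type (management/public/storage/tenant)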
'$interface_type': {
'_self': {
'required_in_whole_config': True,
'field': 'anytype',
'autofill_callback': autofill_network_mapping,
'mapping_to': '$interface_type'
},
'interface': {
'_self': {
'is_required': True,
'field': 'general',
}
},
'subnet': {
'_self': {
'is_required': False,
'field': 'general'
}
}
}
}
}

View File

@ -3,16 +3,16 @@ DATABASE_TYPE = 'mysql'
DATABASE_USER = 'root'
DATABASE_PASSWORD = 'root'
DATABASE_SERVER = '127.0.0.1:3306'
DATABASE_NAME = 'db'
DATABASE_NAME = 'compass'
SQLALCHEMY_DATABASE_URI = '%s://%s:%s@%s/%s' % (DATABASE_TYPE, DATABASE_USER, DATABASE_PASSWORD, DATABASE_SERVER, DATABASE_NAME)
SQLALCHEMY_DATABASE_POOL_TYPE = 'instant'
INSTALLATION_LOGDIR = {
'CobblerInstaller': '/var/log/cobbler/anamon',
'ChefInstaller': '/var/log/chef'
}
DEFAULT_LOGLEVEL = 'debug'
DEFAULT_LOGLEVEL = 'info'
DEFAULT_LOGDIR = '/var/log/compass'
DEFAULT_LOGINTERVAL = 1
DEFAULT_LOGINTERVAL = 6
DEFAULT_LOGINTERVAL_UNIT = 'h'
DEFAULT_LOGFORMAT = '%(asctime)s - %(filename)s - %(lineno)d - %(levelname)s - %(message)s'
WEB_LOGFILE = 'compass.log'
@ -24,3 +24,10 @@ POLLSWITCH_INTERVAL=60
SWITCHES = [
]
TMPL_DIR = '/etc/compass/templates'
COMPASS_SUPPORTED_PROXY = 'http://$ipaddr:3128'
COMPASS_SUPPORTED_DEFAULT_NOPROXY = ['127.0.0.1','$ipaddr','$hostname']
COMPASS_SUPPORTED_NTP_SERVER = '$ipaddr'
COMPASS_SUPPORTED_DNS_SERVERS = ['$ipaddr']
COMPASS_SUPPORTED_DOMAINS = ['$domains']
COMPASS_SUPPORTED_DEFAULT_GATEWAY = '$gateway'
COMPASS_SUPPORTED_LOCAL_REPO = 'http://$ipaddr'

View File

@ -1,6 +1,19 @@
#!/bin/bash
#
echo "Installing chef"
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
source $DIR/install.conf
if [ -f $DIR/env.conf ]; then
source $DIR/env.conf
else
echo "failed to load environment"
exit 1
fi
source $DIR/install_func.sh
echo "Installing chef related packages"
# create backup dir
sudo mkdir -p /root/backup/chef
@ -11,6 +24,8 @@ else
echo "chef-server has already installed"
fi
echo "reconfigure chef server"
# configure chef-server
sudo chef-server-ctl cleanse
mkdir -p /etc/chef-server
@ -25,26 +40,7 @@ if [[ "$?" != "0" ]]; then
exit 1
fi
sudo rm -rf /var/chef
sudo mkdir -p /var/chef/cookbooks/
sudo cp -r $ADAPTERS_HOME/chef/cookbooks/* /var/chef/cookbooks/
if [ $? -ne 0 ]; then
echo "failed to copy cookbooks to /var/chef/cookbooks/"
exit 1
fi
sudo mkdir -p /var/chef/databags/
sudo cp -r $ADAPTERS_HOME/chef/databags/* /var/chef/databags/
if [ $? -ne 0 ]; then
echo "failed to copy databags to /var/chef/databags/"
exit 1
fi
sudo mkdir -p /var/chef/roles/
sudo cp -r $ADAPTERS_HOME/chef/roles/* /var/chef/roles/
if [ $? -ne 0 ]; then
echo "failed to copy roles to /var/chef/roles/"
exit 1
fi
echo "configure chef client and knife"
# configure chef client and knife
rpm -q chef
if [[ "$?" != "0" ]]; then
@ -55,7 +51,7 @@ fi
sudo mkdir -p ~/.chef
sudo knife configure -y -i --defaults -r ~/chef-repo -s https://localhost:443 -u $USER --admin-client-name admin --admin-client-key /etc/chef-server/admin.pem --validation-client-name chef-validator --validation-key /etc/chef-server/chef-validator.pem <<EOF
sudo knife configure -y -i --defaults -r ~/chef-repo -s https://$IPADDR:443 -u $USER --admin-client-name admin --admin-client-key /etc/chef-server/admin.pem --validation-client-name chef-validator --validation-key /etc/chef-server/chef-validator.pem <<EOF
$CHEF_PASSWORD
EOF
sudo sed -i "/node_name/c\node_name \'admin\'" /$USER/.chef/knife.rb

View File

@ -1,6 +1,17 @@
#!/bin/bash
#
echo "Installing cobbler"
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
source $DIR/install.conf
if [ -f $DIR/env.conf ]; then
source $DIR/env.conf
else
echo "failed to load environment"
exit 1
fi
source $DIR/install_func.sh
echo "Installing cobbler related packages"
sudo yum -y install cobbler cobbler-web createrepo mkisofs python-cheetah python-simplejson python-urlgrabber PyYAML Django cman debmirror pykickstart reprepro
if [[ "$?" != "0" ]]; then
@ -19,7 +30,6 @@ sudo mkdir -p /root/backup/cobbler
# update httpd conf
sudo cp -rn /etc/httpd/conf.d /root/backup/cobbler/
sudo rm -f /etc/httpd/conf.d/cobbler_web.conf
sudo rm -f /etc/httpd/conf.d/ods-server.conf
sudo cp -rf $COMPASSDIR/misc/apache/cobbler_web.conf /etc/httpd/conf.d/cobbler_web.conf
chmod 644 /etc/httpd/conf.d/cobbler_web.conf
sudo rm -rf /etc/httpd/conf.d/ssl.conf
@ -36,8 +46,8 @@ sudo cp -rn /etc/cobbler/settings /root/backup/cobbler/
sudo rm -f /etc/cobbler/settings
sudo cp -rf $ADAPTERS_HOME/cobbler/conf/settings /etc/cobbler/settings
sudo sed -i "s/next_server:[ \t]*\$next_server/next_server: $NEXTSERVER/g" /etc/cobbler/settings
sudo sed -i "s/server:[ \t]*\$ipaddr/server: $ipaddr/g" /etc/cobbler/settings
sudo sed -i "s/default_name_servers:[ \t]*\['\$ipaddr'\]/default_name_servers: \['$ipaddr'\]/g" /etc/cobbler/settings
sudo sed -i "s/server:[ \t]*\$ipaddr/server: $IPADDR/g" /etc/cobbler/settings
sudo sed -i "s/default_name_servers:[ \t]*\['\$ipaddr'\]/default_name_servers: \['$IPADDR'\]/g" /etc/cobbler/settings
domains=$(echo $NAMESERVER_DOMAINS | sed "s/,/','/g")
sudo sed -i "s/manage_forward_zones:[ \t]*\[\]/manage_forward_zones: \['$domains'\]/g" /etc/cobbler/settings
export cobbler_passwd=$(openssl passwd -1 -salt 'huawei' '123456')
@ -48,13 +58,15 @@ sudo chmod 644 /etc/cobbler/settings
sudo cp -rn /etc/cobbler/dhcp.template /root/backup/cobbler/
sudo rm -f /etc/cobbler/dhcp.template
sudo cp -rf $ADAPTERS_HOME/cobbler/conf/dhcp.template /etc/cobbler/dhcp.template
subnet=$(ipcalc $SUBNET -n |cut -f 2 -d '=')
sudo sed -i "s/subnet \$subnet netmask \$netmask/subnet $subnet netmask $netmask/g" /etc/cobbler/dhcp.template
export netaddr=$(ipcalc $IPADDR $NETMASK -n |cut -f 2 -d '=')
export netprefix=$(ipcalc $IPADDR $NETMASK -p |cut -f 2 -d '=')
export subnet=${netaddr}/${netprefix}
sudo sed -i "s/subnet \$subnet netmask \$netmask/subnet $netaddr netmask $NETMASK/g" /etc/cobbler/dhcp.template
sudo sed -i "s/option routers \$gateway/option routers $OPTION_ROUTER/g" /etc/cobbler/dhcp.template
sudo sed -i "s/option subnet-mask \$netmask/option subnet-mask $netmask/g" /etc/cobbler/dhcp.template
sudo sed -i "s/option domain-name-servers \$ipaddr/option domain-name-servers $ipaddr/g" /etc/cobbler/dhcp.template
sudo sed -i "s/option subnet-mask \$netmask/option subnet-mask $NETMASK/g" /etc/cobbler/dhcp.template
sudo sed -i "s/option domain-name-servers \$ipaddr/option domain-name-servers $IPADDR/g" /etc/cobbler/dhcp.template
sudo sed -i "s/range dynamic-bootp \$ip_range/range dynamic-bootp $IP_START $IP_END/g" /etc/cobbler/dhcp.template
sudo sed -i "s/local-address \$ipaddr/local-address $ipaddr/g" /etc/cobbler/dhcp.template
sudo sed -i "s/local-address \$ipaddr/local-address $IPADDR/g" /etc/cobbler/dhcp.template
sudo chmod 644 /etc/cobbler/dhcp.template
# update tftpd.template
@ -67,8 +79,8 @@ sudo chmod 644 /etc/cobbler/tftpd.template
sudo cp -rn /etc/cobbler/named.template /root/backup/cobbler/
sudo rm -f /etc/cobbler/named.template
sudo cp -rf $ADAPTERS_HOME/cobbler/conf/named.template /etc/cobbler/named.template
sudo sed -i "s/listen-on port 53 { \$ipaddr; }/listen-on port 53 \{ $ipaddr; \}/g" /etc/cobbler/named.template
subnet_escaped=$(echo $SUBNET | sed -e 's/[\/&]/\\&/g')
sudo sed -i "s/listen-on port 53 { \$ipaddr; }/listen-on port 53 \{ $IPADDR; \}/g" /etc/cobbler/named.template
subnet_escaped=$(echo $subnet | sed -e 's/[\/&]/\\&/g')
sudo sed -i "s/allow-query { 127.0.0.0\/8; \$subnet; }/allow-query \{ 127.0.0.0\/8; $subnet_escaped; \}/g" /etc/cobbler/named.template
sudo chmod 644 /etc/cobbler/named.template
@ -76,8 +88,8 @@ sudo chmod 644 /etc/cobbler/named.template
sudo cp -rn /etc/cobbler/zone.template /root/backup/cobbler/
sudo rm -f /etc/cobbler/zone.template
sudo cp -rf $ADAPTERS_HOME/cobbler/conf/zone.template /etc/cobbler/zone.template
sudo sed -i "s/\$hostname IN A \$ipaddr/$HOSTNAME IN A $ipaddr/g" /etc/cobbler/zone.template
sudo sed -i "s/metrics IN A \$ipaddr/metrics IN A $ipaddr/g" /etc/cobbler/zone.template
sudo sed -i "s/\$hostname IN A \$ipaddr/$HOSTNAME IN A $IPADDR/g" /etc/cobbler/zone.template
sudo sed -i "s/metrics IN A \$ipaddr/metrics IN A $IPADDR/g" /etc/cobbler/zone.template
sudo chmod 644 /etc/cobbler/zone.template
# update modules.conf
@ -390,7 +402,7 @@ for profile in $(cobbler profile list); do
done
if [ "$centos_found_profile" == "0" ]; then
sudo cobbler profile add --name="${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}" --repo=centos_ppa_repo --distro="${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}" --ksmeta="tree=http://$ipaddr/cobbler/ks_mirror/${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}" --kickstart=/var/lib/cobbler/kickstarts/default.ks
sudo cobbler profile add --name="${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}" --repo=centos_ppa_repo --distro="${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}" --ksmeta="tree=http://$IPADDR/cobbler/ks_mirror/${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}" --kickstart=/var/lib/cobbler/kickstarts/default.ks
if [[ "$?" != "0" ]]; then
echo "failed to add profile ${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}"
exit 1
@ -399,7 +411,7 @@ if [ "$centos_found_profile" == "0" ]; then
fi
else
echo "profile ${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH} has already existed."
sudo cobbler profile edit --name="${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}" --repo=centos_ppa_repo --distro="${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}" --ksmeta="tree=http://$ipaddr/cobbler/ks_mirror/${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}" --kickstart=/var/lib/cobbler/kickstarts/default.ks
sudo cobbler profile edit --name="${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}" --repo=centos_ppa_repo --distro="${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}" --ksmeta="tree=http://$IPADDR/cobbler/ks_mirror/${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}" --kickstart=/var/lib/cobbler/kickstarts/default.ks
if [[ "$?" != "0" ]]; then
echo "failed to edit profile ${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}"
exit 1
@ -416,7 +428,7 @@ for profile in $(cobbler profile list); do
done
if [ "$ubuntu_found_profile" == "0" ]; then
sudo cobbler profile add --name="${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}" --repo=ubuntu_ppa_repo --distro="${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}" --ksmeta="tree=http://$ipaddr/cobbler/ks_mirror/${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}" --kickstart=/var/lib/cobbler/kickstarts/default.seed
sudo cobbler profile add --name="${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}" --repo=ubuntu_ppa_repo --distro="${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}" --ksmeta="tree=http://$IPADDR/cobbler/ks_mirror/${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}" --kickstart=/var/lib/cobbler/kickstarts/default.seed
if [[ "$?" != "0" ]]; then
echo "failed to add profile ${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}"
exit 1
@ -425,7 +437,7 @@ if [ "$ubuntu_found_profile" == "0" ]; then
fi
else
echo "profile ${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH} has already existed."
sudo cobbler profile edit --name="${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}" --repo=ubuntu_ppa_repo --distro="${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}" --ksmeta="tree=http://$ipaddr/cobbler/ks_mirror/${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}" --kickstart=/var/lib/cobbler/kickstarts/default.seed
sudo cobbler profile edit --name="${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}" --repo=ubuntu_ppa_repo --distro="${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}" --ksmeta="tree=http://$IPADDR/cobbler/ks_mirror/${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}" --kickstart=/var/lib/cobbler/kickstarts/default.seed
if [[ "$?" != "0" ]]; then
echo "failed to edit profile ${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}"
exit 1

View File

@ -1,34 +1,85 @@
#!/bin/bash
# Move files to their respective locations
sudo mkdir -p /etc/compass
sudo mkdir -p /opt/compass/bin
sudo mkdir -p /var/www/compass_web
sudo mkdir -p /var/log/compass
### BEGIN OF SCRIPT ###
echo "setup compass configuration"
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
source $DIR/install.conf
if [ -f $DIR/env.conf ]; then
source $DIR/env.conf
else
echo "failed to load environment"
exit 1
fi
source $DIR/install_func.sh
cd $SCRIPT_DIR
if [ -z $WEB_SOURCE ]; then
echo "web source $WEB_SOURCE is not set"
exit 1
fi
copy2dir "$WEB_SOURCE" "$WEB_HOME" "stackforge/compass-web" || exit $?
if [ -z $ADAPTERS_SOURCE ]; then
echo "adpaters source $ADAPTERS_SOURCE is not set"
exit 1
fi
copy2dir "$ADAPTERS_SOURCE" "$ADAPTERS_HOME" "stackforge/compass-adapters" dev/experimental || exit $?
mkdir -p /etc/compass
rm -rf /etc/compass/*
mkdir -p /opt/compass/bin
rm -rf /opt/compass/bin/*
mkdir -p /var/www/compass_web
rm -rf /var/www/compass_web/*
mkdir -p /var/log/compass
rm -rf /var/log/compass/*
sudo mkdir -p /var/log/chef
sudo mkdir -p /opt/compass/db
sudo mkdir -p /var/www/compass
rm -rf /var/log/chef/*
mkdir -p /var/www/compass
rm -rf /var/www/compass/*
sudo cp -rf $COMPASSDIR/misc/apache/ods-server.conf /etc/httpd/conf.d/ods-server.conf
sudo cp -rf $COMPASSDIR/misc/apache/compass.wsgi /var/www/compass/compass.wsgi
sudo cp -rf $COMPASSDIR/conf/* /etc/compass/
sudo cp -rf $COMPASSDIR/service/* /etc/init.d/
sudo cp -rf $COMPASSDIR/bin/*.py /opt/compass/bin/
sudo cp -rf $COMPASSDIR/bin/*.sh /opt/compass/bin/
sudo cp -rf $COMPASSDIR/bin/compassd /usr/bin/
sudo cp -rf $COMPASSDIR/bin/compass /usr/bin/
sudo cp -rf $COMPASSDIR/bin/switch_virtualenv.py.template /opt/compass/bin/switch_virtualenv.py
sudo ln -s -f /opt/compass/bin/compass_check.py /usr/bin/compass
sudo ln -s -f /opt/compass/bin/compass_wsgi.py /var/www/compass/compass.wsgi
sudo cp -rf $COMPASSDIR/bin/chef/* /opt/compass/bin/
sudo cp -rf $COMPASSDIR/bin/cobbler/* /opt/compass/bin/
sudo cp -rf $WEB_HOME/public/* /var/www/compass_web/
sudo cp -rf $WEB_HOME/v2 /var/www/compass_web/
sudo cp -rf $COMPASSDIR/templates /etc/compass/
sudo rm -rf /var/chef
sudo mkdir -p /var/chef/cookbooks/
sudo cp -r $ADAPTERS_HOME/chef/cookbooks/* /var/chef/cookbooks/
if [ $? -ne 0 ]; then
echo "failed to copy cookbooks to /var/chef/cookbooks/"
exit 1
fi
sudo mkdir -p /var/chef/databags/
sudo cp -r $ADAPTERS_HOME/chef/databags/* /var/chef/databags/
if [ $? -ne 0 ]; then
echo "failed to copy databags to /var/chef/databags/"
exit 1
fi
sudo mkdir -p /var/chef/roles/
sudo cp -r $ADAPTERS_HOME/chef/roles/* /var/chef/roles/
if [ $? -ne 0 ]; then
echo "failed to copy roles to /var/chef/roles/"
exit 1
fi
# add apache user to the group of virtualenv user
sudo usermod -a -G `groups $USER|awk '{print$3}'` apache
sudo chkconfig compass-progress-updated on
sudo chkconfig compass-celeryd on
# setup ods server
if [ ! -f /usr/lib64/libcrypto.so ]; then
sudo cp -rf /usr/lib64/libcrypto.so.6 /usr/lib64/libcrypto.so
sudo cp -rf /usr/lib64/libcrypto.so.6 /usr/lib64/libcrypto.so
fi
sudo chmod -R 777 /opt/compass/db
@ -36,42 +87,38 @@ sudo chmod -R 777 /var/log/compass
sudo chmod -R 777 /var/log/chef
sudo echo "export C_FORCE_ROOT=1" > /etc/profile.d/celery_env.sh
sudo chmod +x /etc/profile.d/celery_env.sh
source `which virtualenvwrapper.sh`
if ! lsvirtualenv |grep compass-core>/dev/null; then
mkvirtualenv compass-core
fi
cd $COMPASSDIR
workon compass-core
function compass_cleanup {
echo "deactive"
deactivate
}
trap compass_cleanup EXIT
python setup.py install
if [[ "$?" != "0" ]]; then
echo "failed to install compass package"
deactivate
exit 1
else
echo "compass package is installed in virtualenv under current dir"
fi
sudo sed -i "/COBBLER_INSTALLER_URL/c\COBBLER_INSTALLER_URL = 'http:\/\/$ipaddr/cobbler_api'" /etc/compass/setting
sudo sed -i "s/\$cobbler_ip/$ipaddr/g" /etc/compass/os_installer/cobbler.conf
sudo sed -i "/CHEF_INSTALLER_URL/c\CHEF_INSTALLER_URL = 'https:\/\/$ipaddr/'" /etc/compass/setting
sudo sed -i "s/\$chef_ip/$ipaddr/g" /etc/compass/package_installer/chef-icehouse.conf
sudo sed -i "s/\$chef_hostname/$HOSTNAME/g" /etc/compass/package_installer/chef-icehouse.conf
sudo sed -e 's|$PythonHome|'$VIRTUAL_ENV'|' -i /var/www/compass/compass.wsgi
sudo sed -e 's|$PythonHome|'$VIRTUAL_ENV'|' -i /usr/bin/compass
sudo sed -e 's|$PythonHome|'$VIRTUAL_ENV'|' -i /opt/compass/bin/poll_switch.py
sudo sed -e 's|$PythonHome|'$VIRTUAL_ENV'|' -i /opt/compass/bin/progress_update.py
sudo sed -e 's|$PythonHome|'$VIRTUAL_ENV'|' -i /opt/compass/bin/manage_db.py
sudo sed -e 's|$PythonHome|'$VIRTUAL_ENV'|' -i /opt/compass/bin/client.py
sudo sed -e 's|$PythonHome|'$VIRTUAL_ENV'|' -i /opt/compass/bin/clean_installation_logs.py
sudo sed -e 's|$PythonHome|'$VIRTUAL_ENV'|' -i /opt/compass/bin/delete_clusters.py
sudo sed -e 's|$Python|'$VIRTUAL_ENV/bin/python'|' -i /etc/init.d/compass-progress-updated
sudo sed -e 's|$CeleryPath|'$VIRTUAL_ENV/bin/celery'|' -i /etc/init.d/compass-celeryd
sudo sed -i "s/\$ipaddr/$ipaddr/g" /etc/compass/os_metadata/general.conf
sudo sed -i "s/\$hostname/$HOSTNAME/g" /etc/compass/os_metadata/general.conf
sed -i "s/\$gateway/$OPTION_ROUTER/g" /etc/compass/os_metadata/general.conf
sudo sed -i "s/\$ipaddr/$IPADDR/g" /etc/compass/setting
sudo sed -i "s/\$hostname/$HOSTNAME/g" /etc/compass/setting
sed -i "s/\$gateway/$OPTION_ROUTER/g" /etc/compass/setting
domains=$(echo $NAMESERVER_DOMAINS | sed "s/,/','/g")
sudo sed -i "s/\$domain/$domains/g" /etc/compass/os_metadata/general.conf
sudo sed -i "s/\$domains/$domains/g" /etc/compass/setting
# add cookbooks, databags and roles
sudo chmod +x /opt/compass/bin/addcookbooks.py
sudo chmod +x /opt/compass/bin/adddatabags.py
sudo chmod +x /opt/compass/bin/addroles.py
sudo sed -i "s/\$cobbler_ip/$IPADDR/g" /etc/compass/os_installer/cobbler.conf
sudo sed -i "s/\$chef_ip/$IPADDR/g" /etc/compass/package_installer/chef-icehouse.conf
sudo sed -i "s/\$chef_hostname/$HOSTNAME/g" /etc/compass/package_installer/chef-icehouse.conf
sudo sed -i "s|\$PythonHome|$VIRTUAL_ENV|g" /opt/compass/bin/switch_virtualenv.py
sudo ln -s -f $VIRTUAL_ENV/bin/celery /opt/compass/bin/celery
/opt/compass/bin/addcookbooks.py
if [[ "$?" != "0" ]]; then
@ -111,6 +158,9 @@ else
exit 1
fi
sudo chkconfig compass-progress-updated on
sudo chkconfig compass-celeryd on
/opt/compass/bin/refresh.sh
if [[ "$?" != "0" ]]; then
echo "failed to refresh compassd service"
@ -131,13 +181,6 @@ else
echo "httpd has already started"
fi
sudo mkdir -p /var/log/redis
sudo chown -R redis:root /var/log/redis
sudo mkdir -p /var/lib/redis/
sudo chown -R redis:root /var/lib/redis
sudo mkdir -p /var/run/redis
sudo chown -R redis:root /var/run/redis
sudo service redis status |grep running
if [[ "$?" != "0" ]]; then
echo "redis is not started"
@ -152,7 +195,6 @@ if [[ "$?" != "0" ]]; then
exit 1
fi
killall -9 celeryd
killall -9 celery
service compass-celeryd restart
service compass-celeryd status |grep running
@ -170,10 +212,9 @@ if [[ "$?" != "0" ]]; then
else
echo "compass-progress-updated has already started"
fi
#compass check
#if [[ "$?" != "0" ]]; then
# echo "compass check failed"
# exit 1
#fi
deactivate

10
install/env.conf Executable file
View File

@ -0,0 +1,10 @@
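# default settings for the install scripts; export any of these variables before
# running the installer to override the values below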
NIC=${NIC:-eth0}
IPADDR=${IPADDR:-10.145.89.100}
NETMASK=${NETMASK:-255.255.254.0}
WEB_SOURCE=${WEB_SOURCE:-http://git.openstack.org/stackforge/compass-web}
ADAPTERS_SOURCE=${ADAPTERS_SOURCE:-http://git.openstack.org/stackforge/compass-adapters}
OPTION_ROUTER=${OPTION_ROUTER:-10.145.88.1}
NAMESERVER_DOMAINS=${NAMESERVER_DOMAINS:-ods.com}
NEXTSERVER=${NEXTSERVER:-10.145.89.100}
IP_START=${IP_START:-10.145.89.100}
IP_END=${IP_END:-10.145.89.250}

View File

@ -10,10 +10,10 @@ export PACKAGE_INSTALLER=${PACKAGE_INSTALLER:-chef}
# service NIC
export NIC=${NIC:-}
export IPADDR=${IPADDR:-}
export NETMASK=${NETMASK:-}
# DHCP config
# SUBNET variable specifies the subnet for DHCP server. Example: 192.168.0.0/16
export SUBNET=${SUBNET:-}
# DHCP option router address(Default is your management interface IP address )"
export OPTION_ROUTER=${OPTION_ROUTER:-}
# The IP range for DHCP clients (Default: local subnet start from 100 to 254)

View File

@ -14,15 +14,13 @@ export PACKAGE_INSTALLER=chef
export NIC=installation
# DHCP config
# SUBNET variable specifies the subnet for DHCP server. Example: 192.168.0.0/16
export netmask=$(ifconfig $NIC |grep Mask | cut -f 4 -d ':')
export ipaddr=`ifconfig $NIC | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'`
export SUBNET=$(ipcalc $ipaddr $netmask -n |cut -f 2 -d '=')/$(ipcalc $ipaddr $netmask -p |cut -f 2 -d '=')
export IPADDR=`ifconfig $NIC | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'`
export NETMASK=$(ifconfig $NIC |grep Mask | cut -f 4 -d ':')
# DHCP option router address(Default is your management interface IP address )"
export OPTION_ROUTER=`ifconfig $NIC | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'`
# The IP range for DHCP clients (Default: local subnet start from 100 to 254)
export IP_START=`echo $ipaddr |cut -d. -f'1 2 3'`.128
export IP_END=`echo $ipaddr |cut -d. -f'1 2 3'`.254
export IP_START=`echo $IPADDR |cut -d. -f'1 2 3'`.128
export IP_END=`echo $IPADDR |cut -d. -f'1 2 3'`.254
# TFTP server's IP address(Default: Management Interface/eth0 IP)
export NEXTSERVER=`ifconfig $NIC | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'`
# the domains covered by nameserver
@ -83,8 +81,8 @@ export SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
export COMPASSDIR=${SCRIPT_DIR}/..
# Set test script variables
export NAMESERVERS=$ipaddr
export NTP_SERVER=$ipaddr
export GATEWAY=$ipaddr
export PROXY=http://$ipaddr:3128
export NAMESERVERS=$IPADDR
export NTP_SERVER=$IPADDR
export GATEWAY=$IPADDR
export PROXY=http://$IPADDR:3128
export TESTMODE=${TESTMODE:-"True"}

View File

@ -9,18 +9,55 @@ exec 2>&1
LOCKFILE="/tmp/`basename $0`"
LOCKFD=99
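# single-instance guard: bail out if the pid recorded in $LOCKFILE is still alive,
# otherwise take an exclusive flock on fd $LOCKFD and record our own pid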
if [ -f $LOCKFILE ]; then
LOCKED_PID=$(cat $LOCKFILE | head -n 1)
ps -p $LOCKED_PID &> /dev/null
if [[ "$?" != "0" ]]; then
echo "the progress of pid $LOCKED_PID does not exist"
rm -f $LOCKFILE
else
echo "the progress of pid $LOCKED_PID is running"
exit 1
fi
else
echo "$LOCKFILE not exist"
fi
# PRIVATE
_lock() { flock -$1 $LOCKFD; }
_no_more_locking() { _lock u; _lock xn && rm -f $LOCKFILE; }
_prepare_locking() { eval "exec $LOCKFD>\"$LOCKFILE\""; trap _no_more_locking EXIT; }
_lock()
{
echo "lock $LOCKFILE"
flock -$1 $LOCKFD
pid=$$
echo $pid 1>& $LOCKFD
}
_no_more_locking()
{
_lock u
_lock xn && rm -f $LOCKFILE
}
_prepare_locking()
{
eval "exec $LOCKFD>\"$LOCKFILE\""
trap _no_more_locking EXIT
}
# ON START
_prepare_locking
# PUBLIC
exlock_now() { _lock xn; } # obtain an exclusive lock immediately or fail
exlock_now()
{
_lock xn || exit 1
} # obtain an exclusive lock immediately or fail
exlock_now || exit 1
exlock_now
if [[ "$?" != "0" ]]; then
echo "failed to acquire lock $LOCKFILE"
exit 1
fi
### BEGIN OF SCRIPT ###
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
@ -134,22 +171,21 @@ if [ $? -ne 0 ]; then
exit 1
fi
export netmask=$(ifconfig $NIC |grep Mask | cut -f 4 -d ':')
export ipaddr=$(ifconfig $NIC | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}')
export netaddr=$(ipcalc $ipaddr $netmask -n |cut -f 2 -d '=')
export netprefix=$(ipcalc $ipaddr $netmask -p |cut -f 2 -d '=')
loadvars SUBNET ${netaddr}/${netprefix}
ipcalc $SUBNET -c
loadvars IPADDR ${ipaddr}
ipcalc $IPADDR -c
if [ $? -ne 0 ]; then
echo "subnet $SUBNET format should be x.x.x.x/x"
echo "ip addr $IPADDR format should be x.x.x.x"
exit 1
fi
export netaddr=$(ipcalc $SUBNET -n |cut -f 2 -d '=')
export netprefix=$(ipcalc $SUBNET -p |cut -f 2 -d '=')
export netmask=$(ipcalc $SUBNET -m |cut -f 2 -d '=')
export expected_subnet=${netaddr}/${netprefix}
if [[ "$SUBNET" != "$expected_subnet" ]]; then
echo "expected subnet should be $expected_subnet"
export netmask=$(ifconfig $NIC |grep Mask | cut -f 4 -d ':')
loadvars NETMASK ${netmask}
export netaddr=$(ipcalc $IPADDR $NETMASK -n |cut -f 2 -d '=')
export netprefix=$(ipcalc $IPADDR $NETMASK -p |cut -f 2 -d '=')
subnet=${netaddr}/${netprefix}
ipcalc $subnet -c
if [ $? -ne 0 ]; then
echo "subnet $subnet format should be x.x.x.x/x"
exit 1
fi
loadvars OPTION_ROUTER $(route -n | grep '^0.0.0.0' | xargs | cut -d ' ' -f 2)
@ -158,8 +194,8 @@ if [ $? -ne 0 ]; then
echo "router $OPTION_ROUTER format should be x.x.x.x"
exit 1
fi
export ip_start=$(echo "$ipaddr"|cut -f 1,2,3 -d '.')."100"
export ip_end=$(echo "$ipaddr"|cut -f 1,2,3 -d '.')."250"
export ip_start=$(echo "$IPADDR"|cut -f 1,2,3 -d '.')."100"
export ip_end=$(echo "$IPADDR"|cut -f 1,2,3 -d '.')."250"
loadvars IP_START "$ip_start"
ipcalc $IP_START -c
if [ $? -ne 0 ]; then
@ -168,9 +204,9 @@ if [ $? -ne 0 ]; then
else
echo "ip start address is $IP_START"
fi
ip_start_net=$(ipcalc $IP_START $netmask -n |cut -f 2 -d '=')
ip_start_net=$(ipcalc $IP_START $NETMASK -n |cut -f 2 -d '=')
if [[ "$ip_start_net" != "$netaddr" ]]; then
echo "ip start $IP_START is not in $SUBNET"
echo "ip start $IP_START is not in $subnet"
exit 1
fi
loadvars IP_END "$ip_end"
@ -179,9 +215,9 @@ if [ $? -ne 0 ]; then
echo "ip end $IP_END format should be x.x.x.x"
exit 1
fi
ip_end_net=$(ipcalc $IP_END $netmask -n |cut -f 2 -d '=')
ip_end_net=$(ipcalc $IP_END $NETMASK -n |cut -f 2 -d '=')
if [[ "$ip_end_net" != "$netaddr" ]]; then
echo "ip end $IP_END is not in $SUBNET"
echo "ip end $IP_END is not in $subnet"
exit 1
fi
ip_start_int=$(ipaddr_convert $IP_START)
@ -192,7 +228,7 @@ if [ $ip_range -le 0 ]; then
exit 1
fi
echo "there will be at most $ip_range hosts deployed."
loadvars NEXTSERVER $ipaddr
loadvars NEXTSERVER $IPADDR
ipcalc $NEXTSERVER -c
if [ $? -ne 0 ]; then
echo "next server $NEXTSERVER format should be x.x.x.x"
@ -205,6 +241,9 @@ loadvars ADAPTERS_SOURCE 'http://git.openstack.org/stackforge/compass-adapters'
echo "script dir: $SCRIPT_DIR"
echo "compass dir is $COMPASSDIR"
echo "generate env.conf"
source ${COMPASSDIR}/install/setup_env.sh || exit $?
echo "Install the Dependencies"
source ${COMPASSDIR}/install/dependency.sh || exit $?

145
install/install_func.sh Executable file
View File

@ -0,0 +1,145 @@
#!/bin/bash
#
copy2dir()
{
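# usage: copy2dir <repo url or local path> <dest dir> <git project> [<git branch>]
# syncs a git repo (or copies a local dir) into <dest dir>; when the ZUUL_* variables
# are set and match, the zuul-provided ref is fetched and checked out on top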
repo=$1
destdir=$2
git_project=$3
git_branch=master
if [ -n "$4" ]; then
git_branch=$4
fi
echo "copy $repo branch $git_branch to $destdir"
if [[ "$repo" =~ (git|http|https|ftp):// ]]; then
if [[ -d $destdir || -L $destdir ]]; then
cd $destdir
git status &> /dev/null
if [ $? -ne 0 ]; then
echo "$destdir is not git repo"
cd -
rm -rf $destdir
else
echo "$destdir is git repo"
cd -
fi
fi
if [[ -d $destdir || -L $destdir ]]; then
echo "$destdir exists"
cd $destdir
git remote set-url origin $repo
git remote update
if [ $? -ne 0 ]; then
echo "failed to git remote update $repo in $destdir"
cd -
exit 1
else
echo "git remote update $repo in $destdir succeeded"
fi
git reset --hard
git clean -x -f
git checkout $git_branch
git reset --hard remotes/origin/$git_branch
cd -
else
echo "create $destdir"
mkdir -p $destdir
git clone $repo $destdir
if [ $? -ne 0 ]; then
echo "failed to git clone $repo $destdir"
exit 1
else
echo "git clone $repo $destdir suceeded"
fi
cd $destdir
git checkout $git_branch
git reset --hard remotes/origin/$git_branch
cd -
fi
cd $destdir
if [[ -z $ZUUL_PROJECT ]]; then
echo "ZUUL_PROJECT is not set"
elif [[ -z $ZUUL_BRANCH ]]; then
echo "ZUUL_BRANCH is not set"
elif [[ -z $ZUUL_REF ]]; then
echo "ZUUL_REF is not set"
elif [[ "$ZUUL_PROJECT" != "$git_project" ]]; then
echo "ZUUL_PROJECT $ZUUL_PROJECT is not equal to git_project $git_project"
elif [[ "$ZUUL_BRANCH" != "$git_branch" ]]; then
echo "ZUUL_BRANCH $ZUUL_BRANCH is not equal git_branch $git_branch"
else
git_repo=$ZUUL_URL/$ZUUL_PROJECT
git_ref=$ZUUL_REF
git reset --hard remotes/origin/$git_branch
git fetch $git_repo $git_ref && git checkout FETCH_HEAD
if [ $? -ne 0 ]; then
echo "failed to git fetch $git_repo $git_ref"
cd -
exit 1
fi
git clean -x -f
fi
cd -
else
sudo rm -rf $destdir
sudo cp -rf $repo $destdir
if [ $? -ne 0 ]; then
echo "failed to copy $repo to $destdir"
exit 1
else
echo "copy $repo to $destdir succeeded"
fi
fi
if [[ ! -d $destdir && ! -L $destdir ]]; then
echo "$destdir does not exist"
exit 1
else
echo "$destdir is ready"
fi
}
# TODO(xicheng): Please add comments to this function. e.g., arg list
download()
{
#download params: <download url> [<package name>] [<action after package downloaded>]
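# action may be 'install' (rpm -Uvh the downloaded package) or 'copy' (copy it to the
# directory passed as the 4th argument); an empty action just leaves the file in /tmp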
url=$1
package=${2:-$(basename $url)}
action=${3:-""}
echo "download $package from $url and run $action"
if [[ -f /tmp/${package} || -L /tmp/${package} ]]; then
echo "$package already exists"
else
if [[ "$url" =~ (http|https|ftp):// ]]; then
echo "downloading $url to /tmp/${package}"
curl -L -o /tmp/${package}.tmp $url
if [[ "$?" != "0" ]]; then
echo "failed to download $package"
exit 1
else
echo "successfully download $package"
mv -f /tmp/${package}.tmp /tmp/${package}
fi
else
cp -rf $url /tmp/${package}
fi
if [[ ! -f /tmp/${package} && ! -L /tmp/${package} ]]; then
echo "/tmp/$package is not created"
exit 1
fi
fi
if [[ "$action" == "install" ]]; then
echo "install /tmp/$package"
sudo rpm -Uvh /tmp/$package
if [[ "$?" != "0" ]]; then
echo "failed to install $package"
exit 1
else
echo "$package is installed"
fi
elif [[ "$action" == "copy" ]]; then
echo "copy /tmp/$package to $destdir"
destdir=$4
sudo cp /tmp/$package $destdir
fi
}

View File

@ -1,113 +1,31 @@
#!/bin/bash
# prepare the installation
copy2dir()
{
repo=$1
destdir=$2
git_branch=master
if [ -n "$4" ]; then
git_branch=$4
fi
if [[ "$repo" =~ (git|http|https|ftp):// ]]; then
if [[ -d $destdir || -L $destdir ]]; then
cd $destdir
git status &> /dev/null
if [ $? -ne 0 ]; then
echo "$destdir is not git repo"
rm -rf $destdir
else
echo "$destdir is git repo"
fi
cd -
fi
if [[ -d $destdir || -L $destdir ]]; then
echo "$destdir exists"
cd $destdir
git remote set-url origin $repo
git remote update
if [ $? -ne 0 ]; then
echo "failed to git remote update $repo in $destdir"
### BEGIN OF SCRIPT ###
echo "prepare installation"
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
source $DIR/install.conf
if [ -f $DIR/env.conf ]; then
source $DIR/env.conf
else
echo "failed to load environment"
exit 1
else
echo "git remote update $repo in $destdir succeeded"
fi
git reset --hard
git clean -x -f
git checkout $git_branch
git reset --hard remotes/origin/$git_branch
else
echo "create $destdir"
mkdir -p $destdir
git clone $repo $destdir
if [ $? -ne 0 ]; then
echo "failed to git clone $repo $destdir"
exit 1
else
echo "git clone $repo $destdir suceeded"
fi
cd $destdir
git checkout $git_branch
git reset --hard remotes/origin/$git_branch
fi
if [[ ! -z $ZUUL_REF || ! -z $GERRIT_REFSPEC ]]; then
if [[ ! -z $ZUUL_REF ]]; then
git_repo=$ZUUL_URL/$3
git_ref=$ZUUL_REF
if git branch -a|grep ${ZUUL_BRANCH}; then
git_branch=$ZUUL_BRANCH
else
git_branch=master
fi
elif [[ ! -z $GERRIT_REFSPEC ]]; then
git_repo=https://$GERRIT_HOST/$3
git_ref=$GERRIT_REFSPEC
if git branch -a|grep $GERRIT_BRANCH; then
git_branch=$GERRIT_BRANCH
else
git_branch=master
fi
fi
git reset --hard remotes/origin/$git_branch
git fetch $git_repo $git_ref && git checkout FETCH_HEAD
if [ $? -ne 0 ]; then
echo "failed to git fetch $git_repo $git_ref"
fi
git clean -x -f
fi
else
sudo rm -rf $destdir
sudo cp -rf $repo $destdir
if [ $? -ne 0 ]; then
echo "failed to copy $repo to $destdir"
exit 1
else
echo "copy $repo to $destdir succeeded"
fi
fi
if [[ ! -d $destdir && ! -L $destdir ]]; then
echo "$destdir does not exist"
exit 1
else
echo "$destdir is ready"
fi
cd $SCRIPT_DIR
}
fi
source $DIR/install_func.sh
# Create backup dir
sudo mkdir -p /root/backup
# update /etc/hosts
echo "update /etc/hosts"
sudo cp -rn /etc/hosts /root/backup/hosts
sudo rm -f /etc/hosts
sudo cp -rf $COMPASSDIR/misc/hosts /etc/hosts
sudo sed -i "s/\$ipaddr \$hostname/$ipaddr $HOSTNAME/g" /etc/hosts
sudo sed -i "s/\$ipaddr \$hostname/$IPADDR $HOSTNAME/g" /etc/hosts
sudo chmod 644 /etc/hosts
# update rsyslog
echo "update rsyslog"
sudo cp -rn /etc/rsyslog.conf /root/backup/
sudo rm -f /etc/rsyslog.conf
sudo cp -rf $COMPASSDIR/misc/rsyslog/rsyslog.conf /etc/rsyslog.conf
@ -122,12 +40,14 @@ else
fi
# update logrotate.d
echo "update logrotate config"
sudo cp -rn /etc/logrotate.d /root/backup/
rm -f /etc/logrotate.d/*
sudo cp -rf $COMPASSDIR/misc/logrotate.d/* /etc/logrotate.d/
sudo chmod 644 /etc/logrotate.d/*
# update ntp conf
echo "update ntp config"
sudo cp -rn /etc/ntp.conf /root/backup/
sudo rm -f /etc/ntp.conf
sudo cp -rf $COMPASSDIR/misc/ntp/ntp.conf /etc/ntp.conf
@ -144,10 +64,14 @@ else
fi
# update squid conf
echo "update squid config"
sudo cp -rn /etc/squid/squid.conf /root/backup/
sudo rm -f /etc/squid/squid.conf
sudo cp $COMPASSDIR/misc/squid/squid.conf /etc/squid/
subnet_escaped=$(echo $SUBNET | sed -e 's/[\/&]/\\&/g')
export netaddr=$(ipcalc $IPADDR $NETMASK -n |cut -f 2 -d '=')
export netprefix=$(ipcalc $IPADDR $NETMASK -p |cut -f 2 -d '=')
subnet=${netaddr}/${netprefix}
subnet_escaped=$(echo $subnet | sed -e 's/[\/&]/\\&/g')
sudo sed -i "s/acl localnet src \$subnet/acl localnet src $subnet_escaped/g" /etc/squid/squid.conf
sudo chmod 644 /etc/squid/squid.conf
sudo mkdir -p /var/squid/cache
@ -162,13 +86,14 @@ else
fi
#update mysqld
echo "update mysqld"
sudo service mysqld restart
MYSQL_USER=${MYSQL_USER:-root}
MYSQL_OLD_PASSWORD=${MYSQL_OLD_PASSWORD:-root}
MYSQL_PASSWORD=${MYSQL_PASSWORD:-root}
MYSQL_SERVER=${MYSQL_SERVER:-127.0.0.1}
MYSQL_PORT=${MYSQL_PORT:-3306}
MYSQL_DATABASE=${MYSQL_DATABASE:-db}
MYSQL_DATABASE=${MYSQL_DATABASE:-compass}
# first time set mysql password
sudo mysqladmin -h${MYSQL_SERVER} --port=${MYSQL_PORT} -u ${MYSQL_USER} -p"${MYSQL_OLD_PASSWORD}" password ${MYSQL_PASSWORD}
if [[ "$?" != "0" ]]; then
@ -213,6 +138,7 @@ fi
copy2dir "$ADAPTERS_SOURCE" "$ADAPTERS_HOME" "stackforge/compass-adapters" dev/experimental || exit $?
if [ "$tempest" == "true" ]; then
echo "download tempest packages"
if [[ ! -e /tmp/tempest ]]; then
git clone http://git.openstack.org/openstack/tempest /tmp/tempest
if [[ "$?" != "0" ]]; then
@ -259,8 +185,9 @@ source `which virtualenvwrapper.sh`
if ! lsvirtualenv |grep compass-core>/dev/null; then
mkvirtualenv compass-core
fi
workon compass-core
cd $COMPASSDIR
workon compass-core
echo "install compass requirements"
pip install -U -r requirements.txt
if [[ "$?" != "0" ]]; then
echo "failed to install compass requiremnts"
@ -277,50 +204,6 @@ else
deactivate
fi
# TODO(xicheng): Please add comments to ths function. e.g, arg list
download()
{
url=$1
package=${2:-$(basename $url)}
action=${3:-""}
if [[ -f /tmp/${package} || -L /tmp/${package} ]]; then
echo "$package already exists"
else
if [[ "$url" =~ (http|https|ftp):// ]]; then
echo "downloading $url to /tmp/${package}"
curl -L -o /tmp/${package}.tmp $url
if [[ "$?" != "0" ]]; then
echo "failed to download $package"
exit 1
else
echo "successfully download $package"
mv -f /tmp/${package}.tmp /tmp/${package}
fi
else
cp -rf $url /tmp/${package}
fi
if [[ ! -f /tmp/${package} && ! -L /tmp/${package} ]]; then
echo "/tmp/$package is not created"
exit 1
fi
fi
if [[ "$action" == "install" ]]; then
echo "install /tmp/$package"
sudo rpm -Uvh /tmp/$package
if [[ "$?" != "0" ]]; then
echo "failed to install $package"
exit 1
else
echo "$package is installed"
fi
elif [[ "$action" == "copy" ]]; then
echo "copy /tmp/$package to $destdir"
destdir=$4
sudo cp /tmp/$package $destdir
fi
}
# download cobbler related packages
centos_ppa_repo_packages="
ntp-4.2.6p5-1.${CENTOS_IMAGE_TYPE_OTHER}${CENTOS_IMAGE_VERSION_MAJOR}.${CENTOS_IMAGE_TYPE,,}.${CENTOS_IMAGE_ARCH}.rpm
@ -356,6 +239,7 @@ download "$CENTOS_IMAGE_SOURCE" ${CENTOS_IMAGE_NAME}-${CENTOS_IMAGE_ARCH}.iso ||
download "$UBUNTU_IMAGE_SOURCE" ${UBUNTU_IMAGE_NAME}-${UBUNTU_IMAGE_ARCH}.iso || exit $?
# Install net-snmp
echo "install snmp config"
if [[ ! -e /etc/snmp ]]; then
sudo mkdir -p /etc/snmp
fi
@ -371,6 +255,7 @@ sudo mkdir -p /var/lib/net-snmp/mib_indexes
sudo chmod 755 /var/lib/net-snmp/mib_indexes
# generate ssh key
echo "generate ssh key"
if [[ ! -e $HOME/.ssh ]]; then
sudo mkdir -p $HOME/.ssh
fi

13
install/setup_env.sh Executable file
View File

@ -0,0 +1,13 @@
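# sourced by the top-level installer after the settings are resolved; it persists them
# into env.conf so the later chef/cobbler/compass setup scripts can re-source them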
cat << EOF > $SCRIPT_DIR/env.conf
NIC=\${NIC:-$NIC}
IPADDR=\${IPADDR:-$IPADDR}
NETMASK=\${NETMASK:-$NETMASK}
WEB_SOURCE=\${WEB_SOURCE:-$WEB_SOURCE}
ADAPTERS_SOURCE=\${ADAPTERS_SOURCE:-$ADAPTERS_SOURCE}
OPTION_ROUTER=\${OPTION_ROUTER:-$OPTION_ROUTER}
NAMESERVER_DOMAINS=\${NAMESERVER_DOMAINS:-$NAMESERVER_DOMAINS}
NEXTSERVER=\${NEXTSERVER:-$NEXTSERVER}
IP_START=\${IP_START:-$IP_START}
IP_END=\${IP_END:-$IP_END}
EOF
chmod ugo+x $SCRIPT_DIR/env.conf

View File

@ -1,23 +0,0 @@
#!/usr/bin/env python
import site
import sys
import os
activate_this='$PythonHome/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
site.addsitedir('$PythonHome/lib/python2.6/site-packages')
sys.path.append('$PythonHome')
os.environ['PYTHON_EGG_CACHE'] = '/tmp/.egg'
from compass.api import app as application
from compass.utils import flags
from compass.utils import logsetting
from compass.utils import setting_wrapper as setting
flags.init()
flags.OPTIONS.logfile = setting.WEB_LOGFILE
logsetting.init()
from compass.api import api as compass_api
compass_api.init()
application = compass_api.app

View File

@ -5,7 +5,7 @@ export VIRT_MEM=${VIRT_MEM:-'8192'}
export VIRT_DISK=${VIRT_DISK:-'30G'}
export CLEAN_OLD_DATA=${CLEAN_OLD_DATA:-true}
export COMPASS_SERVER_URL=${COMPASS_SERVER_URL:-"http://$ipaddr/api"}
export COMPASS_SERVER_URL=${COMPASS_SERVER_URL:-"http://$IPADDR/api"}
export COMPASS_USER_EMAIL=${COMPASS_USER_EMAIL:-'admin@huawei.com'}
export COMPASS_USER_PASSWORD=${COMPASS_USER_PASSWORD:-'admin'}
export CLUSTER_NAME=${CLUSTER_NAME:-'allinone'}
@ -20,16 +20,16 @@ export TIMEZONE=${TIMEZONE:-'America/Los_Angeles'}
export HOSTNAMES=${HOSTNAMES:-'allinone'}
export ADAPTER_OS_PATTERN=${ADAPTER_OS_PATTERN:-'(?i)centos.*'}
export ADAPTER_NAME=${ADAPTER_NAME:=''}
export ADAPTER_TARGET_SYSTEM_PATTERN=${ADAPTER_TARGET_SYSTEM_PATTERN:-'openstack.*'}
export ADAPTER_TARGET_SYSTEM_PATTERN=${ADAPTER_TARGET_SYSTEM_PATTERN:-'^openstack$'}
export ADAPTER_FLAVOR_PATTERN=${ADAPTER_FLAVOR_PATTERN:-'allinone'}
export HOST_ROLES=${HOST_ROLES:-'allinone=allinone-compute'}
export DEFAULT_ROLES=${DEFAULT_ROLES:-'allinone-compute'}
export NAMESERVERS=${NAMESERVERS:-$ipaddr}
export NTP_SERVER=${NTP_SERVER:-$ipaddr}
export GATEWAY=${GATEWAY:-$ipaddr}
export PROXY=${PROXY:-http://$ipaddr:3128}
export IGNORE_PROXY=${IGNORE_PROXY:-"127.0.0.1,localhost,$ipaddr,$HOSTNAME"}
export NAMESERVERS=${NAMESERVERS:-$IPADDR}
export NTP_SERVER=${NTP_SERVER:-$IPADDR}
export GATEWAY=${GATEWAY:-$IPADDR}
export PROXY=${PROXY:-http://${IPADDR}:3128}
export IGNORE_PROXY=${IGNORE_PROXY:-"127.0.0.1,localhost,${IPADDR},$HOSTNAME"}
export DOMAIN=${DOMAIN:-'ods.com'}
export SEARCH_PATH=${SEARCH_PATH:-${DOMAIN}}
@ -45,7 +45,7 @@ function ip_subnet {
}
if [ -z "$MANAGEMENT_SUBNET" ]; then
export MANAGEMENT_SUBNET=$(ip_subnet $ipaddr)
export MANAGEMENT_SUBNET=$(ip_subnet ${IPADDR})
fi
export TENANT_SUBNET=${TENANT_SUBNET:-'172.16.2.0/24'}
export PUBLIC_SUBNET=${PUBLIC_SUBNET:-'172.16.3.0/24'}
@ -78,7 +78,7 @@ export CONSOLE_OBJECT_STORE_CREDENTIAL=${CONSOLE_OBJECT_STORE_CREDENTIAL:-"objec
export CONSOLE_VOLUME_CREDENTIAL=${CONSOLE_VOLUME_CREDENTIAL:-"volume:${CONSOLE_USERNAME}=${CONSOLE_PASSWORD}"}
export CONSOLE_CREDENTIALS=${CONSOLE_CREDENTIALS:-"${CONSOLE_ADMIN_CREDENTIAL},${CONSOLE_COMPUTE_CREDENTIAL},${CONSOLE_DASHBOARD_CREDENTIAL},${CONSOLE_IMAGE_CREDENTIAL},${CONSOLE_METERING_CREDENTIAL},${CONSOLE_NETWORK_CREDENTIAL},${CONSOLE_OBJECT_STORE_CREDENTIAL},${CONSOLE_VOLUME_CREDENTIAL}"}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $ipaddr |cut -d. -f'1 2 3'`.50}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $IPADDR |cut -d. -f'1 2 3'`.50}
export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.50'}
export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.50'}
export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.50'}

View File

@ -9,7 +9,7 @@ export HOSTNAMES=${HOSTNAMES:-'database,messaging,identity,compute-controller,co
export HOST_ROLES=${HOST_ROLES:-'database=os-ops-database;messaging=os-ops-messaging;identity=os-identity;compute-controller=os-compute-controller;network-server=os-network-server;network-worker=os-network-worker;block-storage-volume=os-block-storage-volume;block-storage-controller=os-block-storage-controller;image=os-image;dashboard=os-dashboard'}
export DEFAULT_ROLES=${DEFAULT_ROLES:-'os-compute-worker'}
export ADAPTER_FLAVOR_PATTERN=${ADAPTER_FLAVOR_PATTERN:-'multinodes'}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $ipaddr |cut -d. -f'1 2 3'`.90}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $IPADDR |cut -d. -f'1 2 3'`.90}
export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.90'}
export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.90'}
export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.90'}

View File

@ -8,7 +8,7 @@ export HOSTNAMES=${HOSTNAMES:-'allinone'}
export HOST_ROLES=${HOST_ROLES:-'allinone=os-controller,os-compute-worker,os-network,os-block-storage-volume'}
export DEFAULT_ROLES=${DEFAULT_ROLES:-'os-compute-worker'}
export ADAPTER_FLAVOR_PATTERN=${ADAPTER_FLAVOR_PATTERN:-'single-contoller-multi-compute'}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $ipaddr |cut -d. -f'1 2 3'`.52}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $IPADDR |cut -d. -f'1 2 3'`.52}
export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.52'}
export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.52'}
export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.52'}

View File

@ -8,7 +8,7 @@ export HOSTNAMES=${HOSTNAMES:-'allinone'}
export HOST_ROLES=${HOST_ROLES:-'allinone=os-ops-database,os-ops-messaging,os-identity,os-compute-controller,os-compute-worker,os-network-server,os-network-worker,os-block-storage-volume,os-block-storage-controller,os-image,os-dashboard'}
export DEFAULT_ROLES=${DEFAULT_ROLES:-'os-compute-worker'}
export ADAPTER_FLAVOR_PATTERN=${ADAPTER_FLAVOR_PATTERN:-'multinodes'}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $ipaddr |cut -d. -f'1 2 3'`.54}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $IPADDR |cut -d. -f'1 2 3'`.54}
export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.54'}
export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.54'}
export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.54'}

View File

@ -5,7 +5,7 @@ export VIRT_MEM=${VIRT_MEM:-'8192'}
export VIRT_DISK=${VIRT_DISK:-'30G'}
export HOSTNAMES=${HOSTNAMES:-'controller,network,compute'}
export HOST_ROLES=${HOST_ROLES:-'controller=os-controller;network=os-network,os-block-storage-volume'}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $ipaddr |cut -d. -f'1 2 3'`.56}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $IPADDR |cut -d. -f'1 2 3'`.56}
export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.56'}
export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.56'}
export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.56'}

View File

@ -5,7 +5,7 @@ export VIRT_MEM=${VIRT_MEM:-'8192'}
export VIRT_DISK=${VIRT_DISK:-'20G'}
export HOSTNAMES=${HOSTNAMES:-'database,messaging,identity,compute-controller,compute-worker1,compute-worker2,network-server,network-worker,block-storage-volume,block-storage-controller,image,dashboard'}
export HOST_ROLES=${HOST_ROLES:-'database=os-ops-database;messaging=os-ops-messaging;identity=os-identity;compute-controller=os-compute-controller;network-server=os-network-server;network-worker=os-network-worker;block-storage-volume=os-block-storage-volume;block-storage-controller=os-block-storage-controller;image=os-image;dashboard=os-dashboard'}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $ipaddr |cut -d. -f'1 2 3'`.60}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $IPADDR |cut -d. -f'1 2 3'`.60}
export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.60'}
export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.60'}
export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.60'}

View File

@ -5,7 +5,7 @@ export VIRT_MEM=${VIRT_MEM:-'8192'}
export VIRT_DISK=${VIRT_DISK:-'30G'}
export CLUSTER_NAME=${CLUSTER_NAME:-'osonly'}
export HOSTNAMES=${HOSTNAMES:-'centos'}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $ipaddr |cut -d. -f'1 2 3'`.80}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $IPADDR |cut -d. -f'1 2 3'`.80}
export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.80'}
export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.80'}
export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.80'}

View File

@ -5,7 +5,7 @@ export VIRT_MEM=${VIRT_MEM:-'8192'}
export VIRT_DISK=${VIRT_DISK:-'30G'}
export HOSTNAMES=${HOSTNAMES:-'ubuntu'}
export ADAPTER_OS_PATTERN=${ADAPTER_OS_PATTERN:-'(?i)ubuntu.*'}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $ipaddr |cut -d. -f'1 2 3'`.82}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $IPADDR |cut -d. -f'1 2 3'`.82}
export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.82'}
export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.82'}
export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.82'}

View File

@ -5,7 +5,7 @@ export VIRT_MEM=${VIRT_MEM:-'8192'}
export VIRT_DISK=${VIRT_DISK:-'30G'}
export CLUSTER_NAME=${CLUSTER_NAME:-'allinone-ubuntu'}
export ADAPTER_OS_PATTERN=${ADAPTER_OS_PATTERN:-'(?i)ubuntu.*'}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $ipaddr |cut -d. -f'1 2 3'`.84}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $IPADDR |cut -d. -f'1 2 3'`.84}
export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.84'}
export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.84'}
export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.84'}

View File

@ -9,7 +9,7 @@ export HOSTNAMES=${HOSTNAMES:-'controller,network,compute'}
export HOST_ROLES=${HOST_ROLES:-'controller=os-controller;network=os-network,os-block-storage-volume'}
export DEFAULT_ROLES=${DEFAULT_ROLES:-'os-compute-worker'}
export ADAPTER_FLAVOR_PATTERN=${ADAPTER_FLAVOR_PATTERN:-'single-contoller-multi-compute'}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $ipaddr |cut -d. -f'1 2 3'`.86}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-`echo $IPADDR |cut -d. -f'1 2 3'`.86}
export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.86'}
export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.86'}
export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.86'}

View File

@ -23,7 +23,7 @@
# Checking Sanity
DEBIAN=/etc/debian_version
SUSE=/etc/SuSE-release
CELERY=$CeleryPath
CELERY=/opt/compass/bin/celery
if [ -f $DEBIAN ]; then
. /lib/lsb/init-functions
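The hunk above drops the install-time $CeleryPath placeholder and points the init script at the celery binary installed in the Compass virtualenv. A quick standalone sanity check along the same lines (hypothetical snippet for verifying the path, not part of the init script itself):

    #!/bin/bash
    CELERY=/opt/compass/bin/celery
    if [ ! -x "$CELERY" ]; then
        echo "celery executable not found at $CELERY" >&2
        exit 1
    fi
    "$CELERY" --version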

View File

@ -23,7 +23,6 @@
# Checking Sanity
DEBIAN=/etc/debian_version
SUSE=/etc/SuSE-release
PYTHON=$Python
if [ -f $DEBIAN ]; then
. /lib/lsb/init-functions
@ -37,14 +36,14 @@ RETVAL=0
start() {
echo -n "Starting Compass progress updated: "
if [ -f $SUSE ]; then
startproc -f -p /var/run/progress_update.pid -l /tmp/progress_update.log $PYTHON /opt/compass/bin/progress_update.py
startproc -f -p /var/run/progress_update.pid -l /tmp/progress_update.log /opt/compass/bin/progress_update.py
rc_status -v
RETVAL=$?
elif [ -f $DEBIAN ]; then
start_daemon -p /var/run/progress_update.pid "$PYTHON /opt/compass/bin/progress_update.py &>/tmp/progress_update.log & echo \$! > /var/run/progress_update.pid"
start_daemon -p /var/run/progress_update.pid "/opt/compass/bin/progress_update.py &>/tmp/progress_update.log & echo \$! > /var/run/progress_update.pid"
RETVAL=$?
else
daemon --pidfile /var/run/progress_update.pid "$PYTHON /opt/compass/bin/progress_update.py &>/tmp/progress_update.log & echo \$! > /var/run/progress_update.pid"
daemon --pidfile /var/run/progress_update.pid "/opt/compass/bin/progress_update.py &>/tmp/progress_update.log & echo \$! > /var/run/progress_update.pid"
RETVAL=$?
fi
echo
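With the $PYTHON prefix removed, the service runners above exec progress_update.py directly, so the script is expected to be executable and to bring up its own interpreter environment on its own. A rough manual equivalent for testing, using only the paths shown in the hunk above (running it by hand like this is purely illustrative):

    #!/bin/bash
    chmod +x /opt/compass/bin/progress_update.py
    /opt/compass/bin/progress_update.py &>/tmp/progress_update.log &
    echo $! > /var/run/progress_update.pid
    cat /var/run/progress_update.pid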