Add support for ansible: openstack-juno

Change-Id: I41fc56862073af6f925248244870b32c8cd2c8e8
This commit is contained in:
Xicheng Chang 2015-04-06 15:08:17 -07:00
parent 3e9c75ff81
commit b599f60020
37 changed files with 2701 additions and 60 deletions

View File

@ -58,6 +58,15 @@ class CobblerInstaller(object):
logging.exception(error)
class AnsibleInstaller(object):
    """Stub installer entry so ansible clusters resolve in the
    installer registries (registered under 'ansible_installer' in
    PK_INSTALLERS below).
    """

    def __init__(self, settings):
        # settings is accepted for interface parity with the other
        # installers but is not used yet -- TODO confirm.
        return

    def clean(self):
        """Nothing to clean for the ansible placeholder installer."""
        pass
class ChefInstaller(object):
DATABAGS = "databags"
CHEFSERVER_URL = "chef_url"
@ -136,7 +145,8 @@ OS_INSTALLERS = {
'cobbler': CobblerInstaller
}
PK_INSTALLERS = {
'chef_installer': ChefInstaller
'chef_installer': ChefInstaller,
'ansible_installer': AnsibleInstaller
}

View File

@ -55,3 +55,14 @@ class PackageInstallerCheck(base.BaseCheck):
self._set_status(0, message)
return None
def ansible_check(self):
    """Placeholder health check for the ansible package installer.

    No real probing is implemented yet; it always reports success.

    :returns: tuple of (code, messages)
    """
    print("Checking ansible......")
    print("[Done]")
    # Bug fix: original used '==' (a no-op comparison) instead of
    # assigning the success code.
    self.code = 1
    self.messages.append(
        "[%s]Info: Package installer health check "
        "has completed. No problems found, all systems "
        "go." % self.NAME)
    return (self.code, self.messages)

View File

@ -0,0 +1,189 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to reinstall a given cluster
.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import logging
from compass.actions import util
from compass.db.api import cluster as cluster_api
from compass.db.api import host as host_api
from compass.db.api import user as user_db
from compass.deployment.deploy_manager import DeployManager
from compass.deployment.utils import constants as const
def os_installed(
    host_id, clusterhosts_ready, clusters_os_ready,
    username=None
):
    """Callback when os is installed.

    :param host_id: host that os is installed.
    :type host_id: integer
    :param clusterhosts_ready: {cluster_id: clusterhost ready flag}
    :param clusters_os_ready: {cluster_id: cluster os ready flag}
    :param username: requesting user; None means an internal call.

    .. note::
        The function should be called out of database session.
    """
    # Serialize post-install actions across workers.
    with util.lock('serialized_action') as lock:
        if not lock:
            raise Exception(
                'failed to acquire lock to '
                'do the post action after os installation'
            )
        logging.info(
            'os installed on host %s '
            'with cluster host ready %s cluster os ready %s',
            host_id, clusterhosts_ready, clusters_os_ready
        )
        if username:
            user = user_db.get_user_object(username)
        else:
            # No username: treated as an internal (trusted) call.
            user = None
        os_installed_triggered = False
        for cluster_id, clusterhost_ready in clusterhosts_ready.items():
            if not clusterhost_ready and os_installed_triggered:
                # host-level hook already fired and there is no
                # clusterhost-level work for this cluster; skip it.
                continue
            cluster_info = util.ActionHelper.get_cluster_info(
                cluster_id, user)
            adapter_id = cluster_info[const.ADAPTER_ID]
            adapter_info = util.ActionHelper.get_adapter_info(
                adapter_id, cluster_id, user)
            hosts_info = util.ActionHelper.get_hosts_info(
                cluster_id, [host_id], user)
            deploy_manager = DeployManager(
                adapter_info, cluster_info, hosts_info)
            if not os_installed_triggered:
                # Fire the host-level os_installed hook exactly once.
                deploy_manager.os_installed()
                util.ActionHelper.host_ready(host_id, True, user)
                os_installed_triggered = True
            if clusterhost_ready:
                deploy_manager.cluster_os_installed()
                util.ActionHelper.cluster_host_ready(
                    cluster_id, host_id, False, user
                )
        for cluster_id, cluster_os_ready in clusters_os_ready.items():
            if not cluster_os_ready and os_installed_triggered:
                continue
            cluster_info = util.ActionHelper.get_cluster_info(
                cluster_id, user)
            adapter_id = cluster_info[const.ADAPTER_ID]
            adapter_info = util.ActionHelper.get_adapter_info(
                adapter_id, cluster_id, user)
            hosts_info = util.ActionHelper.get_hosts_info(
                cluster_id, [host_id], user)
            deploy_manager = DeployManager(
                adapter_info, cluster_info, hosts_info)
            if not os_installed_triggered:
                deploy_manager.os_installed()
                util.ActionHelper.host_ready(host_id, True, user)
                os_installed_triggered = True
            if cluster_os_ready:
                # Every host in this cluster has its os installed.
                deploy_manager.cluster_os_installed()
def package_installed(
    cluster_id, host_id, cluster_ready,
    host_ready, username=None
):
    """Callback when package is installed.

    :param cluster_id: cluster the host belongs to.
    :param host_id: host whose packages finished installing.
    :param cluster_ready: whether the whole cluster should be marked ready.
    :param host_ready: whether the host should be marked ready.
    :param username: requesting user; None means an internal call.

    .. note::
        The function should be called out of database session.
    """
    # Serialize post-install actions across workers.
    with util.lock('serialized_action') as lock:
        if not lock:
            raise Exception(
                'failed to acquire lock to '
                'do the post action after package installation'
            )
        logging.info(
            'package installed on cluster %s host %s '
            'with cluster ready %s host ready %s',
            cluster_id, host_id, cluster_ready, host_ready
        )
        if username:
            user = user_db.get_user_object(username)
        else:
            # Internal (trusted) call.
            user = None
        cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
        adapter_id = cluster_info[const.ADAPTER_ID]
        adapter_info = util.ActionHelper.get_adapter_info(
            adapter_id, cluster_id, user)
        hosts_info = util.ActionHelper.get_hosts_info(
            cluster_id, [host_id], user)
        deploy_manager = DeployManager(adapter_info, cluster_info, hosts_info)
        deploy_manager.package_installed()
        util.ActionHelper.cluster_host_ready(cluster_id, host_id, True, user)
        if cluster_ready:
            util.ActionHelper.cluster_ready(cluster_id, False, user)
        if host_ready:
            util.ActionHelper.host_ready(host_id, False, user)
def cluster_installed(
    cluster_id, clusterhosts_ready,
    username=None
):
    """Callback when cluster is installed.

    :param cluster_id: cluster whose package install completed.
    :param clusterhosts_ready: {host_id: whether the clusterhost
        ready flag should be flipped}.
    :param username: requesting user; None means an internal call.

    .. note::
        The function should be called out of database session.
    """
    # Serialize post-install actions across workers.
    with util.lock('serialized_action') as lock:
        if not lock:
            raise Exception(
                'failed to acquire lock to '
                'do the post action after cluster installation'
            )
        logging.info(
            'package installed on cluster %s with clusterhosts ready %s',
            cluster_id, clusterhosts_ready
        )
        if username:
            user = user_db.get_user_object(username)
        else:
            # Internal (trusted) call.
            user = None
        cluster_info = util.ActionHelper.get_cluster_info(cluster_id, user)
        adapter_id = cluster_info[const.ADAPTER_ID]
        adapter_info = util.ActionHelper.get_adapter_info(
            adapter_id, cluster_id, user)
        hosts_info = util.ActionHelper.get_hosts_info(
            cluster_id, clusterhosts_ready.keys(), user)
        deploy_manager = DeployManager(adapter_info, cluster_info, hosts_info)
        deploy_manager.cluster_installed()
        util.ActionHelper.cluster_ready(cluster_id, True, user)
        for host_id, clusterhost_ready in clusterhosts_ready.items():
            if clusterhost_ready:
                util.ActionHelper.cluster_host_ready(
                    cluster_id, host_id, False, user
                )

View File

@ -280,6 +280,29 @@ class ActionHelper(object):
host_id, True, True, user=user
)
@staticmethod
def host_ready(host_id, from_database_only, user):
    """Mark a host's state ready.

    :param from_database_only: True persists only the database change;
        False lets the internal update trigger follow-up install tasks.
    """
    host_db.update_host_state_internal(
        host_id, from_database_only=from_database_only,
        user=user, ready=True
    )
@staticmethod
def cluster_host_ready(
    cluster_id, host_id, from_database_only, user
):
    """Mark a clusterhost's state ready.

    :param from_database_only: True persists only the database change;
        False lets the internal update trigger follow-up install tasks.
    """
    cluster_db.update_cluster_host_state_internal(
        cluster_id, host_id, from_database_only=from_database_only,
        user=user, ready=True
    )
@staticmethod
def cluster_ready(cluster_id, from_database_only, user):
    """Mark a cluster's state ready.

    :param from_database_only: True persists only the database change;
        False lets the internal update trigger follow-up install tasks.
    """
    cluster_db.update_cluster_state_internal(
        cluster_id, from_database_only=from_database_only,
        user=user, ready=True
    )
@staticmethod
def get_machine_IPMI(machine_id, user):
machine_info = machine_db.get_machine(user, machine_id)

View File

@ -2009,7 +2009,7 @@ def show_clusterhost_state(clusterhost_id):
@app.route(
"/clusters/<int:cluster_id>/hosts/<int:host_id>/state",
methods=['PUT']
methods=['PUT', 'POST']
)
@log_user_action
@login_required
@ -2025,7 +2025,25 @@ def update_cluster_host_state(cluster_id, host_id):
)
@app.route("/clusterhosts/<int:clusterhost_id>/state", methods=['PUT'])
@app.route(
    "/clusters/<clustername>/hosts/<hostname>/state_internal",
    methods=['PUT', 'POST']
)
def update_cluster_host_state_internal(clustername, hostname):
    """Update clusterhost state via the internal installer callback.

    Bug fix: the handler called update_clusterhost_state_internal,
    whose signature is (clusterhost_name, from_database_only=...), so
    hostname was silently bound to from_database_only. Route to the
    cluster+host variant that actually takes (clustername, hostname).

    NOTE(review): no @login_required on this internal endpoint, unlike
    sibling routes -- confirm it is not externally exposed.
    """
    data = _get_request_data()
    return utils.make_json_response(
        200,
        cluster_api.update_cluster_host_state_internal(
            clustername, hostname, **data
        )
    )
@app.route(
"/clusterhosts/<int:clusterhost_id>/state",
methods=['PUT', 'POST']
)
@log_user_action
@login_required
@update_user_token
@ -2040,6 +2058,21 @@ def update_clusterhost_state(clusterhost_id):
)
@app.route(
    "/clusterhosts/<clusterhost_name>/state_internal",
    methods=['PUT', 'POST']
)
def update_clusterhost_state_internal(clusterhost_name):
    """update clusterhost state.

    Internal callback used by installers to report install progress.
    NOTE(review): no @login_required/@log_user_action here, unlike the
    sibling routes -- confirm it is not externally exposed.
    """
    data = _get_request_data()
    return utils.make_json_response(
        200,
        cluster_api.update_clusterhost_state_internal(
            clusterhost_name, **data
        )
    )
@app.route("/hosts", methods=['GET'])
@log_user_action
@login_required
@ -2399,7 +2432,7 @@ def show_host_state(host_id):
)
@app.route("/hosts/<int:host_id>/state", methods=['PUT'])
@app.route("/hosts/<int:host_id>/state", methods=['PUT', 'POST'])
@log_user_action
@login_required
@update_user_token
@ -2414,6 +2447,18 @@ def update_host_state(host_id):
)
@app.route("/hosts/<hostname>/state_internal", methods=['PUT', 'POST'])
def update_host_state_internal(hostname):
    """update host state.

    Internal callback used by installers to report os-installed.
    NOTE(review): intentionally unauthenticated -- confirm exposure.
    """
    data = _get_request_data()
    return utils.make_json_response(
        200,
        host_api.update_host_state_internal(
            hostname, **data
        )
    )
def _poweron_host(*args, **kwargs):
return utils.make_json_response(
202,

View File

@ -85,12 +85,12 @@ RESP_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS = [
]
RESP_STATE_FIELDS = [
'id', 'state', 'percentage', 'message', 'severity',
'status',
'status', 'ready',
'created_at', 'updated_at'
]
RESP_CLUSTERHOST_STATE_FIELDS = [
'id', 'state', 'percentage', 'message', 'severity',
'created_at', 'updated_at'
'ready', 'created_at', 'updated_at'
]
RESP_REVIEW_FIELDS = [
'cluster', 'hosts'
@ -130,9 +130,11 @@ UPDATED_CLUSTERHOST_DEPLOYED_CONFIG_FIELDS = [
UPDATED_CLUSTERHOST_STATE_FIELDS = [
'state', 'percentage', 'message', 'severity'
]
UPDATED_CLUSTER_STATE_FIELDS = [
'state'
UPDATED_CLUSTERHOST_STATE_INTERNAL_FIELDS = [
'ready'
]
UPDATED_CLUSTER_STATE_FIELDS = ['state']
UPDATED_CLUSTER_STATE_INTERNAL_FIELDS = ['ready']
RESP_CLUSTERHOST_LOG_FIELDS = [
'clusterhost_id', 'id', 'host_id', 'cluster_id',
'filename', 'position', 'partial_line',
@ -1400,6 +1402,8 @@ def update_cluster_hosts(
session, models.ClusterHost, cluster_id=cluster.id
)
logging.info('updated clusterhosts: %s', clusterhosts)
for clusterhost in clusterhosts:
logging.info('clusterhost state: %s', clusterhost.state)
return {
'hosts': clusterhosts
}
@ -1722,6 +1726,90 @@ def update_cluster_host_state(
return clusterhost.state_dict()
@utils.supported_filters(
    optional_support_keys=UPDATED_CLUSTERHOST_STATE_INTERNAL_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission_in_session(
    permission.PERMISSION_UPDATE_CLUSTERHOST_STATE
)
@utils.wrap_to_dict(['status', 'clusterhost'])
def update_cluster_host_state_internal(
    clustername, hostname, from_database_only=False,
    user=None, session=None, **kwargs
):
    """Update a clusterhost state (internal package-installed callback).

    :param clustername: cluster name, or cluster id as int/long.
    :param hostname: host name, or host id as int/long.
    :param from_database_only: when True only persist the change; do
        not send the package_installed task.
    Only ``ready`` survives the supported_filters decorator
    (UPDATED_CLUSTERHOST_STATE_INTERNAL_FIELDS).
    """
    # Resolve cluster and host by numeric id or by name.
    if isinstance(clustername, (int, long)):
        cluster = utils.get_db_object(
            session, models.Cluster, id=clustername
        )
    else:
        cluster = utils.get_db_object(
            session, models.Cluster, name=clustername
        )
    if isinstance(hostname, (int, long)):
        host = utils.get_db_object(
            session, models.Host, id=hostname
        )
    else:
        host = utils.get_db_object(
            session, models.Host, name=hostname
        )
    clusterhost = utils.get_db_object(
        session, models.ClusterHost,
        cluster_id=cluster.id, host_id=host.id
    )
    # Only a False -> True flip of `ready` triggers the callback path.
    if 'ready' in kwargs and kwargs['ready'] and not clusterhost.state.ready:
        ready_triggered = True
    else:
        ready_triggered = False
    cluster_ready = False
    # NOTE(review): host_ready is True when the host is NOT yet marked
    # ready -- a flag telling downstream to flip the host state.
    host_ready = not host.state.ready
    if ready_triggered:
        # Cluster counts as ready when every *other* clusterhost is.
        cluster_ready = True
        for clusterhost_in_cluster in cluster.clusterhosts:
            if (
                clusterhost_in_cluster.clusterhost_id
                ==
                clusterhost.clusterhost_id
            ):
                continue
            if not clusterhost_in_cluster.state.ready:
                cluster_ready = False
    logging.info(
        'cluster %s host %s ready: %s',
        clustername, hostname, ready_triggered
    )
    logging.info('cluster ready: %s', cluster_ready)
    logging.info('host ready: %s', host_ready)
    if not ready_triggered or from_database_only:
        # Database-only update; cascade not-ready up to the cluster.
        logging.info('%s state is set to %s', clusterhost.name, kwargs)
        utils.update_db_object(session, clusterhost.state, **kwargs)
        if not clusterhost.state.ready:
            logging.info('%s state ready is set to False', cluster.name)
            utils.update_db_object(session, cluster.state, ready=False)
        status = '%s state is updated' % clusterhost.name
    else:
        # Hand off to the async post-install action.
        from compass.tasks import client as celery_client
        celery_client.celery.send_task(
            'compass.tasks.package_installed',
            (
                clusterhost.cluster_id, clusterhost.host_id,
                cluster_ready, host_ready
            )
        )
        status = '%s: cluster ready %s host ready %s' % (
            clusterhost.name, cluster_ready, host_ready
        )
    logging.info('action status: %s', status)
    return {
        'status': status,
        'clusterhost': clusterhost.state_dict()
    }
@utils.supported_filters(
optional_support_keys=UPDATED_CLUSTERHOST_STATE_FIELDS,
ignore_support_keys=IGNORE_FIELDS
@ -1743,6 +1831,89 @@ def update_clusterhost_state(
return clusterhost.state_dict()
@utils.supported_filters(
    optional_support_keys=UPDATED_CLUSTERHOST_STATE_INTERNAL_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission_in_session(
    permission.PERMISSION_UPDATE_CLUSTERHOST_STATE
)
@utils.wrap_to_dict(['status', 'clusterhost'])
def update_clusterhost_state_internal(
    clusterhost_name, from_database_only=False,
    user=None, session=None, **kwargs
):
    """Update a clusterhost state keyed by clusterhost name or id.

    :param clusterhost_name: numeric clusterhost id, or a
        '<hostname>.<clustername>' string.
    :param from_database_only: when True only persist the change; do
        not send the package_installed task.
    Only ``ready`` survives the supported_filters decorator.
    """
    # Resolve the clusterhost by id or by '<host>.<cluster>' name.
    if isinstance(clusterhost_name, (int, long)):
        clusterhost = utils.get_db_object(
            session, models.ClusterHost,
            clusterhost_id=clusterhost_name
        )
        cluster = clusterhost.cluster
        host = clusterhost.host
    else:
        hostname, clustername = clusterhost_name.split('.', 1)
        cluster = utils.get_db_object(
            session, models.Cluster, name=clustername
        )
        host = utils.get_db_object(
            session, models.Host, name=hostname
        )
        clusterhost = utils.get_db_object(
            session, models.ClusterHost,
            cluster_id=cluster.id, host_id=host.id
        )
    # Only a False -> True flip of `ready` triggers the callback path.
    if 'ready' in kwargs and kwargs['ready'] and not clusterhost.state.ready:
        ready_triggered = True
    else:
        ready_triggered = False
    cluster_ready = False
    # NOTE(review): host_ready is True when the host is NOT yet marked
    # ready -- a flag telling downstream to flip the host state.
    host_ready = not host.state.ready
    if ready_triggered:
        # Cluster counts as ready when every *other* clusterhost is.
        cluster_ready = True
        for clusterhost_in_cluster in cluster.clusterhosts:
            if (
                clusterhost_in_cluster.clusterhost_id
                ==
                clusterhost.clusterhost_id
            ):
                continue
            if not clusterhost_in_cluster.state.ready:
                cluster_ready = False
    logging.info(
        'clusterhost %s ready: %s',
        clusterhost_name, ready_triggered
    )
    logging.info('cluster ready: %s', cluster_ready)
    logging.info('host ready: %s', host_ready)
    if not ready_triggered or from_database_only:
        # Database-only update; cascade not-ready up to the cluster.
        logging.info('%s set state to %s', clusterhost.name, kwargs)
        utils.update_db_object(session, clusterhost.state, **kwargs)
        if not clusterhost.state.ready:
            logging.info('%s state ready is to False', cluster.name)
            utils.update_db_object(session, cluster.state, ready=False)
        status = '%s state is updated' % clusterhost.name
    else:
        # Hand off to the async post-install action.
        from compass.tasks import client as celery_client
        celery_client.celery.send_task(
            'compass.tasks.package_installed',
            (
                clusterhost.cluster_id, clusterhost.host_id,
                cluster_ready, host_ready
            )
        )
        status = '%s: cluster ready %s host ready %s' % (
            clusterhost.name, cluster_ready, host_ready
        )
    logging.info('action status: %s', status)
    return {
        'status': status,
        'clusterhost': clusterhost.state_dict()
    }
@utils.supported_filters(
optional_support_keys=UPDATED_CLUSTER_STATE_FIELDS,
ignore_support_keys=IGNORE_FIELDS
@ -1763,6 +1934,68 @@ def update_cluster_state(
return cluster.state_dict()
@utils.supported_filters(
    optional_support_keys=UPDATED_CLUSTER_STATE_INTERNAL_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission_in_session(
    permission.PERMISSION_UPDATE_CLUSTER_STATE
)
@utils.wrap_to_dict(['status', 'cluster'])
def update_cluster_state_internal(
    clustername, from_database_only=False,
    user=None, session=None, **kwargs
):
    """Update a cluster state (internal cluster-installed callback).

    :param clustername: cluster name, or cluster id as int/long.
    :param from_database_only: when True only persist the change; do
        not send the cluster_installed task.
    Only ``ready`` survives the supported_filters decorator.
    """
    # Resolve the cluster by numeric id or by name.
    if isinstance(clustername, (int, long)):
        cluster = utils.get_db_object(
            session, models.Cluster, id=clustername
        )
    else:
        cluster = utils.get_db_object(
            session, models.Cluster, name=clustername
        )
    # Only a False -> True flip of `ready` triggers the callback chain.
    if 'ready' in kwargs and kwargs['ready'] and not cluster.state.ready:
        ready_triggered = True
    else:
        ready_triggered = False
    clusterhost_ready = {}
    if ready_triggered:
        # Record which clusterhosts still need their ready flag set.
        for clusterhost in cluster.clusterhosts:
            clusterhost_ready[clusterhost.host_id] = (
                not clusterhost.state.ready
            )
    logging.info('cluster %s ready: %s', clustername, ready_triggered)
    logging.info('clusterhost ready: %s', clusterhost_ready)
    if not ready_triggered or from_database_only:
        logging.info('%s state is set to %s', cluster.name, kwargs)
        utils.update_db_object(session, cluster.state, **kwargs)
        if not cluster.state.ready:
            # A not-ready cluster implies none of its clusterhosts are.
            for clusterhost in cluster.clusterhosts:
                logging.info('%s state ready is to False', clusterhost.name)
                utils.update_db_object(
                    session, clusterhost.state, ready=False
                )
        status = '%s state is updated' % cluster.name
    else:
        from compass.tasks import client as celery_client
        # Bug fix: original referenced `clusterhost.cluster_id`, leaking
        # the loop variable above and raising NameError for a cluster
        # with no clusterhosts; use the cluster id directly.
        celery_client.celery.send_task(
            'compass.tasks.cluster_installed',
            (cluster.id, clusterhost_ready)
        )
        status = '%s installed action set clusterhost ready %s' % (
            cluster.name, clusterhost_ready
        )
    logging.info('action status: %s', status)
    return {
        'status': status,
        'cluster': cluster.state_dict()
    }
@utils.supported_filters([])
@database.run_in_session()
@utils.wrap_to_dict(RESP_CLUSTERHOST_LOG_FIELDS)

View File

@ -83,11 +83,14 @@ IGNORE_FIELDS = [
'id', 'created_at', 'updated_at'
]
RESP_STATE_FIELDS = [
'id', 'state', 'percentage', 'message', 'severity'
'id', 'state', 'percentage', 'message', 'severity', 'ready'
]
UPDATED_STATE_FIELDS = [
'state', 'percentage', 'message', 'severity'
]
UPDATED_STATE_INTERNAL_FIELDS = [
'ready'
]
RESP_LOG_FIELDS = [
'id', 'filename', 'position', 'partial_line', 'percentage',
'message', 'severity', 'line_matcher_name'
@ -788,6 +791,84 @@ def update_host_state(host_id, user=None, session=None, **kwargs):
return host.state_dict()
@utils.supported_filters(
    optional_support_keys=UPDATED_STATE_INTERNAL_FIELDS,
    ignore_support_keys=IGNORE_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission_in_session(
    permission.PERMISSION_UPDATE_HOST_STATE
)
@utils.wrap_to_dict(['status', 'host'])
def update_host_state_internal(
    hostname, from_database_only=False,
    user=None, session=None, **kwargs
):
    """Update a host state (internal os-installed callback).

    :param hostname: host name, or host id as int/long.
    :param from_database_only: when True only persist the change; do
        not send the os_installed task.
    Only ``ready`` survives the supported_filters decorator
    (UPDATED_STATE_INTERNAL_FIELDS).
    """
    # Resolve the host by numeric id or by name.
    if isinstance(hostname, (int, long)):
        host = utils.get_db_object(
            session, models.Host, id=hostname
        )
    else:
        host = utils.get_db_object(
            session, models.Host, name=hostname
        )
    # Only a False -> True flip of `ready` triggers the callback chain.
    if 'ready' in kwargs and kwargs['ready'] and not host.state.ready:
        ready_triggered = True
    else:
        ready_triggered = False
    clusterhost_ready = {}
    cluster_os_ready = {}
    if ready_triggered:
        for clusterhost in host.clusterhosts:
            cluster = clusterhost.cluster
            if cluster.distributed_system:
                # Package install is still pending for a dist system.
                clusterhost_ready[cluster.id] = False
            else:
                clusterhost_ready[cluster.id] = True
            # The cluster's os is ready when every *other* host is.
            all_os_ready = True
            for clusterhost_in_cluster in cluster.clusterhosts:
                host_in_cluster = clusterhost_in_cluster.host
                if host_in_cluster.id == host.id:
                    continue
                if not host_in_cluster.state.ready:
                    all_os_ready = False
            cluster_os_ready[cluster.id] = all_os_ready
    logging.info('host %s ready: %s', hostname, ready_triggered)
    logging.info("clusterhost_ready is: %s", clusterhost_ready)
    logging.info("cluster_os_ready is %s", cluster_os_ready)
    if not ready_triggered or from_database_only:
        # Database-only update; cascade not-ready down to clusterhosts
        # and their clusters.
        logging.info('%s state is set to %s', host.name, kwargs)
        utils.update_db_object(session, host.state, **kwargs)
        if not host.state.ready:
            for clusterhost in host.clusterhosts:
                utils.update_db_object(
                    session, clusterhost.state, ready=False
                )
                utils.update_db_object(
                    session, clusterhost.cluster.state, ready=False
                )
        status = '%s state is updated' % host.name
    else:
        # Hand off to the async post-install action.
        from compass.tasks import client as celery_client
        celery_client.celery.send_task(
            'compass.tasks.os_installed',
            (
                host.id, clusterhost_ready,
                cluster_os_ready
            )
        )
        status = '%s: clusterhost ready %s cluster os ready %s' % (
            host.name, clusterhost_ready, cluster_os_ready
        )
    logging.info('action status: %s', status)
    return {
        'status': status,
        'host': host.state_dict()
    }
@utils.supported_filters([])
@database.run_in_session()
@utils.wrap_to_dict(RESP_LOG_FIELDS)

View File

@ -15,6 +15,7 @@
"""User database operations."""
import datetime
import functools
import logging
from flask.ext.login import UserMixin
@ -83,6 +84,9 @@ def add_user_internal(
def _check_user_permission(session, user, permission):
"""Check user has permission."""
if not user:
logging.info('empty user means the call is from internal')
return
if user.is_admin:
return

View File

@ -421,8 +421,11 @@ class StateMixin(TimestampMixin, HelperMixin):
Enum('INFO', 'WARNING', 'ERROR'),
ColumnDefault('INFO')
)
ready = Column(Boolean, default=False)
def update(self):
if self.ready:
self.state = 'SUCCESSFUL'
if self.state in ['UNINITIALIZED', 'INITIALIZED']:
self.percentage = 0.0
self.severity = 'INFO'
@ -792,6 +795,7 @@ class ClusterHost(BASE, TimestampMixin, HelperMixin):
@property
def os_installed(self):
    # Delegate to the underlying Host record's os_installed flag; the
    # debug log traces install-state queries.
    logging.debug('os installed: %s' % self.host.os_installed)
    return self.host.os_installed
@property
@ -1184,8 +1188,17 @@ class ClusterState(BASE, StateMixin):
)
if self.failed_hosts:
self.severity = 'ERROR'
super(ClusterState, self).update()
if self.state == 'SUCCESSFUL':
self.completed_hosts = self.total_hosts
for clusterhost in clusterhosts:
clusterhost_state = clusterhost.state
if clusterhost_state.state != 'SUCCESSFUL':
clusterhost_state.state = 'SUCCESSFUL'
clusterhost.state.update()
class Cluster(BASE, TimestampMixin, HelperMixin):
"""Cluster table."""

View File

@ -151,6 +151,26 @@ class DeployManager(object):
if self.pk_installer:
self.pk_installer.delete_hosts(delete_cluster=delete_cluster)
def os_installed(self):
    """Notify both installers that a host's OS installation finished."""
    os_installer = self.os_installer
    pk_installer = self.pk_installer
    if os_installer:
        os_installer.ready()
    if pk_installer:
        pk_installer.os_ready()
def cluster_os_installed(self):
    """Notify installers that every host in the cluster has its OS."""
    os_installer = self.os_installer
    pk_installer = self.pk_installer
    if os_installer:
        os_installer.cluster_ready()
    if pk_installer:
        pk_installer.cluster_os_ready()
def package_installed(self):
    """Tell the package installer a host's packages are installed."""
    installer = self.pk_installer
    if installer:
        installer.ready()
def cluster_installed(self):
    """Tell the package installer the whole cluster is installed."""
    installer = self.pk_installer
    if installer:
        installer.cluster_ready()
class PowerManager(object):
"""Manage host to power on, power off, and reset."""

View File

@ -352,7 +352,7 @@ class BaseConfigManager(object):
def _get_cluster_roles_mapping_helper(self):
"""The output format will be as below, for example:
{
"controller": {
"controller": [{
"hostname": "xxx",
"management": {
"interface": "eth0",
@ -363,7 +363,7 @@ class BaseConfigManager(object):
"is_promiscuous": False
},
...
},
}],
...
}
"""
@ -375,10 +375,8 @@ class BaseConfigManager(object):
for host_id in hosts_id_list:
roles_mapping = self.get_host_roles_mapping(host_id)
for role in roles_mapping:
if role not in mapping:
mapping[role] = roles_mapping[role]
for role, value in roles_mapping.items():
mapping.setdefault(role, []).append(value)
return mapping
def _get_host_roles_mapping_helper(self, host_id):

View File

@ -52,6 +52,12 @@ class BaseInstaller(object):
def redeploy(self, **kwargs):
raise NotImplementedError
def ready(self, **kwargs):
    """Hook invoked after install completes; default is a no-op."""
    pass
def cluster_ready(self, **kwargs):
    """Hook invoked when the whole cluster completes; no-op by default."""
    pass
def get_tmpl_vars_from_metadata(self, metadata, config):
"""Get variables dictionary for rendering templates from metadata.
:param dict metadata: The metadata dictionary.
@ -219,6 +225,12 @@ class PKInstaller(BaseInstaller):
NAME = 'PKInstaller'
INSTALLER_BASE_DIR = os.path.join(CURRENT_DIR, 'pk_installers')
def generate_installer_config(self):
    """Virtual method to generate installer config; subclasses override.

    :raises NotImplementedError: always, naming the concrete class.
    """
    # Bug fix: the message and class name were passed as two separate
    # arguments (logging style); use %-interpolation so the exception
    # carries a single formatted message.
    raise NotImplementedError(
        'generate_installer_config is not defined in %s'
        % self.__class__.__name__
    )
def get_target_systems(self):
"""virtual method to get available target_systems for each os.
@ -239,6 +251,16 @@ class PKInstaller(BaseInstaller):
"""
return {}
def os_ready(self, **kwargs):
    """Hook invoked when a host's OS install completes; no-op default."""
    pass
def cluster_os_ready(self, **kwargs):
    """Hook invoked when the cluster's OS install completes; no-op default."""
    pass
def serialize_config(self, config, destination):
    """Write the rendered configuration text to `destination`."""
    with open(destination, "w") as output_file:
        output_file.write(config)
@classmethod
def get_installer(cls, name, adapter_info, cluster_info, hosts_info):
if name is None:

View File

@ -0,0 +1,335 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Fix typo: the conventional module metadata dunder is __author__.
__author__ = "Compass Dev Team (dev-team@syscompass.org)"
"""package installer: ansible plugin."""
from Cheetah.Template import Template
from copy import deepcopy
import json
import logging
import os
import re
import shutil
import subprocess
from compass.deployment.installers.installer import PKInstaller
from compass.deployment.utils import constants as const
from compass.utils import setting_wrapper as compass_setting
from compass.utils import util
NAME = "AnsibleInstaller"
class AnsibleInstaller(PKInstaller):
INVENTORY_TMPL_DIR = 'inventories'
GROUPVARS_TMPL_DIR = 'vars'
# keywords in package installer settings
ANSIBLE_DIR = 'ansible_dir'
ANSIBLE_RUN_DIR = 'ansible_run_dir'
LOG_FILE = 'ansible_log_file'
ANSIBLE_CONFIG = 'ansible_config'
INVENTORY = 'inventory_file'
GROUP_VARIABLE = 'group_variable'
HOSTS_PATH = 'etc_hosts_path'
RUNNER_DIRS = 'runner_dirs'
def __init__(self, config_manager):
    """Wire ansible paths and filenames from installer settings.

    :param config_manager: accessor for cluster/adapter/installer
        settings (BaseConfigManager-style object).
    """
    super(AnsibleInstaller, self).__init__()
    self.config_manager = config_manager
    self.tmpl_name = self.config_manager.get_cluster_flavor_template()
    self.installer_settings = (
        self.config_manager.get_pk_installer_settings()
    )
    settings = self.installer_settings
    # NOTE(review): setdefault also *inserts* None for missing keys
    # into the shared settings dict; plain .get() would avoid that.
    self.ansible_dir = settings.setdefault(self.ANSIBLE_DIR, None)
    self.ansible_run_dir = (
        settings.setdefault(self.ANSIBLE_RUN_DIR, None)
    )
    self.log_file = settings.setdefault(self.LOG_FILE, None)
    self.ansible_config = (
        settings.setdefault(self.ANSIBLE_CONFIG, None)
    )
    self.inventory = settings.setdefault(self.INVENTORY, None)
    self.group_variable = (
        settings.setdefault(self.GROUP_VARIABLE, None)
    )
    self.hosts_path = (
        settings.setdefault(self.HOSTS_PATH, None)
    )
    self.runner_dirs = (
        settings.setdefault(self.RUNNER_DIRS, None)
    )
    # Playbook filename is derived from the template name
    # ('tmpl' suffix -> 'yml').
    self.playbook = self.tmpl_name.replace('tmpl', 'yml')
    self.runner_files = [self.playbook]
    adapter_name = self.config_manager.get_dist_system_name()
    self.tmpl_dir = AnsibleInstaller.get_tmpl_path(adapter_name)
    self.adapter_dir = os.path.join(self.ansible_dir, adapter_name)
    logging.debug('%s instance created', self)
@classmethod
def get_tmpl_path(cls, adapter_name):
    """Return the ansible template directory for the given adapter."""
    return os.path.join(
        compass_setting.TMPL_DIR, 'ansible_installer', adapter_name
    )
def __repr__(self):
    """Debug representation of the installer.

    Bug fix: `installer_url` is never assigned by __init__, so the
    original formatting raised AttributeError whenever repr() was
    taken; fall back to None via getattr.
    """
    return '%s[name=%s,installer_url=%s]' % (
        self.__class__.__name__, self.NAME,
        getattr(self, 'installer_url', None))
def generate_installer_config(self):
    """Render ansible config file by OS installing right after
    OS is installed successfully.
    The output format:
    {
        '1'($host_id/clusterhost_id):{
            'tool': 'ansible',
        },
        .....
    }
    """
    return dict(
        (host_id, {"tool": "ansible"})
        for host_id in self.config_manager.get_host_id_list()
    )
def get_env_name(self, dist_sys_name, cluster_name):
    """Return the per-cluster run-env name '<dist_sys>-<cluster>'."""
    return '%s-%s' % (dist_sys_name, cluster_name)
def _get_cluster_tmpl_vars(self):
    """Generate template variables dict based on cluster level config.
    The vars_dict will be:
    {
        "baseinfo": {
            "id":1,
            "name": "cluster01",
            ...
        },
        "package_config": {
            .... //mapped from original package config based on metadata
        },
        "role_mapping": {
            ....
        }
    }
    """
    cluster_vars_dict = {}
    # set cluster basic information to vars_dict
    cluster_baseinfo = self.config_manager.get_cluster_baseinfo()
    cluster_vars_dict[const.BASEINFO] = cluster_baseinfo
    # get and set template variables from cluster package config.
    # NOTE(review): 'meatadata' matches the helper's spelling
    # elsewhere; do not rename this call alone.
    pk_metadata = self.config_manager.get_pk_config_meatadata()
    pk_config = self.config_manager.get_cluster_package_config()
    # get os config as ansible needs them
    os_metadata = self.config_manager.get_os_config_metadata()
    os_config = self.config_manager.get_cluster_os_config()
    pk_meta_dict = self.get_tmpl_vars_from_metadata(pk_metadata, pk_config)
    os_meta_dict = self.get_tmpl_vars_from_metadata(os_metadata, os_config)
    # merge os-level vars into the package vars for a single namespace
    util.merge_dict(pk_meta_dict, os_meta_dict)
    cluster_vars_dict[const.PK_CONFIG] = pk_meta_dict
    # get and set roles_mapping to vars_dict
    mapping = self.config_manager.get_cluster_roles_mapping()
    logging.info("cluster role mapping is %s", mapping)
    cluster_vars_dict[const.ROLES_MAPPING] = mapping
    return cluster_vars_dict
def _generate_inventory_attributes(self, global_vars_dict):
    """Render the ansible inventory content from its template."""
    tmpl_path = os.path.join(
        self.tmpl_dir, self.INVENTORY_TMPL_DIR, self.tmpl_name
    )
    if not os.path.exists(tmpl_path):
        logging.error(
            "Inventory template '%s' does not exist", self.tmpl_name
        )
        raise Exception("Template '%s' does not exist!" % self.tmpl_name)
    return self.get_config_from_template(tmpl_path, global_vars_dict)
def _generate_group_vars_attributes(self, global_vars_dict):
    """Render the ansible group_vars content from its template."""
    tmpl_path = os.path.join(
        self.tmpl_dir, self.GROUPVARS_TMPL_DIR, self.tmpl_name
    )
    if not os.path.exists(tmpl_path):
        logging.error("Vars template '%s' does not exist",
                      self.tmpl_name)
        raise Exception("Template '%s' does not exist!" % self.tmpl_name)
    return self.get_config_from_template(tmpl_path, global_vars_dict)
def _generate_hosts_attributes(self, global_vars_dict):
    """Render the /etc/hosts-style content from its template."""
    tmpl_path = os.path.join(self.tmpl_dir, 'hosts', self.tmpl_name)
    if not os.path.exists(tmpl_path):
        logging.error("Hosts template '%s' does not exist", self.tmpl_name)
        raise Exception("Template '%s' does not exist!" % self.tmpl_name)
    return self.get_config_from_template(tmpl_path, global_vars_dict)
def get_config_from_template(self, tmpl_path, vars_dict):
    """Render a Cheetah template with the given variables.

    Nested dicts in vars_dict are promoted to their own entries on
    the Cheetah searchList so templates can reference their keys
    directly; the remaining flat values form the last namespace.

    :param tmpl_path: path of the template file.
    :param vars_dict: variables used to render the template.
    :returns: rendered text, or {} when template/vars are missing.
    """
    logging.debug("vars_dict is %s", vars_dict)
    if not os.path.exists(tmpl_path) or not vars_dict:
        logging.info("Template dir or vars_dict is None!")
        return {}
    searchList = []
    copy_vars_dict = deepcopy(vars_dict)
    # Iterate the original dict while mutating only the copy.
    for key, value in vars_dict.iteritems():
        if isinstance(value, dict):
            temp = copy_vars_dict[key]
            del copy_vars_dict[key]
            searchList.append(temp)
    searchList.append(copy_vars_dict)
    # Load specific template for current adapter
    tmpl = Template(file=tmpl_path, searchList=searchList)
    return tmpl.respond()
def _create_ansible_run_env(self, env_name):
    """Create the per-cluster ansible run directory.

    Writes an ansible config with log_path set, then copies the
    runner directories and files from the adapter dir into the env.
    """
    run_dir = os.path.join(self.ansible_run_dir, env_name)
    os.mkdir(run_dir)
    log_path = os.path.join(run_dir, self.log_file)
    cfg_path = os.path.join(run_dir, self.ansible_config)
    with open(cfg_path, 'w') as cfg:
        cfg.write('[defaults]\n')
        cfg.write("log_path = %s" % log_path)
    # copy roles etc. to run env (avoid shadowing builtins dir/file)
    for runner_dir in self.runner_dirs:
        shutil.copytree(
            os.path.join(self.adapter_dir, runner_dir),
            os.path.join(run_dir, runner_dir)
        )
    for runner_file in self.runner_files:
        logging.info('file is %s', runner_file)
        shutil.copy(
            os.path.join(self.adapter_dir, runner_file),
            os.path.join(run_dir, runner_file)
        )
def prepare_ansible(self, env_name, global_vars_dict):
    """Create the run env and write inventory, group_vars and hosts.

    :param env_name: per-cluster run directory name.
    :param global_vars_dict: template variables from cluster config.
    """
    ansible_run_destination = os.path.join(self.ansible_run_dir, env_name)
    self._create_ansible_run_env(env_name)
    # Render all three artifacts before creating their directories.
    inv_config = self._generate_inventory_attributes(global_vars_dict)
    inventory_dir = os.path.join(ansible_run_destination, 'inventories')
    vars_config = self._generate_group_vars_attributes(global_vars_dict)
    vars_dir = os.path.join(ansible_run_destination, 'group_vars')
    hosts_config = self._generate_hosts_attributes(global_vars_dict)
    hosts_destination = os.path.join(
        ansible_run_destination, self.hosts_path
    )
    os.mkdir(inventory_dir)
    os.mkdir(vars_dir)
    inventory_destination = os.path.join(inventory_dir, self.inventory)
    group_vars_destination = os.path.join(vars_dir, self.group_variable)
    self.serialize_config(inv_config, inventory_destination)
    self.serialize_config(vars_config, group_vars_destination)
    self.serialize_config(hosts_config, hosts_destination)
def deploy(self):
"""Start to deploy a distributed system. Return both cluster and hosts
deployed configs. The return format:
{
"cluster": {
"id": 1,
"deployed_package_config": {
"roles_mapping": {...},
"service_credentials": {...},
....
}
},
"hosts": {
1($clusterhost_id): {
"deployed_package_config": {...}
},
....
}
}
"""
host_list = self.config_manager.get_host_id_list()
if not host_list:
return {}
adapter_name = self.config_manager.get_adapter_name()
cluster_name = self.config_manager.get_clustername()
env_name = self.get_env_name(adapter_name, cluster_name)
global_vars_dict = self._get_cluster_tmpl_vars()
logging.info(
'%s var dict: %s', self.__class__.__name__, global_vars_dict
)
# Create ansible related files
self.prepare_ansible(env_name, global_vars_dict)
def cluster_os_ready(self):
adapter_name = self.config_manager.get_adapter_name()
cluster_name = self.config_manager.get_clustername()
env_name = self.get_env_name(adapter_name, cluster_name)
ansible_run_destination = os.path.join(self.ansible_run_dir, env_name)
inventory_dir = os.path.join(ansible_run_destination, 'inventories')
inventory_file = os.path.join(inventory_dir, self.inventory)
playbook_file = os.path.join(ansible_run_destination, self.playbook)
log_file = os.path.join(ansible_run_destination, 'run.log')
config_file = os.path.join(
ansible_run_destination, self.ansible_config
)
cmd = "ANSIBLE_CONFIG=%s ansible-playbook -i %s %s" % (config_file,
inventory_file,
playbook_file)
with open(log_file, 'w') as logfile:
subprocess.Popen(cmd, shell=True, stdout=logfile, stderr=logfile)

View File

@ -33,6 +33,7 @@ USERNAME = 'username'
DIST_SYS_NAME = 'distributed_system_name'
FLAVOR = 'flavor'
FLAVORS = 'flavors'
PLAYBOOK = 'playbook'
FLAVOR_NAME = 'flavor_name'
HEALTH_CHECK_CMD = 'health_check_cmd'
TMPL = 'template'
@ -61,6 +62,7 @@ MGMT_NIC_FLAG = 'is_mgmt'
NETMASK = 'netmask'
NETWORKS = 'networks'
NIC = 'interface'
CLUSTER_ID = 'cluster_id'
ORIGIN_CLUSTER_ID = 'origin_cluster_id'
PROMISCUOUS_FLAG = 'is_promiscuous'
REINSTALL_OS_FLAG = 'reinstall_os'

View File

@ -24,6 +24,7 @@ from celery.signals import setup_logging
from compass.actions import clean
from compass.actions import delete
from compass.actions import deploy
from compass.actions import install_callback
from compass.actions import poll_switch
from compass.actions import update_progress
from compass.db.api import adapter_holder as adapter_api
@ -176,7 +177,7 @@ def delete_host(deleter_email, host_id, cluster_ids):
"""
try:
delete.delete_host(
host_id, deleter_email, cluster_ids
host_id, cluster_ids, deleter_email
)
except Exception as error:
logging.exception(error)
@ -250,6 +251,50 @@ def reset_machine(machine_id):
pass
@celery.task(name='compass.tasks.os_installed')
def os_installed(
    host_id, clusterhosts_ready,
    clusters_os_ready
):
    """Celery task: callback fired after a host's OS is installed.

    Delegates to ``install_callback.os_installed`` with the arguments
    unchanged.  Any exception is logged and suppressed so the worker
    keeps running.
    """
    try:
        install_callback.os_installed(
            host_id, clusterhosts_ready,
            clusters_os_ready
        )
    except Exception as error:
        logging.exception(error)
@celery.task(name='compass.tasks.package_installed')
def package_installed(
    cluster_id, host_id, cluster_ready, host_ready
):
    """Celery task: callback fired after a clusterhost's package install.

    Delegates to ``install_callback.package_installed`` with the arguments
    unchanged.  Any exception is logged and suppressed so the worker
    keeps running.
    """
    try:
        install_callback.package_installed(
            cluster_id, host_id, cluster_ready, host_ready
        )
    except Exception as error:
        logging.exception(error)
@celery.task(name='compass.tasks.cluster_installed')
def cluster_installed(
    cluster_id, clusterhosts_ready
):
    """Celery task: callback fired after a cluster's installation finishes.

    (Original docstring said "package is installed" — this is the
    cluster-level callback.)  Delegates to
    ``install_callback.cluster_installed`` with the arguments unchanged.
    Any exception is logged and suppressed so the worker keeps running.
    """
    try:
        install_callback.cluster_installed(
            cluster_id, clusterhosts_ready
        )
    except Exception as error:
        logging.exception(error)
@celery.task(name='compass.tasks.update_progress')
def update_clusters_progress():
"""Calculate the installing progress of the given cluster.

View File

@ -60,6 +60,7 @@ from compass.log_analyzor import progress_calculator
from compass.utils import flags
from compass.utils import logsetting
ADAPTER_NAME = 'openstack_icehouse'
OS_NAME = 'CentOS-6.5-x86_64'
SWITCH_IP = '172.29.8.40'
MACHINE_MAC = '00:0c:29:bf:eb:1d'
@ -87,18 +88,40 @@ class TestProgressCalculator(unittest2.TestCase):
# get adapter information
list_adapters = adapter.list_adapters(user=self.user_object)
for adptr in list_adapters:
if ('package_installer' in adptr.keys() and
adptr['flavors'] != [] and
adptr['distributed_system_name'] == 'openstack'):
self.adapter_id = adptr['id']
for flavor in adptr['flavors']:
if flavor['name'] == 'allinone':
self.flavor_id = flavor['id']
break
self.adapter_id = None
if adptr['name'] != ADAPTER_NAME:
continue
self.adapter_id = adptr['id']
self.os_id = None
for supported_os in adptr['supported_oses']:
if supported_os['name'] == OS_NAME:
self.os_id = supported_os['os_id']
break
if not self.os_id:
continue
if (
'package_installer' in adptr.keys() and
adptr['flavors'] != [] and
adptr['distributed_system_name'] == 'openstack'
):
self.flavor_id = None
for flavor in adptr['flavors']:
if flavor['name'] == 'allinone':
self.flavor_id = flavor['id']
break
if not self.flavor_id:
continue
else:
continue
if self.adapter_id and self.os_id and self.flavor_id:
break
if not self.adapter_id:
raise Exception('adapter id not found')
if not self.os_id:
raise Exception('os id not found')
if not self.flavor_id:
raise Exception('flavor id not found')
#add cluster
cluster.add_cluster(

View File

@ -94,6 +94,7 @@ class ApiTestCase(unittest2.TestCase):
data['name'] = 'test_cluster1'
data['adapter_id'] = adapter_id
data['os_id'] = os_id
self.os_id = os_id
data['flavor_id'] = flavor_id
self.post(url, data)
data = {}
@ -1014,7 +1015,7 @@ class TestMetadataAPI(ApiTestCase):
super(TestMetadataAPI, self).tearDown()
def test_get_os_ui_metadata(self):
url = '/oses/1/ui_metadata'
url = '/oses/%s/ui_metadata' % self.os_id
return_value = self.get(url)
self.assertEqual(return_value.status_code, 200)
self.assertIn('os_global_config', return_value.get_data())

View File

@ -95,24 +95,33 @@ class TestChefInstaller(unittest2.TestCase):
def test_get_env_attributes(self):
expected_env = {
"chef_type": "environment",
"name": "testing",
"description": "Environment",
"cookbook_versions": {
},
"json_class": "Chef::Environment",
"chef_type": "environment",
"override_attributes": {
"compass": {
"cluster_id": "1"
}
},
"default_attributes": {
"local_repo": "",
"memcached": {
"bind_interface": "vnet0"
},
"compute": {
"xvpvnc_proxy": {
"bind_interface": "eth0"
},
"syslog": {
"use": False
},
"libvirt": {
"bind_interface": "eth0"
},
"novnc_proxy": {
"bind_interface": "vnet0"
},
"xvpvnc_proxy": {
"libvirt": {
"bind_interface": "eth0"
}
},
@ -121,21 +130,299 @@ class TestChefInstaller(unittest2.TestCase):
"external_network_bridge_interface": "eth2"
}
},
"mysql": {
"server_root_password": "root",
"server_repl_password": "root",
"root_network_acl": "%",
"allow_remote_root": True,
"server_debian_password": "root"
},
"mq": {
"vhost": "/nova",
"password": "test",
"user": "guest",
"network": {
"service_type": "rabbitmq"
}
},
"openstack": {
"image": {
"upload_images": ["cirros"],
"syslog": {
"use": False
},
"api": {
"bind_interface": "vnet0"
},
"registry": {
"bind_interface": "vnet0"
},
"debug": True,
"upload_image": {
"cirros": "http://download.cirros-cloud.net"
"/0.3.2/cirros-0.3.2-x86_64-disk.img"
}
},
"db": {
"volume": {
"host": "12.234.32.100"
},
"compute": {
"host": "12.234.32.100"
},
"network": {
"host": "12.234.32.100"
},
"orchestration": {
"host": "12.234.32.100"
},
"bind_interface": "vnet0",
"image": {
"host": "12.234.32.100"
},
"telemetry": {
"host": "12.234.32.100"
},
"identity": {
"host": "12.234.32.100"
},
"dashboard": {
"host": "12.234.32.100"
}
},
"auth": {
"validate_certs": False
},
"use_databags": False,
"developer_mode": True,
"block-storage": {
"debug": True,
"syslog": {
"use": False
},
"api": {
"ratelimit": "False"
}
},
"compute": {
"xvpvnc_proxy": {
"bind_interface": "eth0"
},
"network": {
"service_type": "neutron"
},
"libvirt": {
"bind_interface": "eth0"
},
"syslog": {
"use": False
},
"ratelimit": {
"volume": {
"enabled": False
},
"api": {
"enabled": False
}
},
"novnc_proxy": {
"bind_interface": "eth0"
}
},
"network": {
"verbose": "True",
"openvswitch": {
"network_vlan_ranges": "",
"enable_tunneling": "True",
"bind_interface": "eth1",
"tenant_network_type": "gre",
"bridge_mappings": "",
"tunnel_id_ranges": "1:1000"
},
"ml2": {
"type_drivers": "gre",
"tenant_network_types": "gre",
"enable_security_group": "True",
"network_vlan_ranges": "",
"tunnel_id_ranges": "1:1000"
},
"l3": {
"external_network_bridge_interface": "eth2"
},
"debug": "True",
"service_plugins": ["router"]
},
"mq": {
"vhost": "/nova",
"password": "guest",
"user": "guest",
"network": {
"service_type": "rabbitmq"
}
},
"dashboard": {
"use_ssl": "false"
},
"identity": {
"syslog": {
"use": False
},
"token": {
"backend": "sql"
},
"admin_user": "admin",
"users": {
"admin": {
"password": "admin",
"default_tenant": "admin",
"roles": {
"admin": ["admin"]
}
},
"demo": {
"password": "demo",
"default_tenant": "demo",
"roles": {
"member": ["demo"]
}
}
},
"roles": ["admin", "member"],
"bind_interface": "vnet0",
"debug": True,
"tenants": ["admin", "service", "demo"],
"catalog": {
"backend": "sql"
}
},
"endpoints": {
"telemetry-api": {
"path": "/v1",
"host": "12.234.32.100",
"scheme": "http",
"port": "8777"
},
"compute-api": {
"path": "/v2/%(tenant_id)s",
"host": "12.234.32.100",
"scheme": "http",
"port": "8774"
},
"identity-admin": {
"path": "/v2.0",
"host": "12.234.32.100",
"scheme": "http",
"port": "35357"
},
"image-api-bind": {
"bind_interface": "vnet0"
},
"image-registry": {
"path": "/v2",
"host": "12.234.32.100",
"scheme": "http",
"port": "9191"
},
"orchestration-api-cfn": {
"path": "/v1",
"host": "12.234.32.100",
"scheme": "http",
"port": "8000"
},
"vnc_bind": {
"bind_interface": "vnet0"
},
"image-registry-bind": {
"bind_interface": "vnet0"
},
"orchestration-api": {
"path": "/v1/%(tenant_id)s",
"host": "12.234.32.100",
"scheme": "http",
"port": "8004"
},
"block-storage-api-bind": {
"bind_interface": "vnet0"
},
"identity-api": {
"path": "/v2.0",
"host": "12.234.32.100",
"scheme": "http",
"port": "5000"
},
"network-api-bind": {
"bind_interface": "eth0"
},
"block-storage-api": {
"path": "/v1/%(tenant_id)s",
"host": "12.234.32.100",
"scheme": "http",
"port": "8776"
},
"db": {
"host": "12.234.32.100"
},
"compute-api-bind": {
"bind_interface": "vnet0"
},
"compute-novnc": {
"path": "/vnc_auto.html",
"host": "12.234.32.100",
"scheme": "http",
"port": "6080"
},
"image-api": {
"path": "/v2",
"host": "12.234.32.100",
"scheme": "http",
"port": "9292"
},
"compute-vnc-bind": {
"bind_interface": "eth0"
},
"identity-bind": {
"bind_interface": "vnet0"
},
"network-api": {
"path": "",
"host": "12.234.32.103",
"scheme": "http",
"port": "9696"
},
"mq": {
"host": "12.234.32.100"
},
"compute-ec2-admin": {
"path": "/services/Admin",
"host": "12.234.32.100",
"scheme": "http",
"port": "8773"
},
"compute-novnc-bind": {
"bind_interface": "vnet0"
},
"compute-ec2-api": {
"path": "/services/Cloud",
"host": "12.234.32.100",
"scheme": "http",
"port": "8773"
}
}
},
"db": {
"bind_interface": "vnet0",
"compute": {
"host": "12.234.32.100"
},
"identity": {
"host": "12.234.32.100"
}
},
"bind_interface": "vnet0"
},
"mq": {
"user": "guest",
"password": "test",
"vhost": "/nova",
"network": {
"service_type": "rabbitmq"
"collectd": {
"server": {
"host": "metrics",
"protocol": "tcp",
"port": "4242"
}
}
}
@ -221,7 +508,7 @@ class TestChefInstaller(unittest2.TestCase):
}
},
"roles_mapping": {
"os_controller": {
"os_controller": [{
"hostname": "server01",
"management": {
"interface": "vnet0",
@ -239,8 +526,8 @@ class TestChefInstaller(unittest2.TestCase):
"is_promiscuous": False,
"subnet": "172.16.1.0/24"
}
},
"os_compute_worker": {
}],
"os_compute_worker": [{
"hostname": "server02",
"management": {
"interface": "eth0",
@ -258,8 +545,7 @@ class TestChefInstaller(unittest2.TestCase):
"is_promiscuous": False,
"subnet": "172.16.1.0/24"
}
},
"os_network": {
}, {
"hostname": "server03",
"management": {
"interface": "eth0",
@ -285,7 +571,34 @@ class TestChefInstaller(unittest2.TestCase):
"is_promiscuous": True,
"subnet": "10.0.0.0/24"
}
}
}],
"os_network": [{
"hostname": "server03",
"management": {
"interface": "eth0",
"ip": "12.234.32.103",
"netmask": "255.255.255.0",
"is_mgmt": True,
"is_promiscuous": False,
"subnet": "12.234.32.0/24"
},
"tenant": {
"interface": "eth1",
"ip": "172.16.1.3",
"netmask": "255.255.255.0",
"is_mgmt": False,
"is_promiscuous": False,
"subnet": "172.16.1.0/24"
},
"public": {
"interface": "eth2",
"ip": "10.0.0.1",
"netmask": "255.255.255.0",
"is_mgmt": False,
"is_promiscuous": True,
"subnet": "10.0.0.0/24"
}
}]
}
}
},

View File

@ -63,7 +63,7 @@ class TestConfigManager(unittest2.TestCase):
def test_get_cluster_roles_mapping(self):
expected_output = {
"os_controller": {
"os_controller": [{
"hostname": "server01",
"management": {
"interface": "vnet0",
@ -81,8 +81,8 @@ class TestConfigManager(unittest2.TestCase):
"is_promiscuous": False,
"subnet": "172.16.1.0/24"
}
},
"os_compute_worker": {
}],
"os_compute_worker": [{
"hostname": "server02",
"management": {
"interface": "eth0",
@ -100,8 +100,34 @@ class TestConfigManager(unittest2.TestCase):
"is_promiscuous": False,
"subnet": "172.16.1.0/24"
}
},
"os_network": {
}, {
"hostname": "server03",
"management": {
"interface": "eth0",
"ip": "12.234.32.103",
"is_mgmt": True,
"is_promiscuous": False,
"netmask": "255.255.255.0",
"subnet": "12.234.32.0/24"
},
'public': {
"interface": "eth2",
"ip": "10.0.0.1",
"is_mgmt": False,
"is_promiscuous": True,
"netmask": "255.255.255.0",
"subnet": "10.0.0.0/24"
},
"tenant": {
"interface": "eth1",
"ip": "172.16.1.3",
"netmask": "255.255.255.0",
"is_mgmt": False,
"is_promiscuous": False,
"subnet": "172.16.1.0/24"
}
}],
"os_network": [{
"hostname": "server03",
"management": {
"interface": "eth0",
@ -127,7 +153,7 @@ class TestConfigManager(unittest2.TestCase):
"is_promiscuous": True,
"subnet": "10.0.0.0/24"
}
}
}]
}
self.maxDiff = None
output = self.test_config_manager.get_cluster_roles_mapping()

View File

@ -0,0 +1,427 @@
#set controller = $get_role('os_compute_controller', [$get_role('os_controller', None), $get_role('allinone_compute', None)])
## Controller node management IP/interface
#set controller_ip = $controller.management.ip
#set controller_nic = $controller.management.interface
## Compute node management IP/interface
#set compute = $get_role('os_compute_worker', $get_role('allinone_compute', None))
#set compute_nic = $compute.management.interface
## Network server/worker node IP/interface
#set network = $get_role('os_network', None)
#set network_server = $get_role('os_network_server', [$network, $controller])
#set network_worker = $get_role('os_network_worker', [$network, $compute])
#set network_server_ip = $network_server.management.ip
#set network_server_nic = $network_server.management.interface
## Network worker node management IP/interface
#set network_worker_ip = $network_worker.management.ip
#set network_worker_nic = $network_worker.management.interface
#set network_worker_tenant_nic = $network_worker.tenant.interface
#set network_worker_public_nic = $network_worker.public.interface
## Database node management IP
#set db_role = $get_role('os_ops_database', $controller)
#set db_ip = $db_role.management.ip
#set db_nic = $db_role.management.interface
## Message queue node management IP/interface
#set msg_queue = $get_role('os_ops_messaging', $controller)
#set msg_queue_ip = $msg_queue.management.ip
## Identity (keystone) node IP/interface
#set identity = $get_role('os_identity', $controller)
#set identity_ip = $identity.management.ip
#set identity_nic = $identity.management.interface
## Glance node management IP/interface
#set glance = $get_role('os_image', $controller)
#set glance_ip = $glance.management.ip
#set glance_nic = $glance.management.interface
## Cinder node management IP/interface
#set cinder = $get_role('os_block_storage_controller', $controller)
#set cinder_controller_ip = $cinder.management.ip
#set cinder_controller_nic = $cinder.management.interface
## Dashboard node management IP/interface
#set dash = $get_role('os_dashboard', $controller)
#set dash_ip = $dash.management.ip
## Set services credentials
#set db_pass = $getDictValue('service_credentials/mysql/password', 'root')
#set mq_username = $getDictValue('service_credentials/rabbitmq/username', 'guest')
#set mq_pass = $getDictValue('service_credentials/rabbitmq/password', 'guest')
## Set Openstack neutron config
#set tenant_network_type = $getDictValue('neutron_config/openvswitch/tenant_network_type', 'gre')
#set enable_tunneling = True
#set tunnel_id_ranges = ''
#set vlan_ranges = ''
#set bridge_mappings = ''
#if $tenant_network_type == 'vlan'
#set enable_tunneling = False
#set bridge_mappings = ','.join($getDictValue('neutron_config/openvswitch/bridge_mappings', ['physnet1:br-eth1']))
#set vlan_ranges = ','.join($getDictValue('neutron_config/openvswitch/vlan_ranges', ['physnet1:2700:2999']))
#else
#set tunnel_id_ranges = ','.join($getDictValue('neutron_config/openvswitch/tunnel_id_ranges', ['1:1000']))
#end if
## Utility functions definition
#def get_role($role_name, $default_roles=None)
#set target_role = $getVar($role_name, None)
#if isinstance($target_role, list)
#if $target_role
#set target_role = $target_role[0]
#else
#set target_role = None
#end if
#end if
#if $target_role is None and $default_roles is not None
#if isinstance($default_roles, list)
#for $role in $default_roles
#if $role is not None
#set target_role = $role
#break
#end if
#end for
#else
#set target_role = $default_roles
#end if
#end if
#return $target_role
#end def
#def getDictValue($path, $default_value="")
#set keys = str($path).split('/')
#set value = $getVar($keys[0], {})
#if not $value
#return $default_value
#else
#for $key in $keys[1:]
#try
#set value = $value[$key]
#except
#return $default_value
#end try
#end for
#end if
#return $value
#end def
{
"name": "base",
"description": "Environment used in testing the upstream cookbooks and reference Chef repository",
"cookbook_versions": {
},
"json_class": "Chef::Environment",
"chef_type": "environment",
"override_attributes": {
"compass": {
"cluster_id": "$id"
}
},
"default_attributes": {
"local_repo": "",
"mysql": {
"server_root_password": "$db_pass",
"server_debian_password": "$db_pass",
"server_repl_password": "$db_pass",
"allow_remote_root": true,
"root_network_acl": "%"
},
"collectd": {
"server": {
"host": "metrics",
"port": "4242",
"protocol": "tcp"
}
},
"memcached": {
"bind_interface": "$identity_nic"
},
"openstack": {
"use_databags": false,
"auth": {
"validate_certs": false
},
"block-storage": {
"syslog": {
"use": false
},
"api": {
"ratelimit": "False"
},
"debug": true
},
"dashboard": {
"use_ssl": "false"
},
"compute": {
"syslog": {
"use": false
},
"libvirt": {
"bind_interface": "$compute_nic"
},
"novnc_proxy": {
"bind_interface": "$compute_nic"
},
"xvpvnc_proxy": {
"bind_interface": "$compute_nic"
},
"ratelimit": {
"api": {
"enabled": false
},
"volume": {
"enabled": false
}
},
"network": {
"service_type": "neutron"
}
},
"network": {
"verbose": "True",
"debug": "True",
"service_plugins": [
"router"
],
"ml2": {
"type_drivers": "$tenant_network_type",
"tenant_network_types": "$tenant_network_type",
"tunnel_id_ranges": "$tunnel_id_ranges",
"network_vlan_ranges": "$vlan_ranges",
"enable_security_group": "True"
},
"openvswitch": {
"tenant_network_type": "$tenant_network_type",
"enable_tunneling": "$enable_tunneling",
"tunnel_id_ranges": "$tunnel_id_ranges",
"network_vlan_ranges": "$vlan_ranges",
"bridge_mappings": "$bridge_mappings",
"bind_interface": "$network_worker_tenant_nic"
},
"l3": {
"external_network_bridge_interface": "$network_worker_public_nic"
}
},
"db": {
"bind_interface": "$db_nic",
"compute": {
"host": "$db_ip"
},
"identity": {
"host": "$db_ip"
},
"image": {
"host": "$db_ip"
},
"network": {
"host": "$db_ip"
},
"volume": {
"host": "$db_ip"
},
"dashboard": {
"host": "$db_ip"
},
"telemetry": {
"host": "$db_ip"
},
"orchestration": {
"host": "$db_ip"
}
},
"developer_mode": true,
"endpoints": {
"db": {
"host": "$db_ip"
},
"mq": {
"host": "$msg_queue_ip"
},
"compute-api": {
"host": "$controller_ip",
"scheme": "http",
"port": "8774",
"path": "/v2/%(tenant_id)s"
},
"compute-api-bind": {
"bind_interface": "$controller_nic"
},
"compute-ec2-admin": {
"host": "$controller_ip",
"scheme": "http",
"port": "8773",
"path": "/services/Admin"
},
"compute-ec2-api": {
"host": "$controller_ip",
"scheme": "http",
"port": "8773",
"path": "/services/Cloud"
},
"compute-novnc": {
"host": "$controller_ip",
"scheme": "http",
"port": "6080",
"path": "/vnc_auto.html"
},
"compute-novnc-bind": {
"bind_interface": "$controller_nic"
},
"compute-vnc-bind": {
"bind_interface" : "$compute_nic"
},
"vnc_bind": {
"bind_interface": "$controller_nic"
},
"image-api": {
"host": "$glance_ip",
"scheme": "http",
"port": "9292",
"path": "/v2"
},
"image-api-bind": {
"bind_interface": "$glance_nic"
},
"image-registry": {
"host": "$glance_ip",
"scheme": "http",
"port": "9191",
"path": "/v2"
},
"image-registry-bind": {
"bind_interface": "$glance_nic"
},
"identity-bind": {
"bind_interface": "$identity_nic"
},
"identity-api": {
"host": "$identity_ip",
"scheme": "http",
"port": "5000",
"path": "/v2.0"
},
"identity-admin": {
"host": "$identity_ip",
"scheme": "http",
"port": "35357",
"path": "/v2.0"
},
"block-storage-api": {
"host": "$cinder_controller_ip",
"scheme": "http",
"port": "8776",
"path": "/v1/%(tenant_id)s"
},
"block-storage-api-bind": {
"bind_interface": "$cinder_controller_nic"
},
"telemetry-api": {
"host": "$controller_ip",
"scheme": "http",
"port": "8777",
"path": "/v1"
},
"network-api": {
"host": "$network_server_ip",
"scheme": "http",
"port": "9696",
"path": ""
},
"network-api-bind": {
"bind_interface": "$network_server_nic"
},
"orchestration-api": {
"host": "$controller_ip",
"scheme": "http",
"port": "8004",
"path": "/v1/%(tenant_id)s"
},
"orchestration-api-cfn": {
"host": "$controller_ip",
"scheme": "http",
"port": "8000",
"path": "/v1"
}
},
"identity": {
"admin_user": "admin",
"bind_interface": "$identity_nic",
"catalog": {
"backend": "sql"
},
"debug": true,
"roles": [
"admin",
"member"
],
"syslog": {
"use": false
},
"tenants": [
"admin",
"service",
"demo"
],
"token": {
"backend": "sql"
},
"users": {
"admin": {
"password": "admin",
"default_tenant": "admin",
"roles": {
"admin": [
"admin"
]
}
},
"demo": {
"password": "demo",
"default_tenant": "demo",
"roles": {
"member": [
"demo"
]
}
}
}
},
"image": {
"api": {
"bind_interface": "$glance_nic"
},
"debug": true,
"registry": {
"bind_interface": "$glance_nic"
},
"syslog": {
"use": false
},
"upload_image": {
"cirros": "http://download.cirros-cloud.net/0.3.2/cirros-0.3.2-x86_64-disk.img"
},
"upload_images": [
"cirros"
]
},
"mq": {
"user": "$mq_username",
"password": "$mq_pass",
"vhost": "/nova",
"network": {
"service_type": "rabbitmq"
}
}
}
}
}

View File

@ -1,15 +1,15 @@
#set controller_role = $get_role('os_compute_controller', $getVar('os_controller', None))
#set controller_role = $get_role('os_compute_controller', $get_role('os_controller', None))
## Contoller node management IP/interface
#set controller_ip = $controller_role.management.ip
#set controller_nic = $controller_role.management.interface
## Compute node management IP/interface
#set compute_role = $getVar('os_compute_worker', None)
#set compute_role = $get_role('os_compute_worker', None)
#set compute_nic = $compute_role.management.interface
## Network server/worker node IP/interface
#set network = $getVar('os_network', None)
#set network = $get_role('os_network', None)
#set network_server_role = $get_role('os_network_server', [$network, $controller_role])
#set network_worker_role = $get_role('os_network_worker', [$network, $compute_role])
@ -27,6 +27,13 @@
#def get_role($role_name, $default_roles=None)
#set target_role = $getVar($role_name, None)
#if isinstance($target_role, list)
#if $target_role
#set target_role = $target_role[0]
#else
#set target_role = None
#end if
#end if
#if $target_role is None and $default_roles is not None:
#if isinstance($default_roles, list)
#for $role in $default_roles

View File

@ -0,0 +1,7 @@
# Adapter definition: OpenStack Juno deployed with the ansible installer
# (OS provisioning still handled by cobbler).
NAME = 'openstack_juno'
DISPLAY_NAME = 'Openstack Juno'
# Inherits defaults from the generic 'openstack' adapter.
PARENT = 'openstack'
PACKAGE_INSTALLER = 'ansible_installer'
OS_INSTALLER = 'cobbler'
# Case-insensitive regex: only Ubuntu 14.04 images are supported.
SUPPORTED_OS_PATTERNS = ['(?i)ubuntu-14\.04.*']
DEPLOYABLE = True

View File

@ -0,0 +1,23 @@
# Deployment flavors (node layouts) offered by the openstack_juno adapter.
# Each flavor names the Cheetah template used to render its config and the
# roles a host may be assigned.
ADAPTER_NAME = 'openstack_juno'
FLAVORS = [{
    # Every service on a single node.
    'flavor': 'allinone',
    'display_name': 'All-In-One',
    'template': 'allinone.tmpl',
    'roles': ['allinone-compute'],
}, {
    # One controller plus dedicated compute/network/storage nodes.
    'flavor': 'single-controller',
    'display_name': 'Single Controller',
    'template': 'single-controller.tmpl',
    'roles': [
        'controller', 'compute', 'network', 'storage'
    ],
}, {
    # Fully split-out deployment: one role per node type.
    'flavor': 'multinodes',
    'display_name': 'Multi-nodes',
    'template': 'multinodes.tmpl',
    'roles': [
        'compute-controller', 'compute-worker', 'network-server',
        'network-worker', 'database', 'messaging', 'image',
        'dashboard', 'identity', 'storage-controller', 'storage-volume'
    ],
}]

View File

@ -0,0 +1,14 @@
# Package-installer registration for the ansible installer.
NAME = 'ansible_installer'
INSTANCE_NAME = 'ansible_installer'
# Paths/filenames consumed by the AnsibleInstaller when it builds a
# per-cluster run environment under 'ansible_run_dir'.
# NOTE(review): the installer also copies ``runner_files`` in addition to
# ``runner_dirs`` — confirm whether a 'runner_files' entry is needed here.
SETTINGS = {
    'ansible_dir': '/var/ansible',
    'ansible_run_dir': '/var/ansible/run',
    'ansible_log_file': 'ansible.log',
    'ansible_config': 'ansible.cfg',
    'playbook_file': 'site.yml',
    'inventory_file': 'inventory.yml',
    'group_variable': 'all',
    'etc_hosts_path': 'roles/common/templates/hosts',
    'runner_dirs': ['global_files', 'roles']
}

View File

@ -0,0 +1,66 @@
# Role definitions for the openstack_juno adapter.  Every entry uses the
# same schema: 'role' (internal name), 'display_name' (UI label) and
# 'description'.  Six entries previously used the key 'display' instead of
# 'display_name'; normalized for a consistent schema.
ADAPTER_NAME = 'openstack_juno'
ROLES = [{
    'role': 'allinone-compute',
    'display_name': 'all in one',
    'description': 'All in One'
}, {
    'role': 'controller',
    'display_name': 'controller node',
    'description': 'Controller Node'
}, {
    'role': 'compute',
    'display_name': 'compute node',
    'description': 'Compute Node'
}, {
    'role': 'storage',
    'display_name': 'storage node',
    'description': 'Storage Node'
}, {
    'role': 'network',
    'display_name': 'network node',
    'description': 'Network Node'
}, {
    'role': 'compute-worker',
    'display_name': 'Compute worker node',
    'description': 'Compute worker node'
}, {
    'role': 'compute-controller',
    'display_name': 'Compute controller node',
    'description': 'Compute controller node'
}, {
    'role': 'network-server',
    'display_name': 'Network server node',
    'description': 'Network server node'
}, {
    'role': 'database',
    'display_name': 'Database node',
    'description': 'Database node'
}, {
    'role': 'messaging',
    'display_name': 'Messaging queue node',
    'description': 'Messaging queue node'
}, {
    'role': 'image',
    'display_name': 'Image node',
    'description': 'Image node'
}, {
    'role': 'dashboard',
    'display_name': 'Dashboard node',
    'description': 'Dashboard node'
}, {
    'role': 'identity',
    'display_name': 'Identity node',
    'description': 'Identity node'
}, {
    'role': 'storage-controller',
    'display_name': 'Storage controller node',
    'description': 'Storage controller node'
}, {
    'role': 'storage-volume',
    'display_name': 'Storage volume node',
    'description': 'Storage volume node'
}, {
    'role': 'network-worker',
    'display_name': 'Network worker node',
    'description': 'Network worker node'
}]

View File

@ -0,0 +1,10 @@
## Render /etc/hosts entries for the all-in-one flavor: one
## "<management ip> <hostname>" line per allinone_compute node.
#set controllers = $getVar('allinone_compute', [])
## Normalize to a list when a single node fills the role.
#if not $isinstance($controllers, list)
#set controllers = [$controllers]
#end if
# allinone
#for controller in $controllers
#set controller_ip = $controller.management.ip
#set controller_hostname = $controller.hostname
$controller_ip $controller_hostname
#end for

View File

@ -0,0 +1,110 @@
## Render /etc/hosts entries for the multinodes flavor: one
## "<management ip> <hostname>" line per host, grouped by role.
## Lines starting with a single '#' are literal output (hosts-file
## comments); '##' lines are Cheetah comments and produce no output.
#set compute_controllers = $getVar('compute_controller', [])
#set compute_workers = $getVar('compute_worker', [])
#set network_servers = $getVar('network_server', [])
#set network_workers = $getVar('network_worker', [])
#set databases = $getVar('database', [])
#set messagings = $getVar('messaging', [])
#set images = $getVar('image', [])
#set dashboards = $getVar('dashboard', [])
#set identities = $getVar('identity', [])
#set storage_controllers = $getVar('storage_controller', [])
#set storage_volumes = $getVar('storage_volume', [])
## Normalize every role variable to a list when a single node fills it.
#if not $isinstance($compute_controllers, list)
#set compute_controllers = [$compute_controllers]
#end if
#if not $isinstance($compute_workers, list)
#set compute_workers = [$compute_workers]
#end if
#if not $isinstance($network_servers, list)
#set network_servers = [$network_servers]
#end if
#if not $isinstance($network_workers, list)
#set network_workers = [$network_workers]
#end if
#if not $isinstance($databases, list)
#set databases = [$databases]
#end if
#if not $isinstance($messagings, list)
#set messagings = [$messagings]
#end if
#if not $isinstance($images, list)
#set images = [$images]
#end if
#if not $isinstance($dashboards, list)
#set dashboards = [$dashboards]
#end if
#if not $isinstance($identities, list)
#set identities = [$identities]
#end if
#if not $isinstance($storage_controllers, list)
#set storage_controllers = [$storage_controllers]
#end if
#if not $isinstance($storage_volumes, list)
#set storage_volumes = [$storage_volumes]
#end if
## One output section per role, each tagged with a hosts-file comment.
# compute-controller
#for worker in $compute_controllers
#set worker_ip = $worker.management.ip
#set worker_hostname = $worker.hostname
$worker_ip $worker_hostname
#end for
# database
#for worker in $databases
#set worker_ip = $worker.management.ip
#set worker_hostname = $worker.hostname
$worker_ip $worker_hostname
#end for
# messaging
#for worker in $messagings
#set worker_ip = $worker.management.ip
#set worker_hostname = $worker.hostname
$worker_ip $worker_hostname
#end for
# storage-controller
#for worker in $storage_controllers
#set worker_ip = $worker.management.ip
#set worker_hostname = $worker.hostname
$worker_ip $worker_hostname
#end for
# image
#for worker in $images
#set worker_ip = $worker.management.ip
#set worker_hostname = $worker.hostname
$worker_ip $worker_hostname
#end for
# identity
#for worker in $identities
#set worker_ip = $worker.management.ip
#set worker_hostname = $worker.hostname
$worker_ip $worker_hostname
#end for
# network-server
#for worker in $network_servers
#set worker_ip = $worker.management.ip
#set worker_hostname = $worker.hostname
$worker_ip $worker_hostname
#end for
# dashboard
#for worker in $dashboards
#set worker_ip = $worker.management.ip
#set worker_hostname = $worker.hostname
$worker_ip $worker_hostname
#end for
# storage-volume
#for worker in $storage_volumes
#set worker_ip = $worker.management.ip
#set worker_hostname = $worker.hostname
$worker_ip $worker_hostname
#end for
# network-worker
#for worker in $network_workers
#set worker_ip = $worker.management.ip
#set worker_hostname = $worker.hostname
$worker_ip $worker_hostname
#end for
# compute-worker
#for worker in $compute_workers
#set worker_ip = $worker.management.ip
#set worker_hostname = $worker.hostname
$worker_ip $worker_hostname
#end for

View File

@ -0,0 +1,40 @@
## Render /etc/hosts entries for the single-controller flavor: one
## "<management ip> <hostname>" line per host, grouped by role.
#set controllers = $getVar('controller', [])
#set computes = $getVar('compute', [])
#set storages = $getVar('storage', [])
#set networks = $getVar('network', [])
## Normalize every role variable to a list when a single node fills it.
#if not $isinstance($controllers, list)
#set controllers = [$controllers]
#end if
#if not $isinstance($computes, list)
#set computes = [$computes]
#end if
#if not $isinstance($storages, list)
#set storages = [$storages]
#end if
#if not $isinstance($networks, list)
#set networks = [$networks]
#end if
# controller
#for controller in $controllers
#set controller_ip = $controller.management.ip
#set controller_hostname = $controller.hostname
$controller_ip $controller_hostname
#end for
# compute
#for worker in $computes
#set worker_ip = $worker.management.ip
#set worker_hostname = $worker.hostname
$worker_ip $worker_hostname
#end for
# storage
#for worker in $storages
#set worker_ip = $worker.management.ip
#set worker_hostname = $worker.hostname
$worker_ip $worker_hostname
#end for
# network
#for worker in $networks
#set worker_ip = $worker.management.ip
#set worker_hostname = $worker.hostname
$worker_ip $worker_hostname
#end for

View File

@ -0,0 +1,47 @@
## Ansible inventory for the all-in-one flavor: every section lists the
## same allinone_compute node(s) with ssh connection variables.
## Fixes: 'ansible_ssh_passowrd' typo in the [controller] section, and the
## [storage] loop iterating over bare 'storages' (missing '$').
#set controllers = $getVar('allinone_compute', [])
#set computes = $getVar('allinone_compute', [])
#set storages = $getVar('allinone_compute', [])
#set networks = $getVar('allinone_compute', [])
## Normalize every role variable to a list when a single node fills it.
#if not $isinstance($controllers, list)
#set controllers = [$controllers]
#end if
#if not $isinstance($computes, list)
#set computes = [$computes]
#end if
#if not $isinstance($storages, list)
#set storages = [$storages]
#end if
#if not $isinstance($networks, list)
#set networks = [$networks]
#end if
## SSH credentials used by ansible to reach the deployed nodes.
#set credentials = $getVar('server_credentials', {})
#set username = $credentials.get('username', 'root')
#set password = $credentials.get('password', 'root')
[controller]
#for controller in $controllers
#set controller_ip = $controller.management.ip
#set controller_hostname = $controller.hostname
$controller_hostname ansible_ssh_host=$controller_ip ansible_ssh_user=$username ansible_ssh_password=$password
#end for
[compute]
#for compute in $computes
#set compute_ip = $compute.management.ip
#set compute_hostname = $compute.hostname
$compute_hostname ansible_ssh_host=$compute_ip ansible_ssh_user=$username ansible_ssh_password=$password
#end for
[network]
#for network in $networks
#set network_ip = $network.management.ip
#set network_hostname = $network.hostname
$network_hostname ansible_ssh_host=$network_ip ansible_ssh_user=$username ansible_ssh_password=$password
#end for
[storage]
#for storage in $storages
#set storage_ip = $storage.management.ip
#set storage_hostname = $storage.hostname
$storage_hostname ansible_ssh_host=$storage_ip ansible_ssh_user=$username ansible_ssh_password=$password
#end for

View File

@ -0,0 +1,123 @@
## Cheetah template: ansible inventory for a fully distributed (multi-node)
## deployment — one inventory group per openstack service role.
#set compute_controllers = $getVar('compute_controller', [])
#set compute_workers = $getVar('compute_worker', [])
#set network_servers = $getVar('network_server', [])
#set network_workers = $getVar('network_worker', [])
#set databases = $getVar('database', [])
#set messagings = $getVar('messaging', [])
#set images = $getVar('image', [])
#set dashboards = $getVar('dashboard', [])
#set identities = $getVar('identity', [])
#set storage_controllers = $getVar('storage_controller', [])
#set storage_volumes = $getVar('storage_volume', [])
## A role assigned to a single host may arrive as a bare dict; normalize
## each value to a list so the #for loops below work.
#if not $isinstance($compute_controllers, list)
#set compute_controllers = [$compute_controllers]
#end if
#if not $isinstance($compute_workers, list)
#set compute_workers = [$compute_workers]
#end if
#if not $isinstance($network_servers, list)
#set network_servers = [$network_servers]
#end if
#if not $isinstance($network_workers, list)
#set network_workers = [$network_workers]
#end if
#if not $isinstance($databases, list)
#set databases = [$databases]
#end if
#if not $isinstance($messagings, list)
#set messagings = [$messagings]
#end if
#if not $isinstance($images, list)
#set images = [$images]
#end if
#if not $isinstance($dashboards, list)
#set dashboards = [$dashboards]
#end if
#if not $isinstance($identities, list)
#set identities = [$identities]
#end if
#if not $isinstance($storage_controllers, list)
#set storage_controllers = [$storage_controllers]
#end if
#if not $isinstance($storage_volumes, list)
#set storage_volumes = [$storage_volumes]
#end if
## SSH credentials ansible uses to reach the hosts; default root/root.
#set credentials = $getVar('server_credentials', {})
#set username = $credentials.get('username', 'root')
#set password = $credentials.get('password', 'root')
[compute-controller]
#for controller in $compute_controllers
#set controller_ip = $controller.management.ip
#set controller_hostname = $controller.hostname
## Fixed: was 'ansible_ssh_passowrd' (typo), which ansible would ignore,
## leaving the compute-controller hosts without an SSH password.
$controller_hostname ansible_ssh_host=$controller_ip ansible_ssh_user=$username ansible_ssh_password=$password
#end for
[compute-worker]
#for compute in $compute_workers
#set compute_ip = $compute.management.ip
#set compute_hostname = $compute.hostname
$compute_hostname ansible_ssh_host=$compute_ip ansible_ssh_user=$username ansible_ssh_password=$password
#end for
[network-server]
#for network in $network_servers
#set network_ip = $network.management.ip
#set network_hostname = $network.hostname
$network_hostname ansible_ssh_host=$network_ip ansible_ssh_user=$username ansible_ssh_password=$password
#end for
[network-worker]
#for network in $network_workers
#set network_ip = $network.management.ip
#set network_hostname = $network.hostname
$network_hostname ansible_ssh_host=$network_ip ansible_ssh_user=$username ansible_ssh_password=$password
#end for
[database]
#for worker in $databases
#set worker_ip = $worker.management.ip
#set worker_hostname = $worker.hostname
$worker_hostname ansible_ssh_host=$worker_ip ansible_ssh_user=$username ansible_ssh_password=$password
#end for
[messaging]
#for worker in $messagings
#set worker_ip = $worker.management.ip
#set worker_hostname = $worker.hostname
$worker_hostname ansible_ssh_host=$worker_ip ansible_ssh_user=$username ansible_ssh_password=$password
#end for
[image]
#for worker in $images
#set worker_ip = $worker.management.ip
#set worker_hostname = $worker.hostname
$worker_hostname ansible_ssh_host=$worker_ip ansible_ssh_user=$username ansible_ssh_password=$password
#end for
[dashboard]
#for worker in $dashboards
#set worker_ip = $worker.management.ip
#set worker_hostname = $worker.hostname
$worker_hostname ansible_ssh_host=$worker_ip ansible_ssh_user=$username ansible_ssh_password=$password
#end for
[identity]
#for worker in $identities
#set worker_ip = $worker.management.ip
#set worker_hostname = $worker.hostname
$worker_hostname ansible_ssh_host=$worker_ip ansible_ssh_user=$username ansible_ssh_password=$password
#end for
[storage-controller]
#for worker in $storage_controllers
#set worker_ip = $worker.management.ip
#set worker_hostname = $worker.hostname
$worker_hostname ansible_ssh_host=$worker_ip ansible_ssh_user=$username ansible_ssh_password=$password
#end for
[storage-volume]
#for worker in $storage_volumes
#set worker_ip = $worker.management.ip
#set worker_hostname = $worker.hostname
$worker_hostname ansible_ssh_host=$worker_ip ansible_ssh_user=$username ansible_ssh_password=$password
#end for

View File

@ -0,0 +1,47 @@
## Cheetah template: ansible inventory for a single-controller deployment
## with separate compute/network/storage hosts.
#set controllers = $getVar('controller', [])
#set computes = $getVar('compute', [])
#set storages = $getVar('storage', [])
#set networks = $getVar('network', [])
## A role assigned to a single host may arrive as a bare dict; normalize
## each value to a list so the #for loops below work.
#if not $isinstance($controllers, list)
#set controllers = [$controllers]
#end if
#if not $isinstance($computes, list)
#set computes = [$computes]
#end if
#if not $isinstance($storages, list)
#set storages = [$storages]
#end if
#if not $isinstance($networks, list)
#set networks = [$networks]
#end if
## SSH credentials ansible uses to reach the hosts; default root/root.
#set credentials = $getVar('server_credentials', {})
#set username = $credentials.get('username', 'root')
#set password = $credentials.get('password', 'root')
[controller]
#for controller in $controllers
#set controller_ip = $controller.management.ip
#set controller_hostname = $controller.hostname
## Fixed: was 'ansible_ssh_passowrd' (typo), which ansible would ignore,
## leaving the controller hosts without an SSH password.
$controller_hostname ansible_ssh_host=$controller_ip ansible_ssh_user=$username ansible_ssh_password=$password
#end for
[compute]
#for compute in $computes
#set compute_ip = $compute.management.ip
#set compute_hostname = $compute.hostname
$compute_hostname ansible_ssh_host=$compute_ip ansible_ssh_user=$username ansible_ssh_password=$password
#end for
[network]
#for network in $networks
#set network_ip = $network.management.ip
#set network_hostname = $network.hostname
$network_hostname ansible_ssh_host=$network_ip ansible_ssh_user=$username ansible_ssh_password=$password
#end for
[storage]
## Fixed: 'in storages' was missing the '$' placeholder prefix used by
## every sibling loop in this file.
#for storage in $storages
#set storage_ip = $storage.management.ip
#set storage_hostname = $storage.hostname
$storage_hostname ansible_ssh_host=$storage_ip ansible_ssh_user=$username ansible_ssh_password=$password
#end for

View File

@ -0,0 +1,80 @@
## Cheetah template: ansible group variables (YAML) for an all-in-one
## deployment — every service host variable resolves to the one controller.
#set controllers = $getVar('allinone_compute', [])
#if not $isinstance($controllers, list)
#set controllers = [$controllers]
#end if
## Emits one 'controller_host:' line per host; with multiple hosts the
## last YAML key wins — assumes allinone has exactly one host.
#for controller in $controllers
#set controller_ip = $controller.management.ip
#set controller_hostname = $controller.hostname
controller_host: $controller_ip
#end for
## External/internal NIC names taken from the same host's network config.
## NOTE(review): network_external_subnet is set but never used below.
#for network in $controllers
#set network_external_nic = $network.external.interface
#set network_external_subnet = $network.external.subnet
#set network_internal_nic = $network.management.interface
INTERFACE_NAME: $network_external_nic
INTERNAL_INTERFACE: $network_internal_nic
#end for
## RabbitMQ credentials from Compass service_credentials.
## NOTE(review): rabbit_username is set but never used below.
#set credentials = $getVar('service_credentials', {})
#set rabbit_username = $credentials.rabbitmq.username
#set rabbit_password = $credentials.rabbitmq.password
## All service endpoints collapse onto the single controller host.
compute_controller_host: "{{ controller_host }}"
db_host: "{{ controller_host }}"
rabbit_host: "{{ controller_host }}"
storage_controller_host: "{{ controller_host }}"
image_host: "{{ controller_host }}"
identity_host: "{{ controller_host }}"
network_server_host: "{{ controller_host }}"
dashboard_host: "{{ controller_host }}"
odl_controller: 10.1.0.15
DEBUG: False
VERBOSE: False
NTP_SERVER_LOCAL: "{{ controller_host }}"
DB_HOST: "{{ controller_host }}"
MQ_BROKER: rabbitmq
OPENSTACK_REPO: cloudarchive-juno.list
ADMIN_TOKEN: admin
CEILOMETER_TOKEN: c095d479023a0fd58a54
RABBIT_PASS: $rabbit_password
KEYSTONE_DBPASS: keystone_db_secret
DEMO_PASS: demo_secret
ADMIN_PASS: admin_secret
GLANCE_DBPASS: glance_db_secret
GLANCE_PASS: glance_secret
NOVA_DBPASS: nova_db_secret
NOVA_PASS: nova_secret
DASH_DBPASS: dash_db_secret
CINDER_DBPASS: cinder_db_secret
CINDER_PASS: cinder_secret
NEUTRON_DBPASS: neutron_db_secret
NEUTRON_PASS: netron_secret
NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan']
NEUTRON_TENANT_NETWORK_TYPES: ['vxlan']
#NEUTRON_MECHANISM_DRIVERS: ['opendaylight']
NEUTRON_MECHANISM_DRIVERS: ['openvswitch']
NEUTRON_TUNNEL_TYPES: ['vxlan']
METADATA_SECRET: metadata_secret
INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS: 10.1.1.21
EXTERNAL_NETWORK_CIDR: 203.0.113.0/24
EXTERNAL_NETWORK_GATEWAY: 203.0.113.1
FLOATING_IP_START: 203.0.113.101
FLOATING_IP_END: 203.0.113.200
build_in_image: http://cdn.download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
build_in_image_name: cirros-0.3.3-x86_64-disk.img
physical_device: /dev/sdb
internal_interface: "ansible_{{ INTERNAL_INTERFACE }}"
internal_ip: "{{ hostvars[inventory_hostname][internal_interface]['ipv4']['address'] }}"
odl_username: admin
odl_password: admin
odl_api_port: 8080

View File

@ -0,0 +1,140 @@
## Cheetah template: ansible group variables (YAML) for a multi-node
## deployment — each service host variable points at its own role's host.
#set compute_controllers = $getVar('compute_controller', [])
#set compute_workers = $getVar('compute_worker', [])
#set network_servers = $getVar('network_server', [])
#set network_workers = $getVar('network_worker', [])
#set databases = $getVar('database', [])
#set messagings = $getVar('messaging', [])
#set images = $getVar('image', [])
#set dashboards = $getVar('dashboard', [])
#set identities = $getVar('identity', [])
#set storage_controllers = $getVar('storage_controller', [])
#set storage_volumes = $getVar('storage_volume', [])
## A role assigned to a single host may arrive as a bare dict; normalize
## each value to a list so the #for loops below work.
#if not $isinstance($compute_controllers, list)
#set compute_controllers = [$compute_controllers]
#end if
#if not $isinstance($compute_workers, list)
#set compute_workers = [$compute_workers]
#end if
#if not $isinstance($network_servers, list)
#set network_servers = [$network_servers]
#end if
#if not $isinstance($network_workers, list)
#set network_workers = [$network_workers]
#end if
#if not $isinstance($databases, list)
#set databases = [$databases]
#end if
#if not $isinstance($messagings, list)
#set messagings = [$messagings]
#end if
#if not $isinstance($images, list)
#set images = [$images]
#end if
#if not $isinstance($dashboards, list)
#set dashboards = [$dashboards]
#end if
#if not $isinstance($identities, list)
#set identities = [$identities]
#end if
#if not $isinstance($storage_controllers, list)
#set storage_controllers = [$storage_controllers]
#end if
#if not $isinstance($storage_volumes, list)
#set storage_volumes = [$storage_volumes]
#end if
## One '<role>_host:' line per host; with multiple hosts per role the
## last YAML key wins — assumes one host per role.
#for worker in $compute_controllers
#set worker_ip = $worker.management.ip
compute_controller_host: $worker_ip
#end for
#for worker in $databases
#set worker_ip = $worker.management.ip
db_host: $worker_ip
#end for
#for worker in $messagings
#set worker_ip = $worker.management.ip
rabbit_host: $worker_ip
#end for
#for worker in $storage_controllers
#set worker_ip = $worker.management.ip
storage_controller_host: $worker_ip
#end for
#for worker in $images
#set worker_ip = $worker.management.ip
image_host: $worker_ip
#end for
#for worker in $identities
#set worker_ip = $worker.management.ip
identity_host: $worker_ip
#end for
## Fixed: a second, identical compute_controller_host loop appeared here,
## emitting a duplicate YAML key; it has been removed.
#for worker in $network_servers
#set worker_ip = $worker.management.ip
network_server_host: $worker_ip
#end for
#for worker in $dashboards
#set worker_ip = $worker.management.ip
dashboard_host: $worker_ip
#end for
## External/internal NIC names taken from the network-worker hosts.
#for network in $network_workers
#set network_external_nic = $network.external.interface
#set network_internal_nic = $network.management.interface
INTERFACE_NAME: $network_external_nic
INTERNAL_INTERFACE: $network_internal_nic
#end for
## RabbitMQ credentials from Compass service_credentials.
## NOTE(review): rabbit_username is set but never used below.
#set credentials = $getVar('service_credentials', {})
#set rabbit_username = $credentials.rabbitmq.username
#set rabbit_password = $credentials.rabbitmq.password
odl_controller: 10.1.0.15
DEBUG: False
VERBOSE: False
NTP_SERVER_LOCAL: "{{ compute_controller_host }}"
DB_HOST: "{{ db_host }}"
MQ_BROKER: rabbitmq
OPENSTACK_REPO: cloudarchive-juno.list
ADMIN_TOKEN: admin
CEILOMETER_TOKEN: c095d479023a0fd58a54
RABBIT_PASS: $rabbit_password
KEYSTONE_DBPASS: keystone_db_secret
DEMO_PASS: demo_secret
ADMIN_PASS: admin_secret
GLANCE_DBPASS: glance_db_secret
GLANCE_PASS: glance_secret
NOVA_DBPASS: nova_db_secret
NOVA_PASS: nova_secret
DASH_DBPASS: dash_db_secret
CINDER_DBPASS: cinder_db_secret
CINDER_PASS: cinder_secret
NEUTRON_DBPASS: neutron_db_secret
NEUTRON_PASS: netron_secret
NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan']
NEUTRON_TENANT_NETWORK_TYPES: ['vxlan']
#NEUTRON_MECHANISM_DRIVERS: ['opendaylight']
NEUTRON_MECHANISM_DRIVERS: ['openvswitch']
NEUTRON_TUNNEL_TYPES: ['vxlan']
METADATA_SECRET: metadata_secret
INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS: 10.1.1.21
EXTERNAL_NETWORK_CIDR: 203.0.113.0/24
EXTERNAL_NETWORK_GATEWAY: 203.0.113.1
FLOATING_IP_START: 203.0.113.101
FLOATING_IP_END: 203.0.113.200
build_in_image: http://cdn.download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
build_in_image_name: cirros-0.3.3-x86_64-disk.img
physical_device: /dev/sdb
internal_interface: "ansible_{{ INTERNAL_INTERFACE }}"
internal_ip: "{{ hostvars[inventory_hostname][internal_interface]['ipv4']['address'] }}"
odl_username: admin
odl_password: admin
odl_api_port: 8080

View File

@ -0,0 +1,92 @@
## Cheetah template: ansible group variables (YAML) for a single-controller
## deployment — every service host variable resolves to the one controller.
#set controllers = $getVar('controller', [])
#set computes = $getVar('compute', [])
#set networks = $getVar('network', [])
#set storages = $getVar('storage', [])
## A role assigned to a single host may arrive as a bare dict; normalize
## each value to a list so the #for loops below work.
#if not $isinstance($controllers, list)
#set controllers = [$controllers]
#end if
#if not $isinstance($computes, list)
#set computes = [$computes]
#end if
#if not $isinstance($networks, list)
#set networks = [$networks]
#end if
#if not $isinstance($storages, list)
#set storages = [$storages]
#end if
## Emits one 'controller_host:' line per host; with multiple hosts the
## last YAML key wins — assumes exactly one controller.
#for controller in $controllers
#set controller_ip = $controller.management.ip
#set controller_hostname = $controller.hostname
controller_host: $controller_ip
#end for
## External/internal NIC names taken from the network hosts.
#for network in $networks
#set network_external_nic = $network.external.interface
#set network_external_subnet = $network.external.subnet
#set network_internal_nic = $network.management.interface
INTERFACE_NAME: $network_external_nic
INTERNAL_INTERFACE: $network_internal_nic
#end for
## RabbitMQ credentials from Compass service_credentials.
## NOTE(review): rabbit_username is set but never used below.
#set credentials = $getVar('service_credentials', {})
#set rabbit_username = $credentials.rabbitmq.username
#set rabbit_password = $credentials.rabbitmq.password
## All service endpoints collapse onto the single controller host.
compute_controller_host: "{{ controller_host }}"
db_host: "{{ controller_host }}"
rabbit_host: "{{ controller_host }}"
storage_controller_host: "{{ controller_host }}"
image_host: "{{ controller_host }}"
identity_host: "{{ controller_host }}"
network_server_host: "{{ controller_host }}"
dashboard_host: "{{ controller_host }}"
odl_controller: 10.1.0.15
DEBUG: False
VERBOSE: False
NTP_SERVER_LOCAL: "{{ controller_host }}"
DB_HOST: "{{ controller_host }}"
MQ_BROKER: rabbitmq
OPENSTACK_REPO: cloudarchive-juno.list
ADMIN_TOKEN: admin
CEILOMETER_TOKEN: c095d479023a0fd58a54
RABBIT_PASS: $rabbit_password
KEYSTONE_DBPASS: keystone_db_secret
DEMO_PASS: demo_secret
ADMIN_PASS: admin_secret
GLANCE_DBPASS: glance_db_secret
GLANCE_PASS: glance_secret
NOVA_DBPASS: nova_db_secret
NOVA_PASS: nova_secret
DASH_DBPASS: dash_db_secret
CINDER_DBPASS: cinder_db_secret
CINDER_PASS: cinder_secret
NEUTRON_DBPASS: neutron_db_secret
NEUTRON_PASS: netron_secret
NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan']
NEUTRON_TENANT_NETWORK_TYPES: ['vxlan']
#NEUTRON_MECHANISM_DRIVERS: ['opendaylight']
NEUTRON_MECHANISM_DRIVERS: ['openvswitch']
NEUTRON_TUNNEL_TYPES: ['vxlan']
METADATA_SECRET: metadata_secret
INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS: 10.1.1.21
EXTERNAL_NETWORK_CIDR: 203.0.113.0/24
# EXTERNAL_NETWORK_CIDR: $network_external_subnet
EXTERNAL_NETWORK_GATEWAY: 203.0.113.1
FLOATING_IP_START: 203.0.113.101
FLOATING_IP_END: 203.0.113.200
build_in_image: http://cdn.download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
build_in_image_name: cirros-0.3.3-x86_64-disk.img
physical_device: /dev/sdb
internal_interface: "ansible_{{ INTERNAL_INTERFACE }}"
internal_ip: "{{ hostvars[inventory_hostname][internal_interface]['ipv4']['address'] }}"
odl_username: admin
odl_password: admin
odl_api_port: 8080

View File

@ -6,7 +6,7 @@
#set fsid = str(uuid.uuid4())
#set rbd_secret_uuid = str(uuid.uuid4())
#set ceph_osd = $getVar('ceph_osd', None)
#set ceph_osd = $get_role('ceph_osd', None)
#set public_network_subnet = $ceph_osd.public_network.subnet
#set cluster_network_subnet = $ceph_osd.cluster_network.subnet
@ -41,16 +41,16 @@
## OpenStack config
## Contoller node management IP/interface
#set controller = $get_role('os_compute_controller', $getVar('os_ceph_controller', None))
#set controller = $get_role('os_compute_controller', $get_role('os_ceph_controller', None))
#set controller_ip = $controller.management.ip
#set controller_nic = $controller.management.interface
## Compute node management IP/interface
#set compute = $getVar('os_ceph_compute_worker', None)
#set compute = $get_role('os_ceph_compute_worker', None)
#set compute_nic = $compute.management.interface
## Network server/worker node IP/interface
#set network = $getVar('os_network', None)
#set network = $get_role('os_network', None)
#set network_server = $get_role('os_network_server', [$network, $controller])
#set network_worker = $get_role('os_network_worker', [$network, $compute])
@ -108,6 +108,13 @@
## Utility functions definition
#def get_role($role_name, $default_roles=None)
#set target_role = $getVar($role_name, None)
#if isinstance($target_role, list)
#if $target_role
#set target_role = $target_role[0]
#else
#set target_role = None
#end if
#end if
#if $target_role is None and $default_roles is not None
#if isinstance($default_roles, list)
#for $role in $default_roles

View File

@ -1,15 +1,15 @@
#set controller = $get_role('os_compute_controller', [$getVar('os_controller', None), $getVar('allinone_compute', None)])
#set controller = $get_role('os_compute_controller', [$get_role('os_controller', None), $get_role('allinone_compute', None)])
## Contoller node management IP/interface
#set controller_ip = $controller.management.ip
#set controller_nic = $controller.management.interface
## Compute node management IP/interface
#set compute = $get_role('os_compute_worker', $getVar('allinone_compute', None))
#set compute = $get_role('os_compute_worker', $get_role('allinone_compute', None))
#set compute_nic = $compute.management.interface
## Network server/worker node IP/interface
#set network = $getVar('os_network', None)
#set network = $get_role('os_network', None)
#set network_server = $get_role('os_network_server', [$network, $controller])
#set network_worker = $get_role('os_network_worker', [$network, $compute])
@ -73,6 +73,13 @@
## Utility functions definition
#def get_role($role_name, $default_roles=None)
#set target_role = $getVar($role_name, None)
#if isinstance($target_role, list)
#if $target_role
#set target_role = $target_role[0]
#else
#set target_role = None
#end if
#end if
#if $target_role is None and $default_roles is not None
#if isinstance($default_roles, list)
#for $role in $default_roles