Switch to using openstack context templating

This commit is contained in:
James Page 2013-10-18 14:38:01 +01:00
parent ff58b0ebed
commit 3d154836aa
13 changed files with 571 additions and 184 deletions
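In outline, the change drops the charm's hand-rolled relation parsing and render_template() helper in favour of charm-helpers' OpenStack context templating: context generators turn relation and config data into plain dicts, and an OSConfigRenderer picks the right release-specific Jinja2 template and writes the file. A minimal sketch of that pattern, assuming charm-helpers is synced under hooks/charmhelpers (as charm-helpers.yaml below arranges) and that this runs inside a hook environment; ExampleContext is illustrative only:

from charmhelpers.contrib.openstack import context, templating

class ExampleContext(context.OSContextGenerator):
    interfaces = ['amqp']            # relations this context depends on

    def __call__(self):
        # Return a plain dict for the template; an incomplete relation
        # should yield {} so the renderer knows the file is not ready.
        return {'port': 8777}

configs = templating.OSConfigRenderer(templates_dir='templates',
                                      openstack_release='grizzly')
configs.register('/etc/ceilometer/ceilometer.conf',
                 [context.AMQPContext(), ExampleContext()])
configs.write_all()   # render and write every registered config file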

View File

@ -1,6 +1,7 @@
branch: lp:~openstack-charmers/charm-helpers/to_upstream
branch: lp:charm-helpers
destination: hooks/charmhelpers
include:
- core
- fetch
- contrib.openstack
- contrib.hahelpers

View File

@ -26,3 +26,11 @@ options:
default: RegionOne
type: string
description: OpenStack Region
rabbit-user:
default: ceilometer
type: string
description: Username to request access on rabbitmq-server.
rabbit-vhost:
default: openstack
type: string
description: RabbitMQ virtual host to request access on rabbitmq-server.

View File

@ -0,0 +1,62 @@
import os
import uuid
from charmhelpers.core.hookenv import (
relation_ids,
relation_get,
related_units,
config
)
from charmhelpers.contrib.openstack.context import (
OSContextGenerator,
context_complete
)
CEILOMETER_DB = 'ceilometer'
class LoggingConfigContext(OSContextGenerator):
def __call__(self):
return {'debug': config('debug'), 'verbose': config('verbose')}
class MongoDBContext(OSContextGenerator):
interfaces = ['mongodb']
def __call__(self):
for relid in relation_ids('shared-db'):
for unit in related_units(relid):
conf = {
"db_host": relation_get('hostname', unit, relid),
"db_port": relation_get('port', unit, relid),
"db_name": CEILOMETER_DB
}
if context_complete(conf):
return conf
return {}
SHARED_SECRET = "/etc/ceilometer/secret.txt"
def get_shared_secret():
secret = None
if not os.path.exists(SHARED_SECRET):
secret = str(uuid.uuid4())
with open(SHARED_SECRET, 'w') as secret_file:
secret_file.write(secret)
else:
with open(SHARED_SECRET, 'r') as secret_file:
secret = secret_file.read().strip()
return secret
CEILOMETER_PORT = 8777
class CeilometerContext(OSContextGenerator):
def __call__(self):
ctxt = {
'port': CEILOMETER_PORT,
'metering_secret': get_shared_secret()
}
return ctxt
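For reference, context_complete() (imported from charm-helpers above) simply rejects a dict that still has missing values, so MongoDBContext returns {} until the shared-db relation has advertised both hostname and port. A small illustration with made-up values (charm-helpers' logging means this only really runs inside a hook environment):

from charmhelpers.contrib.openstack.context import context_complete

# False: 'db_port' is still unset, so the context would yield {}
context_complete({'db_host': '10.0.0.5', 'db_port': None,
                  'db_name': 'ceilometer'})

# True: all values present, so the dict is handed to the template
context_complete({'db_host': '10.0.0.5', 'db_port': '27017',
                  'db_name': 'ceilometer'})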

View File

@ -1,8 +1,6 @@
#!/usr/bin/python
import os
import sys
#import lib.utils as utils
from charmhelpers.fetch import (
apt_install, filter_installed_packages,
apt_update
@ -10,35 +8,31 @@ from charmhelpers.fetch import (
from charmhelpers.core.hookenv import (
open_port,
relation_set,
relation_get,
relation_ids,
related_units,
config,
unit_get,
Hooks, UnregisteredHookError,
log
)
from charmhelpers.core.host import (
service_restart,
restart_on_change
)
from charmhelpers.contrib.openstack.utils import (
configure_installation_source
)
from ceilometer_utils import (
CEILOMETER_PACKAGES,
CEILOMETER_PORT,
RABBIT_USER,
RABBIT_VHOST,
CEILOMETER_DB,
CEILOMETER_SERVICES,
CEILOMETER_SERVICE,
CEILOMETER_ROLE,
CEILOMETER_CONF,
get_shared_secret,
render_template
register_configs,
restart_map,
get_ceilometer_context
)
from ceilometer_contexts import CEILOMETER_PORT
hooks = Hooks()
CONFIGS = register_configs()
@hooks.hook()
@ -52,8 +46,8 @@ def install():
@hooks.hook("amqp-relation-joined")
def amqp_joined():
relation_set(username=RABBIT_USER,
vhost=RABBIT_VHOST)
relation_set(username=config('rabbit-user'),
vhost=config('rabbit-vhost'))
@hooks.hook("shared-db-relation-joined")
@ -64,18 +58,17 @@ def db_joined():
@hooks.hook("amqp-relation-changed",
"shared-db-relation-changed",
"identity-service-relation-changed")
def all_changed():
if render_ceilometer_conf():
for svc in CEILOMETER_SERVICES:
service_restart(svc)
ceilometer_joined()
@restart_on_change(restart_map())
def any_changed():
CONFIGS.write_all()
ceilometer_joined()
@hooks.hook('config-changed',
'upgrade-charm')
def config_changed():
install()
all_changed()
any_changed()
@hooks.hook("identity-service-relation-joined")
@ -83,107 +76,18 @@ def keystone_joined():
url = "http://{}:{}".format(unit_get("private-address"),
CEILOMETER_PORT)
region = config("region")
relation_set(service=CEILOMETER_SERVICE,
public_url=url, admin_url=url, internal_url=url,
requested_roles=CEILOMETER_ROLE,
region=region)
def get_rabbit_conf():
for relid in relation_ids('amqp'):
for unit in related_units(relid):
conf = {
"rabbit_host": relation_get('private-address',
unit, relid),
"rabbit_virtual_host": RABBIT_VHOST,
"rabbit_userid": RABBIT_USER,
"rabbit_password": relation_get('password',
unit, relid)
}
if relation_get('clustered',
unit, relid):
conf["rabbit_host"] = relation_get('vip', unit, relid)
if None not in conf.itervalues():
return conf
return None
def get_db_conf():
for relid in relation_ids('shared-db'):
for unit in related_units(relid):
conf = {
"db_host": relation_get('hostname', unit, relid),
"db_port": relation_get('port', unit, relid),
"db_name": CEILOMETER_DB
}
if None not in conf.itervalues():
return conf
return None
def get_keystone_conf():
for relid in relation_ids('identity-service'):
for unit in related_units(relid):
keystone_username = relation_get('service_username',
unit, relid)
keystone_port = relation_get('service_port',
unit, relid)
keystone_host = relation_get('service_host',
unit, relid)
keystone_password = relation_get('service_password',
unit, relid)
keystone_tenant = relation_get('service_tenant',
unit, relid)
conf = {
"keystone_os_username": keystone_username,
"keystone_os_password": keystone_password,
"keystone_os_tenant": keystone_tenant,
"keystone_host": keystone_host,
"keystone_port": keystone_port
}
if None not in conf.itervalues():
return conf
return None
def render_ceilometer_conf():
context = get_rabbit_conf()
contextdb = get_db_conf()
contextkeystone = get_keystone_conf()
if (context and contextdb and contextkeystone and
os.path.exists(CEILOMETER_CONF)):
# merge contexts
context.update(contextkeystone)
context.update(contextdb)
context['metering_secret'] = get_shared_secret()
context['service_port'] = CEILOMETER_PORT
context['debug'] = config('debug')
context['verbose'] = config('verbose')
with open(CEILOMETER_CONF, "w") as conf:
conf.write(render_template(os.path.basename(CEILOMETER_CONF),
context))
return True
return False
@hooks.hook("ceilometer-service-relation-joined")
def ceilometer_joined():
# set all relationships for ceilometer service
context = get_rabbit_conf()
contextdb = get_db_conf()
contextkeystone = get_keystone_conf()
if context and contextdb and contextkeystone:
context.update(contextdb)
context.update(contextkeystone)
context["metering_secret"] = get_shared_secret()
# set all that info into ceilometer-service relationship
for relid in relation_ids('ceilometer-service'):
relation_set(relid, context)
# Pass local context data onto related agent services
context = get_ceilometer_context()
for relid in relation_ids('ceilometer-service'):
relation_set(relid, context)
if __name__ == '__main__':
try:

View File

@ -1,61 +1,97 @@
import os
import uuid
from charmhelpers.fetch import apt_install as install
from charmhelpers.contrib.openstack import (
templating,
context,
)
from ceilometer_contexts import (
LoggingConfigContext,
MongoDBContext,
CeilometerContext
)
from charmhelpers.contrib.openstack.utils import (
get_os_codename_package
)
RABBIT_USER = "ceilometer"
RABBIT_VHOST = "openstack"
CEILOMETER_CONF = "/etc/ceilometer/ceilometer.conf"
SHARED_SECRET = "/etc/ceilometer/secret.txt"
CEILOMETER_SERVICES = [
'ceilometer-agent-central', 'ceilometer-collector',
'ceilometer-agent-central',
'ceilometer-collector',
'ceilometer-api'
]
CEILOMETER_DB = "ceilometer"
CEILOMETER_SERVICE = "ceilometer"
CEILOMETER_COMPUTE_SERVICES = ['ceilometer-agent-compute']
CEILOMETER_PACKAGES = [
'python-ceilometer', 'ceilometer-common',
'ceilometer-agent-central', 'ceilometer-collector', 'ceilometer-api'
'ceilometer-agent-central',
'ceilometer-collector',
'ceilometer-api'
]
CEILOMETER_AGENT_PACKAGES = [
'python-ceilometer', 'ceilometer-common',
'ceilometer-agent-compute'
]
CEILOMETER_PORT = 8777
CEILOMETER_ROLE = "ResellerAdmin"
NOVA_CONF = "/etc/nova/nova.conf"
NOVA_SETTINGS = [
('DEFAULT', 'instance_usage_audit', 'True'),
('DEFAULT', 'instance_usage_audit_period', 'hour'),
('DEFAULT', 'notification_driver', 'ceilometer.compute.nova_notifier')
]
#NOVA_CONF = "/etc/nova/nova.conf"
#NOVA_SETTINGS = [
# ('DEFAULT', 'instance_usage_audit', 'True'),
# ('DEFAULT', 'instance_usage_audit_period', 'hour'),
# ('DEFAULT', 'notification_driver', 'ceilometer.compute.nova_notifier')
#]
CONFIG_FILES = {
CEILOMETER_CONF: {
'hook_contexts': [context.IdentityServiceContext(),
context.AMQPContext(),
LoggingConfigContext(),
MongoDBContext(),
CeilometerContext()],
'services': CEILOMETER_SERVICES
}
}
TEMPLATES = 'templates'
def get_shared_secret():
secret = None
if not os.path.exists(SHARED_SECRET):
secret = str(uuid.uuid4())
with open(SHARED_SECRET, 'w') as secret_file:
secret_file.write(secret)
else:
with open(SHARED_SECRET, 'r') as secret_file:
secret = secret_file.read().strip()
return secret
def register_configs():
"""
Register config files with their respective contexts.
Registration of some configs may not be required depending on
the existence of certain relations.
"""
# if called without anything installed (e.g. during install hook)
# just default to the earliest supported release; configs don't get touched
# until post-install anyway.
release = get_os_codename_package('ceilometer-common', fatal=False) \
or 'grizzly'
configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
openstack_release=release)
for conf in CONFIG_FILES:
configs.register(conf, CONFIG_FILES[conf]['hook_contexts'])
return configs
TEMPLATES_DIR = 'templates'
def restart_map():
'''
Determine the correct resource map to be passed to
charmhelpers.core.restart_on_change() based on the services configured.
:returns: dict: A dictionary mapping config file to lists of services
that should be restarted when file changes.
'''
try:
import jinja2
except ImportError:
install(['python-jinja2'])
import jinja2
_map = {}
for f, ctxt in CONFIG_FILES.iteritems():
svcs = []
for svc in ctxt['services']:
svcs.append(svc)
if svcs:
_map[f] = svcs
return _map
def render_template(template_name, context, template_dir=TEMPLATES_DIR):
templates = \
jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir))
template = templates.get_template(template_name)
return template.render(context)
def get_ceilometer_context():
''' Retrieve a map of all current relation data for agent configuration '''
ctxt = {}
for context in CONFIG_FILES[CEILOMETER_CONF]['hook_contexts']:
ctxt.update(context())
return ctxt
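Tying this back to ceilometer_hooks.py above, a short sketch of how these helpers are consumed; nothing here is new beyond the imports, and the restart_map() value shown is just what CONFIG_FILES resolves to for this charm:

from charmhelpers.core.host import restart_on_change
from ceilometer_utils import register_configs, restart_map

CONFIGS = register_configs()

# restart_map() -> {'/etc/ceilometer/ceilometer.conf':
#                   ['ceilometer-agent-central', 'ceilometer-collector',
#                    'ceilometer-api']}
@restart_on_change(restart_map())
def any_changed():
    CONFIGS.write_all()   # re-render registered files; the decorator then
                          # restarts the mapped services if a file changed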

View File

@ -0,0 +1,58 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import subprocess
from charmhelpers.core.hookenv import (
config as config_get,
relation_get,
relation_ids,
related_units as relation_list,
log,
INFO,
)
def get_cert():
cert = config_get('ssl_cert')
key = config_get('ssl_key')
if not (cert and key):
log("Inspecting identity-service relations for SSL certificate.",
level=INFO)
cert = key = None
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
if not cert:
cert = relation_get('ssl_cert',
rid=r_id, unit=unit)
if not key:
key = relation_get('ssl_key',
rid=r_id, unit=unit)
return (cert, key)
def get_ca_cert():
ca_cert = None
log("Inspecting identity-service relations for CA SSL certificate.",
level=INFO)
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
if not ca_cert:
ca_cert = relation_get('ca_cert',
rid=r_id, unit=unit)
return ca_cert
def install_ca_cert(ca_cert):
if ca_cert:
with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
'w') as crt:
crt.write(ca_cert)
subprocess.check_call(['update-ca-certificates', '--fresh'])

View File

@ -0,0 +1,183 @@
#
# Copyright 2012 Canonical Ltd.
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import subprocess
import os
from socket import gethostname as get_unit_hostname
from charmhelpers.core.hookenv import (
log,
relation_ids,
related_units as relation_list,
relation_get,
config as config_get,
INFO,
ERROR,
unit_get,
)
class HAIncompleteConfig(Exception):
pass
def is_clustered():
for r_id in (relation_ids('ha') or []):
for unit in (relation_list(r_id) or []):
clustered = relation_get('clustered',
rid=r_id,
unit=unit)
if clustered:
return True
return False
def is_leader(resource):
cmd = [
"crm", "resource",
"show", resource
]
try:
status = subprocess.check_output(cmd)
except subprocess.CalledProcessError:
return False
else:
if get_unit_hostname() in status:
return True
else:
return False
def peer_units():
peers = []
for r_id in (relation_ids('cluster') or []):
for unit in (relation_list(r_id) or []):
peers.append(unit)
return peers
def oldest_peer(peers):
local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
for peer in peers:
remote_unit_no = int(peer.split('/')[1])
if remote_unit_no < local_unit_no:
return False
return True
def eligible_leader(resource):
if is_clustered():
if not is_leader(resource):
log('Deferring action to CRM leader.', level=INFO)
return False
else:
peers = peer_units()
if peers and not oldest_peer(peers):
log('Deferring action to oldest service unit.', level=INFO)
return False
return True
def https():
'''
Determines whether enough data has been provided in configuration
or relation data to configure HTTPS.
returns: boolean
'''
if config_get('use-https') == "yes":
return True
if config_get('ssl_cert') and config_get('ssl_key'):
return True
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
rel_state = [
relation_get('https_keystone', rid=r_id, unit=unit),
relation_get('ssl_cert', rid=r_id, unit=unit),
relation_get('ssl_key', rid=r_id, unit=unit),
relation_get('ca_cert', rid=r_id, unit=unit),
]
# NOTE: works around (LP: #1203241)
if (None not in rel_state) and ('' not in rel_state):
return True
return False
def determine_api_port(public_port):
'''
Determine correct API server listening port based on
existence of HTTPS reverse proxy and/or haproxy.
public_port: int: standard public port for given service
returns: int: the correct listening port for the API service
'''
i = 0
if len(peer_units()) > 0 or is_clustered():
i += 1
if https():
i += 1
return public_port - (i * 10)
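# Worked example: with ceilometer's public API port of 8777, a deployment
# fronted by haproxy alone would have the API itself listen on 8767, and on
# 8757 with an HTTPS reverse proxy in front as well; each active layer steps
# the listening port down by 10 so the proxies can bind the public port.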
def determine_haproxy_port(public_port):
'''
Description: Determine correct haproxy listening port based on the public
port and the existence of an HTTPS reverse proxy.
public_port: int: standard public port for given service
returns: int: the correct listening port for the HAProxy service
'''
i = 0
if https():
i += 1
return public_port - (i * 10)
def get_hacluster_config():
'''
Obtains all relevant configuration from charm configuration required
for initiating a relation to hacluster:
ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr
returns: dict: A dict containing settings keyed by setting name.
raises: HAIncompleteConfig if settings are missing.
'''
settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
conf = {}
for setting in settings:
conf[setting] = config_get(setting)
missing = []
[missing.append(s) for s, v in conf.iteritems() if v is None]
if missing:
log('Insufficient config data to configure hacluster.', level=ERROR)
raise HAIncompleteConfig
return conf
def canonical_url(configs, vip_setting='vip'):
'''
Returns the correct HTTP URL to this host given the state of HTTPS
configuration and hacluster.
:configs : OSConfigRenderer: A config templating object to inspect for
a complete https context.
:vip_setting: str: Setting in charm config that specifies
VIP address.
'''
scheme = 'http'
if 'https' in configs.complete_contexts():
scheme = 'https'
if is_clustered():
addr = config_get(vip_setting)
else:
addr = unit_get('private-address')
return '%s://%s' % (scheme, addr)

View File

@ -1,3 +1,4 @@
import json
import os
from base64 import b64decode
@ -21,6 +22,7 @@ from charmhelpers.core.hookenv import (
related_units,
unit_get,
unit_private_ip,
ERROR,
WARNING,
)
@ -431,3 +433,90 @@ class OSConfigFlagContext(OSContextGenerator):
flags[k.strip()] = v
ctxt = {'user_config_flags': flags}
return ctxt
class SubordinateConfigContext(OSContextGenerator):
"""
Responsible for inspecting relations to subordinates that
may be exporting required config via a json blob.
The subordinate interface allows subordinates to export their
configuration requirements to the principal for multiple config
files and multiple services. For example, a subordinate that has
interfaces to both glance and nova may export the following yaml
blob as json:
glance:
/etc/glance/glance-api.conf:
sections:
DEFAULT:
- [key1, value1]
/etc/glance/glance-registry.conf:
MYSECTION:
- [key2, value2]
nova:
/etc/nova/nova.conf:
sections:
DEFAULT:
- [key3, value3]
It is then up to the principal charms to subscribe this context to
the service+config file it is interested in. Configuration data will
be available in the template context, in glance's case, as:
ctxt = {
... other context ...
'subordinate_config': {
'DEFAULT': {
'key1': 'value1',
},
'MYSECTION': {
'key2': 'value2',
},
}
}
"""
def __init__(self, service, config_file, interface):
"""
:param service : Service name key to query in any subordinate
data found
:param config_file : Service's config file to query sections
:param interface : Subordinate interface to inspect
"""
self.service = service
self.config_file = config_file
self.interface = interface
def __call__(self):
ctxt = {}
for rid in relation_ids(self.interface):
for unit in related_units(rid):
sub_config = relation_get('subordinate_configuration',
rid=rid, unit=unit)
if sub_config and sub_config != '':
try:
sub_config = json.loads(sub_config)
except:
log('Could not parse JSON from subordinate_config '
'setting from %s' % rid, level=ERROR)
continue
if self.service not in sub_config:
log('Found subordinate_config on %s but it contained '
'nothing for %s service' % (rid, self.service))
continue
sub_config = sub_config[self.service]
if self.config_file not in sub_config:
log('Found subordinate_config on %s but it contained '
'nothing for %s' % (rid, self.config_file))
continue
sub_config = sub_config[self.config_file]
for k, v in sub_config.iteritems():
ctxt[k] = v
if not ctxt:
ctxt['sections'] = {}
return ctxt
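A sketch of how a principal charm might wire this context in; the service, path and interface names below are purely illustrative and are not part of this commit:

from charmhelpers.contrib.openstack.context import SubordinateConfigContext

# Hypothetical: pull whatever the subordinate exports for nova.conf
# (typically a 'sections' map, as in the docstring above) into the
# template context for that file.
nova_sub = SubordinateConfigContext(service='nova',
                                    config_file='/etc/nova/nova.conf',
                                    interface='nova-subordinate')
# e.g. configs.register('/etc/nova/nova.conf', [nova_sub, ...])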

View File

@ -85,7 +85,7 @@ def neutron_plugin_attribute(plugin, attr, net_manager=None):
_plugin = plugins[plugin]
except KeyError:
log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
raise
raise Exception
try:
return _plugin[attr]
@ -108,7 +108,7 @@ def network_manager():
if release in ['essex']:
# E does not support neutron
log('Neutron networking not supported in Essex.', level=ERROR)
raise
raise Exception
elif release in ['folsom', 'grizzly']:
# neutron is named quantum in F and G
return 'quantum'

View File

@ -1,26 +0,0 @@
[DEFAULT]
debug = {{ debug }}
verbose = {{ verbose }}
metering_secret = {{ metering_secret }}
control_exchange = ceilometer
metering_api_port = {{ service_port }}
rabbit_host = {{ rabbit_host }}
rabbit_port = 5672
rabbit_userid = {{ rabbit_userid }}
rabbit_password = {{ rabbit_password }}
rabbit_virtual_host = {{ rabbit_virtual_host }}
database_connection = mongodb://{{ db_host }}:{{ db_port }}/{{ db_name }}
os_auth_url = http://{{ keystone_host }}:{{ keystone_port }}/v2.0
os_tenant_name = {{ keystone_os_tenant }}
os_username = {{ keystone_os_username }}
os_password = {{ keystone_os_password }}
logdir = /var/log/ceilometer
[keystone_authtoken]
auth_host = {{ keystone_host }}
auth_port = {{ keystone_port }}
auth_protocol = http
admin_tenant_name = {{ keystone_os_tenant }}
admin_user = {{ keystone_os_username }}
admin_password = {{ keystone_os_password }}

View File

@ -0,0 +1,34 @@
# grizzly
###############################################################################
# [ WARNING ]
# ceilometer configuration file maintained by Juju
# local changes may be overwritten.
###############################################################################
[DEFAULT]
debug = {{ debug }}
verbose = {{ verbose }}
metering_secret = {{ metering_secret }}
metering_api_port = {{ port }}
rabbit_host = {{ rabbitmq_host }}
rabbit_userid = {{ rabbitmq_user }}
rabbit_password = {{ rabbitmq_password }}
rabbit_virtual_host = {{ rabbitmq_virtual_host }}
database_connection = mongodb://{{ db_host }}:{{ db_port }}/{{ db_name }}
os_auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/v2.0
os_tenant_name = {{ admin_tenant_name }}
os_username = {{ admin_user }}
os_password = {{ admin_password }}
logdir = /var/log/ceilometer
[keystone_authtoken]
auth_host = {{ auth_host }}
auth_port = {{ auth_port }}
auth_protocol = {{ auth_protocol }}
admin_tenant_name = {{ admin_tenant_name }}
admin_user = {{ admin_user }}
admin_password = {{ admin_password }}

View File

@ -0,0 +1,38 @@
# havana
###############################################################################
# [ WARNING ]
# ceilometer configuration file maintained by Juju
# local changes may be overwritten.
###############################################################################
[DEFAULT]
debug = {{ debug }}
verbose = {{ verbose }}
rabbit_host = {{ rabbitmq_host }}
rabbit_userid = {{ rabbitmq_user }}
rabbit_password = {{ rabbitmq_password }}
rabbit_virtual_host = {{ rabbitmq_virtual_host }}
[api]
port = {{ port }}
[service_credentials]
os_auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/v2.0
os_tenant_name = {{ admin_tenant_name }}
os_username = {{ admin_user }}
os_password = {{ admin_password }}
[database]
connection = mongodb://{{ db_host }}:{{ db_port }}/{{ db_name }}
[publisher_rpc]
metering_secret = {{ metering_secret }}
[keystone_authtoken]
auth_host = {{ auth_host }}
auth_port = {{ auth_port }}
auth_protocol = {{ auth_protocol }}
admin_tenant_name = {{ admin_tenant_name }}
admin_user = {{ admin_user }}
admin_password = {{ admin_password }}