From 5c15290088b05d7461f07519f86170b2267621e3 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 9 Sep 2014 09:47:14 +0000 Subject: [PATCH 01/22] Added 0mq support --- charm-helpers-hooks.yaml | 2 +- .../charmhelpers/contrib/openstack/context.py | 14 +++ hooks/charmhelpers/contrib/openstack/utils.py | 1 + hooks/charmhelpers/core/hookenv.py | 41 +++--- hooks/charmhelpers/core/services/base.py | 3 + hooks/nova_compute_hooks.py | 15 +++ hooks/nova_compute_utils.py | 3 +- hooks/zeromq-configuration-relation-changed | 1 + hooks/zeromq-configuration-relation-joined | 1 + metadata.yaml | 3 + templates/icehouse/nova.conf | 118 ++++++++++++++++++ templates/parts/zeromq | 6 + 12 files changed, 192 insertions(+), 16 deletions(-) create mode 120000 hooks/zeromq-configuration-relation-changed create mode 120000 hooks/zeromq-configuration-relation-joined create mode 100644 templates/icehouse/nova.conf create mode 100644 templates/parts/zeromq diff --git a/charm-helpers-hooks.yaml b/charm-helpers-hooks.yaml index f1b156d4..541eecc2 100644 --- a/charm-helpers-hooks.yaml +++ b/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: lp:~openstack-charmers/charm-helpers/0mq destination: hooks/charmhelpers include: - core diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index d41b74a2..988bef19 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -21,6 +21,7 @@ from charmhelpers.core.hookenv import ( relation_get, relation_ids, related_units, + is_relation_made, relation_set, unit_get, unit_private_ip, @@ -787,3 +788,16 @@ class SyslogContext(OSContextGenerator): 'use_syslog': config('use-syslog') } return ctxt + + +class ZeroMQContext(OSContextGenerator): + interfaces = ['zeromq-configuration'] + + def __call__(self): + ctxt = {} + if is_relation_made('zeromq-configuration', 'host'): + for rid in relation_ids('zeromq-configuration'): + for unit in related_units(rid): + ctxt['zmq_nonce'] = relation_get('nonce', unit, rid) + ctxt['zmq_host'] = relation_get('host', unit, rid) + return ctxt \ No newline at end of file diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 20943c20..23d237de 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -70,6 +70,7 @@ SWIFT_CODENAMES = OrderedDict([ ('1.13.0', 'icehouse'), ('1.12.0', 'icehouse'), ('1.11.0', 'icehouse'), + ('2.0.0', 'juno'), ]) DEFAULT_LOOPBACK_SIZE = '5G' diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index eb4aa092..f396e03a 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -156,12 +156,15 @@ def hook_name(): class Config(dict): - """A Juju charm config dictionary that can write itself to - disk (as json) and track which values have changed since - the previous hook invocation. + """A dictionary representation of the charm's config.yaml, with some + extra features: - Do not instantiate this object directly - instead call - ``hookenv.config()`` + - See which values in the dictionary have changed since the previous hook. + - For values that have changed, see what the previous value was. + - Store arbitrary data for use in a later hook. + + NOTE: Do not instantiate this object directly - instead call + ``hookenv.config()``, which will return an instance of :class:`Config`. 
Example usage:: @@ -170,8 +173,8 @@ class Config(dict): >>> config = hookenv.config() >>> config['foo'] 'bar' + >>> # store a new key/value for later use >>> config['mykey'] = 'myval' - >>> config.save() >>> # user runs `juju set mycharm foo=baz` @@ -188,22 +191,23 @@ class Config(dict): >>> # keys/values that we add are preserved across hooks >>> config['mykey'] 'myval' - >>> # don't forget to save at the end of hook! - >>> config.save() """ CONFIG_FILE_NAME = '.juju-persistent-config' def __init__(self, *args, **kw): super(Config, self).__init__(*args, **kw) + self.implicit_save = True self._prev_dict = None self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) if os.path.exists(self.path): self.load_previous() def load_previous(self, path=None): - """Load previous copy of config from disk so that current values - can be compared to previous values. + """Load previous copy of config from disk. + + In normal usage you don't need to call this method directly - it + is called automatically at object initialization. :param path: @@ -218,8 +222,8 @@ class Config(dict): self._prev_dict = json.load(f) def changed(self, key): - """Return true if the value for this key has changed since - the last save. + """Return True if the current value for this key is different from + the previous value. """ if self._prev_dict is None: @@ -228,7 +232,7 @@ class Config(dict): def previous(self, key): """Return previous value for this key, or None if there - is no "previous" value. + is no previous value. """ if self._prev_dict: @@ -238,7 +242,13 @@ class Config(dict): def save(self): """Save this config to disk. - Preserves items in _prev_dict that do not exist in self. + If the charm is using the :mod:`Services Framework ` + or :meth:'@hook ' decorator, this + is called automatically at the end of successful hook execution. + Otherwise, it should be called directly by user code. + + To disable automatic saves, set ``implicit_save=False`` on this + instance. 
""" if self._prev_dict: @@ -478,6 +488,9 @@ class Hooks(object): hook_name = os.path.basename(args[0]) if hook_name in self._hooks: self._hooks[hook_name]() + cfg = config() + if cfg.implicit_save: + cfg.save() else: raise UnregisteredHookError(hook_name) diff --git a/hooks/charmhelpers/core/services/base.py b/hooks/charmhelpers/core/services/base.py index 6b5a1b9f..87ecb130 100644 --- a/hooks/charmhelpers/core/services/base.py +++ b/hooks/charmhelpers/core/services/base.py @@ -118,6 +118,9 @@ class ServiceManager(object): else: self.provide_data() self.reconfigure_services() + cfg = hookenv.config() + if cfg.implicit_save: + cfg.save() def provide_data(self): """ diff --git a/hooks/nova_compute_hooks.py b/hooks/nova_compute_hooks.py index 1b086dba..eb402bab 100755 --- a/hooks/nova_compute_hooks.py +++ b/hooks/nova_compute_hooks.py @@ -50,6 +50,7 @@ from nova_compute_utils import ( ceph_config_file, CEPH_SECRET, enable_shell, disable_shell, fix_path_ownership, + services, ) from nova_compute_context import CEPH_SECRET_UUID @@ -251,6 +252,20 @@ def nova_ceilometer_relation_changed(): CONFIGS.write_all() +@hooks.hook('zeromq-configuration-relation-joined') +def zeromq_configuration_relation_joined(relid=None): + if services: + relation_set(relation_id=relid, + topics=" ".join(services()), + users="nova") + + +@hooks.hook('zeromq-configuration-relation-changed') +@restart_on_change(restart_map(), stopstart=True) +def zeromq_configuration_relation_changed(): + CONFIGS.write(NOVA_CONF) + + def main(): try: hooks.execute(sys.argv) diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index 0b9b26c1..7671b256 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -79,7 +79,8 @@ BASE_RESOURCE_MAP = { interface='nova-ceilometer', service='nova', config_file=NOVA_CONF), - InstanceConsoleContext(), ], + InstanceConsoleContext(), + context.ZeroMQContext()], }, } diff --git a/hooks/zeromq-configuration-relation-changed b/hooks/zeromq-configuration-relation-changed new file mode 120000 index 00000000..3ba0bdea --- /dev/null +++ b/hooks/zeromq-configuration-relation-changed @@ -0,0 +1 @@ +nova_compute_hooks.py \ No newline at end of file diff --git a/hooks/zeromq-configuration-relation-joined b/hooks/zeromq-configuration-relation-joined new file mode 120000 index 00000000..3ba0bdea --- /dev/null +++ b/hooks/zeromq-configuration-relation-joined @@ -0,0 +1 @@ +nova_compute_hooks.py \ No newline at end of file diff --git a/metadata.yaml b/metadata.yaml index 9b3fd9b7..8b7f916d 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -30,6 +30,9 @@ requires: neutron-plugin: interface: neutron-plugin scope: container + zeromq-configuration: + interface: zeromq-configuration + scope: container peers: compute-peer: interface: nova diff --git a/templates/icehouse/nova.conf b/templates/icehouse/nova.conf new file mode 100644 index 00000000..41735c92 --- /dev/null +++ b/templates/icehouse/nova.conf @@ -0,0 +1,118 @@ +# icehouse +############################################################################### +# [ WARNING ] +# Configuration file maintained by Juju. Local changes may be overwritten. 
+{% if restart_trigger -%} +# restart trigger: {{ restart_trigger }} +{% endif -%} +############################################################################### +[DEFAULT] +dhcpbridge_flagfile=/etc/nova/nova.conf +dhcpbridge=/usr/bin/nova-dhcpbridge +logdir=/var/log/nova +state_path=/var/lib/nova +lock_path=/var/lock/nova +force_dhcp_release=True +libvirt_use_virtio_for_bridges=True +verbose=True +use_syslog = {{ use_syslog }} +ec2_private_dns_show_ip=True +api_paste_config=/etc/nova/api-paste.ini +enabled_apis=ec2,osapi_compute,metadata +auth_strategy=keystone +compute_driver=libvirt.LibvirtDriver + +{% include "parts/database" %} + +{% include "parts/rabbitmq" %} + +{% include "parts/zeromq" %} + +{% if glance_api_servers -%} +glance_api_servers = {{ glance_api_servers }} +{% endif -%} + +{% if rbd_pool -%} +rbd_pool = {{ rbd_pool }} +rbd_user = {{ rbd_user }} +rbd_secret_uuid = {{ rbd_secret_uuid }} +{% endif -%} + +{% if console_vnc_type -%} +vnc_enabled = True +novnc_enabled = True +vnc_keymap = {{ console_keymap }} +vncserver_listen = 0.0.0.0 +vncserver_proxyclient_address = {{ console_listen_addr }} +{% if console_access_protocol == 'novnc' or console_access_protocol == 'vnc' -%} +novncproxy_base_url = {{ novnc_proxy_address }} +{% endif -%} +{% if console_access_protocol == 'xvpvnc' or console_access_protocol == 'vnc' -%} +xvpvncproxy_port = {{ xvpvnc_proxy_port }} +xvpvncproxy_host = {{ xvpvnc_proxy_host }} +xvpvncproxy_base_url = {{ xvpvnc_proxy_address }} +{% endif -%} +{% else -%} +vnc_enabled = False +novnc_enabled = False +{% endif -%} + +{% if neutron_plugin and neutron_plugin == 'ovs' -%} +libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtGenericVIFDriver +{% if neutron_security_groups -%} +security_group_api = neutron +firewall_driver = nova.virt.firewall.NoopFirewallDriver +{% endif -%} +{% endif -%} + +{% if neutron_plugin and (neutron_plugin == 'nvp' or neutron_plugin == 'nsx') -%} +libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtOpenVswitchVirtualPortDriver +security_group_api = neutron +firewall_driver = nova.virt.firewall.NoopFirewallDriver +{% endif -%} + +{% if network_manager_config -%} +{% for key, value in network_manager_config.iteritems() -%} +{{ key }} = {{ value }} +{% endfor -%} +{% endif -%} + +{% if network_manager == 'neutron' -%} +network_api_class = nova.network.neutronv2.api.API +{% else -%} +network_manager = nova.network.manager.FlatDHCPManager +{% endif -%} + +{% if volume_service -%} +volume_api_class = nova.volume.cinder.API +{% endif -%} + +{% if user_config_flags -%} +{% for key, value in user_config_flags.iteritems() -%} +{{ key }} = {{ value }} +{% endfor -%} +{% endif -%} + +{% if live_migration_uri -%} +live_migration_uri = {{ live_migration_uri }} +{% endif -%} + +{% if instances_path -%} +instances_path = {{ instances_path }} +{% endif -%} + +{% if sections and 'DEFAULT' in sections -%} +{% for key, value in sections['DEFAULT'] -%} +{{ key }} = {{ value }} +{% endfor -%} +{% endif -%} + +{% if console_access_protocol == 'spice' -%} +[spice] +agent_enabled = True +enabled = True +html5proxy_base_url = {{ spice_proxy_address }} +keymap = {{ console_keymap }} +server_listen = 0.0.0.0 +server_proxyclient_address = {{ console_listen_addr }} +{% endif -%} diff --git a/templates/parts/zeromq b/templates/parts/zeromq new file mode 100644 index 00000000..3e32288c --- /dev/null +++ b/templates/parts/zeromq @@ -0,0 +1,6 @@ +{% if zmq_host -%} +# ZeroMQ configuration (restart-nonce: {{ zmq_nonce }}) +rpc_backend = zmq +rpc_zmq_matchmaker = 
oslo.messaging._drivers.matchmaker_ring.MatchMakerRing +rpc_zmq_host = {{ zmq_host }} +{% endif -%} From e03f9bc1438033fc9cd9f6f22b4d03ff3c7bdc47 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 9 Sep 2014 09:52:27 +0000 Subject: [PATCH 02/22] Trigger zeromq-configuration hook on config changed --- hooks/nova_compute_hooks.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hooks/nova_compute_hooks.py b/hooks/nova_compute_hooks.py index eb402bab..836c3938 100755 --- a/hooks/nova_compute_hooks.py +++ b/hooks/nova_compute_hooks.py @@ -91,7 +91,8 @@ def config_changed(): fix_path_ownership(fp, user='nova') [compute_joined(rid) for rid in relation_ids('cloud-compute')] - + for rid in relation_ids('zeromq-configuration'): + zeromq_configuration_relation_joined(rid) CONFIGS.write_all() From 7ef5ad8ed996d6a97d541e27576a2051dca9dc06 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 9 Sep 2014 09:58:06 +0000 Subject: [PATCH 03/22] Remove copy/pasta error --- hooks/nova_compute_hooks.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/hooks/nova_compute_hooks.py b/hooks/nova_compute_hooks.py index 836c3938..2ef59181 100755 --- a/hooks/nova_compute_hooks.py +++ b/hooks/nova_compute_hooks.py @@ -255,10 +255,9 @@ def nova_ceilometer_relation_changed(): @hooks.hook('zeromq-configuration-relation-joined') def zeromq_configuration_relation_joined(relid=None): - if services: - relation_set(relation_id=relid, - topics=" ".join(services()), - users="nova") + relation_set(relation_id=relid, + topics=" ".join(services()), + users="nova") @hooks.hook('zeromq-configuration-relation-changed') From 22968a1a41b29e721868d9aa715d2838117feaeb Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 9 Sep 2014 10:56:37 +0000 Subject: [PATCH 04/22] Correct topics --- hooks/nova_compute_hooks.py | 3 ++- hooks/nova_compute_utils.py | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/hooks/nova_compute_hooks.py b/hooks/nova_compute_hooks.py index 2ef59181..b81bacf1 100755 --- a/hooks/nova_compute_hooks.py +++ b/hooks/nova_compute_hooks.py @@ -51,6 +51,7 @@ from nova_compute_utils import ( enable_shell, disable_shell, fix_path_ownership, services, + get_topics, ) from nova_compute_context import CEPH_SECRET_UUID @@ -256,7 +257,7 @@ def nova_ceilometer_relation_changed(): @hooks.hook('zeromq-configuration-relation-joined') def zeromq_configuration_relation_joined(relid=None): relation_set(relation_id=relid, - topics=" ".join(services()), + topics=" ".join(get_topics()), users="nova") diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index 7671b256..5042ed59 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -458,3 +458,6 @@ def disable_shell(user): def fix_path_ownership(path, user='nova'): cmd = ['chown', user, path] check_call(cmd) + +def get_topics(): + return ['compute'] From a9f343497430da5f40af0595dab86cee3baf99ba Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 10 Sep 2014 17:17:43 +0000 Subject: [PATCH 05/22] Add support for notifications with zmq --- .../charmhelpers/contrib/openstack/context.py | 26 +++++++++++++- hooks/charmhelpers/contrib/openstack/utils.py | 12 ++++++- hooks/nova_compute_utils.py | 3 +- templates/icehouse/neutron.conf | 36 +++++++++++++++++++ 4 files changed, 74 insertions(+), 3 deletions(-) create mode 100644 templates/icehouse/neutron.conf diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 988bef19..7c55084d 100644 
--- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -50,6 +50,9 @@ from charmhelpers.contrib.network.ip import ( get_ipv6_addr, ) +from charmhelpers.contrib.openstack.utils import ( + get_matchmaker_map, +) CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' @@ -800,4 +803,25 @@ class ZeroMQContext(OSContextGenerator): for unit in related_units(rid): ctxt['zmq_nonce'] = relation_get('nonce', unit, rid) ctxt['zmq_host'] = relation_get('host', unit, rid) - return ctxt \ No newline at end of file + return ctxt + +class NotificationDriverContext(OSContextGenerator): + + def __init__(self, zmq_relation='zeromq-configuration', amqp_relation='amqp'): + """ + :param zmq_relation : Name of Zeromq relation to check + """ + self.zmq_relation = zmq_relation + self.amqp_relation = amqp_relation + + def __call__(self): + ctxt = { + 'notifications': "False", + } + if is_relation_made(self.zmq_relation): + matchmaker_data = get_matchmaker_map() + if 'notifications-info' in matchmaker_data: + ctxt['notifications'] = "True" + elif is_relation_made(self.amqp_relation): + ctxt['notifications'] = "True" + return ctxt diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 23d237de..4bcd3cff 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -3,15 +3,17 @@ # Common python helper functions used for OpenStack charms. from collections import OrderedDict -import subprocess +import json import os import socket +import subprocess import sys from charmhelpers.core.hookenv import ( config, log as juju_log, charm_dir, + is_relation_made, ERROR, INFO ) @@ -457,3 +459,11 @@ def get_hostname(address, fqdn=True): return result else: return result.split('.')[0] + + +def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'): + mm_map = {} + if os.path.isfile(mm_file): + with open(mm_file, 'r') as f: + mm_map = json.load(f) + return mm_map diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index 5042ed59..a1f127ab 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -80,7 +80,8 @@ BASE_RESOURCE_MAP = { service='nova', config_file=NOVA_CONF), InstanceConsoleContext(), - context.ZeroMQContext()], + context.ZeroMQContext(), + context.NotificationDriverContext()], }, } diff --git a/templates/icehouse/neutron.conf b/templates/icehouse/neutron.conf new file mode 100644 index 00000000..8855c48e --- /dev/null +++ b/templates/icehouse/neutron.conf @@ -0,0 +1,36 @@ +# icehouse +############################################################################### +# [ WARNING ] +# Configuration file maintained by Juju. Local changes may be overwritten. 
+############################################################################### +[DEFAULT] +state_path = /var/lib/neutron +lock_path = $state_path/lock +bind_host = 0.0.0.0 +bind_port = 9696 + +{% if core_plugin -%} +core_plugin = {{ core_plugin }} +{% endif -%} + +api_paste_config = /etc/neutron/api-paste.ini +auth_strategy = keystone +use_syslog = {{ use_syslog }} +{% if notifications == 'True' -%} +notification_driver = neutron.openstack.common.notifier.list_notifier +default_notification_level = INFO +notification_topics = notifications +{% endif -%} + +{% include "parts/rabbitmq" %} + +[QUOTAS] + +[DEFAULT_SERVICETYPE] + +[AGENT] +root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf + +[keystone_authtoken] +signing_dir = /var/lib/neutron/keystone-signing + From 8c861dc2c7ec55f93546d2687b9b8182736cd02d Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 15 Oct 2014 07:52:43 +0000 Subject: [PATCH 06/22] Sync 0mq charmhelpers --- .../charmhelpers/contrib/hahelpers/apache.py | 13 +- .../charmhelpers/contrib/hahelpers/cluster.py | 3 +- hooks/charmhelpers/contrib/network/ip.py | 207 ++++++++++++-- .../contrib/openstack/amulet/deployment.py | 46 ++- .../contrib/openstack/amulet/utils.py | 9 +- .../charmhelpers/contrib/openstack/context.py | 267 +++++++++++++----- hooks/charmhelpers/contrib/openstack/ip.py | 2 +- .../contrib/openstack/templates/haproxy.cfg | 29 +- .../templates/openstack_https_frontend | 17 +- .../templates/openstack_https_frontend.conf | 17 +- hooks/charmhelpers/contrib/openstack/utils.py | 31 +- hooks/charmhelpers/core/hookenv.py | 21 +- hooks/charmhelpers/core/host.py | 35 ++- hooks/charmhelpers/core/services/helpers.py | 124 +++++++- hooks/charmhelpers/core/sysctl.py | 34 +++ hooks/charmhelpers/fetch/__init__.py | 24 +- hooks/charmhelpers/fetch/archiveurl.py | 53 +++- 17 files changed, 782 insertions(+), 150 deletions(-) create mode 100644 hooks/charmhelpers/core/sysctl.py diff --git a/hooks/charmhelpers/contrib/hahelpers/apache.py b/hooks/charmhelpers/contrib/hahelpers/apache.py index 8d5fb8ba..6616ffff 100644 --- a/hooks/charmhelpers/contrib/hahelpers/apache.py +++ b/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -20,20 +20,27 @@ from charmhelpers.core.hookenv import ( ) -def get_cert(): +def get_cert(cn=None): + # TODO: deal with multiple https endpoints via charm config cert = config_get('ssl_cert') key = config_get('ssl_key') if not (cert and key): log("Inspecting identity-service relations for SSL certificate.", level=INFO) cert = key = None + if cn: + ssl_cert_attr = 'ssl_cert_{}'.format(cn) + ssl_key_attr = 'ssl_key_{}'.format(cn) + else: + ssl_cert_attr = 'ssl_cert' + ssl_key_attr = 'ssl_key' for r_id in relation_ids('identity-service'): for unit in relation_list(r_id): if not cert: - cert = relation_get('ssl_cert', + cert = relation_get(ssl_cert_attr, rid=r_id, unit=unit) if not key: - key = relation_get('ssl_key', + key = relation_get(ssl_key_attr, rid=r_id, unit=unit) return (cert, key) diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py index 7151b1d0..6d972007 100644 --- a/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -139,10 +139,9 @@ def https(): return True for r_id in relation_ids('identity-service'): for unit in relation_list(r_id): + # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN rel_state = [ relation_get('https_keystone', rid=r_id, unit=unit), - relation_get('ssl_cert', rid=r_id, unit=unit), - 
relation_get('ssl_key', rid=r_id, unit=unit), relation_get('ca_cert', rid=r_id, unit=unit), ] # NOTE: works around (LP: #1203241) diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py index 7edbcc48..e62e5655 100644 --- a/hooks/charmhelpers/contrib/network/ip.py +++ b/hooks/charmhelpers/contrib/network/ip.py @@ -1,10 +1,16 @@ +import glob +import re +import subprocess import sys from functools import partial +from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - ERROR, log, config, + WARNING, + ERROR, + log ) try: @@ -51,6 +57,8 @@ def get_address_in_network(network, fallback=None, fatal=False): else: if fatal: not_found_error_out() + else: + return None _validate_cidr(network) network = netaddr.IPNetwork(network) @@ -132,7 +140,8 @@ def _get_for_address(address, key): if address.version == 4 and netifaces.AF_INET in addresses: addr = addresses[netifaces.AF_INET][0]['addr'] netmask = addresses[netifaces.AF_INET][0]['netmask'] - cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + network = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + cidr = network.cidr if address in cidr: if key == 'iface': return iface @@ -141,11 +150,14 @@ def _get_for_address(address, key): if address.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: if not addr['addr'].startswith('fe80'): - cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], - addr['netmask'])) + network = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + cidr = network.cidr if address in cidr: if key == 'iface': return iface + elif key == 'netmask' and cidr: + return str(cidr).split('/')[1] else: return addr[key] return None @@ -156,19 +168,182 @@ get_iface_for_address = partial(_get_for_address, key='iface') get_netmask_for_address = partial(_get_for_address, key='netmask') -def get_ipv6_addr(iface="eth0"): +def format_ipv6_addr(address): + """ + IPv6 needs to be wrapped with [] in url link to parse correctly. + """ + if is_ipv6(address): + address = "[%s]" % address + else: + log("Not a valid ipv6 address: %s" % address, level=WARNING) + address = None + + return address + + +def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, + fatal=True, exc_list=None): + """ + Return the assigned IP address for a given interface, if any, or []. + """ + # Extract nic if passed /dev/ethX + if '/' in iface: + iface = iface.split('/')[-1] + if not exc_list: + exc_list = [] try: - iface_addrs = netifaces.ifaddresses(iface) - if netifaces.AF_INET6 not in iface_addrs: - raise Exception("Interface '%s' doesn't have an ipv6 address." % iface) + inet_num = getattr(netifaces, inet_type) + except AttributeError: + raise Exception('Unknown inet type ' + str(inet_type)) - addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6] - ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80') - and config('vip') != a['addr']] - if not ipv6_addr: - raise Exception("Interface '%s' doesn't have global ipv6 address." 
% iface) + interfaces = netifaces.interfaces() + if inc_aliases: + ifaces = [] + for _iface in interfaces: + if iface == _iface or _iface.split(':')[0] == iface: + ifaces.append(_iface) + if fatal and not ifaces: + raise Exception("Invalid interface '%s'" % iface) + ifaces.sort() + else: + if iface not in interfaces: + if fatal: + raise Exception("%s not found " % (iface)) + else: + return [] + else: + ifaces = [iface] - return ipv6_addr[0] + addresses = [] + for netiface in ifaces: + net_info = netifaces.ifaddresses(netiface) + if inet_num in net_info: + for entry in net_info[inet_num]: + if 'addr' in entry and entry['addr'] not in exc_list: + addresses.append(entry['addr']) + if fatal and not addresses: + raise Exception("Interface '%s' doesn't have any %s addresses." % + (iface, inet_type)) + return addresses - except ValueError: - raise ValueError("Invalid interface '%s'" % iface) +get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') + + +def get_iface_from_addr(addr): + """Work out on which interface the provided address is configured.""" + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + for inet_type in addresses: + for _addr in addresses[inet_type]: + _addr = _addr['addr'] + # link local + ll_key = re.compile("(.+)%.*") + raw = re.match(ll_key, _addr) + if raw: + _addr = raw.group(1) + if _addr == addr: + log("Address '%s' is configured on iface '%s'" % + (addr, iface)) + return iface + + msg = "Unable to infer net iface on which '%s' is configured" % (addr) + raise Exception(msg) + + +def sniff_iface(f): + """If no iface provided, inject net iface inferred from unit private + address. + """ + def iface_sniffer(*args, **kwargs): + if not kwargs.get('iface', None): + kwargs['iface'] = get_iface_from_addr(unit_get('private-address')) + + return f(*args, **kwargs) + + return iface_sniffer + + +@sniff_iface +def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, + dynamic_only=True): + """Get assigned IPv6 address for a given interface. + + Returns list of addresses found. If no address found, returns empty list. + + If iface is None, we infer the current primary interface by doing a reverse + lookup on the unit private-address. + + We currently only support scope global IPv6 addresses i.e. non-temporary + addresses. If no global IPv6 address is found, return the first one found + in the ipv6 address list. + """ + addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', + inc_aliases=inc_aliases, fatal=fatal, + exc_list=exc_list) + + if addresses: + global_addrs = [] + for addr in addresses: + key_scope_link_local = re.compile("^fe80::..(.+)%(.+)") + m = re.match(key_scope_link_local, addr) + if m: + eui_64_mac = m.group(1) + iface = m.group(2) + else: + global_addrs.append(addr) + + if global_addrs: + # Make sure any found global addresses are not temporary + cmd = ['ip', 'addr', 'show', iface] + out = subprocess.check_output(cmd) + if dynamic_only: + key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*") + else: + key = re.compile("inet6 (.+)/[0-9]+ scope global.*") + + addrs = [] + for line in out.split('\n'): + line = line.strip() + m = re.match(key, line) + if m and 'temporary' not in line: + # Return the first valid address we find + for addr in global_addrs: + if m.group(1) == addr: + if not dynamic_only or \ + m.group(1).endswith(eui_64_mac): + addrs.append(addr) + + if addrs: + return addrs + + if fatal: + raise Exception("Interface '%s' doesn't have a scope global " + "non-temporary ipv6 address." 
% iface) + + return [] + + +def get_bridges(vnic_dir='/sys/devices/virtual/net'): + """ + Return a list of bridges on the system or [] + """ + b_rgex = vnic_dir + '/*/bridge' + return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)] + + +def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'): + """ + Return a list of nics comprising a given bridge on the system or [] + """ + brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge) + return [x.split('/')[-1] for x in glob.glob(brif_rgex)] + + +def is_bridge_member(nic): + """ + Check if a given nic is a member of a bridge + """ + for bridge in get_bridges(): + if nic in get_bridge_nics(bridge): + return True + return False diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 9179eeb1..3c7f422a 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -10,32 +10,62 @@ class OpenStackAmuletDeployment(AmuletDeployment): that is specifically for use by OpenStack charms. """ - def __init__(self, series=None, openstack=None, source=None): + def __init__(self, series=None, openstack=None, source=None, stable=True): """Initialize the deployment environment.""" super(OpenStackAmuletDeployment, self).__init__(series) self.openstack = openstack self.source = source + self.stable = stable + # Note(coreycb): this needs to be changed when new next branches come + # out. + self.current_next = "trusty" + + def _determine_branch_locations(self, other_services): + """Determine the branch locations for the other services. + + Determine if the local branch being tested is derived from its + stable or next (dev) branch, and based on this, use the corresonding + stable or next branches for the other_services.""" + base_charms = ['mysql', 'mongodb', 'rabbitmq-server'] + + if self.stable: + for svc in other_services: + temp = 'lp:charms/{}' + svc['location'] = temp.format(svc['name']) + else: + for svc in other_services: + if svc['name'] in base_charms: + temp = 'lp:charms/{}' + svc['location'] = temp.format(svc['name']) + else: + temp = 'lp:~openstack-charmers/charms/{}/{}/next' + svc['location'] = temp.format(self.current_next, + svc['name']) + return other_services def _add_services(self, this_service, other_services): - """Add services to the deployment and set openstack-origin.""" + """Add services to the deployment and set openstack-origin/source.""" + other_services = self._determine_branch_locations(other_services) + super(OpenStackAmuletDeployment, self)._add_services(this_service, other_services) - name = 0 + services = other_services services.append(this_service) - use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph'] + use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', + 'ceph-osd', 'ceph-radosgw'] if self.openstack: for svc in services: - if svc[name] not in use_source: + if svc['name'] not in use_source: config = {'openstack-origin': self.openstack} - self.d.configure(svc[name], config) + self.d.configure(svc['name'], config) if self.source: for svc in services: - if svc[name] in use_source: + if svc['name'] in use_source: config = {'source': self.source} - self.d.configure(svc[name], config) + self.d.configure(svc['name'], config) def _configure_services(self, configs): """Configure all of the services.""" diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py index bd327bdc..0f312b99 
100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -187,15 +187,16 @@ class OpenStackAmuletUtils(AmuletUtils): f = opener.open("http://download.cirros-cloud.net/version/released") version = f.read().strip() - cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version) + cirros_img = "cirros-{}-x86_64-disk.img".format(version) + local_path = os.path.join('tests', cirros_img) - if not os.path.exists(cirros_img): + if not os.path.exists(local_path): cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", version, cirros_img) - opener.retrieve(cirros_url, cirros_img) + opener.retrieve(cirros_url, local_path) f.close() - with open(cirros_img) as f: + with open(local_path) as f: image = glance.images.create(name=image_name, is_public=True, disk_format='qcow2', container_format='bare', data=f) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 7c55084d..acd9bca8 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -8,7 +8,6 @@ from subprocess import ( check_call ) - from charmhelpers.fetch import ( apt_install, filter_installed_packages, @@ -16,12 +15,12 @@ from charmhelpers.fetch import ( from charmhelpers.core.hookenv import ( config, + is_relation_made, local_unit, log, relation_get, relation_ids, related_units, - is_relation_made, relation_set, unit_get, unit_private_ip, @@ -29,6 +28,11 @@ from charmhelpers.core.hookenv import ( INFO ) +from charmhelpers.core.host import ( + mkdir, + write_file +) + from charmhelpers.contrib.hahelpers.cluster import ( determine_apache_port, determine_api_port, @@ -39,6 +43,7 @@ from charmhelpers.contrib.hahelpers.cluster import ( from charmhelpers.contrib.hahelpers.apache import ( get_cert, get_ca_cert, + install_ca_cert, ) from charmhelpers.contrib.openstack.neutron import ( @@ -48,9 +53,13 @@ from charmhelpers.contrib.openstack.neutron import ( from charmhelpers.contrib.network.ip import ( get_address_in_network, get_ipv6_addr, + get_netmask_for_address, + format_ipv6_addr, + is_address_in_network ) from charmhelpers.contrib.openstack.utils import ( + get_host_ip, get_matchmaker_map, ) CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' @@ -172,8 +181,10 @@ class SharedDBContext(OSContextGenerator): for rid in relation_ids('shared-db'): for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) + host = rdata.get('db_host') + host = format_ipv6_addr(host) or host ctxt = { - 'database_host': rdata.get('db_host'), + 'database_host': host, 'database': self.database, 'database_user': self.user, 'database_password': rdata.get(password_setting), @@ -249,10 +260,15 @@ class IdentityServiceContext(OSContextGenerator): for rid in relation_ids('identity-service'): for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) + serv_host = rdata.get('service_host') + serv_host = format_ipv6_addr(serv_host) or serv_host + auth_host = rdata.get('auth_host') + auth_host = format_ipv6_addr(auth_host) or auth_host + ctxt = { 'service_port': rdata.get('service_port'), - 'service_host': rdata.get('service_host'), - 'auth_host': rdata.get('auth_host'), + 'service_host': serv_host, + 'auth_host': auth_host, 'auth_port': rdata.get('auth_port'), 'admin_tenant_name': rdata.get('service_tenant'), 'admin_user': rdata.get('service_username'), @@ -301,11 +317,13 @@ class AMQPContext(OSContextGenerator): for unit in 
related_units(rid): if relation_get('clustered', rid=rid, unit=unit): ctxt['clustered'] = True - ctxt['rabbitmq_host'] = relation_get('vip', rid=rid, - unit=unit) + vip = relation_get('vip', rid=rid, unit=unit) + vip = format_ipv6_addr(vip) or vip + ctxt['rabbitmq_host'] = vip else: - ctxt['rabbitmq_host'] = relation_get('private-address', - rid=rid, unit=unit) + host = relation_get('private-address', rid=rid, unit=unit) + host = format_ipv6_addr(host) or host + ctxt['rabbitmq_host'] = host ctxt.update({ 'rabbitmq_user': username, 'rabbitmq_password': relation_get('password', rid=rid, @@ -344,8 +362,9 @@ class AMQPContext(OSContextGenerator): and len(related_units(rid)) > 1: rabbitmq_hosts = [] for unit in related_units(rid): - rabbitmq_hosts.append(relation_get('private-address', - rid=rid, unit=unit)) + host = relation_get('private-address', rid=rid, unit=unit) + host = format_ipv6_addr(host) or host + rabbitmq_hosts.append(host) ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts) if not context_complete(ctxt): return {} @@ -374,6 +393,7 @@ class CephContext(OSContextGenerator): ceph_addr = \ relation_get('ceph-public-address', rid=rid, unit=unit) or \ relation_get('private-address', rid=rid, unit=unit) + ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr mon_hosts.append(ceph_addr) ctxt = { @@ -394,6 +414,9 @@ class CephContext(OSContextGenerator): return ctxt +ADDRESS_TYPES = ['admin', 'internal', 'public'] + + class HAProxyContext(OSContextGenerator): interfaces = ['cluster'] @@ -406,25 +429,63 @@ class HAProxyContext(OSContextGenerator): if not relation_ids('cluster'): return {} - cluster_hosts = {} l_unit = local_unit().replace('/', '-') - if config('prefer-ipv6'): - addr = get_ipv6_addr() - else: - addr = unit_get('private-address') - cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'), - addr) - for rid in relation_ids('cluster'): - for unit in related_units(rid): - _unit = unit.replace('/', '-') - addr = relation_get('private-address', rid=rid, unit=unit) - cluster_hosts[_unit] = addr + if config('prefer-ipv6'): + addr = get_ipv6_addr(exc_list=[config('vip')])[0] + else: + addr = get_host_ip(unit_get('private-address')) + + cluster_hosts = {} + + # NOTE(jamespage): build out map of configured network endpoints + # and associated backends + for addr_type in ADDRESS_TYPES: + laddr = get_address_in_network( + config('os-{}-network'.format(addr_type))) + if laddr: + cluster_hosts[laddr] = {} + cluster_hosts[laddr]['network'] = "{}/{}".format( + laddr, + get_netmask_for_address(laddr) + ) + cluster_hosts[laddr]['backends'] = {} + cluster_hosts[laddr]['backends'][l_unit] = laddr + for rid in relation_ids('cluster'): + for unit in related_units(rid): + _unit = unit.replace('/', '-') + _laddr = relation_get('{}-address'.format(addr_type), + rid=rid, unit=unit) + if _laddr: + cluster_hosts[laddr]['backends'][_unit] = _laddr + + # NOTE(jamespage) no split configurations found, just use + # private addresses + if not cluster_hosts: + cluster_hosts[addr] = {} + cluster_hosts[addr]['network'] = "{}/{}".format( + addr, + get_netmask_for_address(addr) + ) + cluster_hosts[addr]['backends'] = {} + cluster_hosts[addr]['backends'][l_unit] = addr + for rid in relation_ids('cluster'): + for unit in related_units(rid): + _unit = unit.replace('/', '-') + _laddr = relation_get('private-address', + rid=rid, unit=unit) + if _laddr: + cluster_hosts[addr]['backends'][_unit] = _laddr ctxt = { - 'units': cluster_hosts, + 'frontends': cluster_hosts, } + if 
config('haproxy-server-timeout'): + ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout') + if config('haproxy-client-timeout'): + ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') + if config('prefer-ipv6'): ctxt['local_host'] = 'ip6-localhost' ctxt['haproxy_host'] = '::' @@ -434,12 +495,13 @@ class HAProxyContext(OSContextGenerator): ctxt['haproxy_host'] = '0.0.0.0' ctxt['stat_port'] = ':8888' - if len(cluster_hosts.keys()) > 1: - # Enable haproxy when we have enough peers. - log('Ensuring haproxy enabled in /etc/default/haproxy.') - with open('/etc/default/haproxy', 'w') as out: - out.write('ENABLED=1\n') - return ctxt + for frontend in cluster_hosts: + if len(cluster_hosts[frontend]['backends']) > 1: + # Enable haproxy when we have enough peers. + log('Ensuring haproxy enabled in /etc/default/haproxy.') + with open('/etc/default/haproxy', 'w') as out: + out.write('ENABLED=1\n') + return ctxt log('HAProxy context is incomplete, this unit has no peers.') return {} @@ -494,22 +556,36 @@ class ApacheSSLContext(OSContextGenerator): cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http'] check_call(cmd) - def configure_cert(self): - if not os.path.isdir('/etc/apache2/ssl'): - os.mkdir('/etc/apache2/ssl') + def configure_cert(self, cn=None): ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) - if not os.path.isdir(ssl_dir): - os.mkdir(ssl_dir) - cert, key = get_cert() - with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out: - cert_out.write(b64decode(cert)) - with open(os.path.join(ssl_dir, 'key'), 'w') as key_out: - key_out.write(b64decode(key)) + mkdir(path=ssl_dir) + cert, key = get_cert(cn) + if cn: + cert_filename = 'cert_{}'.format(cn) + key_filename = 'key_{}'.format(cn) + else: + cert_filename = 'cert' + key_filename = 'key' + write_file(path=os.path.join(ssl_dir, cert_filename), + content=b64decode(cert)) + write_file(path=os.path.join(ssl_dir, key_filename), + content=b64decode(key)) + + def configure_ca(self): ca_cert = get_ca_cert() if ca_cert: - with open(CA_CERT_PATH, 'w') as ca_out: - ca_out.write(b64decode(ca_cert)) - check_call(['update-ca-certificates']) + install_ca_cert(b64decode(ca_cert)) + + def canonical_names(self): + '''Figure out which canonical names clients will access this service''' + cns = [] + for r_id in relation_ids('identity-service'): + for unit in related_units(r_id): + rdata = relation_get(rid=r_id, unit=unit) + for k in rdata: + if k.startswith('ssl_key_'): + cns.append(k.lstrip('ssl_key_')) + return list(set(cns)) def __call__(self): if isinstance(self.external_ports, basestring): @@ -517,21 +593,47 @@ class ApacheSSLContext(OSContextGenerator): if (not self.external_ports or not https()): return {} - self.configure_cert() + self.configure_ca() self.enable_modules() ctxt = { 'namespace': self.service_namespace, - 'private_address': unit_get('private-address'), - 'endpoints': [] + 'endpoints': [], + 'ext_ports': [] } - if is_clustered(): - ctxt['private_address'] = config('vip') - for api_port in self.external_ports: - ext_port = determine_apache_port(api_port) - int_port = determine_api_port(api_port) - portmap = (int(ext_port), int(int_port)) - ctxt['endpoints'].append(portmap) + + for cn in self.canonical_names(): + self.configure_cert(cn) + + addresses = [] + vips = [] + if config('vip'): + vips = config('vip').split() + + for network_type in ['os-internal-network', + 'os-admin-network', + 'os-public-network']: + address = get_address_in_network(config(network_type), + unit_get('private-address')) + if 
len(vips) > 0 and is_clustered(): + for vip in vips: + if is_address_in_network(config(network_type), + vip): + addresses.append((address, vip)) + break + elif is_clustered(): + addresses.append((address, config('vip'))) + else: + addresses.append((address, address)) + + for address, endpoint in set(addresses): + for api_port in self.external_ports: + ext_port = determine_apache_port(api_port) + int_port = determine_api_port(api_port) + portmap = (address, endpoint, int(ext_port), int(int_port)) + ctxt['endpoints'].append(portmap) + ctxt['ext_ports'].append(int(ext_port)) + ctxt['ext_ports'] = list(set(ctxt['ext_ports'])) return ctxt @@ -661,22 +763,22 @@ class NeutronContext(OSContextGenerator): class OSConfigFlagContext(OSContextGenerator): - """ - Responsible for adding user-defined config-flags in charm config to a - template context. + """ + Responsible for adding user-defined config-flags in charm config to a + template context. - NOTE: the value of config-flags may be a comma-separated list of - key=value pairs and some Openstack config files support - comma-separated lists as values. - """ + NOTE: the value of config-flags may be a comma-separated list of + key=value pairs and some Openstack config files support + comma-separated lists as values. + """ - def __call__(self): - config_flags = config('config-flags') - if not config_flags: - return {} + def __call__(self): + config_flags = config('config-flags') + if not config_flags: + return {} - flags = config_flags_parser(config_flags) - return {'user_config_flags': flags} + flags = config_flags_parser(config_flags) + return {'user_config_flags': flags} class SubordinateConfigContext(OSContextGenerator): @@ -793,6 +895,38 @@ class SyslogContext(OSContextGenerator): return ctxt +class BindHostContext(OSContextGenerator): + + def __call__(self): + if config('prefer-ipv6'): + return { + 'bind_host': '::' + } + else: + return { + 'bind_host': '0.0.0.0' + } + + +class WorkerConfigContext(OSContextGenerator): + + @property + def num_cpus(self): + try: + from psutil import NUM_CPUS + except ImportError: + apt_install('python-psutil', fatal=True) + from psutil import NUM_CPUS + return NUM_CPUS + + def __call__(self): + multiplier = config('worker-multiplier') or 1 + ctxt = { + "workers": self.num_cpus * multiplier + } + return ctxt + + class ZeroMQContext(OSContextGenerator): interfaces = ['zeromq-configuration'] @@ -805,6 +939,7 @@ class ZeroMQContext(OSContextGenerator): ctxt['zmq_host'] = relation_get('host', unit, rid) return ctxt + class NotificationDriverContext(OSContextGenerator): def __init__(self, zmq_relation='zeromq-configuration', amqp_relation='amqp'): diff --git a/hooks/charmhelpers/contrib/openstack/ip.py b/hooks/charmhelpers/contrib/openstack/ip.py index affe8cd1..bc84fc45 100644 --- a/hooks/charmhelpers/contrib/openstack/ip.py +++ b/hooks/charmhelpers/contrib/openstack/ip.py @@ -66,7 +66,7 @@ def resolve_address(endpoint_type=PUBLIC): resolved_address = vip else: if config('prefer-ipv6'): - fallback_addr = get_ipv6_addr() + fallback_addr = get_ipv6_addr(exc_list=[config('vip')])[0] else: fallback_addr = unit_get(_address_map[endpoint_type]['fallback']) resolved_address = get_address_in_network( diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index ce0e2738..19c9b856 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -14,8 +14,17 @@ defaults retries 3 
timeout queue 1000 timeout connect 1000 +{% if haproxy_client_timeout -%} + timeout client {{ haproxy_client_timeout }} +{% else -%} timeout client 30000 +{% endif -%} + +{% if haproxy_server_timeout -%} + timeout server {{ haproxy_server_timeout }} +{% else -%} timeout server 30000 +{% endif -%} listen stats {{ stat_port }} mode http @@ -25,17 +34,21 @@ listen stats {{ stat_port }} stats uri / stats auth admin:password -{% if units -%} +{% if frontends -%} {% for service, ports in service_ports.iteritems() -%} -listen {{ service }}_ipv4 0.0.0.0:{{ ports[0] }} - balance roundrobin - {% for unit, address in units.iteritems() -%} - server {{ unit }} {{ address }}:{{ ports[1] }} check +frontend tcp-in_{{ service }} + bind *:{{ ports[0] }} + bind :::{{ ports[0] }} + {% for frontend in frontends -%} + acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }} + use_backend {{ service }}_{{ frontend }} if net_{{ frontend }} {% endfor %} -listen {{ service }}_ipv6 :::{{ ports[0] }} - balance roundrobin - {% for unit, address in units.iteritems() -%} +{% for frontend in frontends -%} +backend {{ service }}_{{ frontend }} + balance leastconn + {% for unit, address in frontends[frontend]['backends'].iteritems() -%} server {{ unit }} {{ address }}:{{ ports[1] }} check {% endfor %} {% endfor -%} +{% endfor -%} {% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend index e02dc751..ce28fa3f 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend +++ b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend @@ -1,16 +1,18 @@ {% if endpoints -%} -{% for ext, int in endpoints -%} -Listen {{ ext }} -NameVirtualHost *:{{ ext }} - - ServerName {{ private_address }} +{% for ext_port in ext_ports -%} +Listen {{ ext_port }} +{% endfor -%} +{% for address, endpoint, ext, int in endpoints -%} + + ServerName {{ endpoint }} SSLEngine on - SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert - SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key + SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} + SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }} ProxyPass / http://localhost:{{ int }}/ ProxyPassReverse / http://localhost:{{ int }}/ ProxyPreserveHost on +{% endfor -%} Order deny,allow Allow from all @@ -19,5 +21,4 @@ NameVirtualHost *:{{ ext }} Order allow,deny Allow from all -{% endfor -%} {% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf index e02dc751..ce28fa3f 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf +++ b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf @@ -1,16 +1,18 @@ {% if endpoints -%} -{% for ext, int in endpoints -%} -Listen {{ ext }} -NameVirtualHost *:{{ ext }} - - ServerName {{ private_address }} +{% for ext_port in ext_ports -%} +Listen {{ ext_port }} +{% endfor -%} +{% for address, endpoint, ext, int in endpoints -%} + + ServerName {{ endpoint }} SSLEngine on - SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert - SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key + SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} + SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }} ProxyPass / http://localhost:{{ int }}/ 
ProxyPassReverse / http://localhost:{{ int }}/ ProxyPreserveHost on +{% endfor -%} Order deny,allow Allow from all @@ -19,5 +21,4 @@ NameVirtualHost *:{{ ext }} Order allow,deny Allow from all -{% endfor -%} {% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 4bcd3cff..90bdad61 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -3,19 +3,20 @@ # Common python helper functions used for OpenStack charms. from collections import OrderedDict +import subprocess import json import os import socket -import subprocess import sys from charmhelpers.core.hookenv import ( config, log as juju_log, charm_dir, - is_relation_made, ERROR, - INFO + INFO, + relation_ids, + relation_set ) from charmhelpers.contrib.storage.linux.lvm import ( @@ -24,6 +25,10 @@ from charmhelpers.contrib.storage.linux.lvm import ( remove_lvm_physical_volume, ) +from charmhelpers.contrib.network.ip import ( + get_ipv6_addr +) + from charmhelpers.core.host import lsb_release, mounts, umount from charmhelpers.fetch import apt_install, apt_cache from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk @@ -73,6 +78,8 @@ SWIFT_CODENAMES = OrderedDict([ ('1.12.0', 'icehouse'), ('1.11.0', 'icehouse'), ('2.0.0', 'juno'), + ('2.1.0', 'juno'), + ('2.2.0', 'juno'), ]) DEFAULT_LOOPBACK_SIZE = '5G' @@ -467,3 +474,21 @@ def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'): with open(mm_file, 'r') as f: mm_map = json.load(f) return mm_map + + +def sync_db_with_multi_ipv6_addresses(database, database_user, + relation_prefix=None): + hosts = get_ipv6_addr(dynamic_only=False) + + kwargs = {'database': database, + 'username': database_user, + 'hostname': json.dumps(hosts)} + + if relation_prefix: + keys = kwargs.keys() + for key in keys: + kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key] + del kwargs[key] + + for rid in relation_ids('shared-db'): + relation_set(relation_id=rid, **kwargs) diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index f396e03a..af8fe2db 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -203,6 +203,17 @@ class Config(dict): if os.path.exists(self.path): self.load_previous() + def __getitem__(self, key): + """For regular dict lookups, check the current juju config first, + then the previous (saved) copy. This ensures that user-saved values + will be returned by a dict lookup. + + """ + try: + return dict.__getitem__(self, key) + except KeyError: + return (self._prev_dict or {})[key] + def load_previous(self, path=None): """Load previous copy of config from disk. 
@@ -475,9 +486,10 @@ class Hooks(object): hooks.execute(sys.argv) """ - def __init__(self): + def __init__(self, config_save=True): super(Hooks, self).__init__() self._hooks = {} + self._config_save = config_save def register(self, name, function): """Register a hook""" @@ -488,9 +500,10 @@ class Hooks(object): hook_name = os.path.basename(args[0]) if hook_name in self._hooks: self._hooks[hook_name]() - cfg = config() - if cfg.implicit_save: - cfg.save() + if self._config_save: + cfg = config() + if cfg.implicit_save: + cfg.save() else: raise UnregisteredHookError(hook_name) diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index b85b0280..d7ce1e4c 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -68,8 +68,8 @@ def service_available(service_name): """Determine whether a system service is available""" try: subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) - except subprocess.CalledProcessError: - return False + except subprocess.CalledProcessError as e: + return 'unrecognized service' not in e.output else: return True @@ -209,10 +209,15 @@ def mounts(): return system_mounts -def file_hash(path): - """Generate a md5 hash of the contents of 'path' or None if not found """ +def file_hash(path, hash_type='md5'): + """ + Generate a hash checksum of the contents of 'path' or None if not found. + + :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. + """ if os.path.exists(path): - h = hashlib.md5() + h = getattr(hashlib, hash_type)() with open(path, 'r') as source: h.update(source.read()) # IGNORE:E1101 - it does have update return h.hexdigest() @@ -220,6 +225,26 @@ def file_hash(path): return None +def check_hash(path, checksum, hash_type='md5'): + """ + Validate a file using a cryptographic checksum. + + :param str checksum: Value of the checksum used to validate the file. + :param str hash_type: Hash algorithm used to generate `checksum`. + Can be any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. + :raises ChecksumError: If the file fails the checksum + + """ + actual_checksum = file_hash(path, hash_type) + if checksum != actual_checksum: + raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum)) + + +class ChecksumError(ValueError): + pass + + def restart_on_change(restart_map, stopstart=False): """Restart services based on configuration files changing diff --git a/hooks/charmhelpers/core/services/helpers.py b/hooks/charmhelpers/core/services/helpers.py index 4b90589b..7067b94b 100644 --- a/hooks/charmhelpers/core/services/helpers.py +++ b/hooks/charmhelpers/core/services/helpers.py @@ -1,3 +1,5 @@ +import os +import yaml from charmhelpers.core import hookenv from charmhelpers.core import templating @@ -19,15 +21,21 @@ class RelationContext(dict): the `name` attribute that are complete will used to populate the dictionary values (see `get_data`, below). - The generated context will be namespaced under the interface type, to prevent - potential naming conflicts. + The generated context will be namespaced under the relation :attr:`name`, + to prevent potential naming conflicts. 
+ + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` """ name = None interface = None required_keys = [] - def __init__(self, *args, **kwargs): - super(RelationContext, self).__init__(*args, **kwargs) + def __init__(self, name=None, additional_required_keys=None): + if name is not None: + self.name = name + if additional_required_keys is not None: + self.required_keys.extend(additional_required_keys) self.get_data() def __bool__(self): @@ -101,9 +109,115 @@ class RelationContext(dict): return {} +class MysqlRelation(RelationContext): + """ + Relation context for the `mysql` interface. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'db' + interface = 'mysql' + required_keys = ['host', 'user', 'password', 'database'] + + +class HttpRelation(RelationContext): + """ + Relation context for the `http` interface. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'website' + interface = 'http' + required_keys = ['host', 'port'] + + def provide_data(self): + return { + 'host': hookenv.unit_get('private-address'), + 'port': 80, + } + + +class RequiredConfig(dict): + """ + Data context that loads config options with one or more mandatory options. + + Once the required options have been changed from their default values, all + config options will be available, namespaced under `config` to prevent + potential naming conflicts (for example, between a config option and a + relation property). + + :param list *args: List of options that must be changed from their default values. + """ + + def __init__(self, *args): + self.required_options = args + self['config'] = hookenv.config() + with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: + self.config = yaml.load(fp).get('options', {}) + + def __bool__(self): + for option in self.required_options: + if option not in self['config']: + return False + current_value = self['config'][option] + default_value = self.config[option].get('default') + if current_value == default_value: + return False + if current_value in (None, '') and default_value in (None, ''): + return False + return True + + def __nonzero__(self): + return self.__bool__() + + +class StoredContext(dict): + """ + A data context that always returns the data that it was first created with. + + This is useful to do a one-time generation of things like passwords, that + will thereafter use the same value that was originally generated, instead + of generating a new value each time it is run. + """ + def __init__(self, file_name, config_data): + """ + If the file exists, populate `self` with the data from the file. + Otherwise, populate with the given data and persist it to the file. 
+ """ + if os.path.exists(file_name): + self.update(self.read_context(file_name)) + else: + self.store_context(file_name, config_data) + self.update(config_data) + + def store_context(self, file_name, config_data): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'w') as file_stream: + os.fchmod(file_stream.fileno(), 0600) + yaml.dump(config_data, file_stream) + + def read_context(self, file_name): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'r') as file_stream: + data = yaml.load(file_stream) + if not data: + raise OSError("%s is empty" % file_name) + return data + + class TemplateCallback(ManagerCallback): """ - Callback class that will render a template, for use as a ready action. + Callback class that will render a Jinja2 template, for use as a ready action. + + :param str source: The template source file, relative to `$CHARM_DIR/templates` + :param str target: The target to write the rendered template to + :param str owner: The owner of the rendered file + :param str group: The group of the rendered file + :param int perms: The permissions of the rendered file """ def __init__(self, source, target, owner='root', group='root', perms=0444): self.source = source diff --git a/hooks/charmhelpers/core/sysctl.py b/hooks/charmhelpers/core/sysctl.py new file mode 100644 index 00000000..0f299630 --- /dev/null +++ b/hooks/charmhelpers/core/sysctl.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +__author__ = 'Jorge Niedbalski R. ' + +import yaml + +from subprocess import check_call + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) + + +def create(sysctl_dict, sysctl_file): + """Creates a sysctl.conf file from a YAML associative array + + :param sysctl_dict: a dict of sysctl options eg { 'kernel.max_pid': 1337 } + :type sysctl_dict: dict + :param sysctl_file: path to the sysctl file to be saved + :type sysctl_file: str or unicode + :returns: None + """ + sysctl_dict = yaml.load(sysctl_dict) + + with open(sysctl_file, "w") as fd: + for key, value in sysctl_dict.items(): + fd.write("{}={}\n".format(key, value)) + + log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict), + level=DEBUG) + + check_call(["sysctl", "-p", sysctl_file]) diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index 8e9d3804..32a673d6 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -208,7 +208,8 @@ def add_source(source, key=None): """Add a package source to this system. @param source: a URL or sources.list entry, as supported by - add-apt-repository(1). Examples: + add-apt-repository(1). Examples:: + ppa:charmers/example deb https://stub:key@private.example.com/ubuntu trusty main @@ -311,22 +312,35 @@ def configure_sources(update=False, apt_update(fatal=True) -def install_remote(source): +def install_remote(source, *args, **kwargs): """ Install a file tree from a remote source The specified source should be a url of the form: scheme://[host]/path[#[option=value][&...]] - Schemes supported are based on this modules submodules - Options supported are submodule-specific""" + Schemes supported are based on this modules submodules. + Options supported are submodule-specific. + Additional arguments are passed through to the submodule. 
+ + For example:: + + dest = install_remote('http://example.com/archive.tgz', + checksum='deadbeef', + hash_type='sha1') + + This will download `archive.tgz`, validate it using SHA1 and, if + the file is ok, extract it and return the directory in which it + was extracted. If the checksum fails, it will raise + :class:`charmhelpers.core.host.ChecksumError`. + """ # We ONLY check for True here because can_handle may return a string # explaining why it can't handle a given source. handlers = [h for h in plugins() if h.can_handle(source) is True] installed_to = None for handler in handlers: try: - installed_to = handler.install(source) + installed_to = handler.install(source, *args, **kwargs) except UnhandledSource: pass if not installed_to: diff --git a/hooks/charmhelpers/fetch/archiveurl.py b/hooks/charmhelpers/fetch/archiveurl.py index 87e7071a..8c045650 100644 --- a/hooks/charmhelpers/fetch/archiveurl.py +++ b/hooks/charmhelpers/fetch/archiveurl.py @@ -1,6 +1,8 @@ import os import urllib2 +from urllib import urlretrieve import urlparse +import hashlib from charmhelpers.fetch import ( BaseFetchHandler, @@ -10,11 +12,19 @@ from charmhelpers.payload.archive import ( get_archive_handler, extract, ) -from charmhelpers.core.host import mkdir +from charmhelpers.core.host import mkdir, check_hash class ArchiveUrlFetchHandler(BaseFetchHandler): - """Handler for archives via generic URLs""" + """ + Handler to download archive files from arbitrary URLs. + + Can fetch from http, https, ftp, and file URLs. + + Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files. + + Installs the contents of the archive in $CHARM_DIR/fetched/. + """ def can_handle(self, source): url_parts = self.parse_url(source) if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): @@ -24,6 +34,12 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): return False def download(self, source, dest): + """ + Download an archive file. + + :param str source: URL pointing to an archive file. + :param str dest: Local path location to download archive file to. + """ # propogate all exceptions # URLError, OSError, etc proto, netloc, path, params, query, fragment = urlparse.urlparse(source) @@ -48,7 +64,30 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): os.unlink(dest) raise e - def install(self, source): + # Mandatory file validation via Sha1 or MD5 hashing. + def download_and_validate(self, url, hashsum, validate="sha1"): + tempfile, headers = urlretrieve(url) + check_hash(tempfile, hashsum, validate) + return tempfile + + def install(self, source, dest=None, checksum=None, hash_type='sha1'): + """ + Download and install an archive file, with optional checksum validation. + + The checksum can also be given on the `source` URL's fragment. + For example:: + + handler.install('http://example.com/file.tgz#sha1=deadbeef') + + :param str source: URL pointing to an archive file. + :param str dest: Local destination path to install to. If not given, + installs to `$CHARM_DIR/archives/archive_file_name`. + :param str checksum: If given, validate the archive file after download. + :param str hash_type: Algorithm used to generate `checksum`. + Can be any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. 
+ + """ url_parts = self.parse_url(source) dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') if not os.path.exists(dest_dir): @@ -60,4 +99,10 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): raise UnhandledSource(e.reason) except OSError as e: raise UnhandledSource(e.strerror) - return extract(dld_file) + options = urlparse.parse_qs(url_parts.fragment) + for key, value in options.items(): + if key in hashlib.algorithms: + check_hash(dld_file, value, key) + if checksum: + check_hash(dld_file, checksum, hash_type) + return extract(dld_file, dest) From 0430a6c87cefae4ec3eb7963ba441d8b04c11259 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 15 Oct 2014 08:47:20 +0000 Subject: [PATCH 07/22] Add 0mq to nova.comf template --- templates/juno/nova.conf | 2 ++ 1 file changed, 2 insertions(+) diff --git a/templates/juno/nova.conf b/templates/juno/nova.conf index 7e51a503..0343664c 100644 --- a/templates/juno/nova.conf +++ b/templates/juno/nova.conf @@ -26,6 +26,8 @@ compute_driver=libvirt.LibvirtDriver {% include "parts/rabbitmq" %} +{% include "parts/zeromq" %} + {% if glance_api_servers -%} glance_api_servers = {{ glance_api_servers }} {% endif -%} From cf06719ff0a200541feb630da64d61b538825e35 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 20 Oct 2014 09:58:40 +0000 Subject: [PATCH 08/22] Point charm-helpers sync back at trunk and sync --- charm-helpers-hooks.yaml | 2 +- hooks/charmhelpers/contrib/openstack/utils.py | 16 ++++++++++++++++ hooks/charmhelpers/contrib/storage/linux/ceph.py | 7 ++++--- hooks/charmhelpers/core/host.py | 9 ++++++++- 4 files changed, 29 insertions(+), 5 deletions(-) diff --git a/charm-helpers-hooks.yaml b/charm-helpers-hooks.yaml index 541eecc2..f1b156d4 100644 --- a/charm-helpers-hooks.yaml +++ b/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -branch: lp:~openstack-charmers/charm-helpers/0mq +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 90bdad61..ae24fb91 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -2,6 +2,7 @@ # Common python helper functions used for OpenStack charms. 
from collections import OrderedDict +from functools import wraps import subprocess import json @@ -492,3 +493,18 @@ def sync_db_with_multi_ipv6_addresses(database, database_user, for rid in relation_ids('shared-db'): relation_set(relation_id=rid, **kwargs) + + +def os_requires_version(ostack_release, pkg): + """ + Decorator for hook to specify minimum supported release + """ + def wrap(f): + @wraps(f) + def wrapped_f(*args): + if os_release(pkg) < ostack_release: + raise Exception("This hook is not supported on releases" + " before %s" % ostack_release) + f(*args) + return wrapped_f + return wrap diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py index 768438a4..598ec263 100644 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -113,7 +113,7 @@ def get_osds(service): return None -def create_pool(service, name, replicas=2): +def create_pool(service, name, replicas=3): ''' Create a new RADOS pool ''' if pool_exists(service, name): log("Ceph pool {} already exists, skipping creation".format(name), @@ -300,7 +300,8 @@ def copy_files(src, dst, symlinks=False, ignore=None): def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, - blk_device, fstype, system_services=[]): + blk_device, fstype, system_services=[], + replicas=3): """ NOTE: This function must only be called from a single service unit for the same rbd_img otherwise data loss will occur. @@ -317,7 +318,7 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, # Ensure pool, RBD image, RBD mappings are in place. if not pool_exists(service, pool): log('ceph: Creating new pool {}.'.format(pool)) - create_pool(service, pool) + create_pool(service, pool, replicas=replicas) if not rbd_exists(service, pool, rbd_img): log('ceph: Creating RBD image ({}).'.format(rbd_img)) diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index d7ce1e4c..8a91a5d6 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -6,6 +6,7 @@ # Matthew Wedgwood import os +import re import pwd import grp import random @@ -317,7 +318,13 @@ def list_nics(nic_type): ip_output = (line for line in ip_output if line) for line in ip_output: if line.split()[1].startswith(int_type): - interfaces.append(line.split()[1].replace(":", "")) + matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line) + if matched: + interface = matched.groups()[0] + else: + interface = line.split()[1].replace(":", "") + interfaces.append(interface) + return interfaces From 6cc94116f11fcc1f900c8335c8ccf9b4a4bb09ce Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 20 Oct 2014 10:15:18 +0000 Subject: [PATCH 09/22] Use os_requires_version to ensure that zmq is only used with >= juno --- hooks/nova_compute_hooks.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hooks/nova_compute_hooks.py b/hooks/nova_compute_hooks.py index 79f2f222..0020a5f3 100755 --- a/hooks/nova_compute_hooks.py +++ b/hooks/nova_compute_hooks.py @@ -28,6 +28,7 @@ from charmhelpers.fetch import ( from charmhelpers.contrib.openstack.utils import ( configure_installation_source, openstack_upgrade_available, + os_requires_version, ) from charmhelpers.contrib.storage.linux.ceph import ensure_ceph_keyring @@ -264,6 +265,7 @@ def nova_ceilometer_relation_changed(): @hooks.hook('zeromq-configuration-relation-joined') +@os_requires_version('juno', 'nova-common') def zeromq_configuration_relation_joined(relid=None): 
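The guard above works because os_requires_version compares release codenames as plain strings, and OpenStack codenames are alphabetical ('icehouse' < 'juno' < 'kilo'), so any hook can be gated the same way; a minimal sketch with a hypothetical hook::

    from charmhelpers.contrib.openstack.utils import os_requires_version

    @os_requires_version('juno', 'nova-common')
    def juno_or_later_only():
        # raises Exception("This hook is not supported on releases before juno")
        # when nova-common maps to icehouse or older; otherwise runs normally
        pass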
relation_set(relation_id=relid, topics=" ".join(get_topics()), From 0a3a6d14ec296e263ed3064edadbc3a1d82bd91c Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 20 Oct 2014 11:17:54 +0000 Subject: [PATCH 10/22] Sync charmhelpers --- hooks/charmhelpers/contrib/openstack/context.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index acd9bca8..f65658a7 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -60,7 +60,6 @@ from charmhelpers.contrib.network.ip import ( from charmhelpers.contrib.openstack.utils import ( get_host_ip, - get_matchmaker_map, ) CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' @@ -951,12 +950,8 @@ class NotificationDriverContext(OSContextGenerator): def __call__(self): ctxt = { - 'notifications': "False", + 'notifications': 'False', } - if is_relation_made(self.zmq_relation): - matchmaker_data = get_matchmaker_map() - if 'notifications-info' in matchmaker_data: - ctxt['notifications'] = "True" - elif is_relation_made(self.amqp_relation): + if is_relation_made(self.amqp_relation): ctxt['notifications'] = "True" return ctxt From ed3eed8290b9c01405749b70b58202919f576230 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 20 Oct 2014 14:16:22 +0000 Subject: [PATCH 11/22] Fix unit tests and lint --- hooks/nova_compute_utils.py | 2 +- unit_tests/test_nova_compute_hooks.py | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index d3b3acb3..206f6001 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -90,7 +90,7 @@ BASE_RESOURCE_MAP = { service='nova', config_file=NOVA_CONF), InstanceConsoleContext(), - context.ZeroMQContext(), + context.ZeroMQContext(), context.NotificationDriverContext()], }, } diff --git a/unit_tests/test_nova_compute_hooks.py b/unit_tests/test_nova_compute_hooks.py index cd4aa80d..598bc458 100644 --- a/unit_tests/test_nova_compute_hooks.py +++ b/unit_tests/test_nova_compute_hooks.py @@ -89,6 +89,7 @@ class NovaComputeRelationsTests(CharmTestCase): @patch.object(hooks, 'compute_joined') def test_config_changed_with_migration(self, compute_joined): self.migration_enabled.return_value = True + _zmq_joined = self.patch('zeromq_configuration_relation_joined') self.test_config.set('migration-auth-type', 'ssh') self.relation_ids.return_value = [ 'cloud-compute:0', @@ -101,10 +102,12 @@ class NovaComputeRelationsTests(CharmTestCase): ] self.assertEquals(ex, compute_joined.call_args_list) self.assertTrue(self.initialize_ssh_keys.called) + self.assertTrue(_zmq_joined.called) @patch.object(hooks, 'compute_joined') def test_config_changed_with_resize(self, compute_joined): self.test_config.set('enable-resize', True) + _zmq_joined = self.patch('zeromq_configuration_relation_joined') self.relation_ids.return_value = [ 'cloud-compute:0', 'cloud-compute:1' @@ -117,10 +120,12 @@ class NovaComputeRelationsTests(CharmTestCase): self.assertEquals(ex, compute_joined.call_args_list) self.initialize_ssh_keys.assert_called_with(user='nova') self.enable_shell.assert_called_with(user='nova') + self.assertTrue(_zmq_joined.called) @patch.object(hooks, 'compute_joined') def test_config_changed_without_resize(self, compute_joined): self.test_config.set('enable-resize', False) + _zmq_joined = self.patch('zeromq_configuration_relation_joined') 
self.relation_ids.return_value = [ 'cloud-compute:0', 'cloud-compute:1' @@ -132,6 +137,7 @@ class NovaComputeRelationsTests(CharmTestCase): ] self.assertEquals(ex, compute_joined.call_args_list) self.disable_shell.assert_called_with(user='nova') + self.assertTrue(_zmq_joined.called) @patch.object(hooks, 'compute_joined') def test_config_changed_no_upgrade_no_migration(self, compute_joined): From a13324477156144690acc68b7be754cb083c976f Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 23 Oct 2014 10:24:12 +0100 Subject: [PATCH 12/22] Remove superfluous stopstart=True from 0mq config restart_on_change decorator --- hooks/nova_compute_hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hooks/nova_compute_hooks.py b/hooks/nova_compute_hooks.py index 0020a5f3..25e51b2c 100755 --- a/hooks/nova_compute_hooks.py +++ b/hooks/nova_compute_hooks.py @@ -273,7 +273,7 @@ def zeromq_configuration_relation_joined(relid=None): @hooks.hook('zeromq-configuration-relation-changed') -@restart_on_change(restart_map(), stopstart=True) +@restart_on_change(restart_map()) def zeromq_configuration_relation_changed(): CONFIGS.write(NOVA_CONF) From f73a99c93f1e280fbeccfdcce3af093a31e08134 Mon Sep 17 00:00:00 2001 From: "james.page@ubuntu.com" <> Date: Mon, 19 Jan 2015 09:17:01 +0000 Subject: [PATCH 13/22] Resync 0mq helpers, use centralized template --- charm-helpers-hooks.yaml | 2 +- .../charmhelpers/contrib/openstack/context.py | 39 +++++++++++-------- .../contrib/openstack/templates/haproxy.cfg | 4 +- .../contrib/openstack/templates}/zeromq | 8 +++- .../contrib/storage/linux/ceph.py | 11 ++++++ templates/juno/nova.conf | 4 +- 6 files changed, 47 insertions(+), 21 deletions(-) rename {templates/parts => hooks/charmhelpers/contrib/openstack/templates}/zeromq (53%) diff --git a/charm-helpers-hooks.yaml b/charm-helpers-hooks.yaml index 22fe260e..a1d15227 100644 --- a/charm-helpers-hooks.yaml +++ b/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: lp:~openstack-charmers/charm-helpers/0mq destination: hooks/charmhelpers include: - core diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 180bfad2..f63f0078 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -468,21 +468,25 @@ class HAProxyContext(OSContextGenerator): _unit = unit.replace('/', '-') cluster_hosts[laddr]['backends'][_unit] = _laddr - # NOTE(jamespage) no split configurations found, just use - # private addresses - if not cluster_hosts: - netmask = get_netmask_for_address(addr) - cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask), - 'backends': {l_unit: addr}} - for rid in relation_ids('cluster'): - for unit in related_units(rid): - _laddr = relation_get('private-address', - rid=rid, unit=unit) - if _laddr: - _unit = unit.replace('/', '-') - cluster_hosts[addr]['backends'][_unit] = _laddr + # NOTE(jamespage) add backend based on private address - this + # with either be the only backend or the fallback if no acls + # match in the frontend + cluster_hosts[addr] = {} + netmask = get_netmask_for_address(addr) + cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask), + 'backends': {l_unit: addr}} + for rid in relation_ids('cluster'): + for unit in related_units(rid): + _laddr = relation_get('private-address', + rid=rid, unit=unit) + if _laddr: + _unit = unit.replace('/', '-') + cluster_hosts[addr]['backends'][_unit] = _laddr - ctxt = {'frontends': 
cluster_hosts} + ctxt = { + 'frontends': cluster_hosts, + 'default_backend': addr + } if config('haproxy-server-timeout'): ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout') @@ -663,8 +667,9 @@ class ApacheSSLContext(OSContextGenerator): addresses = self.get_network_addresses() for address, endpoint in sorted(set(addresses)): for api_port in self.external_ports: - ext_port = determine_apache_port(api_port) - int_port = determine_api_port(api_port) + ext_port = determine_apache_port(api_port, + singlenode_mode=True) + int_port = determine_api_port(api_port, singlenode_mode=True) portmap = (address, endpoint, int(ext_port), int(int_port)) ctxt['endpoints'].append(portmap) ctxt['ext_ports'].append(int(ext_port)) @@ -1000,6 +1005,8 @@ class ZeroMQContext(OSContextGenerator): for unit in related_units(rid): ctxt['zmq_nonce'] = relation_get('nonce', unit, rid) ctxt['zmq_host'] = relation_get('host', unit, rid) + ctxt['zmq_redis_address'] = relation_get( + 'zmq_redis_address', unit, rid) return ctxt diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index 9ae1efb9..ad875f16 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -44,7 +44,9 @@ frontend tcp-in_{{ service }} {% for frontend in frontends -%} acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }} use_backend {{ service }}_{{ frontend }} if net_{{ frontend }} - {% endfor %} + {% endfor -%} + default_backend {{ service }}_{{ default_backend }} + {% for frontend in frontends -%} backend {{ service }}_{{ frontend }} balance leastconn diff --git a/templates/parts/zeromq b/hooks/charmhelpers/contrib/openstack/templates/zeromq similarity index 53% rename from templates/parts/zeromq rename to hooks/charmhelpers/contrib/openstack/templates/zeromq index 3e32288c..ab796d93 100644 --- a/templates/parts/zeromq +++ b/hooks/charmhelpers/contrib/openstack/templates/zeromq @@ -1,6 +1,12 @@ {% if zmq_host -%} # ZeroMQ configuration (restart-nonce: {{ zmq_nonce }}) rpc_backend = zmq -rpc_zmq_matchmaker = oslo.messaging._drivers.matchmaker_ring.MatchMakerRing rpc_zmq_host = {{ zmq_host }} +{% if zmq_redis_address -%} +rpc_zmq_matchmaker = oslo.messaging._drivers.matchmaker_redis.MatchMakerRedis +[matchmaker_redis] +host = {{ zmq_redis_address }} +{% else -%} +rpc_zmq_matchmaker = oslo.messaging._drivers.matchmaker_ring.MatchMakerRing +{% endif -%} {% endif -%} diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py index 1479f4f3..6ebeab5c 100644 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -157,6 +157,17 @@ def create_keyring(service, key): log('Created new ceph keyring at %s.' % keyring, level=DEBUG) +def delete_keyring(service): + """Delete an existing Ceph keyring.""" + keyring = _keyring_path(service) + if not os.path.exists(keyring): + log('Keyring does not exist at %s' % keyring, level=WARNING) + return + + os.remove(keyring) + log('Deleted ring at %s.' 
% keyring, level=INFO) + + def create_key_file(service, key): """Create a file containing key.""" keyfile = _keyfile_path(service) diff --git a/templates/juno/nova.conf b/templates/juno/nova.conf index c1de01ef..f9cafb9a 100644 --- a/templates/juno/nova.conf +++ b/templates/juno/nova.conf @@ -27,8 +27,6 @@ my_ip = {{ host_ip }} {% include "parts/rabbitmq" %} -{% include "parts/zeromq" %} - {% if glance_api_servers -%} glance_api_servers = {{ glance_api_servers }} {% endif -%} @@ -98,6 +96,8 @@ instances_path = {{ instances_path }} {% endfor -%} {% endif -%} +{% include "parts/zeromq" %} + {% if console_access_protocol == 'spice' -%} [spice] agent_enabled = True From b0eff7e4636a581b31136e064d9e50bce72ad1e8 Mon Sep 17 00:00:00 2001 From: "james.page@ubuntu.com" <> Date: Mon, 19 Jan 2015 09:18:27 +0000 Subject: [PATCH 14/22] Drop parts from zeromq snippet use --- templates/juno/nova.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/templates/juno/nova.conf b/templates/juno/nova.conf index f9cafb9a..1b7cbcf9 100644 --- a/templates/juno/nova.conf +++ b/templates/juno/nova.conf @@ -96,7 +96,7 @@ instances_path = {{ instances_path }} {% endfor -%} {% endif -%} -{% include "parts/zeromq" %} +{% include "zeromq" %} {% if console_access_protocol == 'spice' -%} [spice] From 0271be21daf752fdb7c5667d7b3150971f2daf01 Mon Sep 17 00:00:00 2001 From: "james.page@ubuntu.com" <> Date: Tue, 20 Jan 2015 11:38:12 +0000 Subject: [PATCH 15/22] Resync helper --- hooks/charmhelpers/contrib/openstack/templates/zeromq | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hooks/charmhelpers/contrib/openstack/templates/zeromq b/hooks/charmhelpers/contrib/openstack/templates/zeromq index ab796d93..41d64666 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/zeromq +++ b/hooks/charmhelpers/contrib/openstack/templates/zeromq @@ -6,6 +6,8 @@ rpc_zmq_host = {{ zmq_host }} rpc_zmq_matchmaker = oslo.messaging._drivers.matchmaker_redis.MatchMakerRedis [matchmaker_redis] host = {{ zmq_redis_address }} +matchmaker_heartbeat_freq = 15 +matchmaker_heartbeat_ttl = 30 {% else -%} rpc_zmq_matchmaker = oslo.messaging._drivers.matchmaker_ring.MatchMakerRing {% endif -%} From 314c0ee75f7b4f157937eb5fe01bfdaecc5932c5 Mon Sep 17 00:00:00 2001 From: "james.page@ubuntu.com" <> Date: Tue, 20 Jan 2015 12:05:53 +0000 Subject: [PATCH 16/22] Resync helper --- hooks/charmhelpers/contrib/openstack/templates/zeromq | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hooks/charmhelpers/contrib/openstack/templates/zeromq b/hooks/charmhelpers/contrib/openstack/templates/zeromq index 41d64666..0695eef1 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/zeromq +++ b/hooks/charmhelpers/contrib/openstack/templates/zeromq @@ -4,10 +4,10 @@ rpc_backend = zmq rpc_zmq_host = {{ zmq_host }} {% if zmq_redis_address -%} rpc_zmq_matchmaker = oslo.messaging._drivers.matchmaker_redis.MatchMakerRedis -[matchmaker_redis] -host = {{ zmq_redis_address }} matchmaker_heartbeat_freq = 15 matchmaker_heartbeat_ttl = 30 +[matchmaker_redis] +host = {{ zmq_redis_address }} {% else -%} rpc_zmq_matchmaker = oslo.messaging._drivers.matchmaker_ring.MatchMakerRing {% endif -%} From bae43bede317c09d5cc6b34f39ceef7e2cefa2ae Mon Sep 17 00:00:00 2001 From: "james.page@ubuntu.com" <> Date: Tue, 24 Feb 2015 11:19:46 +0000 Subject: [PATCH 17/22] Drop icehouse specific config files - juno upwards only --- templates/icehouse/neutron.conf | 36 ---------- templates/icehouse/nova.conf | 118 -------------------------------- 2 files changed, 
154 deletions(-) delete mode 100644 templates/icehouse/neutron.conf delete mode 100644 templates/icehouse/nova.conf diff --git a/templates/icehouse/neutron.conf b/templates/icehouse/neutron.conf deleted file mode 100644 index 8855c48e..00000000 --- a/templates/icehouse/neutron.conf +++ /dev/null @@ -1,36 +0,0 @@ -# icehouse -############################################################################### -# [ WARNING ] -# Configuration file maintained by Juju. Local changes may be overwritten. -############################################################################### -[DEFAULT] -state_path = /var/lib/neutron -lock_path = $state_path/lock -bind_host = 0.0.0.0 -bind_port = 9696 - -{% if core_plugin -%} -core_plugin = {{ core_plugin }} -{% endif -%} - -api_paste_config = /etc/neutron/api-paste.ini -auth_strategy = keystone -use_syslog = {{ use_syslog }} -{% if notifications == 'True' -%} -notification_driver = neutron.openstack.common.notifier.list_notifier -default_notification_level = INFO -notification_topics = notifications -{% endif -%} - -{% include "parts/rabbitmq" %} - -[QUOTAS] - -[DEFAULT_SERVICETYPE] - -[AGENT] -root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf - -[keystone_authtoken] -signing_dir = /var/lib/neutron/keystone-signing - diff --git a/templates/icehouse/nova.conf b/templates/icehouse/nova.conf deleted file mode 100644 index 41735c92..00000000 --- a/templates/icehouse/nova.conf +++ /dev/null @@ -1,118 +0,0 @@ -# icehouse -############################################################################### -# [ WARNING ] -# Configuration file maintained by Juju. Local changes may be overwritten. -{% if restart_trigger -%} -# restart trigger: {{ restart_trigger }} -{% endif -%} -############################################################################### -[DEFAULT] -dhcpbridge_flagfile=/etc/nova/nova.conf -dhcpbridge=/usr/bin/nova-dhcpbridge -logdir=/var/log/nova -state_path=/var/lib/nova -lock_path=/var/lock/nova -force_dhcp_release=True -libvirt_use_virtio_for_bridges=True -verbose=True -use_syslog = {{ use_syslog }} -ec2_private_dns_show_ip=True -api_paste_config=/etc/nova/api-paste.ini -enabled_apis=ec2,osapi_compute,metadata -auth_strategy=keystone -compute_driver=libvirt.LibvirtDriver - -{% include "parts/database" %} - -{% include "parts/rabbitmq" %} - -{% include "parts/zeromq" %} - -{% if glance_api_servers -%} -glance_api_servers = {{ glance_api_servers }} -{% endif -%} - -{% if rbd_pool -%} -rbd_pool = {{ rbd_pool }} -rbd_user = {{ rbd_user }} -rbd_secret_uuid = {{ rbd_secret_uuid }} -{% endif -%} - -{% if console_vnc_type -%} -vnc_enabled = True -novnc_enabled = True -vnc_keymap = {{ console_keymap }} -vncserver_listen = 0.0.0.0 -vncserver_proxyclient_address = {{ console_listen_addr }} -{% if console_access_protocol == 'novnc' or console_access_protocol == 'vnc' -%} -novncproxy_base_url = {{ novnc_proxy_address }} -{% endif -%} -{% if console_access_protocol == 'xvpvnc' or console_access_protocol == 'vnc' -%} -xvpvncproxy_port = {{ xvpvnc_proxy_port }} -xvpvncproxy_host = {{ xvpvnc_proxy_host }} -xvpvncproxy_base_url = {{ xvpvnc_proxy_address }} -{% endif -%} -{% else -%} -vnc_enabled = False -novnc_enabled = False -{% endif -%} - -{% if neutron_plugin and neutron_plugin == 'ovs' -%} -libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtGenericVIFDriver -{% if neutron_security_groups -%} -security_group_api = neutron -firewall_driver = nova.virt.firewall.NoopFirewallDriver -{% endif -%} -{% endif -%} - -{% if neutron_plugin and (neutron_plugin == 
'nvp' or neutron_plugin == 'nsx') -%} -libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtOpenVswitchVirtualPortDriver -security_group_api = neutron -firewall_driver = nova.virt.firewall.NoopFirewallDriver -{% endif -%} - -{% if network_manager_config -%} -{% for key, value in network_manager_config.iteritems() -%} -{{ key }} = {{ value }} -{% endfor -%} -{% endif -%} - -{% if network_manager == 'neutron' -%} -network_api_class = nova.network.neutronv2.api.API -{% else -%} -network_manager = nova.network.manager.FlatDHCPManager -{% endif -%} - -{% if volume_service -%} -volume_api_class = nova.volume.cinder.API -{% endif -%} - -{% if user_config_flags -%} -{% for key, value in user_config_flags.iteritems() -%} -{{ key }} = {{ value }} -{% endfor -%} -{% endif -%} - -{% if live_migration_uri -%} -live_migration_uri = {{ live_migration_uri }} -{% endif -%} - -{% if instances_path -%} -instances_path = {{ instances_path }} -{% endif -%} - -{% if sections and 'DEFAULT' in sections -%} -{% for key, value in sections['DEFAULT'] -%} -{{ key }} = {{ value }} -{% endfor -%} -{% endif -%} - -{% if console_access_protocol == 'spice' -%} -[spice] -agent_enabled = True -enabled = True -html5proxy_base_url = {{ spice_proxy_address }} -keymap = {{ console_keymap }} -server_listen = 0.0.0.0 -server_proxyclient_address = {{ console_listen_addr }} -{% endif -%} From bf49ea89c33cce3003400c43439e8f7f0d93f568 Mon Sep 17 00:00:00 2001 From: "james.page@ubuntu.com" <> Date: Tue, 24 Feb 2015 11:21:01 +0000 Subject: [PATCH 18/22] Add zeromq snippet for kilo --- templates/kilo/nova.conf | 2 ++ 1 file changed, 2 insertions(+) diff --git a/templates/kilo/nova.conf b/templates/kilo/nova.conf index 7677bcb5..61245ed1 100644 --- a/templates/kilo/nova.conf +++ b/templates/kilo/nova.conf @@ -91,6 +91,8 @@ instances_path = {{ instances_path }} {% endfor -%} {% endif -%} +{% include "zeromq" %} + {% if network_manager == 'neutron' and network_manager_config -%} [neutron] url = {{ network_manager_config.neutron_url }} From 806ae3ac943af04ae9fca804f135d8a16e8e1c58 Mon Sep 17 00:00:00 2001 From: "james.page@ubuntu.com" <> Date: Tue, 24 Feb 2015 12:02:08 +0000 Subject: [PATCH 19/22] Switch to trunk of charm-helpers --- charm-helpers-hooks.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charm-helpers-hooks.yaml b/charm-helpers-hooks.yaml index a1d15227..22fe260e 100644 --- a/charm-helpers-hooks.yaml +++ b/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -branch: lp:~openstack-charmers/charm-helpers/0mq +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core From cf1f792267090bda68293b8531e6d8da664103ef Mon Sep 17 00:00:00 2001 From: "james.page@ubuntu.com" <> Date: Tue, 24 Feb 2015 13:44:48 +0000 Subject: [PATCH 20/22] Specialized zeromq snippet for kilo --- templates/kilo/zeromq | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 templates/kilo/zeromq diff --git a/templates/kilo/zeromq b/templates/kilo/zeromq new file mode 100644 index 00000000..873be80e --- /dev/null +++ b/templates/kilo/zeromq @@ -0,0 +1,14 @@ +{% if zmq_host -%} +# ZeroMQ configuration (restart-nonce: {{ zmq_nonce }}) +rpc_backend = zmq +rpc_zmq_host = {{ zmq_host }} +{% if zmq_redis_address -%} +rpc_zmq_matchmaker = oslo_messaging._drivers.matchmaker_redis.MatchMakerRedis +matchmaker_heartbeat_freq = 15 +matchmaker_heartbeat_ttl = 30 +[matchmaker_redis] +host = {{ zmq_redis_address }} +{% else -%} +rpc_zmq_matchmaker = oslo_messaging._drivers.matchmaker_ring.MatchMakerRing +{% endif -%} +{% endif -%} From 
84a44dcb03cddd38509037e014bb8fbcb5e49975 Mon Sep 17 00:00:00 2001 From: "james.page@ubuntu.com" <> Date: Mon, 16 Mar 2015 14:17:38 +0000 Subject: [PATCH 21/22] Fixup pydev --- .pydevproject | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pydevproject b/.pydevproject index 03a4f191..a69fbde8 100644 --- a/.pydevproject +++ b/.pydevproject @@ -3,7 +3,7 @@ python 2.7 Default -/nova-compute-charm/hooks -/nova-compute-charm/unit_tests +/nova-compute/hooks +/nova-compute/unit_tests From 3145496f867fb717554828206236a7698c1788cd Mon Sep 17 00:00:00 2001 From: "james.page@ubuntu.com" <> Date: Mon, 16 Mar 2015 16:14:33 +0000 Subject: [PATCH 22/22] Make 0mq support >= kilo --- hooks/nova_compute_hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hooks/nova_compute_hooks.py b/hooks/nova_compute_hooks.py index 10e4c436..f3364589 100755 --- a/hooks/nova_compute_hooks.py +++ b/hooks/nova_compute_hooks.py @@ -317,7 +317,7 @@ def nova_ceilometer_relation_changed(): @hooks.hook('zeromq-configuration-relation-joined') -@os_requires_version('juno', 'nova-common') +@os_requires_version('kilo', 'nova-common') def zeromq_configuration_relation_joined(relid=None): relation_set(relation_id=relid, topics=" ".join(get_topics()),
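Taken together with the kilo zeromq template added earlier in the series, a unit that has published host, nonce and a redis address over the zeromq-configuration relation would end up with a nova.conf fragment roughly like the following (the hostname, address and nonce are illustrative only)::

    # ZeroMQ configuration (restart-nonce: 4f2e9c)
    rpc_backend = zmq
    rpc_zmq_host = juju-machine-3
    rpc_zmq_matchmaker = oslo_messaging._drivers.matchmaker_redis.MatchMakerRedis
    matchmaker_heartbeat_freq = 15
    matchmaker_heartbeat_ttl = 30
    [matchmaker_redis]
    host = 10.0.0.10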