diff --git a/charmhelpers/contrib/charmsupport/nrpe.py b/charmhelpers/contrib/charmsupport/nrpe.py
index e3d10c1c..10d86ac0 100644
--- a/charmhelpers/contrib/charmsupport/nrpe.py
+++ b/charmhelpers/contrib/charmsupport/nrpe.py
@@ -416,15 +416,20 @@ def copy_nrpe_checks(nrpe_files_dir=None):
 
     """
     NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
-    default_nrpe_files_dir = os.path.join(
-        os.getenv('CHARM_DIR'),
-        'hooks',
-        'charmhelpers',
-        'contrib',
-        'openstack',
-        'files')
-    if not nrpe_files_dir:
-        nrpe_files_dir = default_nrpe_files_dir
+    if nrpe_files_dir is None:
+        # determine if "charmhelpers" is in CHARMDIR or CHARMDIR/hooks
+        for segment in ['.', 'hooks']:
+            nrpe_files_dir = os.path.abspath(os.path.join(
+                os.getenv('CHARM_DIR'),
+                segment,
+                'charmhelpers',
+                'contrib',
+                'openstack',
+                'files'))
+            if os.path.isdir(nrpe_files_dir):
+                break
+        else:
+            raise RuntimeError("Couldn't find charmhelpers directory")
     if not os.path.exists(NAGIOS_PLUGINS):
         os.makedirs(NAGIOS_PLUGINS)
     for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
diff --git a/charmhelpers/contrib/openstack/amulet/deployment.py b/charmhelpers/contrib/openstack/amulet/deployment.py
index 1c96752a..5b7e3cfb 100644
--- a/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -168,7 +168,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
                          'nrpe', 'openvswitch-odl', 'neutron-api-odl',
                          'odl-controller', 'cinder-backup', 'nexentaedge-data',
                          'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
-                         'cinder-nexentaedge', 'nexentaedge-mgmt']))
+                         'cinder-nexentaedge', 'nexentaedge-mgmt',
+                         'ceilometer-agent']))
 
         if self.openstack:
             for svc in services:
diff --git a/charmhelpers/contrib/openstack/ha/utils.py b/charmhelpers/contrib/openstack/ha/utils.py
index add8eb9a..bbdee78d 100644
--- a/charmhelpers/contrib/openstack/ha/utils.py
+++ b/charmhelpers/contrib/openstack/ha/utils.py
@@ -23,6 +23,7 @@
 Helpers for high availability.
 """
 
+import hashlib
 import json
 
 import re
@@ -35,7 +36,6 @@ from charmhelpers.core.hookenv import (
     config,
     status_set,
     DEBUG,
-    WARNING,
 )
 
 from charmhelpers.core.host import (
@@ -124,13 +124,29 @@ def expect_ha():
     return len(ha_related_units) > 0 or config('vip') or config('dns-ha')
 
 
-def generate_ha_relation_data(service):
+def generate_ha_relation_data(service, extra_settings=None):
     """ Generate relation data for ha relation
 
     Based on configuration options and unit interfaces, generate a json
     encoded dict of relation data items for the hacluster relation,
     providing configuration for DNS HA or VIP's + haproxy clone sets.
 
+    Example of supplying additional settings::
+
+        COLO_CONSOLEAUTH = 'inf: res_nova_consoleauth grp_nova_vips'
+        AGENT_CONSOLEAUTH = 'ocf:openstack:nova-consoleauth'
+        AGENT_CA_PARAMS = 'op monitor interval="5s"'
+
+        ha_console_settings = {
+            'colocations': {'vip_consoleauth': COLO_CONSOLEAUTH},
+            'init_services': {'res_nova_consoleauth': 'nova-consoleauth'},
+            'resources': {'res_nova_consoleauth': AGENT_CONSOLEAUTH},
+            'resource_params': {'res_nova_consoleauth': AGENT_CA_PARAMS}}
+        generate_ha_relation_data('nova', extra_settings=ha_console_settings)
+
+
+    @param service: Name of the service being configured
+    @param extra_settings: Dict of additional resource data
     @returns dict: json encoded data for use with relation_set
     """
     _haproxy_res = 'res_{}_haproxy'.format(service)
@@ -149,6 +165,13 @@ def generate_ha_relation_data(service):
         },
     }
 
+    if extra_settings:
+        for k, v in extra_settings.items():
+            if _relation_data.get(k):
+                _relation_data[k].update(v)
+            else:
+                _relation_data[k] = v
+
     if config('dns-ha'):
         update_hacluster_dns_ha(service, _relation_data)
     else:
@@ -232,39 +255,67 @@ def update_hacluster_vip(service, relation_data):
     """
     cluster_config = get_hacluster_config()
     vip_group = []
+    vips_to_delete = []
     for vip in cluster_config['vip'].split():
         if is_ipv6(vip):
-            res_neutron_vip = 'ocf:heartbeat:IPv6addr'
+            res_vip = 'ocf:heartbeat:IPv6addr'
             vip_params = 'ipv6addr'
         else:
-            res_neutron_vip = 'ocf:heartbeat:IPaddr2'
+            res_vip = 'ocf:heartbeat:IPaddr2'
             vip_params = 'ip'
 
-        iface = (get_iface_for_address(vip) or
-                 config('vip_iface'))
-        netmask = (get_netmask_for_address(vip) or
-                   config('vip_cidr'))
+        iface = get_iface_for_address(vip)
+        netmask = get_netmask_for_address(vip)
+
+        fallback_params = False
+        if iface is None:
+            iface = config('vip_iface')
+            fallback_params = True
+        if netmask is None:
+            netmask = config('vip_cidr')
+            fallback_params = True
 
         if iface is not None:
+            # NOTE(jamespage): Delete old VIP resources
+            # Old style naming encoding iface in name
+            # does not work well in environments where
+            # interface/subnet wiring is not consistent
             vip_key = 'res_{}_{}_vip'.format(service, iface)
-            if vip_key in vip_group:
-                if vip not in relation_data['resource_params'][vip_key]:
-                    vip_key = '{}_{}'.format(vip_key, vip_params)
-                else:
-                    log("Resource '%s' (vip='%s') already exists in "
-                        "vip group - skipping" % (vip_key, vip), WARNING)
-                    continue
+            if vip_key in vips_to_delete:
+                vip_key = '{}_{}'.format(vip_key, vip_params)
+            vips_to_delete.append(vip_key)
+
+            vip_key = 'res_{}_{}_vip'.format(
+                service,
+                hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7])
+
+            relation_data['resources'][vip_key] = res_vip
+            # NOTE(jamespage):
+            # Use option provided vip params if these were used
+            # instead of auto-detected values
+            if fallback_params:
+                relation_data['resource_params'][vip_key] = (
+                    'params {ip}="{vip}" cidr_netmask="{netmask}" '
+                    'nic="{iface}"'.format(ip=vip_params,
+                                           vip=vip,
+                                           iface=iface,
+                                           netmask=netmask)
+                )
+            else:
+                # NOTE(jamespage):
+                # let heartbeat figure out which interface and
+                # netmask to configure, which works nicely
+                # when network interface naming is not
+                # consistent across units.
+                relation_data['resource_params'][vip_key] = (
+                    'params {ip}="{vip}"'.format(ip=vip_params,
+                                                 vip=vip))
 
-            relation_data['resources'][vip_key] = res_neutron_vip
-            relation_data['resource_params'][vip_key] = (
-                'params {ip}="{vip}" cidr_netmask="{netmask}" '
-                'nic="{iface}"'.format(ip=vip_params,
-                                       vip=vip,
-                                       iface=iface,
-                                       netmask=netmask)
-            )
             vip_group.append(vip_key)
 
+    if vips_to_delete:
+        relation_data['delete_resources'] = vips_to_delete
+
     if len(vip_group) >= 1:
         relation_data['groups'] = {
             'grp_{}_vips'.format(service): ' '.join(vip_group)
diff --git a/charmhelpers/contrib/openstack/utils.py b/charmhelpers/contrib/openstack/utils.py
index 29cad083..59312fcf 100644
--- a/charmhelpers/contrib/openstack/utils.py
+++ b/charmhelpers/contrib/openstack/utils.py
@@ -73,6 +73,8 @@ from charmhelpers.core.host import (
     service_running,
     service_pause,
     service_resume,
+    service_stop,
+    service_start,
     restart_on_change_helper,
 )
 from charmhelpers.fetch import (
@@ -299,7 +301,7 @@ def get_os_codename_install_source(src):
     rel = ''
     if src is None:
         return rel
-    if src in ['distro', 'distro-proposed']:
+    if src in ['distro', 'distro-proposed', 'proposed']:
         try:
             rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
         except KeyError:
@@ -1303,6 +1305,65 @@ def is_unit_paused_set():
         return False
 
 
+def manage_payload_services(action, services=None, charm_func=None):
+    """Run an action against all services.
+
+    An optional charm_func() can be called. It should raise an Exception to
+    indicate that the function failed. If it was successful it should return
+    None or an optional message.
+
+    The signature for charm_func is:
+    charm_func() -> message: str
+
+    charm_func() is executed after any services are stopped, if supplied.
+
+    The services object can either be:
+      - None : no services were passed (an empty dict is returned)
+      - a list of strings
+      - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
+      - An array of [{'service': service_name, ...}, ...]
+
+    :param action: Action to run: pause, resume, start or stop.
+    :type action: str
+    :param services: See above
+    :type services: See above
+    :param charm_func: function to run for custom charm pausing.
+    :type charm_func: f()
+    :returns: Status boolean and list of messages
+    :rtype: (bool, [])
+    :raises: RuntimeError
+    """
+    actions = {
+        'pause': service_pause,
+        'resume': service_resume,
+        'start': service_start,
+        'stop': service_stop}
+    action = action.lower()
+    if action not in actions.keys():
+        raise RuntimeError(
+            "action: {} must be one of: {}".format(action,
+                                                   ', '.join(actions.keys())))
+    services = _extract_services_list_helper(services)
+    messages = []
+    success = True
+    if services:
+        for service in services.keys():
+            rc = actions[action](service)
+            if not rc:
+                success = False
+                messages.append("{} didn't {} cleanly.".format(service,
+                                                               action))
+    if charm_func:
+        try:
+            message = charm_func()
+            if message:
+                messages.append(message)
+        except Exception as e:
+            success = False
+            messages.append(str(e))
+    return success, messages
+
+
 def pause_unit(assess_status_func, services=None, ports=None,
                charm_func=None):
     """Pause a unit by stopping the services and setting 'unit-paused'
@@ -1333,20 +1394,10 @@ def pause_unit(assess_status_func, services=None, ports=None,
     @returns None
     @raises Exception(message) on an error for action_fail().
""" - services = _extract_services_list_helper(services) - messages = [] - if services: - for service in services.keys(): - stopped = service_pause(service) - if not stopped: - messages.append("{} didn't stop cleanly.".format(service)) - if charm_func: - try: - message = charm_func() - if message: - messages.append(message) - except Exception as e: - message.append(str(e)) + _, messages = manage_payload_services( + 'pause', + services=services, + charm_func=charm_func) set_unit_paused() if assess_status_func: message = assess_status_func() @@ -1385,20 +1436,10 @@ def resume_unit(assess_status_func, services=None, ports=None, @returns None @raises Exception(message) on an error for action_fail(). """ - services = _extract_services_list_helper(services) - messages = [] - if services: - for service in services.keys(): - started = service_resume(service) - if not started: - messages.append("{} didn't start cleanly.".format(service)) - if charm_func: - try: - message = charm_func() - if message: - messages.append(message) - except Exception as e: - message.append(str(e)) + _, messages = manage_payload_services( + 'resume', + services=services, + charm_func=charm_func) clear_unit_paused() if assess_status_func: message = assess_status_func() diff --git a/hooks/glance_relations.py b/hooks/glance_relations.py index 3fd2f517..53226f99 100755 --- a/hooks/glance_relations.py +++ b/hooks/glance_relations.py @@ -63,7 +63,6 @@ from charmhelpers.core.hookenv import ( Hooks, log as juju_log, DEBUG, - WARNING, open_port, local_unit, relation_get, @@ -89,10 +88,9 @@ from charmhelpers.fetch import ( from charmhelpers.contrib.hahelpers.cluster import ( is_clustered, is_elected_leader, - get_hacluster_config ) from charmhelpers.contrib.openstack.ha.utils import ( - update_dns_ha_resource_params, + generate_ha_relation_data, ) from charmhelpers.contrib.openstack.utils import ( configure_installation_source, @@ -118,9 +116,6 @@ from charmhelpers.payload.execd import ( execd_preinstall ) from charmhelpers.contrib.network.ip import ( - get_netmask_for_address, - get_iface_for_address, - is_ipv6, get_relation_ip, ) from charmhelpers.contrib.openstack.ip import ( @@ -426,75 +421,8 @@ def upgrade_charm(): @hooks.hook('ha-relation-joined') def ha_relation_joined(relation_id=None): - cluster_config = get_hacluster_config() - - resources = { - 'res_glance_haproxy': 'lsb:haproxy' - } - - resource_params = { - 'res_glance_haproxy': 'op monitor interval="5s"' - } - - if config('dns-ha'): - update_dns_ha_resource_params(relation_id=relation_id, - resources=resources, - resource_params=resource_params) - else: - vip_group = [] - for vip in cluster_config['vip'].split(): - if is_ipv6(vip): - res_ks_vip = 'ocf:heartbeat:IPv6addr' - vip_params = 'ipv6addr' - else: - res_ks_vip = 'ocf:heartbeat:IPaddr2' - vip_params = 'ip' - - iface = (get_iface_for_address(vip) or - config('vip_iface')) - netmask = (get_netmask_for_address(vip) or - config('vip_cidr')) - - if iface is not None: - vip_key = 'res_glance_{}_vip'.format(iface) - if vip_key in vip_group: - if vip not in resource_params[vip_key]: - vip_key = '{}_{}'.format(vip_key, vip_params) - else: - juju_log("Resource '{}' (vip='{}') already exists in " - "vip group - skipping".format(vip_key, vip), - WARNING) - continue - - resources[vip_key] = res_ks_vip - resource_params[vip_key] = ( - 'params {ip}="{vip}" cidr_netmask="{netmask}"' - ' nic="{iface}"'.format(ip=vip_params, - vip=vip, - iface=iface, - netmask=netmask) - ) - vip_group.append(vip_key) - - if len(vip_group) >= 1: - 
-            relation_set(relation_id=relation_id,
-                         groups={'grp_glance_vips': ' '.join(vip_group)})
-
-    init_services = {
-        'res_glance_haproxy': 'haproxy',
-    }
-
-    clones = {
-        'cl_glance_haproxy': 'res_glance_haproxy',
-    }
-
-    relation_set(relation_id=relation_id,
-                 init_services=init_services,
-                 corosync_bindiface=cluster_config['ha-bindiface'],
-                 corosync_mcastport=cluster_config['ha-mcastport'],
-                 resources=resources,
-                 resource_params=resource_params,
-                 clones=clones)
+    settings = generate_ha_relation_data('glance')
+    relation_set(relation_id=relation_id, **settings)
 
 
 @hooks.hook('ha-relation-changed')
diff --git a/unit_tests/test_glance_relations.py b/unit_tests/test_glance_relations.py
index decb99d9..a620bfb4 100644
--- a/unit_tests/test_glance_relations.py
+++ b/unit_tests/test_glance_relations.py
@@ -77,7 +77,7 @@ TO_PATCH = [
     'os_release',
     'openstack_upgrade_available',
     # charmhelpers.contrib.openstack.ha.utils
-    'update_dns_ha_resource_params',
+    'generate_ha_relation_data',
     'is_clustered',
     # charmhelpers.contrib.hahelpers.cluster_utils
     'is_elected_leader',
@@ -99,9 +99,6 @@ TO_PATCH = [
     'execd_preinstall',
     'lsb_release',
     'filter_installed_packages',
-    'get_hacluster_config',
-    'get_netmask_for_address',
-    'get_iface_for_address',
     'sync_db_with_multi_ipv6_addresses',
     'delete_keyring',
     'get_relation_ip',
@@ -599,122 +596,10 @@ class GlanceRelationTests(CharmTestCase):
         self.service_restart.assert_called_once_with('glance-api')
 
     def test_ha_relation_joined(self):
-        self.get_hacluster_config.return_value = {
-            'ha-bindiface': 'em0',
-            'ha-mcastport': '8080',
-            'vip': '10.10.10.10',
-        }
-        self.get_iface_for_address.return_value = 'eth1'
-        self.get_netmask_for_address.return_value = '255.255.0.0'
-        relations.ha_relation_joined()
-        args = {
-            'relation_id': None,
-            'corosync_bindiface': 'em0',
-            'corosync_mcastport': '8080',
-            'init_services': {'res_glance_haproxy': 'haproxy'},
-            'resources': {'res_glance_eth1_vip': 'ocf:heartbeat:IPaddr2',
-                          'res_glance_haproxy': 'lsb:haproxy'},
-            'resource_params': {
-                'res_glance_eth1_vip': 'params ip="10.10.10.10"'
-                ' cidr_netmask="255.255.0.0" nic="eth1"',
-                'res_glance_haproxy': 'op monitor interval="5s"'},
-            'clones': {'cl_glance_haproxy': 'res_glance_haproxy'}
-        }
-        self.relation_set.assert_has_calls([
-            call(relation_id=None,
-                 groups={'grp_glance_vips': 'res_glance_eth1_vip'}),
-            call(**args),
-        ])
-
-    def test_ha_relation_joined_no_bound_ip(self):
-        self.get_hacluster_config.return_value = {
-            'ha-bindiface': 'em0',
-            'ha-mcastport': '8080',
-            'vip': '10.10.10.10',
-        }
-        self.test_config.set('vip_iface', 'eth120')
-        self.test_config.set('vip_cidr', '21')
-        self.get_iface_for_address.return_value = None
-        self.get_netmask_for_address.return_value = None
-        relations.ha_relation_joined()
-        args = {
-            'relation_id': None,
-            'corosync_bindiface': 'em0',
-            'corosync_mcastport': '8080',
-            'init_services': {'res_glance_haproxy': 'haproxy'},
-            'resources': {'res_glance_eth120_vip': 'ocf:heartbeat:IPaddr2',
-                          'res_glance_haproxy': 'lsb:haproxy'},
-            'resource_params': {
-                'res_glance_eth120_vip': 'params ip="10.10.10.10"'
-                ' cidr_netmask="21" nic="eth120"',
-                'res_glance_haproxy': 'op monitor interval="5s"'},
-            'clones': {'cl_glance_haproxy': 'res_glance_haproxy'}
-        }
-        self.relation_set.assert_has_calls([
-            call(relation_id=None,
-                 groups={'grp_glance_vips': 'res_glance_eth120_vip'}),
-            call(**args),
-        ])
-
-    def test_ha_relation_joined_with_ipv6(self):
-        self.test_config.set('prefer-ipv6', True)
-        self.get_hacluster_config.return_value = {
-            'ha-bindiface': 'em0',
-            'ha-mcastport': '8080',
-            'vip': '2001:db8:1::1',
-        }
-        self.get_iface_for_address.return_value = 'eth1'
-        self.get_netmask_for_address.return_value = '64'
-        relations.ha_relation_joined()
-        args = {
-            'relation_id': None,
-            'corosync_bindiface': 'em0',
-            'corosync_mcastport': '8080',
-            'init_services': {'res_glance_haproxy': 'haproxy'},
-            'resources': {'res_glance_eth1_vip': 'ocf:heartbeat:IPv6addr',
-                          'res_glance_haproxy': 'lsb:haproxy'},
-            'resource_params': {
-                'res_glance_eth1_vip': 'params ipv6addr="2001:db8:1::1"'
-                ' cidr_netmask="64" nic="eth1"',
-                'res_glance_haproxy': 'op monitor interval="5s"'},
-            'clones': {'cl_glance_haproxy': 'res_glance_haproxy'}
-        }
-        self.relation_set.assert_called_with(**args)
-
-    def test_ha_joined_dns_ha(self):
-        def _fake_update(resources, resource_params, relation_id=None):
-            resources.update({'res_glance_public_hostname': 'ocf:maas:dns'})
-            resource_params.update({'res_glance_public_hostname':
-                                    'params fqdn="keystone.maas" '
-                                    'ip_address="10.0.0.1"'})
-
-        self.test_config.set('dns-ha', True)
-        self.get_hacluster_config.return_value = {
-            'vip': None,
-            'ha-bindiface': 'em0',
-            'ha-mcastport': '8080',
-            'os-admin-hostname': None,
-            'os-internal-hostname': None,
-            'os-public-hostname': 'keystone.maas',
-        }
-        args = {
-            'relation_id': None,
-            'corosync_bindiface': 'em0',
-            'corosync_mcastport': '8080',
-            'init_services': {'res_glance_haproxy': 'haproxy'},
-            'resources': {'res_glance_public_hostname': 'ocf:maas:dns',
-                          'res_glance_haproxy': 'lsb:haproxy'},
-            'resource_params': {
-                'res_glance_public_hostname': 'params fqdn="keystone.maas" '
-                                              'ip_address="10.0.0.1"',
-                'res_glance_haproxy': 'op monitor interval="5s"'},
-            'clones': {'cl_glance_haproxy': 'res_glance_haproxy'}
-        }
-        self.update_dns_ha_resource_params.side_effect = _fake_update
-
-        relations.ha_relation_joined()
-        self.assertTrue(self.update_dns_ha_resource_params.called)
-        self.relation_set.assert_called_with(**args)
+        self.generate_ha_relation_data.return_value = {'rel_data': 'data'}
+        relations.ha_relation_joined(relation_id='rid:23')
+        self.relation_set.assert_called_once_with(
+            relation_id='rid:23', rel_data='data')
 
     def test_ha_relation_changed_not_clustered(self):
         self.relation_get.return_value = False
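
The sketch below is illustrative only and is not part of the patch: a minimal example of how a charm's ha-relation-joined hook consumes the new generate_ha_relation_data() helper, mirroring the simplified glance hook above; the 'res_glance_example' resource and its parameters are hypothetical, following the nova-consoleauth example in the helper's docstring.

# Illustrative sketch (not part of the diff): using generate_ha_relation_data
# with optional extra_settings from a charm hook.
from charmhelpers.contrib.openstack.ha.utils import generate_ha_relation_data
from charmhelpers.core.hookenv import relation_set


def ha_relation_joined(relation_id=None):
    # Hypothetical additional resource; extra_settings entries are merged
    # into the generated hacluster relation data by the helper.
    extra = {
        'resources': {'res_glance_example': 'ocf:heartbeat:Dummy'},
        'resource_params': {'res_glance_example': 'op monitor interval="5s"'},
    }
    settings = generate_ha_relation_data('glance', extra_settings=extra)
    relation_set(relation_id=relation_id, **settings)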