From 29b586c61bc0d8f9a655970e847942b8a6a61cd0 Mon Sep 17 00:00:00 2001
From: Liam Young
Date: Tue, 4 Dec 2018 10:24:55 +0000
Subject: [PATCH] Use charmhelpers to configure vip and dns-ha settings

Use helpers from charmhelpers to generate the data to send down the
relation to the hacluster charm. This results in a few changes in
behaviour:

1) The charm no longer specifies a nic name when binding the vip,
   because Pacemaker VIP resources can automatically detect and
   configure the correct iface and netmask parameters based on the
   local configuration of the unit.
2) The original iface-named VIP resource is stopped and deleted prior
   to the creation of the new short-hash-named VIP resource.

A usage sketch with illustrative values is included after the diff.

Change-Id: Id3804fb7913662b8c573f59d84e663561a687b1f
---
 charmhelpers/contrib/charmsupport/nrpe.py  |  23 +-
 charmhelpers/contrib/openstack/ha/utils.py | 115 +++++++---
 charmhelpers/contrib/openstack/utils.py    | 111 +++++++---
 .../contrib/storage/linux/loopback.py      |   6 +-
 charmhelpers/fetch/ubuntu.py               |   8 +
 hooks/percona_hooks.py                     |  80 +++----
 tests/basic_deployment.py                  |   6 +-
 unit_tests/test_percona_hooks.py           | 203 ++++++++----------
 8 files changed, 324 insertions(+), 228 deletions(-)

diff --git a/charmhelpers/contrib/charmsupport/nrpe.py b/charmhelpers/contrib/charmsupport/nrpe.py
index e3d10c1..10d86ac 100644
--- a/charmhelpers/contrib/charmsupport/nrpe.py
+++ b/charmhelpers/contrib/charmsupport/nrpe.py
@@ -416,15 +416,20 @@ def copy_nrpe_checks(nrpe_files_dir=None):
     """
     NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
-    default_nrpe_files_dir = os.path.join(
-        os.getenv('CHARM_DIR'),
-        'hooks',
-        'charmhelpers',
-        'contrib',
-        'openstack',
-        'files')
-    if not nrpe_files_dir:
-        nrpe_files_dir = default_nrpe_files_dir
+    if nrpe_files_dir is None:
+        # determine if "charmhelpers" is in CHARM_DIR or CHARM_DIR/hooks
+        for segment in ['.', 'hooks']:
+            nrpe_files_dir = os.path.abspath(os.path.join(
+                os.getenv('CHARM_DIR'),
+                segment,
+                'charmhelpers',
+                'contrib',
+                'openstack',
+                'files'))
+            if os.path.isdir(nrpe_files_dir):
+                break
+        else:
+            raise RuntimeError("Couldn't find charmhelpers directory")
     if not os.path.exists(NAGIOS_PLUGINS):
         os.makedirs(NAGIOS_PLUGINS)
     for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
diff --git a/charmhelpers/contrib/openstack/ha/utils.py b/charmhelpers/contrib/openstack/ha/utils.py
index add8eb9..01c0010 100644
--- a/charmhelpers/contrib/openstack/ha/utils.py
+++ b/charmhelpers/contrib/openstack/ha/utils.py
@@ -23,6 +23,7 @@
 Helpers for high availability.
 """
 
+import hashlib
 import json
 import re
 
@@ -35,7 +36,6 @@ from charmhelpers.core.hookenv import (
     config,
     status_set,
     DEBUG,
-    WARNING,
 )
 
 from charmhelpers.core.host import (
@@ -63,6 +63,9 @@ JSON_ENCODE_OPTIONS = dict(
     separators=(',', ':'),
 )
 
+VIP_GROUP_NAME = 'grp_{service}_vips'
+DNSHA_GROUP_NAME = 'grp_{service}_hostnames'
+
 
 class DNSHAException(Exception):
     """Raised when an error occurs setting up DNS HA
@@ -124,13 +127,29 @@ def expect_ha():
     return len(ha_related_units) > 0 or config('vip') or config('dns-ha')
 
 
-def generate_ha_relation_data(service):
+def generate_ha_relation_data(service, extra_settings=None):
     """ Generate relation data for ha relation
 
     Based on configuration options and unit interfaces, generate a json
     encoded dict of relation data items for the hacluster relation,
     providing configuration for DNS HA or VIP's + haproxy clone sets.
+
+    Example of supplying additional settings::
+
+        COLO_CONSOLEAUTH = 'inf: res_nova_consoleauth grp_nova_vips'
+        AGENT_CONSOLEAUTH = 'ocf:openstack:nova-consoleauth'
+        AGENT_CA_PARAMS = 'op monitor interval="5s"'
+
+        ha_console_settings = {
+            'colocations': {'vip_consoleauth': COLO_CONSOLEAUTH},
+            'init_services': {'res_nova_consoleauth': 'nova-consoleauth'},
+            'resources': {'res_nova_consoleauth': AGENT_CONSOLEAUTH},
+            'resource_params': {'res_nova_consoleauth': AGENT_CA_PARAMS}}
+        generate_ha_relation_data('nova', extra_settings=ha_console_settings)
+
+    @param service: Name of the service being configured
+    @param extra_settings: Dict of additional resource data
     @returns dict: json encoded data for use with relation_set
     """
     _haproxy_res = 'res_{}_haproxy'.format(service)
@@ -149,6 +168,13 @@ def generate_ha_relation_data(service):
         },
     }
 
+    if extra_settings:
+        for k, v in extra_settings.items():
+            if _relation_data.get(k):
+                _relation_data[k].update(v)
+            else:
+                _relation_data[k] = v
+
     if config('dns-ha'):
         update_hacluster_dns_ha(service, _relation_data)
     else:
@@ -216,7 +242,7 @@ def update_hacluster_dns_ha(service, relation_data,
                 'Informing the ha relation'.format(' '.join(hostname_group)),
                 DEBUG)
         relation_data['groups'] = {
-            'grp_{}_hostnames'.format(service): ' '.join(hostname_group)
+            DNSHA_GROUP_NAME.format(service=service): ' '.join(hostname_group)
         }
     else:
         msg = 'DNS HA: Hostname group has no members.'
@@ -232,40 +258,75 @@ def update_hacluster_vip(service, relation_data):
     """
     cluster_config = get_hacluster_config()
     vip_group = []
+    vips_to_delete = []
     for vip in cluster_config['vip'].split():
         if is_ipv6(vip):
-            res_neutron_vip = 'ocf:heartbeat:IPv6addr'
+            res_vip = 'ocf:heartbeat:IPv6addr'
             vip_params = 'ipv6addr'
         else:
-            res_neutron_vip = 'ocf:heartbeat:IPaddr2'
+            res_vip = 'ocf:heartbeat:IPaddr2'
             vip_params = 'ip'
 
-        iface = (get_iface_for_address(vip) or
-                 config('vip_iface'))
-        netmask = (get_netmask_for_address(vip) or
-                   config('vip_cidr'))
+        iface = get_iface_for_address(vip)
+        netmask = get_netmask_for_address(vip)
+
+        fallback_params = False
+        if iface is None:
+            iface = config('vip_iface')
+            fallback_params = True
+        if netmask is None:
+            netmask = config('vip_cidr')
+            fallback_params = True
 
         if iface is not None:
+            # NOTE(jamespage): Delete old VIP resources
+            # Old style naming encoding iface in name
+            # does not work well in environments where
+            # interface/subnet wiring is not consistent
             vip_key = 'res_{}_{}_vip'.format(service, iface)
-            if vip_key in vip_group:
-                if vip not in relation_data['resource_params'][vip_key]:
-                    vip_key = '{}_{}'.format(vip_key, vip_params)
-                else:
-                    log("Resource '%s' (vip='%s') already exists in "
-                        "vip group - skipping" % (vip_key, vip), WARNING)
-                    continue
+            if vip_key in vips_to_delete:
+                vip_key = '{}_{}'.format(vip_key, vip_params)
+            vips_to_delete.append(vip_key)
+
+            vip_key = 'res_{}_{}_vip'.format(
+                service,
+                hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7])
+
+            relation_data['resources'][vip_key] = res_vip
+            # NOTE(jamespage):
+            # Use option-provided vip params if these were used
+            # instead of auto-detected values
+            if fallback_params:
+                relation_data['resource_params'][vip_key] = (
+                    'params {ip}="{vip}" cidr_netmask="{netmask}" '
+                    'nic="{iface}"'.format(ip=vip_params,
+                                           vip=vip,
+                                           iface=iface,
+                                           netmask=netmask)
+                )
+            else:
+                # NOTE(jamespage):
+                # let heartbeat figure out which interface and
+                # netmask to configure, which works nicely
+                # when network interface naming is not
+                # consistent across units.
+ relation_data['resource_params'][vip_key] = ( + 'params {ip}="{vip}"'.format(ip=vip_params, + vip=vip)) - relation_data['resources'][vip_key] = res_neutron_vip - relation_data['resource_params'][vip_key] = ( - 'params {ip}="{vip}" cidr_netmask="{netmask}" ' - 'nic="{iface}"'.format(ip=vip_params, - vip=vip, - iface=iface, - netmask=netmask) - ) vip_group.append(vip_key) + if vips_to_delete: + try: + relation_data['delete_resources'].extend(vips_to_delete) + except KeyError: + relation_data['delete_resources'] = vips_to_delete + if len(vip_group) >= 1: - relation_data['groups'] = { - 'grp_{}_vips'.format(service): ' '.join(vip_group) - } + key = VIP_GROUP_NAME.format(service=service) + try: + relation_data['groups'][key] = ' '.join(vip_group) + except KeyError: + relation_data['groups'] = { + key: ' '.join(vip_group) + } diff --git a/charmhelpers/contrib/openstack/utils.py b/charmhelpers/contrib/openstack/utils.py index 385b9ef..4e432a2 100644 --- a/charmhelpers/contrib/openstack/utils.py +++ b/charmhelpers/contrib/openstack/utils.py @@ -73,6 +73,8 @@ from charmhelpers.core.host import ( service_running, service_pause, service_resume, + service_stop, + service_start, restart_on_change_helper, ) from charmhelpers.fetch import ( @@ -116,6 +118,7 @@ OPENSTACK_RELEASES = ( 'pike', 'queens', 'rocky', + 'stein', ) UBUNTU_OPENSTACK_RELEASE = OrderedDict([ @@ -134,6 +137,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([ ('artful', 'pike'), ('bionic', 'queens'), ('cosmic', 'rocky'), + ('disco', 'stein'), ]) @@ -153,6 +157,7 @@ OPENSTACK_CODENAMES = OrderedDict([ ('2017.2', 'pike'), ('2018.1', 'queens'), ('2018.2', 'rocky'), + ('2019.1', 'stein'), ]) # The ugly duckling - must list releases oldest to newest @@ -187,6 +192,8 @@ SWIFT_CODENAMES = OrderedDict([ ['2.16.0', '2.17.0']), ('rocky', ['2.18.0', '2.19.0']), + ('stein', + ['2.19.0']), ]) # >= Liberty version->codename mapping @@ -199,6 +206,7 @@ PACKAGE_CODENAMES = { ('16', 'pike'), ('17', 'queens'), ('18', 'rocky'), + ('19', 'stein'), ]), 'neutron-common': OrderedDict([ ('7', 'liberty'), @@ -208,6 +216,7 @@ PACKAGE_CODENAMES = { ('11', 'pike'), ('12', 'queens'), ('13', 'rocky'), + ('14', 'stein'), ]), 'cinder-common': OrderedDict([ ('7', 'liberty'), @@ -217,6 +226,7 @@ PACKAGE_CODENAMES = { ('11', 'pike'), ('12', 'queens'), ('13', 'rocky'), + ('14', 'stein'), ]), 'keystone': OrderedDict([ ('8', 'liberty'), @@ -226,6 +236,7 @@ PACKAGE_CODENAMES = { ('12', 'pike'), ('13', 'queens'), ('14', 'rocky'), + ('15', 'stein'), ]), 'horizon-common': OrderedDict([ ('8', 'liberty'), @@ -235,6 +246,7 @@ PACKAGE_CODENAMES = { ('12', 'pike'), ('13', 'queens'), ('14', 'rocky'), + ('15', 'stein'), ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), @@ -244,6 +256,7 @@ PACKAGE_CODENAMES = { ('9', 'pike'), ('10', 'queens'), ('11', 'rocky'), + ('12', 'stein'), ]), 'heat-common': OrderedDict([ ('5', 'liberty'), @@ -253,6 +266,7 @@ PACKAGE_CODENAMES = { ('9', 'pike'), ('10', 'queens'), ('11', 'rocky'), + ('12', 'stein'), ]), 'glance-common': OrderedDict([ ('11', 'liberty'), @@ -262,6 +276,7 @@ PACKAGE_CODENAMES = { ('15', 'pike'), ('16', 'queens'), ('17', 'rocky'), + ('18', 'stein'), ]), 'openstack-dashboard': OrderedDict([ ('8', 'liberty'), @@ -271,6 +286,7 @@ PACKAGE_CODENAMES = { ('12', 'pike'), ('13', 'queens'), ('14', 'rocky'), + ('15', 'stein'), ]), } @@ -1303,6 +1319,65 @@ def is_unit_paused_set(): return False +def manage_payload_services(action, services=None, charm_func=None): + """Run an action against all services. 
+
+    An optional charm_func() can be called. It should raise an Exception to
+    indicate that the function failed. If it was successful it should return
+    None or an optional message.
+
+    The signature for charm_func is:
+    charm_func() -> message: str
+
+    charm_func() is executed after any services are stopped, if supplied.
+
+    The services object can either be:
+      - None : no services were passed (an empty dict is returned)
+      - a list of strings
+      - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
+      - An array of [{'service': service_name, ...}, ...]
+
+    :param action: Action to run: pause, resume, start or stop.
+    :type action: str
+    :param services: See above
+    :type services: See above
+    :param charm_func: function to run for custom charm handling.
+    :type charm_func: f()
+    :returns: Status boolean and list of messages
+    :rtype: (bool, [])
+    :raises: RuntimeError
+    """
+    actions = {
+        'pause': service_pause,
+        'resume': service_resume,
+        'start': service_start,
+        'stop': service_stop}
+    action = action.lower()
+    if action not in actions.keys():
+        raise RuntimeError(
+            "action: {} must be one of: {}".format(action,
+                                                   ', '.join(actions.keys())))
+    services = _extract_services_list_helper(services)
+    messages = []
+    success = True
+    if services:
+        for service in services.keys():
+            rc = actions[action](service)
+            if not rc:
+                success = False
+                messages.append("{} didn't {} cleanly.".format(service,
+                                                               action))
+    if charm_func:
+        try:
+            message = charm_func()
+            if message:
+                messages.append(message)
+        except Exception as e:
+            success = False
+            messages.append(str(e))
+    return success, messages
+
+
 def pause_unit(assess_status_func, services=None, ports=None,
                charm_func=None):
     """Pause a unit by stopping the services and setting 'unit-paused'
@@ -1333,20 +1408,10 @@ def pause_unit(assess_status_func, services=None, ports=None,
     @returns None
     @raises Exception(message) on an error for action_fail().
     """
-    services = _extract_services_list_helper(services)
-    messages = []
-    if services:
-        for service in services.keys():
-            stopped = service_pause(service)
-            if not stopped:
-                messages.append("{} didn't stop cleanly.".format(service))
-    if charm_func:
-        try:
-            message = charm_func()
-            if message:
-                messages.append(message)
-        except Exception as e:
-            message.append(str(e))
+    _, messages = manage_payload_services(
+        'pause',
+        services=services,
+        charm_func=charm_func)
     set_unit_paused()
     if assess_status_func:
         message = assess_status_func()
@@ -1385,20 +1450,10 @@ def resume_unit(assess_status_func, services=None, ports=None,
     @returns None
     @raises Exception(message) on an error for action_fail().
""" - services = _extract_services_list_helper(services) - messages = [] - if services: - for service in services.keys(): - started = service_resume(service) - if not started: - messages.append("{} didn't start cleanly.".format(service)) - if charm_func: - try: - message = charm_func() - if message: - messages.append(message) - except Exception as e: - message.append(str(e)) + _, messages = manage_payload_services( + 'resume', + services=services, + charm_func=charm_func) clear_unit_paused() if assess_status_func: message = assess_status_func() diff --git a/charmhelpers/contrib/storage/linux/loopback.py b/charmhelpers/contrib/storage/linux/loopback.py index 0dfdae5..82472ff 100644 --- a/charmhelpers/contrib/storage/linux/loopback.py +++ b/charmhelpers/contrib/storage/linux/loopback.py @@ -36,8 +36,10 @@ def loopback_devices(): ''' loopbacks = {} cmd = ['losetup', '-a'] - devs = [d.strip().split(' ') for d in - check_output(cmd).splitlines() if d != ''] + output = check_output(cmd) + if six.PY3: + output = output.decode('utf-8') + devs = [d.strip().split(' ') for d in output.splitlines() if d != ''] for dev, _, f in devs: loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0] return loopbacks diff --git a/charmhelpers/fetch/ubuntu.py b/charmhelpers/fetch/ubuntu.py index c7ad128..8a5cadf 100644 --- a/charmhelpers/fetch/ubuntu.py +++ b/charmhelpers/fetch/ubuntu.py @@ -166,6 +166,14 @@ CLOUD_ARCHIVE_POCKETS = { 'rocky/proposed': 'bionic-proposed/rocky', 'bionic-rocky/proposed': 'bionic-proposed/rocky', 'bionic-proposed/rocky': 'bionic-proposed/rocky', + # Stein + 'stein': 'bionic-updates/stein', + 'bionic-stein': 'bionic-updates/stein', + 'bionic-stein/updates': 'bionic-updates/stein', + 'bionic-updates/stein': 'bionic-updates/stein', + 'stein/proposed': 'bionic-proposed/stein', + 'bionic-stein/proposed': 'bionic-proposed/stein', + 'bionic-proposed/stein': 'bionic-proposed/stein', } diff --git a/hooks/percona_hooks.py b/hooks/percona_hooks.py index 480e47b..75780ff 100755 --- a/hooks/percona_hooks.py +++ b/hooks/percona_hooks.py @@ -23,7 +23,6 @@ from charmhelpers.core.hookenv import ( WARNING, is_leader, network_get_primary_address, - charm_name, leader_get, leader_set, open_port, @@ -54,13 +53,10 @@ from charmhelpers.contrib.database.mysql import ( ) from charmhelpers.contrib.hahelpers.cluster import ( is_clustered, - get_hacluster_config, ) from charmhelpers.payload.execd import execd_preinstall from charmhelpers.contrib.network.ip import ( get_address_in_network, - get_iface_for_address, - get_netmask_for_address, get_ipv6_addr, is_address_in_network, resolve_network_cidr, @@ -77,7 +73,11 @@ from charmhelpers.contrib.openstack.utils import ( clear_unit_paused, ) from charmhelpers.contrib.openstack.ha.utils import ( - update_dns_ha_resource_params, + DNSHA_GROUP_NAME, + JSON_ENCODE_OPTIONS, + VIP_GROUP_NAME, + update_hacluster_vip, + update_hacluster_dns_ha, ) from percona_utils import ( @@ -872,60 +872,36 @@ def shared_db_changed(relation_id=None, unit=None): @hooks.hook('ha-relation-joined') def ha_relation_joined(relation_id=None): - cluster_config = get_hacluster_config() sstpsswd = sst_password() - resources = {'res_mysql_monitor': 'ocf:percona:mysql_monitor'} - resource_params = {'res_mysql_monitor': - RES_MONITOR_PARAMS % {'sstpass': sstpsswd}} + _relation_data = { + 'resources': { + 'res_mysql_monitor': 'ocf:percona:mysql_monitor'}, + 'resource_params': { + 'res_mysql_monitor': RES_MONITOR_PARAMS % {'sstpass': sstpsswd}}, + 'clones': { + 'cl_mysql_monitor': 
+                'res_mysql_monitor meta interleave=true'},
+        'delete_resources': ['loc_percona_cluster', 'grp_percona_cluster',
+                             'res_mysql_vip']
+    }
 
     if config('dns-ha'):
-        update_dns_ha_resource_params(relation_id=relation_id,
-                                      resources=resources,
-                                      resource_params=resource_params)
-        group_name = 'grp_{}_hostnames'.format(charm_name())
-        groups = {group_name: 'res_{}_access_hostname'.format(charm_name())}
-
+        update_hacluster_dns_ha('mysql', _relation_data)
+        group_name = DNSHA_GROUP_NAME.format(service='mysql')
     else:
-        vip_iface = (get_iface_for_address(cluster_config['vip']) or
-                     config('vip_iface'))
-        vip_cidr = (get_netmask_for_address(cluster_config['vip']) or
-                    config('vip_cidr'))
+        update_hacluster_vip('mysql', _relation_data)
+        group_name = VIP_GROUP_NAME.format(service='mysql')
 
-        if config('prefer-ipv6'):
-            res_mysql_vip = 'ocf:heartbeat:IPv6addr'
-            vip_params = 'params ipv6addr="%s" cidr_netmask="%s" nic="%s"' % \
-                         (cluster_config['vip'], vip_cidr, vip_iface)
-        else:
-            res_mysql_vip = 'ocf:heartbeat:IPaddr2'
-            vip_params = 'params ip="%s" cidr_netmask="%s" nic="%s"' % \
-                         (cluster_config['vip'], vip_cidr, vip_iface)
-
-        resources['res_mysql_vip'] = res_mysql_vip
-
-        resource_params['res_mysql_vip'] = vip_params
-
-        group_name = 'grp_percona_cluster'
-        groups = {group_name: 'res_mysql_vip'}
-
-    clones = {'cl_mysql_monitor': 'res_mysql_monitor meta interleave=true'}
-
-    colocations = {'colo_percona_cluster': 'inf: {} cl_mysql_monitor'
-                   ''.format(group_name)}
-
-    locations = {'loc_percona_cluster':
-                 '{} rule inf: writable eq 1'
-                 ''.format(group_name)}
+    _relation_data['locations'] = {
+        'loc_mysql': '{} rule inf: writable eq 1'.format(group_name)}
+    _relation_data['colocations'] = {
+        'colo_mysql': 'inf: {} cl_mysql_monitor'.format(group_name)}
 
+    settings = {
+        'json_{}'.format(k): json.dumps(v, **JSON_ENCODE_OPTIONS)
+        for k, v in _relation_data.items() if v
+    }
     for rel_id in relation_ids('ha'):
-        relation_set(relation_id=rel_id,
-                     corosync_bindiface=cluster_config['ha-bindiface'],
-                     corosync_mcastport=cluster_config['ha-mcastport'],
-                     resources=resources,
-                     resource_params=resource_params,
-                     groups=groups,
-                     clones=clones,
-                     colocations=colocations,
-                     locations=locations)
+        relation_set(relation_id=rel_id, **settings)
 
 
 @hooks.hook('ha-relation-changed')
diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py
index 5abd9a1..1f17920 100644
--- a/tests/basic_deployment.py
+++ b/tests/basic_deployment.py
@@ -1,6 +1,7 @@
 # basic deployment test class for percona-xtradb-cluster
 
 import amulet
+import hashlib
 import re
 import os
 import socket
@@ -132,7 +133,10 @@ class BasicDeployment(OpenStackAmuletDeployment):
         _, code = self.master_unit.run('sudo crm_verify --live-check')
         assert code == 0, "'crm_verify --live-check' failed"
 
-        resources = ['res_mysql_vip']
+        vip_key = 'res_mysql_{}_vip'.format(
+            hashlib.sha1(self.vip.encode('UTF-8')).hexdigest()[:7])
+        resources = [vip_key]
+
         resources += ['res_mysql_monitor:%d' % m for m in range(self.units)]
diff --git a/unit_tests/test_percona_hooks.py b/unit_tests/test_percona_hooks.py
index b06f2fd..3e6f1b4 100644
--- a/unit_tests/test_percona_hooks.py
+++ b/unit_tests/test_percona_hooks.py
@@ -1,8 +1,11 @@
+import json
 import mock
 import shutil
 import sys
 import tempfile
 
+import charmhelpers.contrib.openstack.ha.utils as ch_ha_utils
+
 from test_utils import CharmTestCase
 
 sys.modules['MySQLdb'] = mock.Mock()
@@ -21,8 +24,6 @@ TO_PATCH = ['log', 'config',
             'relation_ids',
             'relation_set',
             'update_nrpe_config',
-            'get_iface_for_address',
-            'get_netmask_for_address',
             'is_bootstrapped',
             'network_get_primary_address',
             'resolve_network_cidr',
@@ -30,8 +31,8 @@ TO_PATCH = ['log', 'config',
             'resolve_hostname_to_ip',
             'is_clustered',
             'get_ipv6_addr',
-            'get_hacluster_config',
-            'update_dns_ha_resource_params',
+            'update_hacluster_dns_ha',
+            'update_hacluster_vip',
             'sst_password',
             'seeded',
             'is_leader',
@@ -113,117 +114,100 @@ class TestHARelation(CharmTestCase):
         self.network_get_primary_address.side_effect = NotImplementedError
         self.sst_password.return_value = 'ubuntu'
 
-    def test_resources(self):
-        self.relation_ids.return_value = ['ha:1']
-        password = 'ubuntu'
-        helper = mock.Mock()
-        attrs = {'get_mysql_password.return_value': password}
-        helper.configure_mock(**attrs)
-        self.get_db_helper.return_value = helper
-        self.get_netmask_for_address.return_value = None
-        self.get_iface_for_address.return_value = None
-        self.test_config.set('vip', '10.0.3.3')
-        self.get_hacluster_config.return_value = {
-            'vip': '10.0.3.3',
-            'ha-bindiface': 'eth0',
-            'ha-mcastport': 5490,
-        }
+    def test_ha_relation_joined(self):
+        # dns-ha: False
+        self.config.return_value = False
+        self.relation_ids.return_value = ['rid:23']
 
-        def f(k):
-            return self.test_config.get(k)
-
-        self.config.side_effect = f
+        def _add_vip_info(svc, rel_info):
+            rel_info['groups'] = {
+                'grp_mysql_vips': 'res_mysql_1e39e82_vip'}
 
+        self.update_hacluster_vip.side_effect = _add_vip_info
         hooks.ha_relation_joined()
-
-        resources = {'res_mysql_vip': 'ocf:heartbeat:IPaddr2',
-                     'res_mysql_monitor': 'ocf:percona:mysql_monitor'}
-        resource_params = {'res_mysql_vip': ('params ip="10.0.3.3" '
-                                             'cidr_netmask="24" '
-                                             'nic="eth0"'),
-                           'res_mysql_monitor':
-                           hooks.RES_MONITOR_PARAMS % {'sstpass': 'ubuntu'}}
-        groups = {'grp_percona_cluster': 'res_mysql_vip'}
-
-        clones = {'cl_mysql_monitor': 'res_mysql_monitor meta interleave=true'}
-
-        colocations = {'colo_percona_cluster': 'inf: grp_percona_cluster cl_mysql_monitor'}  # noqa
-
-        locations = {'loc_percona_cluster':
-                     'grp_percona_cluster rule inf: writable eq 1'}
-
-        self.relation_set.assert_called_with(
-            relation_id='ha:1', corosync_bindiface=f('ha-bindiface'),
-            corosync_mcastport=f('ha-mcastport'), resources=resources,
-            resource_params=resource_params, groups=groups,
-            clones=clones, colocations=colocations, locations=locations)
-
-    def test_resource_params_vip_cidr_iface_autodetection(self):
-        """
-        Auto-detected values for vip_cidr and vip_iface are used to configure
-        VIPs, even when explicit config options are provided.
- """ - self.relation_ids.return_value = ['ha:1'] - helper = mock.Mock() - self.get_db_helper.return_value = helper - self.get_netmask_for_address.return_value = '20' - self.get_iface_for_address.return_value = 'eth1' - self.test_config.set('vip', '10.0.3.3') - self.test_config.set('vip_cidr', '16') - self.test_config.set('vip_iface', 'eth0') - self.get_hacluster_config.return_value = { - 'vip': '10.0.3.3', - 'ha-bindiface': 'eth0', - 'ha-mcastport': 5490, + base_settings = { + 'clones': { + 'cl_mysql_monitor': ( + 'res_mysql_monitor meta interleave=true')}, + 'colocations': { + 'colo_mysql': ( + 'inf: grp_mysql_vips ' + 'cl_mysql_monitor')}, + 'resource_params': { + 'res_mysql_monitor': ( + 'params user="sstuser" ' + 'password="ubuntu" ' + 'pid="/var/run/mysqld/mysqld.pid" ' + 'socket="/var/run/mysqld/mysqld.sock" ' + 'max_slave_lag="5" ' + 'cluster_type="pxc" ' + 'op monitor interval="1s" ' + 'timeout="30s" ' + 'OCF_CHECK_LEVEL="1"')}, + 'locations': { + 'loc_mysql': ( + 'grp_mysql_vips ' + 'rule inf: writable eq 1')}, + 'resources': { + 'res_mysql_monitor': 'ocf:percona:mysql_monitor'}, + 'delete_resources': ['loc_percona_cluster', 'grp_percona_cluster', + 'res_mysql_vip'], + 'groups': { + 'grp_mysql_vips': 'res_mysql_1e39e82_vip'}} + self.update_hacluster_vip.assert_called_once_with( + 'mysql', + base_settings) + settings = { + 'json_{}'.format(k): json.dumps(v, + **ch_ha_utils.JSON_ENCODE_OPTIONS) + for k, v in base_settings.items() if v } + self.relation_set.assert_called_once_with( + relation_id='rid:23', + **settings) - def f(k): - return self.test_config.get(k) - - self.config.side_effect = f + def test_ha_relation_joined_dnsha(self): + # dns-ha: False + self.config.return_value = True + self.relation_ids.return_value = ['rid:23'] hooks.ha_relation_joined() - - resource_params = {'res_mysql_vip': ('params ip="10.0.3.3" ' - 'cidr_netmask="20" ' - 'nic="eth1"'), - 'res_mysql_monitor': - hooks.RES_MONITOR_PARAMS % {'sstpass': 'ubuntu'}} - - call_args, call_kwargs = self.relation_set.call_args - self.assertEqual(resource_params, call_kwargs['resource_params']) - - def test_resource_params_no_vip_cidr_iface_autodetection(self): - """ - When autodetecting vip_cidr and vip_iface fails, values from - vip_cidr and vip_iface config options are used instead. 
- """ - self.relation_ids.return_value = ['ha:1'] - helper = mock.Mock() - self.get_db_helper.return_value = helper - self.get_netmask_for_address.return_value = None - self.get_iface_for_address.return_value = None - self.test_config.set('vip', '10.0.3.3') - self.test_config.set('vip_cidr', '16') - self.test_config.set('vip_iface', 'eth1') - self.get_hacluster_config.return_value = { - 'vip': '10.0.3.3', - 'ha-bindiface': 'eth1', - 'ha-mcastport': 5490, + base_settings = { + 'clones': { + 'cl_mysql_monitor': ( + 'res_mysql_monitor meta interleave=true')}, + 'colocations': { + 'colo_mysql': ( + 'inf: grp_mysql_hostnames ' + 'cl_mysql_monitor')}, + 'resource_params': { + 'res_mysql_monitor': ( + 'params user="sstuser" ' + 'password="ubuntu" ' + 'pid="/var/run/mysqld/mysqld.pid" ' + 'socket="/var/run/mysqld/mysqld.sock" ' + 'max_slave_lag="5" ' + 'cluster_type="pxc" ' + 'op monitor interval="1s" ' + 'timeout="30s" ' + 'OCF_CHECK_LEVEL="1"')}, + 'locations': { + 'loc_mysql': ( + 'grp_mysql_hostnames ' + 'rule inf: writable eq 1')}, + 'delete_resources': ['loc_percona_cluster', 'grp_percona_cluster', + 'res_mysql_vip'], + 'resources': { + 'res_mysql_monitor': 'ocf:percona:mysql_monitor'}} + self.update_hacluster_dns_ha.assert_called_once_with( + 'mysql', + base_settings) + settings = { + 'json_{}'.format(k): json.dumps(v, + **ch_ha_utils.JSON_ENCODE_OPTIONS) + for k, v in base_settings.items() if v } - - def f(k): - return self.test_config.get(k) - - self.config.side_effect = f - hooks.ha_relation_joined() - - resource_params = {'res_mysql_vip': ('params ip="10.0.3.3" ' - 'cidr_netmask="16" ' - 'nic="eth1"'), - 'res_mysql_monitor': - hooks.RES_MONITOR_PARAMS % {'sstpass': 'ubuntu'}} - - call_args, call_kwargs = self.relation_set.call_args - self.assertEqual(resource_params, call_kwargs['resource_params']) + self.relation_set.assert_called_once_with( + relation_id='rid:23', + **settings) class TestHostResolution(CharmTestCase):