diff --git a/src/config.yaml b/src/config.yaml index 11b6f8a..178f2be 100644 --- a/src/config.yaml +++ b/src/config.yaml @@ -46,3 +46,15 @@ options: in a situation that upstream DNS servers do not support DNSSEC, and BIND9 reports "Unable to fetch DNSKEY". For production deployments, it's encouraged to keep DNSSEC enabled. + service_ips: + default: "" + type: string + description: | + Service IPs are a list of Virtual IPs that will be assigned to the + designate-bind units. This option accepts comma separated list of + IPv4 or IPv6 addresses that belong to at least one directly + connected network. + This option also requires that relation with subordinate hacluster + unit is created, otherwise the designate-bind units go into the + blocked state until the hacluster relation is made, or this option + is unconfigured. diff --git a/src/layer.yaml b/src/layer.yaml index 5199fe4..bf58223 100644 --- a/src/layer.yaml +++ b/src/layer.yaml @@ -1,4 +1,8 @@ -includes: ['layer:openstack', 'interface:bind-rndc', 'interface:openstack-ha'] +includes: + - 'layer:openstack' + - 'interface:bind-rndc' + - 'interface:openstack-ha' + - 'interface:hacluster' options: basic: use_venv: True diff --git a/src/lib/charm/openstack/designate_bind.py b/src/lib/charm/openstack/designate_bind.py index ce888c4..f34e9ec 100644 --- a/src/lib/charm/openstack/designate_bind.py +++ b/src/lib/charm/openstack/designate_bind.py @@ -20,6 +20,9 @@ import subprocess import hmac import hashlib import base64 +import json + +from ipaddress import IPv4Address, ip_address import charms_openstack.charm as openstack_charm import charms_openstack.adapters as adapters @@ -28,7 +31,9 @@ import charmhelpers.core.decorators as ch_decorators import charmhelpers.core.hookenv as hookenv import charmhelpers.core.host as host +from charmhelpers.core.hookenv import WORKLOAD_STATES from charmhelpers.contrib.network import ip as ch_ip +from relations.hacluster.interface_hacluster.common import CRM LEADERDB_SECRET_KEY = 
'rndc_key' @@ -37,6 +42,9 @@ LEADERDB_SYNC_TIME_KEY = 'sync_time' CLUSTER_SYNC_KEY = 'sync_request' WWW_DIR = '/var/www/html' ZONE_DIR = '/var/cache/bind' +SERVICE_IP_PREFIX = "service_ip_" +COLOCATION_NAME = "service_ip_colocation" +AWAITING_HACLUSTER_FLAG = "service_ip.waiting_for_hacluster" def install(): @@ -151,6 +159,107 @@ def assess_status(): DesignateBindCharm.singleton.assess_status() +def parse_service_ip_config(): + """Parse "service_ips" config option value into IP address objects. + + Expected format is comma separated IPv4 or IPv6 addresses. + + :raises: ValueError if "service_ips" is not in expected format. + :return: List of IPv4 or IPv6 addresses + :rtype: list[IPv4Address | ipaddress.IPv6Address] + """ + raw_service_ips = hookenv.config('service_ips') + service_ips = [] + for ip_addr in raw_service_ips.split(","): + if ip_addr: + service_ips.append(ip_address(ip_addr.strip())) + return service_ips + + +def remove_service_ips(exclude=None): + """Remove all service IPs except those stated in the `exclude` parameter. + + :param exclude: List of IPs that should be kept alive. + :type exclude: list[IPv4Address | ipaddress.IPv6Address] + :return: None + """ + if exclude is None: + exclude = [] + hookenv.log('Clearing Service IPs.', hookenv.INFO) + ha_cluster = reactive.endpoint_from_flag('ha.connected') + + if not ha_cluster: + hookenv.log('No relation with "ha-cluster" charm. Nothing to clear.', + hookenv.WARNING) + return + + managed_resources = json.loads(ha_cluster.get_local("json_resources") or + "{}") + excluded_ips = [str(addr) for addr in exclude] + for resource in managed_resources: + if resource.startswith(SERVICE_IP_PREFIX): + value = resource[len(SERVICE_IP_PREFIX):] + if value not in excluded_ips: + ha_cluster.delete_resource(resource) + + ha_cluster.remove_colocation(COLOCATION_NAME) + ha_cluster.bind_resources() + + +def add_service_ips(service_ips): + """Configure (virtual) service IPs on the unit. 
+ + These IPs and their assignment are handled by corosync (hacluster charm) + and by default they will spread to every unit of designate-bind. If there + are more IPs configured than there are units, some units will end up with + more than one IP. + + :param service_ips: List of Service IPs to configure + :type service_ips: list[IPv4Address | ipaddress.IPv6Address] + :return: None + """ + ha_cluster = reactive.endpoint_from_flag('ha.connected') + + if not service_ips: + hookenv.log('Skipping Service IP configuration. No IP addresses ' + 'provided', hookenv.DEBUG) + reactive.clear_flag(AWAITING_HACLUSTER_FLAG) + return + if not ha_cluster: + hookenv.log('Failed to configure service IPs. "ha-cluster" relation ' + 'missing', hookenv.ERROR) + reactive.set_flag(AWAITING_HACLUSTER_FLAG) + return + + crm = CRM() + all_ip_resources = [] + + for ip in service_ips: + resource_name = "{}{}".format(SERVICE_IP_PREFIX, ip) + addr_type = "IPaddr2" if isinstance(ip, IPv4Address) else "IPv6addr" + all_ip_resources.append(resource_name) + crm.primitive(resource_name, 'ocf:heartbeat:{}'.format(addr_type), + params='ip={}'.format(ip), op='monitor interval="10s"') + + crm.colocation(COLOCATION_NAME, -10, *all_ip_resources) + ha_cluster.manage_resources(crm) + + +def reconfigure_service_ips(): + """Configure IPs on units according to 'service_ips' config option.""" + hookenv.log("Configuring Service IPs: {}".format( + hookenv.config('service_ips')), hookenv.INFO) + try: + service_ips = parse_service_ip_config() + except ValueError: + pass + else: + remove_service_ips(exclude=service_ips) + add_service_ips(service_ips) + finally: + assess_status() + + @adapters.adapter_property("dns-backend") def dns_egress_subnets(dns_backend): """Generate list of CIDR's for remote units @@ -508,3 +617,32 @@ class DesignateBindCharm(openstack_charm.OpenStackCharm): if not os.path.isfile(apparmor_file): open(apparmor_file, 'w').close() host.service_reload('apparmor') + + def _assess_status(self): + 
"""Overall unit status assessment. + + This override appends additional status checks related to the + service_ip configuration. + """ + super()._assess_status() + # Subordinate charm "ha-cluster" is needed for management of Service + # IPs. If service IPs are configured but there's no relation with + # "hacluster", put unit into blocked state. + try: + parse_service_ip_config() + except ValueError: + bad_format_msg = ('Config option "service_ips" does not have ' + 'an expected format.') + hookenv.status_set(WORKLOAD_STATES.BLOCKED, bad_format_msg) + return + + if reactive.is_flag_set(AWAITING_HACLUSTER_FLAG): + hookenv.log('Config option "service_ips" is set but relation with ' + '"hacluster" is missing. Either add subordinate ' + '"hacluster" charm or unconfigure "service_ips" ' + 'option', + hookenv.ERROR) + hookenv.status_set(WORKLOAD_STATES.BLOCKED, 'Failed to configure ' + '"service_ips", ' + 'hacluster relation is' + ' missing.') diff --git a/src/metadata.yaml b/src/metadata.yaml index c1951ee..ef4761a 100644 --- a/src/metadata.yaml +++ b/src/metadata.yaml @@ -23,3 +23,6 @@ provides: peers: cluster: interface: openstack-ha +requires: + ha: + interface: hacluster diff --git a/src/reactive/designate_bind_handlers.py b/src/reactive/designate_bind_handlers.py index d889abb..b5fc5da 100644 --- a/src/reactive/designate_bind_handlers.py +++ b/src/reactive/designate_bind_handlers.py @@ -112,3 +112,27 @@ def process_sync_requests(hacluster): @reactive.when('zones.initialised') def assess_status(): designate_bind.assess_status() + + +@reactive.when('config.changed.service_ips') +def service_ips_changed(): + """Reconfigure service IPs on the unit.""" + designate_bind.reconfigure_service_ips() + + +@reactive.when('ha.connected') +def hacluster_connected(_): + """Check if service IPs are awaiting configuration via hacluster.""" + if reactive.is_flag_set(designate_bind.AWAITING_HACLUSTER_FLAG): + hookenv.log('hacluster connected, configuring Service IPs', + hookenv.INFO) 
+ designate_bind.reconfigure_service_ips() + reactive.clear_flag(designate_bind.AWAITING_HACLUSTER_FLAG) + + +@reactive.when('ha-relation-departed') +def hacluster_departed(_): + """Set blocked state if hacluster leaves and service_ips are configured.""" + if hookenv.config('service_ips'): + reactive.set_flag(designate_bind.AWAITING_HACLUSTER_FLAG) + designate_bind.assess_status() diff --git a/src/tests/bundles/focal-xena.yaml b/src/tests/bundles/focal-xena.yaml index bec9735..7ddedd1 100644 --- a/src/tests/bundles/focal-xena.yaml +++ b/src/tests/bundles/focal-xena.yaml @@ -101,6 +101,11 @@ applications: nrpe: charm: cs:nrpe + designate-bind-hacluster: + charm: cs:hacluster + options: + cluster_count: 1 + relations: - - 'keystone:shared-db' - 'keystone-mysql-router:shared-db' @@ -140,3 +145,6 @@ relations: - - 'designate:nrpe-external-master' - 'nrpe:nrpe-external-master' + + - - 'designate-bind' + - 'designate-bind-hacluster' diff --git a/src/tests/bundles/focal-yoga.yaml b/src/tests/bundles/focal-yoga.yaml index 1b543e9..15e3490 100644 --- a/src/tests/bundles/focal-yoga.yaml +++ b/src/tests/bundles/focal-yoga.yaml @@ -101,6 +101,11 @@ applications: nrpe: charm: cs:nrpe + designate-bind-hacluster: + charm: cs:hacluster + options: + cluster_count: 1 + relations: - - 'keystone:shared-db' - 'keystone-mysql-router:shared-db' @@ -140,3 +145,6 @@ relations: - - 'designate:nrpe-external-master' - 'nrpe:nrpe-external-master' + + - - 'designate-bind' + - 'designate-bind-hacluster' diff --git a/src/tests/bundles/impish-xena.yaml b/src/tests/bundles/impish-xena.yaml index 64fffe6..3952fcb 100644 --- a/src/tests/bundles/impish-xena.yaml +++ b/src/tests/bundles/impish-xena.yaml @@ -102,6 +102,11 @@ applications: charm: cs:nrpe series: focal + designate-bind-hacluster: + charm: cs:hacluster + options: + cluster_count: 1 + relations: - - 'keystone:shared-db' - 'keystone-mysql-router:shared-db' @@ -141,3 +146,6 @@ relations: - - 'designate:nrpe-external-master' - 
'nrpe:nrpe-external-master' + + - - 'designate-bind' + - 'designate-bind-hacluster' diff --git a/src/tests/bundles/jammy-yoga.yaml b/src/tests/bundles/jammy-yoga.yaml index 5ae7900..7bb80d9 100644 --- a/src/tests/bundles/jammy-yoga.yaml +++ b/src/tests/bundles/jammy-yoga.yaml @@ -102,6 +102,11 @@ applications: charm: cs:nrpe series: focal + designate-bind-hacluster: + charm: cs:hacluster + options: + cluster_count: 1 + relations: - - 'keystone:shared-db' - 'keystone-mysql-router:shared-db' @@ -141,3 +146,6 @@ relations: - - 'designate:nrpe-external-master' - 'nrpe:nrpe-external-master' + + - - 'designate-bind' + - 'designate-bind-hacluster' diff --git a/src/tests/tests.yaml b/src/tests/tests.yaml index 5eb4525..9fa550c 100644 --- a/src/tests/tests.yaml +++ b/src/tests/tests.yaml @@ -13,6 +13,7 @@ dev_bundles: tests: - zaza.openstack.charm_tests.designate.tests.DesignateTests + - zaza.openstack.charm_tests.designate_bind.tests.DesignateBindServiceIPsTest target_deploy_status: nrpe: diff --git a/unit_tests/test_designate_bind_handlers.py b/unit_tests/test_designate_bind_handlers.py index c9654f6..70208eb 100644 --- a/unit_tests/test_designate_bind_handlers.py +++ b/unit_tests/test_designate_bind_handlers.py @@ -15,6 +15,12 @@ import unittest from unittest import mock +import sys + +# Modules imported from other interfaces/layers need to be mocked +sys.modules[ + 'relations.hacluster.interface_hacluster.common' +] = mock.MagicMock() import reactive.designate_bind_handlers as handlers @@ -120,6 +126,9 @@ class TestDesignateHandlers(unittest.TestCase): ('zones.initialised', ), ], 'assess_status': [('zones.initialised', )], + 'service_ips_changed': [('config.changed.service_ips', )], + 'hacluster_connected': [('ha.connected', )], + 'hacluster_departed': [('ha-relation-departed',)], } when_not_patterns = { 'install_packages': [('installed', )], @@ -236,3 +245,42 @@ class TestDesignateHandlers(unittest.TestCase): self.is_leader.return_value = True 
handlers.process_sync_requests('hacluster') + self.process_requests.assert_called_once_with('hacluster') + + def test_configure_service_ips(self): + """Test that change of 'service_ips' triggers reconfiguration.""" + self.patch(handlers.designate_bind, 'reconfigure_service_ips') + + handlers.service_ips_changed() + + handlers.designate_bind.reconfigure_service_ips.assert_called_once() + + def test_hacluster_connected_reconfigure_service_ips(self): + """Test that hacluster connect hook configures 'service_ips' on join. + + When relation with hacluster is joined and 'service_ips' are waiting + for configuration, 'reconfigure_service_ips' should be triggered. + """ + self.patch(handlers.designate_bind, 'reconfigure_service_ips') + self.patch(handlers.reactive, 'is_flag_set', True) + self.patch(handlers.reactive, 'clear_flag') + + handlers.hacluster_connected(None) + + handlers.designate_bind.reconfigure_service_ips.assert_called_once() + handlers.reactive.clear_flag.assert_called_once_with( + handlers.designate_bind.AWAITING_HACLUSTER_FLAG + ) + + def test_hacluster_connect_no_service_ips(self): + """Test that hacluster connect hook passes if there are no IPs. + + If there are no service_ips configured when the hacluster joins, + this hook should not execute anything. 
+ """ + self.patch(handlers.designate_bind, 'reconfigure_service_ips') + self.patch(handlers.reactive, 'is_flag_set', False) + self.patch(handlers.reactive, 'clear_flag') + + handlers.hacluster_connected(None) + + handlers.designate_bind.reconfigure_service_ips.assert_not_called() diff --git a/unit_tests/test_lib_charm_openstack_designate_bind.py b/unit_tests/test_lib_charm_openstack_designate_bind.py index ad0fc4b..0f10973 100644 --- a/unit_tests/test_lib_charm_openstack_designate_bind.py +++ b/unit_tests/test_lib_charm_openstack_designate_bind.py @@ -14,6 +14,14 @@ from unittest import mock +import sys + +from ipaddress import IPv4Address, IPv6Address + +# Modules imported from other interfaces/layers need to be mocked +sys.modules[ + 'relations.hacluster.interface_hacluster.common' +] = mock.MagicMock() import charms_openstack.test_utils as test_utils @@ -141,6 +149,123 @@ class TestOpenStackDesignateBind(Helper): self.render_with_interfaces.assert_called_once_with('interface_list') +class TestServiceIPFunctions(Helper): + """Collection of tests for functions doing 'service_ip' configuration.""" + + def test_parse_service_ip_config(self): + """Test function parsing 'service_ips' config option.""" + service_ips = "10.0.0.1, 10.0.0.2, 2001:db8::2:1" + + with mock.patch.object(designate_bind.hookenv, 'config') as config: + config.return_value = service_ips + parsed_ips = designate_bind.parse_service_ip_config() + + expected_output = [ + IPv4Address("10.0.0.1"), + IPv4Address("10.0.0.2"), + IPv6Address("2001:db8::2:1") + ] + + self.assertEqual(expected_output, parsed_ips) + + def test_remove_service_ips(self): + """Test removing already configured Service IPs.""" + ip_to_remove = IPv4Address("10.0.0.1") + ip_to_retain = IPv4Address("10.0.0.2") + configured_resources = '["{0}{1}", "{0}{2}"]'.format( + designate_bind.SERVICE_IP_PREFIX, + ip_to_remove, + ip_to_retain + ) + ha_cluster = mock.MagicMock() + ha_cluster.get_local.return_value = configured_resources + + with 
mock.patch.object(designate_bind.reactive, 'endpoint_from_flag', + return_value=ha_cluster): + designate_bind.remove_service_ips(exclude=[ip_to_retain]) + + ha_cluster.delete_resource.assert_called_once_with("{}{}".format( + designate_bind.SERVICE_IP_PREFIX, ip_to_remove + )) + ha_cluster.remove_colocation.assert_called_once_with( + designate_bind.COLOCATION_NAME + ) + ha_cluster.bind_resources.assert_called_once_with() + + def test_remove_service_ips_no_hacluster(self): + """Test that warning is logged if there's no hacluster.""" + with mock.patch.object(designate_bind.reactive, 'endpoint_from_flag', + return_value=None): + designate_bind.remove_service_ips() + + expected_message = ('No relation with "ha-cluster" charm. Nothing to ' + 'clear.') + expected_level = designate_bind.hookenv.WARNING + designate_bind.hookenv.log.assert_called_with(expected_message, + expected_level) + + def test_add_service_ips(self): + """Test configuring Service IP via hacluster resources.""" + ha_cluster_mock = mock.MagicMock() + crm_mock = mock.MagicMock() + crm_patch = mock.patch.object(designate_bind, 'CRM', + return_value=crm_mock) + crm_patch.start() + + add_ips = [IPv4Address("10.0.0.1"), IPv6Address("2001:db8::2:1")] + expected_primitive_calls = [] + all_ip_resources = [] + for ip_ in add_ips: + resource = "{}{}".format(designate_bind.SERVICE_IP_PREFIX, ip_) + ip_type = "IPaddr2" if isinstance(ip_, IPv4Address) else "IPv6addr" + expected_primitive_calls.append(mock.call( + resource, + "ocf:heartbeat:{}".format(ip_type), + params="ip={}".format(ip_), + op='monitor interval="10s"' + )) + all_ip_resources.append(resource) + + with mock.patch.object(designate_bind.reactive, 'endpoint_from_flag', + return_value=ha_cluster_mock): + designate_bind.add_service_ips(add_ips) + + crm_mock.primitive.assert_has_calls(expected_primitive_calls) + crm_mock.colocation.assert_called_once_with( + designate_bind.COLOCATION_NAME, + -10, + *all_ip_resources + ) + 
ha_cluster_mock.manage_resources.assert_called_once_with(crm_mock) + + crm_patch.stop() + + def test_add_service_ips_no_config(self): + """Test that function passes when there are no IPs to configure.""" + ha_cluster_mock = mock.MagicMock() + + with mock.patch.object(designate_bind.reactive, 'endpoint_from_flag', + return_value=ha_cluster_mock): + designate_bind.add_service_ips([]) + + ha_cluster_mock.manage_resources.assert_not_called() + # Assert that we also clear flag that signals that service_ips config + # option is configured and charm is waiting for hacluster relation. + designate_bind.reactive.clear_flag.assert_called_with( + designate_bind.AWAITING_HACLUSTER_FLAG + ) + + def test_add_service_ips_no_hacluster(self): + """Test that function sets correct flag if it's missing hacluster.""" + with mock.patch.object(designate_bind.reactive, 'endpoint_from_flag', + return_value=None): + designate_bind.add_service_ips([IPv4Address("10.0.0.1")]) + + designate_bind.reactive.set_flag.assert_called_with( + designate_bind.AWAITING_HACLUSTER_FLAG + ) + + class TestEgressSubnets(Helper): def test_egress_subnets(self): @@ -481,3 +606,56 @@ class TestDesignateBindCharm(Helper): bob.assert_called_once_with( '/etc/apparmor.d/disable/usr.sbin.named', 'w') + + def test_asses_status_passes(self): + """Test scenario where _assess_status passes without blocking charm.""" + self.patch_object(designate_bind.openstack_charm.OpenStackCharm, + '_assess_status') + self.patch_object(designate_bind, 'parse_service_ip_config') + designate_bind.reactive.is_flag_set.return_value = False + + charm_ = designate_bind.DesignateBindCharm() + charm_._assess_status() + + designate_bind.parse_service_ip_config.assert_called_once_with() + designate_bind.hookenv.status_set.assert_not_called() + + def test_assess_status_bad_ip_config(self): + """Test that unit is blocked if 'service_ips' config has bad format.""" + self.patch_object(designate_bind.openstack_charm.OpenStackCharm, + '_assess_status') + 
self.patch_object(designate_bind, 'parse_service_ip_config') + designate_bind.parse_service_ip_config.side_effect = ValueError + + charm_ = designate_bind.DesignateBindCharm() + charm_._assess_status() + + expected_state = designate_bind.hookenv.WORKLOAD_STATES.BLOCKED + expected_message = ('Config option "service_ips" does not have an ' + 'expected format.') + + designate_bind.hookenv.status_set.assert_called_once_with( + expected_state, expected_message + ) + + def test_assess_status_blocks_on_awaiting_hacluster(self): + """Test that unit is blocked if it's awaiting hacluster relation. + + This occurs when 'service_ips' config option is set but relation with + hacluster charm is missing. + """ + self.patch_object(designate_bind.openstack_charm.OpenStackCharm, + '_assess_status') + self.patch_object(designate_bind, 'parse_service_ip_config') + designate_bind.reactive.is_flag_set.return_value = True + + charm_ = designate_bind.DesignateBindCharm() + charm_._assess_status() + + expected_state = designate_bind.hookenv.WORKLOAD_STATES.BLOCKED + expected_message = ('Failed to configure "service_ips", hacluster ' + 'relation is missing.') + + designate_bind.hookenv.status_set.assert_called_once_with( + expected_state, expected_message + )