diff --git a/src/layer.yaml b/src/layer.yaml
index 9d3fc42..4f9e893 100644
--- a/src/layer.yaml
+++ b/src/layer.yaml
@@ -3,3 +3,4 @@ options:
   basic:
     use_venv: True
     include_system_packages: True
+repo: https://github.com/openstack/charm-designate
diff --git a/src/lib/charm/openstack/designate.py b/src/lib/charm/openstack/designate.py
index cd89f1e..9ba9003 100644
--- a/src/lib/charm/openstack/designate.py
+++ b/src/lib/charm/openstack/designate.py
@@ -25,6 +25,7 @@ import charms_openstack.ip as os_ip
 import charmhelpers.core.decorators as decorators
 import charmhelpers.core.hookenv as hookenv
 import charmhelpers.core.host as host
+import charms.reactive as reactive
 
 DESIGNATE_DIR = '/etc/designate'
 DESIGNATE_DEFAULT = '/etc/default/openstack'
@@ -282,10 +283,9 @@ class BindRNDCRelationAdapter(openstack_adapters.OpenStackRelationAdapter):
 class DesignateConfigurationAdapter(
         openstack_adapters.APIConfigurationAdapter):
 
-    def __init__(self, port_map=None):
+    def __init__(self, port_map=None, *args, **kwargs):
         super(DesignateConfigurationAdapter, self).__init__(
-            port_map=port_map,
-            service_name='designate')
+            port_map=port_map, service_name='designate', *args, **kwargs)
 
     @property
     def pool_config(self):
@@ -303,18 +303,37 @@ class DesignateConfigurationAdapter(
             ...]
         """
         pconfig = []
-        for entry in self.dns_slaves.split():
-            address, port, key = entry.split(':')
-            unit_name = address.replace('.', '_')
-            pconfig.append({
-                'nameserver': 'nameserver_{}'.format(unit_name),
-                'pool_target': 'nameserver_{}'.format(unit_name),
-                'address': address,
-                'rndc_key_file': '/etc/designate/rndc_{}.key'.format(
-                    unit_name),
-            })
+        if self.dns_slaves:
+            for entry in self.dns_slaves.split():
+                try:
+                    address, port, key = entry.split(':')
+                    unit_name = address.replace('.', '_')
+                    pconfig.append({
+                        'nameserver': 'nameserver_{}'.format(unit_name),
+                        'pool_target': 'nameserver_{}'.format(unit_name),
+                        'address': address,
+                        'rndc_key_file': '/etc/designate/rndc_{}.key'.format(
+                            unit_name),
+                    })
+                except ValueError:
+                    # the entry doesn't contain 3 values, so ignore it.
+                    pass
         return pconfig
 
+    def invalid_pool_config(self):
+        """Validate that the pool config at least looks like something that
+        can be used.
+
+        @returns: Error string or None if okay
+        """
+        if self.dns_slaves:
+            for entry in self.dns_slaves.split():
+                try:
+                    _, __, ___ = entry.split(':')
+                except ValueError:
+                    return "dns_slaves is malformed"
+        return None
+
     @property
     def pool_targets(self):
         """List of pool_target section names
@@ -426,8 +445,7 @@ class DesignateCharm(openstack_charm.HAOpenStackCharm):
         }
     }
 
-    required_relations = ['shared-db', 'amqp', 'identity-service',
-                          'dns-backend']
+    required_relations = ['shared-db', 'amqp', 'identity-service', ]
 
     restart_map = {
         '/etc/default/openstack': services,
@@ -442,6 +460,7 @@ class DesignateCharm(openstack_charm.HAOpenStackCharm):
     default_service = 'designate-api'
     sync_cmd = ['designate-manage', 'database', 'sync']
     adapters_class = DesignateAdapters
+    configuration_class = DesignateConfigurationAdapter
 
     ha_resources = ['vips', 'haproxy']
     release = 'mitaka'
@@ -518,10 +537,14 @@ class DesignateCharm(openstack_charm.HAOpenStackCharm):
         @returns None
         """
         slaves = hookenv.config('dns-slaves') or ''
-        for entry in slaves.split():
-            address, port, key = entry.split(':')
-            unit_name = address.replace('.', '_')
-            self.write_key_file(unit_name, key)
+        try:
+            for entry in slaves.split():
+                address, port, key = entry.split(':')
+                unit_name = address.replace('.', '_')
+                self.write_key_file(unit_name, key)
+        except ValueError as e:
+            hookenv.log("Problem with 'dns-slaves' config: {}"
+                        .format(str(e)), level=hookenv.ERROR)
 
     @classmethod
     def get_domain_id(cls, domain):
@@ -645,4 +668,13 @@ class DesignateCharm(openstack_charm.HAOpenStackCharm):
                  hookenv.config('neutron-domain'))):
             return 'blocked', ('nameservers must be set when specifying'
                                ' nova-domain or neutron-domain')
+        dns_backend_available = (reactive
+                                 .RelationBase
+                                 .from_state('dns-backend.available'))
+        invalid_dns = self.options.invalid_pool_config()
+        if invalid_dns:
+            return 'blocked', invalid_dns
+        if not (dns_backend_available or hookenv.config('dns-slaves')):
+            return 'blocked', ('Need either a dns-backend relation or '
+                               'config(dns-slaves) or both.')
         return None, None
diff --git a/src/reactive/designate_handlers.py b/src/reactive/designate_handlers.py
index 3be6a92..c333b3b 100644
--- a/src/reactive/designate_handlers.py
+++ b/src/reactive/designate_handlers.py
@@ -12,18 +12,50 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
 import charm.openstack.designate as designate
 import charms.reactive as reactive
+import charmhelpers.core.hookenv as hookenv
+
+from charms_openstack.charm import provide_charm_instance
+
+# If either dns-backend.available is set OR config('dns-slaves') is valid, then
+# the following state will be set.
+DNS_CONFIG_AVAILABLE = 'dns-config.available'
 
 COMPLETE_INTERFACE_STATES = [
-    'dns-backend.available',
+    DNS_CONFIG_AVAILABLE,
     'shared-db.available',
     'identity-service.available',
     'amqp.available',
 ]
 
 
+@reactive.hook('config-changed')
+def check_dns_slaves():
+    """Verify whether config('dns-slaves') is valid and set or remove the
+    state accordingly.  Note that hooks run BEFORE the reactive handlers,
+    so this should happen first during a hook.
+ """ + if hookenv.config('dns-slaves'): + with provide_charm_instance() as instance: + if not instance.options.invalid_pool_config(): + reactive.set_state('dns-slaves-config-valid') + return + reactive.remove_state('dns-slaves-config-valid') + + +@reactive.when_any('dns-slaves-config-valid', + 'dns-backend.available') +def set_dns_config_available(*args): + reactive.set_state(DNS_CONFIG_AVAILABLE) + + +@reactive.when_none('dns-slaves-config-valid', + 'dns-backend.available') +def clear_dns_config_available(): + reactive.remove_state(DNS_CONFIG_AVAILABLE) + + @reactive.when_not('installed') def install_packages(): """Install charms packages""" @@ -64,6 +96,9 @@ def configure_designate_basic(*args): cluster = reactive.RelationBase.from_state('cluster.available') if cluster is not None: args = args + (cluster, ) + dns_backend = reactive.RelationBase.from_state('dns-backend.available') + if dns_backend is not None: + args = args + (dns_backend, ) designate.render_base_config(args) reactive.set_state('base-config.rendered') @@ -92,6 +127,9 @@ def configure_designate_full(*args): cluster = reactive.RelationBase.from_state('cluster.available') if cluster is not None: args = args + (cluster, ) + dns_backend = reactive.RelationBase.from_state('dns-backend.available') + if dns_backend is not None: + args = args + (dns_backend, ) designate.upgrade_if_available(args) designate.configure_ssl() designate.render_full_config(args) @@ -99,6 +137,7 @@ def configure_designate_full(*args): designate.render_sink_configs(args) designate.render_rndc_keys() designate.update_pools() + designate.assess_status() @reactive.when('ha.connected') diff --git a/src/templates/mitaka/pools.yaml b/src/templates/mitaka/pools.yaml index 679e591..7c6e86b 100644 --- a/src/templates/mitaka/pools.yaml +++ b/src/templates/mitaka/pools.yaml @@ -9,21 +9,21 @@ {% endif %} nameservers: -{% if dns_backend.pool_config %} +{% if dns_backend and dns_backend.pool_config %} {% for slave in dns_backend.pool_config %} - host: {{ slave.address }} port: 53 {% endfor %} {% endif %} {% if options.pool_config %} -{% for slave in dns_backend.pool_config %} +{% for slave in options.pool_config %} - host: {{ slave.address }} port: 53 {% endfor %} {% endif %} targets: -{% if dns_backend.pool_config %} +{% if dns_backend and dns_backend.pool_config %} {% for slave in dns_backend.pool_config %} - type: bind9 masters: @@ -51,14 +51,14 @@ {% endif %} also_notifies: -{% if dns_backend.pool_config %} +{% if dns_backend and dns_backend.pool_config %} {% for slave in dns_backend.pool_config %} - host: {{ slave.address }} port: 53 {% endfor %} {% endif %} {% if options.pool_config %} -{% for slave in dns_backend.pool_config %} +{% for slave in options.pool_config %} - host: {{ slave.address }} port: 53 {% endfor %} diff --git a/src/templates/rndc.key b/src/templates/rndc.key index 39bd1d4..7ae04c3 100644 --- a/src/templates/rndc.key +++ b/src/templates/rndc.key @@ -1,4 +1,6 @@ +{% if dns_backend %} key "rndc-key" { algorithm {{ dns_backend.rndc_info.algorithm }}; secret "{{ dns_backend.rndc_info.secret }}"; }; +{% endif %} diff --git a/unit_tests/test_designate_handlers.py b/unit_tests/test_designate_handlers.py index b93acf7..bd48596 100644 --- a/unit_tests/test_designate_handlers.py +++ b/unit_tests/test_designate_handlers.py @@ -1,147 +1,72 @@ -from __future__ import absolute_import -from __future__ import print_function - -import unittest - import mock import reactive.designate_handlers as handlers - -_when_args = {} -_when_not_args = {} +import 
 
 
-def mock_hook_factory(d):
+class TestRegisteredHooks(test_utils.TestRegisteredHooks):
 
-    def mock_hook(*args, **kwargs):
-
-        def inner(f):
-            # remember what we were passed. Note that we can't actually
-            # determine the class we're attached to, as the decorator only gets
-            # the function.
-            try:
-                d[f.__name__].append(dict(args=args, kwargs=kwargs))
-            except KeyError:
-                d[f.__name__] = [dict(args=args, kwargs=kwargs)]
-            return f
-        return inner
-    return mock_hook
-
-
-class TestDesignateHandlers(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        cls._patched_when = mock.patch('charms.reactive.when',
-                                       mock_hook_factory(_when_args))
-        cls._patched_when_started = cls._patched_when.start()
-        cls._patched_when_not = mock.patch('charms.reactive.when_not',
-                                           mock_hook_factory(_when_not_args))
-        cls._patched_when_not_started = cls._patched_when_not.start()
-        # force requires to rerun the mock_hook decorator:
-        # try except is Python2/Python3 compatibility as Python3 has moved
-        # reload to importlib.
-        try:
-            reload(handlers)
-        except NameError:
-            import importlib
-            importlib.reload(handlers)
-
-    @classmethod
-    def tearDownClass(cls):
-        cls._patched_when.stop()
-        cls._patched_when_started = None
-        cls._patched_when = None
-        cls._patched_when_not.stop()
-        cls._patched_when_not_started = None
-        cls._patched_when_not = None
-        # and fix any breakage we did to the module
-        try:
-            reload(handlers)
-        except NameError:
-            import importlib
-            importlib.reload(handlers)
-
-    def setUp(self):
-        self._patches = {}
-        self._patches_start = {}
-
-    def tearDown(self):
-        for k, v in self._patches.items():
-            v.stop()
-            setattr(self, k, None)
-        self._patches = None
-        self._patches_start = None
-
-    def patch(self, obj, attr, return_value=None):
-        mocked = mock.patch.object(obj, attr)
-        self._patches[attr] = mocked
-        started = mocked.start()
-        started.return_value = return_value
-        self._patches_start[attr] = started
-        setattr(self, attr, started)
-
-    def test_registered_hooks(self):
+    def test_hooks(self):
         # test that the hooks actually registered the relation expressions that
         # are meaningful for this interface: this is to handle regressions.
         # The keys are the function names that the hook attaches to.
         all_interfaces = (
-            'dns-backend.available',
+            'dns-config.available',
             'shared-db.available',
             'identity-service.available',
             'amqp.available')
-        when_patterns = {
-            'setup_amqp_req': [('amqp.connected', )],
-            'setup_database': [('shared-db.connected', )],
-            'setup_endpoint': [('identity-service.connected', )],
-            'configure_ssl': [('identity-service.available', )],
-            'update_peers': [('cluster.available', )],
-            'config_changed': [('config.changed', )],
-            'cluster_connected': [('ha.connected', )],
-            'create_servers_and_domains': [
-                all_interfaces,
-                ('base-config.rendered', ),
-                ('db.synched', ),
-            ],
-            'configure_designate_full': [
-                all_interfaces,
-                ('db.synched', ),
-            ],
-            'run_db_migration': [
-                all_interfaces,
-                ('base-config.rendered', ),
-            ],
-            'configure_designate_basic': [
-                all_interfaces,
-            ],
+        hook_set = {
+            'when': {
+                'setup_amqp_req': ('amqp.connected', ),
+                'setup_database': ('shared-db.connected', ),
+                'setup_endpoint': ('identity-service.connected', ),
+                'configure_ssl': ('identity-service.available', ),
+                'update_peers': ('cluster.available', ),
+                'config_changed': ('config.changed', ),
+                'cluster_connected': ('ha.connected', ),
+                'create_servers_and_domains': (
+                    all_interfaces + ('base-config.rendered', 'db.synched')),
+                'configure_designate_full': (
+                    all_interfaces + ('db.synched', )),
+                'run_db_migration': (
+                    all_interfaces + ('base-config.rendered', )),
+                'configure_designate_basic': all_interfaces,
+            },
+            'when_not': {
+                'install_packages': ('installed', ),
+                'run_db_migration': ('db.synched', ),
+                'configure_designate_basic': ('base-config.rendered', ),
+                'create_servers_and_domains': ('domains.created', ),
+            },
+            'when_any': {
+                'set_dns_config_available': (
+                    'dns-slaves-config-valid', 'dns-backend.available', ),
+            },
+            'when_none': {
+                'clear_dns_config_available': (
+                    'dns-slaves-config-valid', 'dns-backend.available', ),
+            },
+            'hook': {
+                'check_dns_slaves': ('config-changed', ),
+            },
         }
-        when_not_patterns = {
-            'install_packages': [('installed', )],
-            'run_db_migration': [('db.synched', )],
-            'configure_designate_basic': [('base-config.rendered', )],
-            'create_servers_and_domains': [('domains.created', )],
-        }
-        # check the when hooks are attached to the expected functions
-        for t, p in [(_when_args, when_patterns),
-                     (_when_not_args, when_not_patterns)]:
-            for f, args in t.items():
-                # check that function is in patterns
-                print(f)
-                self.assertTrue(f in p.keys())
-                # check that the lists are equal
-                l = [a['args'] for a in args]
-                self.assertEqual(l, p[f])
+        # test that the hooks were registered via the
+        # reactive.designate_handlers module
+        self.registered_hooks_test_helper(handlers, hook_set, [])
+
+
+class TestHandlers(test_utils.PatchHelper):
 
     def test_install_packages(self):
-        self.patch(handlers.designate, 'install')
-        self.patch(handlers.reactive, 'set_state')
+        self.patch_object(handlers.designate, 'install')
+        self.patch_object(handlers.reactive, 'set_state')
         handlers.install_packages()
         self.install.assert_called_once_with()
         self.set_state.assert_called_once_with('installed')
 
     def test_setup_amqp_req(self):
-        self.patch(handlers.designate, 'assess_status')
+        self.patch_object(handlers.designate, 'assess_status')
         amqp = mock.MagicMock()
         handlers.setup_amqp_req(amqp)
         amqp.request_access.assert_called_once_with(
@@ -149,7 +74,7 @@ class TestDesignateHandlers(unittest.TestCase):
         self.assess_status.assert_called_once_with()
 
     def test_database(self):
-        self.patch(handlers.designate, 'assess_status')
+        self.patch_object(handlers.designate, 'assess_status')
         database = mock.MagicMock()
         handlers.setup_database(database)
         calls = [
@@ -166,24 +91,25 @@ class TestDesignateHandlers(unittest.TestCase):
         self.assess_status.assert_called_once_with()
 
     def test_setup_endpoint(self):
-        self.patch(handlers.designate, 'assess_status')
-        self.patch(handlers.designate, 'register_endpoints')
+        self.patch_object(handlers.designate, 'assess_status')
+        self.patch_object(handlers.designate, 'register_endpoints')
         handlers.setup_endpoint('endpoint_object')
         self.register_endpoints.assert_called_once_with('endpoint_object')
         self.assess_status.assert_called_once_with()
 
     def test_configure_designate_basic(self):
-        self.patch(handlers.reactive, 'set_state')
-        self.patch(handlers.designate, 'render_base_config')
-        self.patch(handlers.reactive.RelationBase, 'from_state')
+        self.patch_object(handlers.reactive, 'set_state')
+        self.patch_object(handlers.designate, 'render_base_config')
+        self.patch_object(handlers.reactive.RelationBase, 'from_state',
+                          return_value=None)
         handlers.configure_designate_basic('arg1', 'arg2')
         self.render_base_config.assert_called_once_with(('arg1', 'arg2', ))
         self.set_state.assert_called_once_with('base-config.rendered')
 
     def test_run_db_migration(self):
-        self.patch(handlers.reactive, 'set_state')
-        self.patch(handlers.designate, 'db_sync')
-        self.patch(handlers.designate, 'db_sync_done')
+        self.patch_object(handlers.reactive, 'set_state')
+        self.patch_object(handlers.designate, 'db_sync')
+        self.patch_object(handlers.designate, 'db_sync_done')
         self.db_sync_done.return_value = False
         handlers.run_db_migration('arg1', 'arg2')
         self.db_sync.assert_called_once_with()
@@ -196,20 +122,22 @@ class TestDesignateHandlers(unittest.TestCase):
 
     def test_update_peers(self):
         cluster = mock.MagicMock()
-        self.patch(handlers.designate, 'update_peers')
+        self.patch_object(handlers.designate, 'update_peers')
         handlers.update_peers(cluster)
         self.update_peers.assert_called_once_with(cluster)
 
     def test_configure_designate_full(self):
-        self.patch(handlers.reactive.RelationBase, 'from_state',
-                   return_value=None)
-        self.patch(handlers.designate, 'upgrade_if_available')
-        self.patch(handlers.designate, 'configure_ssl')
-        self.patch(handlers.designate, 'render_full_config')
-        self.patch(handlers.designate, 'create_initial_servers_and_domains')
-        self.patch(handlers.designate, 'render_sink_configs')
-        self.patch(handlers.designate, 'render_rndc_keys')
-        self.patch(handlers.designate, 'update_pools')
+        self.patch_object(handlers.reactive.RelationBase,
+                          'from_state',
+                          return_value=None)
+        self.patch_object(handlers.designate, 'upgrade_if_available')
+        self.patch_object(handlers.designate, 'configure_ssl')
+        self.patch_object(handlers.designate, 'render_full_config')
+        self.patch_object(handlers.designate,
+                          'create_initial_servers_and_domains')
+        self.patch_object(handlers.designate, 'render_sink_configs')
+        self.patch_object(handlers.designate, 'render_rndc_keys')
+        self.patch_object(handlers.designate, 'update_pools')
         handlers.configure_designate_full('arg1', 'arg2')
         self.configure_ssl.assert_called_once_with()
         self.render_full_config.assert_called_once_with(('arg1', 'arg2', ))
@@ -221,13 +149,13 @@ class TestDesignateHandlers(unittest.TestCase):
 
     def test_cluster_connected(self):
         hacluster = mock.MagicMock()
-        self.patch(handlers.designate, 'configure_ha_resources')
-        self.patch(handlers.designate, 'assess_status')
+        self.patch_object(handlers.designate, 'configure_ha_resources')
+        self.patch_object(handlers.designate, 'assess_status')
         handlers.cluster_connected(hacluster)
         self.configure_ha_resources.assert_called_once_with(hacluster)
         self.assess_status.assert_called_once_with()
 
     def test_config_changed(self):
-        self.patch(handlers.designate, 'assess_status')
+        self.patch_object(handlers.designate, 'assess_status')
         handlers.config_changed()
         self.assess_status.assert_called_once_with()
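
A minimal sketch (not part of the patch) of how a dns-slaves value is interpreted, assuming the whitespace-separated address:port:rndc-key entry format implied by pool_config() and invalid_pool_config() above; parse_dns_slaves is a hypothetical helper used only for illustration:

    # Illustrative only -- mirrors the parsing done in
    # DesignateConfigurationAdapter.pool_config()/invalid_pool_config().
    def parse_dns_slaves(dns_slaves):
        """Return (entries, error) for a dns-slaves style config string."""
        entries = []
        for entry in (dns_slaves or '').split():
            try:
                address, port, key = entry.split(':')
            except ValueError:
                # the entry does not have exactly three ':'-separated fields
                return [], "dns_slaves is malformed"
            entries.append({
                'address': address,
                'port': port,
                'rndc_key_file': '/etc/designate/rndc_{}.key'.format(
                    address.replace('.', '_')),
            })
        return entries, None

    # e.g. (hypothetical values):
    #   parse_dns_slaves('10.0.0.10:953:c2VjcmV0')  -> ([{...}], None)
    #   parse_dns_slaves('10.0.0.10:953')           -> ([], 'dns_slaves is malformed')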
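
As a rough summary of the state flow the new handlers introduce (the juju config invocation assumes a Juju 2.x client; the blocked message is the one added to custom_assess_status_check() above):

    # Hypothetical operator workflow, values are examples only:
    #   juju config designate dns-slaves="10.0.0.10:953:<rndc-key>"
    #     -> the config-changed hook runs check_dns_slaves()
    #     -> 'dns-slaves-config-valid' is set, so 'dns-config.available' is set
    #        and the handlers gated on COMPLETE_INTERFACE_STATES can proceed
    #   juju config designate dns-slaves=""    (with no dns-backend relation)
    #     -> 'dns-config.available' is removed and the unit reports:
    #        blocked: Need either a dns-backend relation or config(dns-slaves) or both.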