diff --git a/.gitignore b/.gitignore index 34a9602..0d08763 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,4 @@ tags .unit-state.db trusty .stestr +func-results.json diff --git a/1 b/1 deleted file mode 100644 index c704f1f..0000000 --- a/1 +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from mock import patch -import os - -os.environ['JUJU_UNIT_NAME'] = 'ceilometer' - -with patch('ceilometer_utils.register_configs') as register_configs: - with patch('ceilometer_utils.restart_map') as restart_map: - import openstack_upgrade - -from test_utils import ( - CharmTestCase -) - -TO_PATCH = [ - 'config_changed', - 'do_openstack_upgrade', -] - - -class TestCinderUpgradeActions(CharmTestCase): - - def setUp(self): - super(TestCinderUpgradeActions, self).setUp(openstack_upgrade, - TO_PATCH) - - @patch('charmhelpers.contrib.openstack.utils.juju_log') - @patch('charmhelpers.contrib.openstack.utils.config') - @patch('charmhelpers.contrib.openstack.utils.action_set') - @patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available') - def test_openstack_upgrade_true(self, upgrade_avail, - action_set, config, log): - upgrade_avail.return_value = True - config.return_value = True - - openstack_upgrade.openstack_upgrade() - - self.assertTrue(self.do_openstack_upgrade.called) - self.assertTrue(self.config_changed.called) - - @patch('charmhelpers.contrib.openstack.utils.juju_log') - @patch('charmhelpers.contrib.openstack.utils.config') - @patch('charmhelpers.contrib.openstack.utils.action_set') - @patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available') - def test_openstack_upgrade_false(self, upgrade_avail, - action_set, config, log): - upgrade_avail.return_value = True - config.return_value = False - - openstack_upgrade.openstack_upgrade() - - self.assertFalse(self.do_openstack_upgrade.called) - self.assertFalse(self.config_changed.called) diff --git a/hooks/charmhelpers/contrib/hahelpers/apache.py b/hooks/charmhelpers/contrib/hahelpers/apache.py index 22acb68..605a1be 100644 --- a/hooks/charmhelpers/contrib/hahelpers/apache.py +++ b/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -65,7 +65,8 @@ def get_ca_cert(): if ca_cert is None: log("Inspecting identity-service relations for CA SSL certificate.", level=INFO) - for r_id in relation_ids('identity-service'): + for r_id in (relation_ids('identity-service') + + relation_ids('identity-credentials')): for unit in relation_list(r_id): if ca_cert is None: ca_cert = relation_get('ca_cert', @@ -76,7 +77,7 @@ def get_ca_cert(): def retrieve_ca_cert(cert_file): cert = None if os.path.isfile(cert_file): - with open(cert_file, 'r') as crt: + with open(cert_file, 'rb') as crt: cert = crt.read() return cert diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py index 4207e42..47facd9 100644 --- a/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -371,6 +371,7 @@ 
def distributed_wait(modulo=None, wait=None, operation_name='operation'): ''' Distribute operations by waiting based on modulo_distribution If modulo and or wait are not set, check config_get for those values. + If config values are not set, default to modulo=3 and wait=30. :param modulo: int The modulo number creates the group distribution :param wait: int The constant time wait value @@ -382,10 +383,17 @@ def distributed_wait(modulo=None, wait=None, operation_name='operation'): :side effect: Calls time.sleep() ''' if modulo is None: - modulo = config_get('modulo-nodes') + modulo = config_get('modulo-nodes') or 3 if wait is None: - wait = config_get('known-wait') - calculated_wait = modulo_distribution(modulo=modulo, wait=wait) + wait = config_get('known-wait') or 30 + if juju_is_leader(): + # The leader should never wait + calculated_wait = 0 + else: + # non_zero_wait=True guarantees the non-leader who gets modulo 0 + # will still wait + calculated_wait = modulo_distribution(modulo=modulo, wait=wait, + non_zero_wait=True) msg = "Waiting {} seconds for {} ...".format(calculated_wait, operation_name) log(msg, DEBUG) diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 5afbbd8..66beeda 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -21,6 +21,9 @@ from collections import OrderedDict from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) +from charmhelpers.contrib.openstack.amulet.utils import ( + OPENSTACK_RELEASES_PAIRS +) DEBUG = logging.DEBUG ERROR = logging.ERROR @@ -271,11 +274,8 @@ class OpenStackAmuletDeployment(AmuletDeployment): release. """ # Must be ordered by OpenStack release (not by Ubuntu release): - (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, - self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, - self.yakkety_newton, self.xenial_ocata, self.zesty_ocata, - self.xenial_pike, self.artful_pike, self.xenial_queens, - self.bionic_queens,) = range(13) + for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS): + setattr(self, os_pair, i) releases = { ('trusty', None): self.trusty_icehouse, diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py index d93cff3..84e87f5 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -50,6 +50,13 @@ ERROR = logging.ERROR NOVA_CLIENT_VERSION = "2" +OPENSTACK_RELEASES_PAIRS = [ + 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty', + 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', + 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', + 'xenial_pike', 'artful_pike', 'xenial_queens', + 'bionic_queens'] + class OpenStackAmuletUtils(AmuletUtils): """OpenStack amulet utilities. @@ -63,7 +70,34 @@ class OpenStackAmuletUtils(AmuletUtils): super(OpenStackAmuletUtils, self).__init__(log_level) def validate_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): + public_port, expected, openstack_release=None): + """Validate endpoint data. Pick the correct validator based on + OpenStack release. 
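# A minimal usage sketch (not part of this patch) of the reworked distributed_wait()
# above; the hook and service names are illustrative assumptions. With the
# 'modulo-nodes'/'known-wait' config options unset the helper now defaults to
# modulo=3 and wait=30, the Juju leader proceeds immediately, and non_zero_wait=True
# means a non-leader whose unit number lands on slot 0 still sleeps.
from charmhelpers.contrib.hahelpers.cluster import distributed_wait
from charmhelpers.core.host import service_restart


def coordinated_restart():
    # e.g. unit/4 with modulo=3, wait=30 sleeps (4 % 3) * 30 = 30 seconds;
    # unit/3 lands on slot 0 and now sleeps 3 * 30 = 90 seconds instead of 0.
    distributed_wait(operation_name='my-service restart')
    service_restart('my-service')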
Expected data should be in the v2 format: + { + 'id': id, + 'region': region, + 'adminurl': adminurl, + 'internalurl': internalurl, + 'publicurl': publicurl, + 'service_id': service_id} + + """ + validation_function = self.validate_v2_endpoint_data + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + validation_function = self.validate_v3_endpoint_data + expected = { + 'id': expected['id'], + 'region': expected['region'], + 'region_id': 'RegionOne', + 'url': self.valid_url, + 'interface': self.not_null, + 'service_id': expected['service_id']} + return validation_function(endpoints, admin_port, internal_port, + public_port, expected) + + def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): """Validate endpoint data. Validate actual endpoint data vs expected endpoint data. The ports @@ -141,7 +175,86 @@ class OpenStackAmuletUtils(AmuletUtils): if len(found) != expected_num_eps: return 'Unexpected number of endpoints found' - def validate_svc_catalog_endpoint_data(self, expected, actual): + def convert_svc_catalog_endpoint_data_to_v3(self, ep_data): + """Convert v2 endpoint data into v3. + + { + 'service_name1': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + 'service_name2': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + } + """ + self.log.warn("Endpoint ID and Region ID validation is limited to not " + "null checks after v2 to v3 conversion") + for svc in ep_data.keys(): + assert len(ep_data[svc]) == 1, "Unknown data format" + svc_ep_data = ep_data[svc][0] + ep_data[svc] = [ + { + 'url': svc_ep_data['adminURL'], + 'interface': 'admin', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}, + { + 'url': svc_ep_data['publicURL'], + 'interface': 'public', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}, + { + 'url': svc_ep_data['internalURL'], + 'interface': 'internal', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}] + return ep_data + + def validate_svc_catalog_endpoint_data(self, expected, actual, + openstack_release=None): + """Validate service catalog endpoint data. Pick the correct validator + for the OpenStack version. Expected data should be in the v2 format: + { + 'service_name1': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + 'service_name2': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + } + + """ + validation_function = self.validate_v2_svc_catalog_endpoint_data + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + validation_function = self.validate_v3_svc_catalog_endpoint_data + expected = self.convert_svc_catalog_endpoint_data_to_v3(expected) + return validation_function(expected, actual) + + def validate_v2_svc_catalog_endpoint_data(self, expected, actual): """Validate service catalog endpoint data. Validate a list of actual service catalog endpoints vs a list of @@ -328,7 +441,7 @@ class OpenStackAmuletUtils(AmuletUtils): if rel.get('api_version') != str(api_version): raise Exception("api_version not propagated through relation" " data yet ('{}' != '{}')." 
- "".format(rel['api_version'], api_version)) + "".format(rel.get('api_version'), api_version)) def keystone_configure_api_version(self, sentry_relation_pairs, deployment, api_version): @@ -350,16 +463,13 @@ class OpenStackAmuletUtils(AmuletUtils): deployment._auto_wait_for_status() self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) - def authenticate_cinder_admin(self, keystone_sentry, username, - password, tenant, api_version=2): + def authenticate_cinder_admin(self, keystone, api_version=2): """Authenticates admin user with cinder.""" - # NOTE(beisner): cinder python client doesn't accept tokens. - keystone_ip = keystone_sentry.info['public-address'] - ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) + self.log.debug('Authenticating cinder admin...') _clients = { 1: cinder_client.Client, 2: cinder_clientv2.Client} - return _clients[api_version](username, password, tenant, ept) + return _clients[api_version](session=keystone.session) def authenticate_keystone(self, keystone_ip, username, password, api_version=False, admin_port=False, @@ -367,13 +477,36 @@ class OpenStackAmuletUtils(AmuletUtils): project_domain_name=None, project_name=None): """Authenticate with Keystone""" self.log.debug('Authenticating with keystone...') - port = 5000 - if admin_port: - port = 35357 - base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), - port) - if not api_version or api_version == 2: - ep = base_ep + "/v2.0" + if not api_version: + api_version = 2 + sess, auth = self.get_keystone_session( + keystone_ip=keystone_ip, + username=username, + password=password, + api_version=api_version, + admin_port=admin_port, + user_domain_name=user_domain_name, + domain_name=domain_name, + project_domain_name=project_domain_name, + project_name=project_name + ) + if api_version == 2: + client = keystone_client.Client(session=sess) + else: + client = keystone_client_v3.Client(session=sess) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(sess) + return client + + def get_keystone_session(self, keystone_ip, username, password, + api_version=False, admin_port=False, + user_domain_name=None, domain_name=None, + project_domain_name=None, project_name=None): + """Return a keystone session object""" + ep = self.get_keystone_endpoint(keystone_ip, + api_version=api_version, + admin_port=admin_port) + if api_version == 2: auth = v2.Password( username=username, password=password, @@ -381,12 +514,7 @@ class OpenStackAmuletUtils(AmuletUtils): auth_url=ep ) sess = keystone_session.Session(auth=auth) - client = keystone_client.Client(session=sess) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(sess) - return client else: - ep = base_ep + "/v3" auth = v3.Password( user_domain_name=user_domain_name, username=username, @@ -397,10 +525,57 @@ class OpenStackAmuletUtils(AmuletUtils): auth_url=ep ) sess = keystone_session.Session(auth=auth) - client = keystone_client_v3.Client(session=sess) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(sess) - return client + return (sess, auth) + + def get_keystone_endpoint(self, keystone_ip, api_version=None, + admin_port=False): + """Return keystone endpoint""" + port = 5000 + if admin_port: + port = 35357 + base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), + port) + if api_version == 2: + ep = base_ep + "/v2.0" + else: + ep = base_ep + "/v3" + return ep + + def get_default_keystone_session(self, keystone_sentry, + 
openstack_release=None): + """Return a keystone session object and client object assuming standard + default settings + + Example call in amulet tests: + self.keystone_session, self.keystone = u.get_default_keystone_session( + self.keystone_sentry, + openstack_release=self._get_openstack_release()) + + The session can then be used to auth other clients: + neutronclient.Client(session=session) + aodh_client.Client(session=session) + eyc + """ + self.log.debug('Authenticating keystone admin...') + api_version = 2 + client_class = keystone_client.Client + # 11 => xenial_queens + if openstack_release and openstack_release >= 11: + api_version = 3 + client_class = keystone_client_v3.Client + keystone_ip = keystone_sentry.info['public-address'] + session, auth = self.get_keystone_session( + keystone_ip, + api_version=api_version, + username='admin', + password='openstack', + project_name='admin', + user_domain_name='admin_domain', + project_domain_name='admin_domain') + client = client_class(session=session) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(session) + return session, client def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant=None, api_version=None, diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 36cf32f..2d91f0a 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -384,6 +384,7 @@ class IdentityServiceContext(OSContextGenerator): # so a missing value just indicates keystone needs # upgrading ctxt['admin_tenant_id'] = rdata.get('service_tenant_id') + ctxt['admin_domain_id'] = rdata.get('service_domain_id') return ctxt return {} @@ -796,9 +797,9 @@ class ApacheSSLContext(OSContextGenerator): key_filename = 'key' write_file(path=os.path.join(ssl_dir, cert_filename), - content=b64decode(cert)) + content=b64decode(cert), perms=0o640) write_file(path=os.path.join(ssl_dir, key_filename), - content=b64decode(key)) + content=b64decode(key), perms=0o640) def configure_ca(self): ca_cert = get_ca_cert() @@ -1872,10 +1873,11 @@ class EnsureDirContext(OSContextGenerator): context is needed to do that before rendering a template. 
''' - def __init__(self, dirname): + def __init__(self, dirname, **kwargs): '''Used merely to ensure that a given directory exists.''' self.dirname = dirname + self.kwargs = kwargs def __call__(self): - mkdir(self.dirname) + mkdir(self.dirname, **self.kwargs) return {} diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-oslo-middleware b/hooks/charmhelpers/contrib/openstack/templates/section-oslo-middleware new file mode 100644 index 0000000..dd73230 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/templates/section-oslo-middleware @@ -0,0 +1,5 @@ +[oslo_middleware] + +# Bug #1758675 +enable_proxy_headers_parsing = true + diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications b/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications index 5dccd4b..021a3c2 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications +++ b/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications @@ -5,4 +5,7 @@ transport_url = {{ transport_url }} {% if notification_topics -%} topics = {{ notification_topics }} {% endif -%} +{% if notification_format -%} +notification_format = {{ notification_format }} +{% endif -%} {% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index b753275..6184abd 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -182,7 +182,7 @@ SWIFT_CODENAMES = OrderedDict([ ('pike', ['2.13.0', '2.15.0']), ('queens', - ['2.16.0']), + ['2.16.0', '2.17.0']), ]) # >= Liberty version->codename mapping @@ -306,7 +306,7 @@ def get_os_codename_install_source(src): if src.startswith('cloud:'): ca_rel = src.split(':')[1] - ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0] + ca_rel = ca_rel.split('-')[1].split('/')[0] return ca_rel # Best guess match based on deb string provided diff --git a/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/hooks/charmhelpers/contrib/openstack/vaultlocker.py new file mode 100644 index 0000000..a8e4bf8 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/vaultlocker.py @@ -0,0 +1,126 @@ +# Copyright 2018 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
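# Illustration (an assumption-laden sketch, not part of this sync) of the
# EnsureDirContext change earlier in this patch: the new **kwargs are forwarded
# straight to charmhelpers.core.host.mkdir, so ownership and mode of the ensured
# directory can now be controlled. The directory, user and group names are made up.
from charmhelpers.contrib.openstack.context import EnsureDirContext

ensure_policy_dir = EnsureDirContext('/etc/myservice/policy.d',
                                     owner='myservice',
                                     group='myservice',
                                     perms=0o750)
ensure_policy_dir()  # creates the directory with those attributes and returns {}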
+ +import json +import os + +import charmhelpers.contrib.openstack.alternatives as alternatives +import charmhelpers.contrib.openstack.context as context + +import charmhelpers.core.hookenv as hookenv +import charmhelpers.core.host as host +import charmhelpers.core.templating as templating +import charmhelpers.core.unitdata as unitdata + +VAULTLOCKER_BACKEND = 'charm-vaultlocker' + + +class VaultKVContext(context.OSContextGenerator): + """Vault KV context for interaction with vault-kv interfaces""" + interfaces = ['secrets-storage'] + + def __init__(self, secret_backend=None): + super(context.OSContextGenerator, self).__init__() + self.secret_backend = ( + secret_backend or 'charm-{}'.format(hookenv.service_name()) + ) + + def __call__(self): + db = unitdata.kv() + last_token = db.get('last-token') + secret_id = db.get('secret-id') + for relation_id in hookenv.relation_ids(self.interfaces[0]): + for unit in hookenv.related_units(relation_id): + data = hookenv.relation_get(unit=unit, + rid=relation_id) + vault_url = data.get('vault_url') + role_id = data.get('{}_role_id'.format(hookenv.local_unit())) + token = data.get('{}_token'.format(hookenv.local_unit())) + + if all([vault_url, role_id, token]): + token = json.loads(token) + vault_url = json.loads(vault_url) + + # Tokens may change when secret_id's are being + # reissued - if so use token to get new secret_id + if token != last_token: + secret_id = retrieve_secret_id( + url=vault_url, + token=token + ) + db.set('secret-id', secret_id) + db.set('last-token', token) + db.flush() + + ctxt = { + 'vault_url': vault_url, + 'role_id': json.loads(role_id), + 'secret_id': secret_id, + 'secret_backend': self.secret_backend, + } + vault_ca = data.get('vault_ca') + if vault_ca: + ctxt['vault_ca'] = json.loads(vault_ca) + self.complete = True + return ctxt + return {} + + +def write_vaultlocker_conf(context, priority=100): + """Write vaultlocker configuration to disk and install alternative + + :param context: Dict of data from vault-kv relation + :ptype: context: dict + :param priority: Priority of alternative configuration + :ptype: priority: int""" + charm_vl_path = "/var/lib/charm/{}/vaultlocker.conf".format( + hookenv.service_name() + ) + host.mkdir(os.path.dirname(charm_vl_path), perms=0o700) + templating.render(source='vaultlocker.conf.j2', + target=charm_vl_path, + context=context, perms=0o600), + alternatives.install_alternative('vaultlocker.conf', + '/etc/vaultlocker/vaultlocker.conf', + charm_vl_path, priority) + + +def vault_relation_complete(backend=None): + """Determine whether vault relation is complete + + :param backend: Name of secrets backend requested + :ptype backend: string + :returns: whether the relation to vault is complete + :rtype: bool""" + vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND) + vault_kv() + return vault_kv.complete + + +# TODO: contrib a high level unwrap method to hvac that works +def retrieve_secret_id(url, token): + """Retrieve a response-wrapped secret_id from Vault + + :param url: URL to Vault Server + :ptype url: str + :param token: One shot Token to use + :ptype token: str + :returns: secret_id to use for Vault Access + :rtype: str""" + import hvac + client = hvac.Client(url=url, token=token) + response = client._post('/v1/sys/wrapping/unwrap') + if response.status_code == 200: + data = response.json() + return data['data']['secret_id'] diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py index e13e60a..7682820 100644 
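# Sketch of how a consuming charm might use the vaultlocker helper introduced
# above; the hook name and alternative priority are assumptions, not part of this
# patch. The context stays an empty dict until the secrets-storage relation has
# published vault_url, a role_id and a one-shot token for this unit.
import charmhelpers.contrib.openstack.vaultlocker as vaultlocker


def secrets_storage_changed():
    ctxt = vaultlocker.VaultKVContext(
        secret_backend=vaultlocker.VAULTLOCKER_BACKEND)()
    if ctxt:
        # Renders the charm's vaultlocker.conf.j2 template and registers it as
        # the /etc/vaultlocker/vaultlocker.conf alternative.
        vaultlocker.write_vaultlocker_conf(ctxt, priority=90)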
--- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -291,7 +291,7 @@ class Pool(object): class ReplicatedPool(Pool): def __init__(self, service, name, pg_num=None, replicas=2, - percent_data=10.0): + percent_data=10.0, app_name=None): super(ReplicatedPool, self).__init__(service=service, name=name) self.replicas = replicas if pg_num: @@ -301,6 +301,10 @@ class ReplicatedPool(Pool): self.pg_num = min(pg_num, max_pgs) else: self.pg_num = self.get_pgs(self.replicas, percent_data) + if app_name: + self.app_name = app_name + else: + self.app_name = 'unknown' def create(self): if not pool_exists(self.service, self.name): @@ -313,6 +317,12 @@ class ReplicatedPool(Pool): update_pool(client=self.service, pool=self.name, settings={'size': str(self.replicas)}) + try: + set_app_name_for_pool(client=self.service, + pool=self.name, + name=self.app_name) + except CalledProcessError: + log('Could not set app name for pool {}'.format(self.name, level=WARNING)) except CalledProcessError: raise @@ -320,10 +330,14 @@ class ReplicatedPool(Pool): # Default jerasure erasure coded pool class ErasurePool(Pool): def __init__(self, service, name, erasure_code_profile="default", - percent_data=10.0): + percent_data=10.0, app_name=None): super(ErasurePool, self).__init__(service=service, name=name) self.erasure_code_profile = erasure_code_profile self.percent_data = percent_data + if app_name: + self.app_name = app_name + else: + self.app_name = 'unknown' def create(self): if not pool_exists(self.service, self.name): @@ -355,6 +369,12 @@ class ErasurePool(Pool): 'erasure', self.erasure_code_profile] try: check_call(cmd) + try: + set_app_name_for_pool(client=self.service, + pool=self.name, + name=self.app_name) + except CalledProcessError: + log('Could not set app name for pool {}'.format(self.name, level=WARNING)) except CalledProcessError: raise @@ -778,6 +798,25 @@ def update_pool(client, pool, settings): check_call(cmd) +def set_app_name_for_pool(client, pool, name): + """ + Calls `osd pool application enable` for the specified pool name + + :param client: Name of the ceph client to use + :type client: str + :param pool: Pool to set app name for + :type pool: str + :param name: app name for the specified pool + :type name: str + + :raises: CalledProcessError if ceph call fails + """ + if ceph_version() >= '12.0.0': + cmd = ['ceph', '--id', client, 'osd', 'pool', + 'application', 'enable', pool, name] + check_call(cmd) + + def create_pool(service, name, replicas=3, pg_num=None): """Create a new RADOS pool.""" if pool_exists(service, name): diff --git a/hooks/charmhelpers/contrib/storage/linux/lvm.py b/hooks/charmhelpers/contrib/storage/linux/lvm.py index 79a7a24..c8bde69 100644 --- a/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ b/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -151,3 +151,32 @@ def extend_logical_volume_by_device(lv_name, block_device): ''' cmd = ['lvextend', lv_name, block_device] check_call(cmd) + + +def create_logical_volume(lv_name, volume_group, size=None): + ''' + Create a new logical volume in an existing volume group + + :param lv_name: str: name of logical volume to be created. + :param volume_group: str: Name of volume group to use for the new volume. + :param size: str: Size of logical volume to create (100% if not supplied) + :raises subprocess.CalledProcessError: in the event that the lvcreate fails. 
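# Brief sketch of the new app_name plumbing in the Ceph helpers above; the pool
# and application names are placeholders. On Luminous (ceph >= 12.0.0) the pool
# is tagged via `ceph osd pool application enable`; on older releases it is a no-op.
from charmhelpers.contrib.storage.linux.ceph import (
    ReplicatedPool,
    set_app_name_for_pool,
)

ReplicatedPool(service='admin', name='mypool', replicas=3,
               app_name='rbd').create()
# or tag an already existing pool directly:
set_app_name_for_pool(client='admin', pool='mypool', name='rbd')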
+ ''' + if size: + check_call([ + 'lvcreate', + '--yes', + '-L', + '{}'.format(size), + '-n', lv_name, volume_group + ]) + # create the lv with all the space available, this is needed because the + # system call is different for LVM + else: + check_call([ + 'lvcreate', + '--yes', + '-l', + '100%FREE', + '-n', lv_name, volume_group + ]) diff --git a/hooks/charmhelpers/contrib/storage/linux/utils.py b/hooks/charmhelpers/contrib/storage/linux/utils.py index c942889..6f846b0 100644 --- a/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -67,3 +67,19 @@ def is_device_mounted(device): except Exception: return False return bool(re.search(r'MOUNTPOINT=".+"', out)) + + +def mkfs_xfs(device, force=False): + """Format device with XFS filesystem. + + By default this should fail if the device already has a filesystem on it. + :param device: Full path to device to format + :ptype device: tr + :param force: Force operation + :ptype: force: boolean""" + cmd = ['mkfs.xfs'] + if force: + cmd.append("-f") + + cmd += ['-i', 'size=1024', device] + check_call(cmd) diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index 7ed1cc4..627d8f7 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -27,6 +27,7 @@ import glob import os import json import yaml +import re import subprocess import sys import errno @@ -67,7 +68,7 @@ def cached(func): @wraps(func) def wrapper(*args, **kwargs): global cache - key = str((func, args, kwargs)) + key = json.dumps((func, args, kwargs), sort_keys=True, default=str) try: return cache[key] except KeyError: @@ -289,7 +290,7 @@ class Config(dict): self.implicit_save = True self._prev_dict = None self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) - if os.path.exists(self.path): + if os.path.exists(self.path) and os.stat(self.path).st_size: self.load_previous() atexit(self._implicit_save) @@ -309,7 +310,11 @@ class Config(dict): """ self.path = path or self.path with open(self.path) as f: - self._prev_dict = json.load(f) + try: + self._prev_dict = json.load(f) + except ValueError as e: + log('Unable to parse previous config data - {}'.format(str(e)), + level=ERROR) for k, v in copy.deepcopy(self._prev_dict).items(): if k not in self: self[k] = v @@ -353,22 +358,40 @@ class Config(dict): self.save() -@cached +_cache_config = None + + def config(scope=None): - """Juju charm configuration""" - config_cmd_line = ['config-get'] - if scope is not None: - config_cmd_line.append(scope) - else: - config_cmd_line.append('--all') - config_cmd_line.append('--format=json') + """ + Get the juju charm configuration (scope==None) or individual key, + (scope=str). The returned value is a Python data structure loaded as + JSON from the Juju config command. + + :param scope: If set, return the value for the specified key. + :type scope: Optional[str] + :returns: Either the whole config as a Config, or a key from it. 
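# Small usage sketch of the two block-device helpers added above; the volume
# group and logical volume names are placeholders. Carve an LV out of an
# existing VG and put an XFS filesystem on it.
from charmhelpers.contrib.storage.linux.lvm import create_logical_volume
from charmhelpers.contrib.storage.linux.utils import mkfs_xfs

create_logical_volume('data-lv', 'charm-vg', size='10G')  # omit size for 100%FREE
mkfs_xfs('/dev/charm-vg/data-lv', force=True)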
+ :rtype: Any + """ + global _cache_config + config_cmd_line = ['config-get', '--all', '--format=json'] try: - config_data = json.loads( - subprocess.check_output(config_cmd_line).decode('UTF-8')) + # JSON Decode Exception for Python3.5+ + exc_json = json.decoder.JSONDecodeError + except AttributeError: + # JSON Decode Exception for Python2.7 through Python3.4 + exc_json = ValueError + try: + if _cache_config is None: + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) + _cache_config = Config(config_data) if scope is not None: - return config_data - return Config(config_data) - except ValueError: + return _cache_config.get(scope) + return _cache_config + except (exc_json, UnicodeDecodeError) as e: + log('Unable to parse output from config-get: config_cmd_line="{}" ' + 'message="{}"' + .format(config_cmd_line, str(e)), level=ERROR) return None @@ -1043,7 +1066,6 @@ def juju_version(): universal_newlines=True).strip() -@cached def has_juju_version(minimum_version): """Return True if the Juju version is at least the provided version""" return LooseVersion(juju_version()) >= LooseVersion(minimum_version) @@ -1103,6 +1125,8 @@ def _run_atexit(): @translate_exc(from_exc=OSError, to_exc=NotImplementedError) def network_get_primary_address(binding): ''' + Deprecated since Juju 2.3; use network_get() + Retrieve the primary network address for a named binding :param binding: string. The name of a relation of extra-binding @@ -1123,7 +1147,6 @@ def network_get_primary_address(binding): return response -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) def network_get(endpoint, relation_id=None): """ Retrieve the network details for a relation endpoint @@ -1131,24 +1154,20 @@ def network_get(endpoint, relation_id=None): :param endpoint: string. The name of a relation endpoint :param relation_id: int. The ID of the relation for the current context. :return: dict. The loaded YAML output of the network-get query. - :raise: NotImplementedError if run on Juju < 2.1 + :raise: NotImplementedError if request not supported by the Juju version. """ + if not has_juju_version('2.2'): + raise NotImplementedError(juju_version()) # earlier versions require --primary-address + if relation_id and not has_juju_version('2.3'): + raise NotImplementedError # 2.3 added the -r option + cmd = ['network-get', endpoint, '--format', 'yaml'] if relation_id: cmd.append('-r') cmd.append(relation_id) - try: - response = subprocess.check_output( - cmd, - stderr=subprocess.STDOUT).decode('UTF-8').strip() - except CalledProcessError as e: - # Early versions of Juju 2.0.x required the --primary-address argument. - # We catch that condition here and raise NotImplementedError since - # the requested semantics are not available - the caller can then - # use the network_get_primary_address() method instead. - if '--primary-address is currently required' in e.output.decode('UTF-8'): - raise NotImplementedError - raise + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() return yaml.safe_load(response) @@ -1204,9 +1223,23 @@ def iter_units_for_relation_name(relation_name): def ingress_address(rid=None, unit=None): """ - Retrieve the ingress-address from a relation when available. Otherwise, - return the private-address. This function is to be used on the consuming - side of the relation. + Retrieve the ingress-address from a relation when available. + Otherwise, return the private-address. 
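# Behavioural sketch of the config() rework above: the whole charm config is now
# fetched once per process and served from the module-level cache afterwards, so
# repeated lookups no longer shell out to config-get. The keys are examples only.
from charmhelpers.core.hookenv import config

debug_enabled = config('debug')      # first call runs `config-get --all --format=json`
all_settings = config()              # returned from the in-process Config cache
region = all_settings.get('region')  # no further subprocess calls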
+ + When used on the consuming side of the relation (unit is a remote + unit), the ingress-address is the IP address that this unit needs + to use to reach the provided service on the remote unit. + + When used on the providing side of the relation (unit == local_unit()), + the ingress-address is the IP address that is advertised to remote + units on this relation. Remote units need to use this address to + reach the local provided service on this unit. + + Note that charms may document some other method to use in + preference to the ingress_address(), such as an address provided + on a different relation attribute or a service discovery mechanism. + This allows charms to redirect inbound connections to their peers + or different applications such as load balancers. Usage: addresses = [ingress_address(rid=u.rid, unit=u.unit) @@ -1220,3 +1253,40 @@ def ingress_address(rid=None, unit=None): settings = relation_get(rid=rid, unit=unit) return (settings.get('ingress-address') or settings.get('private-address')) + + +def egress_subnets(rid=None, unit=None): + """ + Retrieve the egress-subnets from a relation. + + This function is to be used on the providing side of the + relation, and provides the ranges of addresses that client + connections may come from. The result is uninteresting on + the consuming side of a relation (unit == local_unit()). + + Returns a stable list of subnets in CIDR format. + eg. ['192.168.1.0/24', '2001::F00F/128'] + + If egress-subnets is not available, falls back to using the published + ingress-address, or finally private-address. + + :param rid: string relation id + :param unit: string unit name + :side effect: calls relation_get + :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128'] + """ + def _to_range(addr): + if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None: + addr += '/32' + elif ':' in addr and '/' not in addr: # IPv6 + addr += '/128' + return addr + + settings = relation_get(rid=rid, unit=unit) + if 'egress-subnets' in settings: + return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()] + if 'ingress-address' in settings: + return [_to_range(settings['ingress-address'])] + if 'private-address' in settings: + return [_to_range(settings['private-address'])] + return [] # Should never happen diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index fd14d60..322ab2a 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -993,7 +993,7 @@ def updatedb(updatedb_text, new_path): return output -def modulo_distribution(modulo=3, wait=30): +def modulo_distribution(modulo=3, wait=30, non_zero_wait=False): """ Modulo distribution This helper uses the unit number, a modulo value and a constant wait time @@ -1015,7 +1015,14 @@ def modulo_distribution(modulo=3, wait=30): @param modulo: int The modulo number creates the group distribution @param wait: int The constant time wait value + @param non_zero_wait: boolean Override unit % modulo == 0, + return modulo * wait. Used to avoid collisions with + leader nodes which are often given priority. 
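# Sketch of the providing-side use of the new egress_subnets() helper above; the
# relation name and ACL handling are illustrative. Each entry comes back in CIDR
# form, falling back to ingress-address/private-address as a /32 or /128.
from charmhelpers.core.hookenv import egress_subnets, related_units, relation_ids

allowed_cidrs = set()
for rid in relation_ids('shared-db'):
    for unit in related_units(rid):
        allowed_cidrs.update(egress_subnets(rid=rid, unit=unit))
# e.g. {'10.5.0.23/32', '2001:db8::f00f/128'}, ready to feed into a service ACL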
@return: int Calculated time to wait for unit operation """ unit_number = int(local_unit().split('/')[1]) - return (unit_number % modulo) * wait + calculated_wait_time = (unit_number % modulo) * wait + if non_zero_wait and calculated_wait_time == 0: + return modulo * wait + else: + return calculated_wait_time diff --git a/hooks/charmhelpers/core/services/base.py b/hooks/charmhelpers/core/services/base.py index ca9dc99..179ad4f 100644 --- a/hooks/charmhelpers/core/services/base.py +++ b/hooks/charmhelpers/core/services/base.py @@ -307,23 +307,34 @@ class PortManagerCallback(ManagerCallback): """ def __call__(self, manager, service_name, event_name): service = manager.get_service(service_name) - new_ports = service.get('ports', []) + # turn this generator into a list, + # as we'll be going over it multiple times + new_ports = list(service.get('ports', [])) port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) if os.path.exists(port_file): with open(port_file) as fp: old_ports = fp.read().split(',') for old_port in old_ports: - if bool(old_port): - old_port = int(old_port) - if old_port not in new_ports: - hookenv.close_port(old_port) + if bool(old_port) and not self.ports_contains(old_port, new_ports): + hookenv.close_port(old_port) with open(port_file, 'w') as fp: fp.write(','.join(str(port) for port in new_ports)) for port in new_ports: + # A port is either a number or 'ICMP' + protocol = 'TCP' + if str(port).upper() == 'ICMP': + protocol = 'ICMP' if event_name == 'start': - hookenv.open_port(port) + hookenv.open_port(port, protocol) elif event_name == 'stop': - hookenv.close_port(port) + hookenv.close_port(port, protocol) + + def ports_contains(self, port, ports): + if not bool(port): + return False + if str(port).upper() != 'ICMP': + port = int(port) + return port in ports def service_stop(service_name): diff --git a/hooks/charmhelpers/core/sysctl.py b/hooks/charmhelpers/core/sysctl.py index 6e413e3..1f188d8 100644 --- a/hooks/charmhelpers/core/sysctl.py +++ b/hooks/charmhelpers/core/sysctl.py @@ -31,18 +31,22 @@ __author__ = 'Jorge Niedbalski R. ' def create(sysctl_dict, sysctl_file): """Creates a sysctl.conf file from a YAML associative array - :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" + :param sysctl_dict: a dict or YAML-formatted string of sysctl + options eg "{ 'kernel.max_pid': 1337 }" :type sysctl_dict: str :param sysctl_file: path to the sysctl file to be saved :type sysctl_file: str or unicode :returns: None """ - try: - sysctl_dict_parsed = yaml.safe_load(sysctl_dict) - except yaml.YAMLError: - log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), - level=ERROR) - return + if type(sysctl_dict) is not dict: + try: + sysctl_dict_parsed = yaml.safe_load(sysctl_dict) + except yaml.YAMLError: + log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), + level=ERROR) + return + else: + sysctl_dict_parsed = sysctl_dict with open(sysctl_file, "w") as fd: for key, value in sysctl_dict_parsed.items(): diff --git a/hooks/charmhelpers/core/unitdata.py b/hooks/charmhelpers/core/unitdata.py index 6d7b494..ab55432 100644 --- a/hooks/charmhelpers/core/unitdata.py +++ b/hooks/charmhelpers/core/unitdata.py @@ -166,6 +166,10 @@ class Storage(object): To support dicts, lists, integer, floats, and booleans values are automatically json encoded/decoded. + + Note: to facilitate unit testing, ':memory:' can be passed as the + path parameter which causes sqlite3 to only build the db in memory. 
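# Tiny sketch of the sysctl.create() change above: a plain dict can now be passed
# directly instead of a YAML string. The file path and keys are examples.
from charmhelpers.core.sysctl import create as create_sysctl

create_sysctl({'net.ipv4.tcp_keepalive_time': 300,
               'kernel.pid_max': 4194303},
              '/etc/sysctl.d/50-mycharm.conf')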
+ This should only be used for testing purposes. """ def __init__(self, path=None): self.db_path = path @@ -175,8 +179,9 @@ class Storage(object): else: self.db_path = os.path.join( os.environ.get('CHARM_DIR', ''), '.unit-state.db') - with open(self.db_path, 'a') as f: - os.fchmod(f.fileno(), 0o600) + if self.db_path != ':memory:': + with open(self.db_path, 'a') as f: + os.fchmod(f.fileno(), 0o600) self.conn = sqlite3.connect('%s' % self.db_path) self.cursor = self.conn.cursor() self.revision = None diff --git a/hooks/charmhelpers/fetch/ubuntu.py b/hooks/charmhelpers/fetch/ubuntu.py index 910e96a..653d58f 100644 --- a/hooks/charmhelpers/fetch/ubuntu.py +++ b/hooks/charmhelpers/fetch/ubuntu.py @@ -44,6 +44,7 @@ ARCH_TO_PROPOSED_POCKET = { 'x86_64': PROPOSED_POCKET, 'ppc64le': PROPOSED_PORTS_POCKET, 'aarch64': PROPOSED_PORTS_POCKET, + 's390x': PROPOSED_PORTS_POCKET, } CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py index 5fef59c..f30b25c 100644 --- a/tests/basic_deployment.py +++ b/tests/basic_deployment.py @@ -32,6 +32,9 @@ u = OpenStackAmuletUtils(DEBUG) class CeiloAgentBasicDeployment(OpenStackAmuletDeployment): """Amulet tests on a basic ceilometer-agent deployment.""" + no_origin = ['memcached', 'percona-cluster', 'rabbitmq-server', + 'ceph-mon', 'ceph-osd'] + def __init__(self, series, openstack=None, source=None, stable=False): """Deploy the entire test environment.""" super(CeiloAgentBasicDeployment, self).__init__(series, openstack, @@ -42,7 +45,7 @@ class CeiloAgentBasicDeployment(OpenStackAmuletDeployment): self._deploy() u.log.info('Waiting on extended status checks...') - exclude_services = ['mongodb'] + exclude_services = ['mongodb', 'memcached'] self._auto_wait_for_status(exclude_services=exclude_services) self.d.sentry.wait() @@ -58,24 +61,32 @@ class CeiloAgentBasicDeployment(OpenStackAmuletDeployment): # Note: ceilometer-agent becomes a subordinate of nova-compute this_service = {'name': 'ceilometer-agent'} other_services = [ - {'name': 'percona-cluster'}, + {'name': 'percona-cluster', 'constraints': {'mem': '3072M'}}, {'name': 'rabbitmq-server'}, {'name': 'keystone'}, - {'name': 'mongodb', - 'location': 'cs:~1chb1n/{}/mongodb'.format(self.series)}, {'name': 'glance'}, # to satisfy workload status {'name': 'ceilometer'}, {'name': 'nova-compute'} ] - super(CeiloAgentBasicDeployment, self)._add_services(this_service, - other_services) + if self._get_openstack_release() >= self.xenial_pike: + other_services.extend([ + {'name': 'gnocchi'}, + {'name': 'memcached', 'location': 'cs:memcached'}, + {'name': 'ceph-mon', 'units': 3}, + {'name': 'ceph-osd', 'units': 3}]) + else: + other_services.append({ + 'name': 'mongodb', + 'location': 'cs:~thedac/{}/mongodb'.format(self.series)}) + super(CeiloAgentBasicDeployment, self)._add_services( + this_service, + other_services, + no_origin=self.no_origin) def _add_relations(self): """Add all of the relations for the services.""" relations = { - 'ceilometer:shared-db': 'mongodb:database', 'ceilometer:amqp': 'rabbitmq-server:amqp', - 'ceilometer:identity-service': 'keystone:identity-service', 'ceilometer:identity-notifications': 'keystone:' 'identity-notifications', 'keystone:shared-db': 'percona-cluster:shared-db', @@ -89,6 +100,28 @@ class CeiloAgentBasicDeployment(OpenStackAmuletDeployment): 'glance:amqp': 'rabbitmq-server:amqp', 'nova-compute:image-service': 'glance:image-service' } + if 
self._get_openstack_release() >= self.xenial_pike: + additional_relations = { + 'ceilometer:metric-service': 'gnocchi:metric-service', + 'ceph-mon:osd': 'ceph-osd:mon', + 'gnocchi:identity-service': 'keystone:identity-service', + 'gnocchi:shared-db': 'percona-cluster:shared-db', + 'gnocchi:storage-ceph': 'ceph-mon:client', + 'gnocchi:coordinator-memcached': 'memcached:cache', + } + + if self._get_openstack_release() >= self.xenial_queens: + identity_relations = {'ceilometer:identity-credentials': + 'keystone:identity-credentials'} + else: + identity_relations = {'ceilometer:identity-service': + 'keystone:identity-service'} + additional_relations.update(identity_relations) + else: + additional_relations = { + 'ceilometer:shared-db': 'mongodb:database', + 'ceilometer:identity-service': 'keystone:identity-service'} + relations.update(additional_relations) super(CeiloAgentBasicDeployment, self)._add_relations(relations) def _configure_services(self): @@ -104,6 +137,10 @@ class CeiloAgentBasicDeployment(OpenStackAmuletDeployment): 'keystone': keystone_config, 'percona-cluster': pxc_config, } + if self._get_openstack_release() >= self.xenial_pike: + configs['ceph-osd'] = {'osd-devices': '/dev/vdb', + 'osd-reformat': True, + 'ephemeral-unmount': '/mnt'} super(CeiloAgentBasicDeployment, self)._configure_services(configs) def _get_token(self): @@ -117,42 +154,50 @@ class CeiloAgentBasicDeployment(OpenStackAmuletDeployment): self.pxc_sentry = self.d.sentry['percona-cluster'][0] self.keystone_sentry = self.d.sentry['keystone'][0] self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0] - self.mongodb_sentry = self.d.sentry['mongodb'][0] self.nova_sentry = self.d.sentry['nova-compute'][0] + if self._get_openstack_release() >= self.xenial_pike: + self.gnocchi_sentry = self.d.sentry['gnocchi'][0] + else: + self.mongodb_sentry = self.d.sentry['mongodb'][0] u.log.debug('openstack release val: {}'.format( self._get_openstack_release())) u.log.debug('openstack release str: {}'.format( self._get_openstack_release_string())) # Authenticate admin with keystone endpoint - self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, - user='admin', - password='openstack', - tenant='admin') + self.keystone_session, self.keystone = u.get_default_keystone_session( + self.keystone_sentry, + openstack_release=self._get_openstack_release()) - # Authenticate admin with ceilometer endpoint - ep = self.keystone.service_catalog.url_for(service_type='metering', - interface='publicURL') - os_token = self.keystone.auth_token self.log.debug('Instantiating ceilometer client...') - self.ceil = ceilo_client.Client(endpoint=ep, token=os_token) + if self._get_openstack_release() >= self.xenial_pike: + self.ceil = ceilo_client.Client(session=self.keystone_session,) + else: + # Authenticate admin with ceilometer endpoint + ep = self.keystone.service_catalog.url_for(service_type='metering', + interface='publicURL') + os_token = self.keystone.auth_token + self.ceil = ceilo_client.Client(endpoint=ep, token=os_token) def test_100_services(self): """Verify the expected services are running on the corresponding service units.""" u.log.debug('Checking system services on units...') - + release = self._get_openstack_release() ceilometer_svcs = [ 'ceilometer-agent-central', - 'ceilometer-collector', - 'ceilometer-api', 'ceilometer-agent-notification', ] + if release < self.xenial_pike: + ceilometer_svcs.append('ceilometer-collector') - if self._get_openstack_release() >= self.xenial_ocata: + if (release >= self.xenial_ocata and 
release < self.xenial_pike): ceilometer_svcs.append('apache2') - ceilometer_svcs.remove('ceilometer-api') - elif self._get_openstack_release() < self.trusty_mitaka: + + if release < self.xenial_ocata: + ceilometer_svcs.append('ceilometer-api') + + if release < self.trusty_mitaka: ceilometer_svcs.append('ceilometer-alarm-evaluator') ceilometer_svcs.append('ceilometer-alarm-notifier') @@ -168,6 +213,10 @@ class CeiloAgentBasicDeployment(OpenStackAmuletDeployment): def test_110_service_catalog(self): """Verify that the service catalog endpoint data is valid.""" + if self._get_openstack_release() >= self.xenial_pike: + u.log.debug('Skipping catalogue checks as ceilometer no longer ' + 'registers endpoints') + return u.log.debug('Checking keystone service catalog data...') endpoint_check = { 'adminURL': u.valid_url, @@ -182,7 +231,10 @@ class CeiloAgentBasicDeployment(OpenStackAmuletDeployment): } actual = self.keystone.service_catalog.get_endpoints() - ret = u.validate_svc_catalog_endpoint_data(expected, actual) + ret = u.validate_svc_catalog_endpoint_data( + expected, + actual, + openstack_release=self._get_openstack_release()) if ret: amulet.raise_status(amulet.FAIL, msg=ret) @@ -190,6 +242,10 @@ class CeiloAgentBasicDeployment(OpenStackAmuletDeployment): def test_112_keystone_api_endpoint(self): """Verify the ceilometer api endpoint data.""" + if self._get_openstack_release() >= self.xenial_pike: + u.log.debug('Skipping catalogue checks as ceilometer no longer ' + 'registers endpoints') + return u.log.debug('Checking keystone api endpoint data...') endpoints = self.keystone.endpoints.list() u.log.debug(endpoints) @@ -202,8 +258,13 @@ class CeiloAgentBasicDeployment(OpenStackAmuletDeployment): 'publicurl': u.valid_url, 'service_id': u.not_null} - ret = u.validate_endpoint_data(endpoints, admin_port, internal_port, - public_port, expected) + ret = u.validate_endpoint_data( + endpoints, + admin_port, + internal_port, + public_port, + expected, + openstack_release=self._get_openstack_release()) if ret: message = 'Keystone endpoint: {}'.format(ret) amulet.raise_status(amulet.FAIL, msg=message) @@ -212,6 +273,10 @@ class CeiloAgentBasicDeployment(OpenStackAmuletDeployment): def test_114_ceilometer_api_endpoint(self): """Verify the ceilometer api endpoint data.""" + if self._get_openstack_release() >= self.xenial_pike: + u.log.debug('Skipping catalogue checks as ceilometer no longer ' + 'registers endpoints') + return u.log.debug('Checking ceilometer api endpoint data...') endpoints = self.keystone.endpoints.list() u.log.debug(endpoints) @@ -239,6 +304,10 @@ class CeiloAgentBasicDeployment(OpenStackAmuletDeployment): def test_200_ceilometer_identity_relation(self): """Verify the ceilometer to keystone identity-service relation data""" + if self._get_openstack_release() >= self.xenial_pike: + u.log.debug('Skipping identity-service checks as ceilometer no ' + 'longer has this rerlation') + return u.log.debug('Checking ceilometer to keystone identity-service ' 'relation data...') unit = self.ceil_sentry @@ -266,6 +335,10 @@ class CeiloAgentBasicDeployment(OpenStackAmuletDeployment): def test_201_keystone_ceilometer_identity_relation(self): """Verify the keystone to ceilometer identity-service relation data""" + if self._get_openstack_release() >= self.xenial_pike: + u.log.debug('Skipping identity-service checks as ceilometer no ' + 'longer has this rerlation') + return u.log.debug('Checking keystone:ceilometer identity relation data...') unit = self.keystone_sentry relation = ['identity-service', 
'ceilometer:identity-service'] @@ -302,7 +375,7 @@ class CeiloAgentBasicDeployment(OpenStackAmuletDeployment): # May be glance- or keystone- or another endpoint-changed value, so # check that at least one ???-endpoint-changed value exists. unit = self.keystone_sentry - relation_data = unit.relation('identity-service', + relation_data = unit.relation('identity-notifications', 'ceilometer:identity-notifications') expected = '-endpoint-changed' @@ -358,6 +431,10 @@ class CeiloAgentBasicDeployment(OpenStackAmuletDeployment): def test_205_ceilometer_to_mongodb_relation(self): """Verify the ceilometer to mongodb relation data""" + if self._get_openstack_release() >= self.xenial_pike: + u.log.debug('Skipping monodb checks for OS release less than pike') + return + u.log.debug('Checking ceilometer:mongodb relation data...') unit = self.ceil_sentry relation = ['shared-db', 'mongodb:database'] @@ -375,6 +452,9 @@ class CeiloAgentBasicDeployment(OpenStackAmuletDeployment): def test_206_mongodb_to_ceilometer_relation(self): """Verify the mongodb to ceilometer relation data""" + if self._get_openstack_release() >= self.xenial_pike: + u.log.debug('Skipping monodb checks for OS release less than pike') + return u.log.debug('Checking mongodb:ceilometer relation data...') unit = self.mongodb_sentry relation = ['database', 'ceilometer:shared-db'] @@ -402,18 +482,23 @@ class CeiloAgentBasicDeployment(OpenStackAmuletDeployment): 'rabbitmq_user': 'ceilometer', 'verbose': 'False', 'rabbitmq_host': u.valid_ip, - 'service_ports': "{'ceilometer_api': [8777, 8767]}", 'use_syslog': 'False', 'metering_secret': u.not_null, 'rabbitmq_virtual_host': 'openstack', - 'db_port': '27017', 'private-address': u.valid_ip, - 'db_name': 'ceilometer', - 'db_host': u.valid_ip, 'debug': 'False', 'rabbitmq_password': u.not_null, 'port': '8767' } + if self._get_openstack_release() >= self.xenial_pike: + expected['gnocchi_url'] = u.valid_url + if self._get_openstack_release() >= self.xenial_queens: + expected['port'] = '8777' + else: + expected['db_port'] = '27017' + expected['db_name'] = 'ceilometer' + expected['db_host'] = u.valid_ip + expected['service_ports'] = "{'ceilometer_api': [8777, 8767]}" ret = u.validate_relation_data(unit, relation, expected) if ret: @@ -475,21 +560,31 @@ class CeiloAgentBasicDeployment(OpenStackAmuletDeployment): """Verify the data in the ceilometer config file.""" u.log.debug('Checking ceilometer config file data...') unit = self.ceil_agent_sentry - ks_rel = self.keystone_sentry.relation('identity-service', - 'ceilometer:identity-service') conf = '/etc/ceilometer/ceilometer.conf' expected = { 'DEFAULT': { 'verbose': 'False', 'debug': 'False', - } + }, } + if self._get_openstack_release() >= self.xenial_pike: + if self._get_openstack_release() >= self.xenial_queens: + ks_rel = self.keystone_sentry.relation( + 'identity-credentials', + 'ceilometer:identity-credentials') + ks_key_prefix = 'credentials' + else: + ks_rel = self.keystone_sentry.relation( + 'identity-service', + 'ceilometer:identity-service') + ks_key_prefix = 'service' + else: + ks_rel = self.keystone_sentry.relation( + 'identity-service', + 'ceilometer:identity-service') + ks_key_prefix = 'service' if self._get_openstack_release() < self.trusty_mitaka: - expected['DEFAULT'].update({'rabbit_userid': 'ceilometer', - 'rabbit_virtual_host': 'openstack', - 'rabbit_password': u.not_null, - 'rabbit_host': u.valid_ip}) auth_uri = '%s://%s:%s/v2.0' % (ks_rel['service_protocol'], ks_rel['service_host'], ks_rel['service_port']) @@ -499,14 +594,10 @@ 
class CeiloAgentBasicDeployment(OpenStackAmuletDeployment): 'os_password': ks_rel['service_password']} else: - expected['oslo_messaging_rabbit'] = {'rabbit_userid': 'ceilometer', - 'rabbit_virtual_host': - 'openstack', - 'rabbit_password': u.not_null, - 'rabbit_host': u.valid_ip} - auth_uri = '%s://%s:%s' % (ks_rel['service_protocol'], - ks_rel['service_host'], - ks_rel['service_port']) + auth_uri = '%s://%s:%s' % ( + ks_rel['{}_protocol'.format(ks_key_prefix)], + ks_rel['{}_host'.format(ks_key_prefix)], + ks_rel['{}_port'.format(ks_key_prefix)]) # NOTE(dosaboy): os_ prefix is deprecated and no longer used as # of Mitaka. project_domain_name = 'default' @@ -514,25 +605,21 @@ class CeiloAgentBasicDeployment(OpenStackAmuletDeployment): if 'api_version' in ks_rel and float(ks_rel['api_version']) > 2: project_domain_name = 'service_domain' user_domain_name = 'service_domain' - expected['service_credentials'] = {'auth_url': auth_uri, - 'project_name': 'services', - 'project_domain_name': - project_domain_name, - 'user_domain_name': - user_domain_name, - 'username': 'ceilometer', - 'password': - ks_rel['service_password']} - expected['keystone_authtoken'] = {'auth_uri': auth_uri, - 'auth_type': 'password', - 'project_domain_name': - project_domain_name, - 'user_domain_name': - user_domain_name, - 'project_name': 'services', - 'username': 'ceilometer', - 'password': - ks_rel['service_password']} + expected['service_credentials'] = { + 'auth_url': auth_uri, + 'project_name': 'services', + 'project_domain_name': project_domain_name, + 'user_domain_name': user_domain_name, + 'username': 'ceilometer', + 'password': ks_rel['{}_password'.format(ks_key_prefix)]} + expected['keystone_authtoken'] = { + 'auth_uri': auth_uri, + 'auth_type': 'password', + 'project_domain_name': project_domain_name, + 'user_domain_name': user_domain_name, + 'project_name': 'services', + 'username': 'ceilometer', + 'password': ks_rel['{}_password'.format(ks_key_prefix)]} for section, pairs in expected.iteritems(): ret = u.validate_config_data(unit, conf, section, pairs) @@ -666,6 +753,10 @@ class CeiloAgentBasicDeployment(OpenStackAmuletDeployment): def test_400_api_connection(self): """Simple api calls to check service is up and responding""" + if self._get_openstack_release() >= self.xenial_pike: + u.log.debug('Skipping API checks as ceilometer api has been ' + 'removed') + return u.log.debug('Checking api functionality...') assert(self.ceil.samples.list() == []) assert(self.ceil.meters.list() == []) @@ -687,21 +778,27 @@ class CeiloAgentBasicDeployment(OpenStackAmuletDeployment): # Services which are expected to restart upon config change, # and corresponding config files affected by the change conf_file = '/etc/ceilometer/ceilometer.conf' - if self._get_openstack_release() >= self.xenial_ocata: + if self._get_openstack_release() >= self.xenial_pike: services = { - 'ceilometer-collector: CollectorService worker(0)': conf_file, - 'apache2': conf_file, 'ceilometer-polling: AgentManager worker(0)': conf_file, 'ceilometer-agent-notification: NotificationService worker(0)': conf_file, } + elif self._get_openstack_release() >= self.xenial_ocata: + services = { + 'ceilometer-collector: CollectorService worker(0)': conf_file, + 'ceilometer-polling: AgentManager worker(0)': conf_file, + 'ceilometer-agent-notification: NotificationService worker(0)': + conf_file, + 'apache2': conf_file, + } elif self._get_openstack_release() >= self.xenial_newton: services = { 'ceilometer-collector - CollectorService(0)': conf_file, - 
'ceilometer-api': conf_file, 'ceilometer-polling - AgentManager(0)': conf_file, 'ceilometer-agent-notification - NotificationService(0)': conf_file, + 'ceilometer-api': conf_file, } else: services = { diff --git a/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 5afbbd8..66beeda 100644 --- a/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -21,6 +21,9 @@ from collections import OrderedDict from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) +from charmhelpers.contrib.openstack.amulet.utils import ( + OPENSTACK_RELEASES_PAIRS +) DEBUG = logging.DEBUG ERROR = logging.ERROR @@ -271,11 +274,8 @@ class OpenStackAmuletDeployment(AmuletDeployment): release. """ # Must be ordered by OpenStack release (not by Ubuntu release): - (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, - self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, - self.yakkety_newton, self.xenial_ocata, self.zesty_ocata, - self.xenial_pike, self.artful_pike, self.xenial_queens, - self.bionic_queens,) = range(13) + for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS): + setattr(self, os_pair, i) releases = { ('trusty', None): self.trusty_icehouse, diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py index d93cff3..84e87f5 100644 --- a/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -50,6 +50,13 @@ ERROR = logging.ERROR NOVA_CLIENT_VERSION = "2" +OPENSTACK_RELEASES_PAIRS = [ + 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty', + 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', + 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', + 'xenial_pike', 'artful_pike', 'xenial_queens', + 'bionic_queens'] + class OpenStackAmuletUtils(AmuletUtils): """OpenStack amulet utilities. @@ -63,7 +70,34 @@ class OpenStackAmuletUtils(AmuletUtils): super(OpenStackAmuletUtils, self).__init__(log_level) def validate_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): + public_port, expected, openstack_release=None): + """Validate endpoint data. Pick the correct validator based on + OpenStack release. Expected data should be in the v2 format: + { + 'id': id, + 'region': region, + 'adminurl': adminurl, + 'internalurl': internalurl, + 'publicurl': publicurl, + 'service_id': service_id} + + """ + validation_function = self.validate_v2_endpoint_data + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + validation_function = self.validate_v3_endpoint_data + expected = { + 'id': expected['id'], + 'region': expected['region'], + 'region_id': 'RegionOne', + 'url': self.valid_url, + 'interface': self.not_null, + 'service_id': expected['service_id']} + return validation_function(endpoints, admin_port, internal_port, + public_port, expected) + + def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): """Validate endpoint data. Validate actual endpoint data vs expected endpoint data. The ports @@ -141,7 +175,86 @@ class OpenStackAmuletUtils(AmuletUtils): if len(found) != expected_num_eps: return 'Unexpected number of endpoints found' - def validate_svc_catalog_endpoint_data(self, expected, actual): + def convert_svc_catalog_endpoint_data_to_v3(self, ep_data): + """Convert v2 endpoint data into v3. 
diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py
index d93cff3..84e87f5 100644
--- a/tests/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/tests/charmhelpers/contrib/openstack/amulet/utils.py
@@ -50,6 +50,13 @@ ERROR = logging.ERROR

 NOVA_CLIENT_VERSION = "2"

+OPENSTACK_RELEASES_PAIRS = [
+    'trusty_icehouse', 'trusty_kilo', 'trusty_liberty',
+    'trusty_mitaka', 'xenial_mitaka', 'xenial_newton',
+    'yakkety_newton', 'xenial_ocata', 'zesty_ocata',
+    'xenial_pike', 'artful_pike', 'xenial_queens',
+    'bionic_queens']
+

 class OpenStackAmuletUtils(AmuletUtils):
     """OpenStack amulet utilities.
@@ -63,7 +70,34 @@ class OpenStackAmuletUtils(AmuletUtils):
         super(OpenStackAmuletUtils, self).__init__(log_level)

     def validate_endpoint_data(self, endpoints, admin_port, internal_port,
-                               public_port, expected):
+                               public_port, expected, openstack_release=None):
+        """Validate endpoint data. Pick the correct validator based on
+           OpenStack release. Expected data should be in the v2 format:
+           {
+               'id': id,
+               'region': region,
+               'adminurl': adminurl,
+               'internalurl': internalurl,
+               'publicurl': publicurl,
+               'service_id': service_id}
+
+        """
+        validation_function = self.validate_v2_endpoint_data
+        xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
+        if openstack_release and openstack_release >= xenial_queens:
+            validation_function = self.validate_v3_endpoint_data
+            expected = {
+                'id': expected['id'],
+                'region': expected['region'],
+                'region_id': 'RegionOne',
+                'url': self.valid_url,
+                'interface': self.not_null,
+                'service_id': expected['service_id']}
+        return validation_function(endpoints, admin_port, internal_port,
+                                   public_port, expected)
+
+    def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port,
+                                  public_port, expected):
         """Validate endpoint data.

         Validate actual endpoint data vs expected endpoint data. The ports
@@ -141,7 +175,86 @@ class OpenStackAmuletUtils(AmuletUtils):
         if len(found) != expected_num_eps:
             return 'Unexpected number of endpoints found'

-    def validate_svc_catalog_endpoint_data(self, expected, actual):
+    def convert_svc_catalog_endpoint_data_to_v3(self, ep_data):
+        """Convert v2 endpoint data into v3.
+
+           {
+               'service_name1': [
+                   {
+                       'adminURL': adminURL,
+                       'id': id,
+                       'region': region.
+                       'publicURL': publicURL,
+                       'internalURL': internalURL
+                   }],
+               'service_name2': [
+                   {
+                       'adminURL': adminURL,
+                       'id': id,
+                       'region': region.
+                       'publicURL': publicURL,
+                       'internalURL': internalURL
+                   }],
+           }
+        """
+        self.log.warn("Endpoint ID and Region ID validation is limited to not "
+                      "null checks after v2 to v3 conversion")
+        for svc in ep_data.keys():
+            assert len(ep_data[svc]) == 1, "Unknown data format"
+            svc_ep_data = ep_data[svc][0]
+            ep_data[svc] = [
+                {
+                    'url': svc_ep_data['adminURL'],
+                    'interface': 'admin',
+                    'region': svc_ep_data['region'],
+                    'region_id': self.not_null,
+                    'id': self.not_null},
+                {
+                    'url': svc_ep_data['publicURL'],
+                    'interface': 'public',
+                    'region': svc_ep_data['region'],
+                    'region_id': self.not_null,
+                    'id': self.not_null},
+                {
+                    'url': svc_ep_data['internalURL'],
+                    'interface': 'internal',
+                    'region': svc_ep_data['region'],
+                    'region_id': self.not_null,
+                    'id': self.not_null}]
+        return ep_data
+
+    def validate_svc_catalog_endpoint_data(self, expected, actual,
+                                           openstack_release=None):
+        """Validate service catalog endpoint data. Pick the correct validator
+           for the OpenStack version. Expected data should be in the v2 format:
+           {
+               'service_name1': [
+                   {
+                       'adminURL': adminURL,
+                       'id': id,
+                       'region': region.
+                       'publicURL': publicURL,
+                       'internalURL': internalURL
+                   }],
+               'service_name2': [
+                   {
+                       'adminURL': adminURL,
+                       'id': id,
+                       'region': region.
+                       'publicURL': publicURL,
+                       'internalURL': internalURL
+                   }],
+           }
+
+        """
+        validation_function = self.validate_v2_svc_catalog_endpoint_data
+        xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
+        if openstack_release and openstack_release >= xenial_queens:
+            validation_function = self.validate_v3_svc_catalog_endpoint_data
+            expected = self.convert_svc_catalog_endpoint_data_to_v3(expected)
+        return validation_function(expected, actual)
+
+    def validate_v2_svc_catalog_endpoint_data(self, expected, actual):
         """Validate service catalog endpoint data.

         Validate a list of actual service catalog endpoints vs a list of
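Reviewer note: a sketch of how a charm's amulet test might drive the release-aware validator above; `u` (an OpenStackAmuletUtils instance), `self.keystone` and `self._get_openstack_release()` are assumed to already exist in the test class, and the expected catalog stays in the v2 shape regardless of release:

    # Sketch only: expected data is always v2-shaped; the helper converts it
    # to v3 when the deployed release is xenial_queens or later.
    expected_catalog = {
        'volumev2': [{'adminURL': u.valid_url,
                      'region': 'RegionOne',
                      'id': u.not_null,
                      'publicURL': u.valid_url,
                      'internalURL': u.valid_url}]}
    actual_catalog = self.keystone.service_catalog.get_endpoints()
    ret = u.validate_svc_catalog_endpoint_data(
        expected_catalog, actual_catalog,
        openstack_release=self._get_openstack_release())
    if ret:
        amulet.raise_status(amulet.FAIL, msg=ret)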
@@ -328,7 +441,7 @@ class OpenStackAmuletUtils(AmuletUtils):
             if rel.get('api_version') != str(api_version):
                 raise Exception("api_version not propagated through relation"
                                 " data yet ('{}' != '{}')."
-                                "".format(rel['api_version'], api_version))
+                                "".format(rel.get('api_version'), api_version))

     def keystone_configure_api_version(self, sentry_relation_pairs, deployment,
                                        api_version):
@@ -350,16 +463,13 @@
         deployment._auto_wait_for_status()
         self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)

-    def authenticate_cinder_admin(self, keystone_sentry, username,
-                                  password, tenant, api_version=2):
+    def authenticate_cinder_admin(self, keystone, api_version=2):
         """Authenticates admin user with cinder."""
-        # NOTE(beisner): cinder python client doesn't accept tokens.
-        keystone_ip = keystone_sentry.info['public-address']
-        ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8'))
+        self.log.debug('Authenticating cinder admin...')
         _clients = {
             1: cinder_client.Client,
             2: cinder_clientv2.Client}
-        return _clients[api_version](username, password, tenant, ept)
+        return _clients[api_version](session=keystone.session)

     def authenticate_keystone(self, keystone_ip, username, password,
                               api_version=False, admin_port=False,
@@ -367,13 +477,36 @@
                               project_domain_name=None, project_name=None):
         """Authenticate with Keystone"""
         self.log.debug('Authenticating with keystone...')
-        port = 5000
-        if admin_port:
-            port = 35357
-        base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
-                                        port)
-        if not api_version or api_version == 2:
-            ep = base_ep + "/v2.0"
+        if not api_version:
+            api_version = 2
+        sess, auth = self.get_keystone_session(
+            keystone_ip=keystone_ip,
+            username=username,
+            password=password,
+            api_version=api_version,
+            admin_port=admin_port,
+            user_domain_name=user_domain_name,
+            domain_name=domain_name,
+            project_domain_name=project_domain_name,
+            project_name=project_name
+        )
+        if api_version == 2:
+            client = keystone_client.Client(session=sess)
+        else:
+            client = keystone_client_v3.Client(session=sess)
+        # This populates the client.service_catalog
+        client.auth_ref = auth.get_access(sess)
+        return client
+
+    def get_keystone_session(self, keystone_ip, username, password,
+                             api_version=False, admin_port=False,
+                             user_domain_name=None, domain_name=None,
+                             project_domain_name=None, project_name=None):
+        """Return a keystone session object"""
+        ep = self.get_keystone_endpoint(keystone_ip,
+                                        api_version=api_version,
+                                        admin_port=admin_port)
+        if api_version == 2:
             auth = v2.Password(
                 username=username,
                 password=password,
@@ -381,12 +514,7 @@
                 auth_url=ep
             )
             sess = keystone_session.Session(auth=auth)
-            client = keystone_client.Client(session=sess)
-            # This populates the client.service_catalog
-            client.auth_ref = auth.get_access(sess)
-            return client
         else:
-            ep = base_ep + "/v3"
             auth = v3.Password(
                 user_domain_name=user_domain_name,
                 username=username,
@@ -397,10 +525,57 @@
                 auth_url=ep
             )
             sess = keystone_session.Session(auth=auth)
-            client = keystone_client_v3.Client(session=sess)
-            # This populates the client.service_catalog
-            client.auth_ref = auth.get_access(sess)
-            return client
+        return (sess, auth)
+
+    def get_keystone_endpoint(self, keystone_ip, api_version=None,
+                              admin_port=False):
+        """Return keystone endpoint"""
+        port = 5000
+        if admin_port:
+            port = 35357
+        base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
+                                        port)
+        if api_version == 2:
+            ep = base_ep + "/v2.0"
+        else:
+            ep = base_ep + "/v3"
+        return ep
+
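Reviewer note: a sketch of how the split-out session helpers above are meant to compose; the address and credentials are placeholders, `u` is an OpenStackAmuletUtils instance, and the client classes are the ones already imported by this module:

    # Sketch only: build one keystone session, then hang every client off it.
    sess, auth = u.get_keystone_session(
        keystone_ip='10.5.0.10',          # placeholder address
        username='admin',
        password='openstack',
        api_version=3,
        project_name='admin',
        user_domain_name='admin_domain',
        project_domain_name='admin_domain')
    keystone = keystone_client_v3.Client(session=sess)
    keystone.auth_ref = auth.get_access(sess)  # populates the service catalog
    cinder = u.authenticate_cinder_admin(keystone, api_version=2)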
+    def get_default_keystone_session(self, keystone_sentry,
+                                     openstack_release=None):
+        """Return a keystone session object and client object assuming standard
+           default settings
+
+        Example call in amulet tests:
+            self.keystone_session, self.keystone = u.get_default_keystone_session(
+                self.keystone_sentry,
+                openstack_release=self._get_openstack_release())
+
+        The session can then be used to auth other clients:
+            neutronclient.Client(session=session)
+            aodh_client.Client(session=session)
+            eyc
+        """
+        self.log.debug('Authenticating keystone admin...')
+        api_version = 2
+        client_class = keystone_client.Client
+        # 11 => xenial_queens
+        if openstack_release and openstack_release >= 11:
+            api_version = 3
+            client_class = keystone_client_v3.Client
+        keystone_ip = keystone_sentry.info['public-address']
+        session, auth = self.get_keystone_session(
+            keystone_ip,
+            api_version=api_version,
+            username='admin',
+            password='openstack',
+            project_name='admin',
+            user_domain_name='admin_domain',
+            project_domain_name='admin_domain')
+        client = client_class(session=session)
+        # This populates the client.service_catalog
+        client.auth_ref = auth.get_access(session)
+        return session, client

     def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                     tenant=None, api_version=None,
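Reviewer note: in practice the default-session helper above is called once per test run and the session is then shared; a sketch, with the nova client line illustrative only and the usual amulet test attributes assumed to exist:

    # Sketch only: reuse one admin session across OpenStack clients.
    self.keystone_session, self.keystone = u.get_default_keystone_session(
        self.keystone_sentry,
        openstack_release=self._get_openstack_release())
    self.cinder = u.authenticate_cinder_admin(self.keystone, api_version=2)
    self.nova = nova_client.Client(2, session=self.keystone_session)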
diff --git a/tests/charmhelpers/core/hookenv.py b/tests/charmhelpers/core/hookenv.py
index 7ed1cc4..627d8f7 100644
--- a/tests/charmhelpers/core/hookenv.py
+++ b/tests/charmhelpers/core/hookenv.py
@@ -27,6 +27,7 @@ import glob
 import os
 import json
 import yaml
+import re
 import subprocess
 import sys
 import errno
@@ -67,7 +68,7 @@ def cached(func):
     @wraps(func)
     def wrapper(*args, **kwargs):
         global cache
-        key = str((func, args, kwargs))
+        key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
         try:
             return cache[key]
         except KeyError:
@@ -289,7 +290,7 @@ class Config(dict):
         self.implicit_save = True
         self._prev_dict = None
         self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
-        if os.path.exists(self.path):
+        if os.path.exists(self.path) and os.stat(self.path).st_size:
             self.load_previous()
         atexit(self._implicit_save)

@@ -309,7 +310,11 @@ class Config(dict):
         """
         self.path = path or self.path
         with open(self.path) as f:
-            self._prev_dict = json.load(f)
+            try:
+                self._prev_dict = json.load(f)
+            except ValueError as e:
+                log('Unable to parse previous config data - {}'.format(str(e)),
+                    level=ERROR)
         for k, v in copy.deepcopy(self._prev_dict).items():
             if k not in self:
                 self[k] = v
@@ -353,22 +358,40 @@ class Config(dict):
         self.save()


-@cached
+_cache_config = None
+
+
 def config(scope=None):
-    """Juju charm configuration"""
-    config_cmd_line = ['config-get']
-    if scope is not None:
-        config_cmd_line.append(scope)
-    else:
-        config_cmd_line.append('--all')
-    config_cmd_line.append('--format=json')
+    """
+    Get the juju charm configuration (scope==None) or individual key,
+    (scope=str). The returned value is a Python data structure loaded as
+    JSON from the Juju config command.
+
+    :param scope: If set, return the value for the specified key.
+    :type scope: Optional[str]
+    :returns: Either the whole config as a Config, or a key from it.
+    :rtype: Any
+    """
+    global _cache_config
+    config_cmd_line = ['config-get', '--all', '--format=json']
     try:
-        config_data = json.loads(
-            subprocess.check_output(config_cmd_line).decode('UTF-8'))
+        # JSON Decode Exception for Python3.5+
+        exc_json = json.decoder.JSONDecodeError
+    except AttributeError:
+        # JSON Decode Exception for Python2.7 through Python3.4
+        exc_json = ValueError
+    try:
+        if _cache_config is None:
+            config_data = json.loads(
+                subprocess.check_output(config_cmd_line).decode('UTF-8'))
+            _cache_config = Config(config_data)
         if scope is not None:
-            return config_data
-        return Config(config_data)
-    except ValueError:
+            return _cache_config.get(scope)
+        return _cache_config
+    except (exc_json, UnicodeDecodeError) as e:
+        log('Unable to parse output from config-get: config_cmd_line="{}" '
+            'message="{}"'
+            .format(config_cmd_line, str(e)), level=ERROR)
         return None
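Reviewer note: with the module-level cache above, config() now shells out to config-get at most once per hook invocation; a sketch of the resulting behaviour (the option names are placeholders):

    from charmhelpers.core.hookenv import config

    opts = config()                  # runs 'config-get --all --format=json', caches a Config
    debug = config('debug')          # served from the cache, same as opts.get('debug')
    missing = config('no-such-key')  # returns None instead of raising KeyError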
@@ -1043,7 +1066,6 @@ def juju_version():
                                    universal_newlines=True).strip()


-@cached
 def has_juju_version(minimum_version):
     """Return True if the Juju version is at least the provided version"""
     return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
@@ -1103,6 +1125,8 @@ def _run_atexit():
 @translate_exc(from_exc=OSError, to_exc=NotImplementedError)
 def network_get_primary_address(binding):
     '''
+    Deprecated since Juju 2.3; use network_get()
+
     Retrieve the primary network address for a named binding

     :param binding: string. The name of a relation of extra-binding
@@ -1123,7 +1147,6 @@ def network_get_primary_address(binding):
     return response


-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
 def network_get(endpoint, relation_id=None):
     """
     Retrieve the network details for a relation endpoint
@@ -1131,24 +1154,20 @@ def network_get(endpoint, relation_id=None):
     :param endpoint: string. The name of a relation endpoint
     :param relation_id: int. The ID of the relation for the current context.
     :return: dict. The loaded YAML output of the network-get query.
-    :raise: NotImplementedError if run on Juju < 2.1
+    :raise: NotImplementedError if request not supported by the Juju version.
     """
+    if not has_juju_version('2.2'):
+        raise NotImplementedError(juju_version())  # earlier versions require --primary-address
+    if relation_id and not has_juju_version('2.3'):
+        raise NotImplementedError  # 2.3 added the -r option
+
     cmd = ['network-get', endpoint, '--format', 'yaml']
     if relation_id:
         cmd.append('-r')
         cmd.append(relation_id)
-    try:
-        response = subprocess.check_output(
-            cmd,
-            stderr=subprocess.STDOUT).decode('UTF-8').strip()
-    except CalledProcessError as e:
-        # Early versions of Juju 2.0.x required the --primary-address argument.
-        # We catch that condition here and raise NotImplementedError since
-        # the requested semantics are not available - the caller can then
-        # use the network_get_primary_address() method instead.
-        if '--primary-address is currently required' in e.output.decode('UTF-8'):
-            raise NotImplementedError
-        raise
+    response = subprocess.check_output(
+        cmd,
+        stderr=subprocess.STDOUT).decode('UTF-8').strip()
     return yaml.safe_load(response)
@@ -1204,9 +1223,23 @@ def iter_units_for_relation_name(relation_name):

 def ingress_address(rid=None, unit=None):
     """
-    Retrieve the ingress-address from a relation when available. Otherwise,
-    return the private-address. This function is to be used on the consuming
-    side of the relation.
+    Retrieve the ingress-address from a relation when available.
+    Otherwise, return the private-address.
+
+    When used on the consuming side of the relation (unit is a remote
+    unit), the ingress-address is the IP address that this unit needs
+    to use to reach the provided service on the remote unit.
+
+    When used on the providing side of the relation (unit == local_unit()),
+    the ingress-address is the IP address that is advertised to remote
+    units on this relation. Remote units need to use this address to
+    reach the local provided service on this unit.
+
+    Note that charms may document some other method to use in
+    preference to the ingress_address(), such as an address provided
+    on a different relation attribute or a service discovery mechanism.
+    This allows charms to redirect inbound connections to their peers
+    or different applications such as load balancers.

     Usage:
         addresses = [ingress_address(rid=u.rid, unit=u.unit)
@@ -1220,3 +1253,40 @@ def ingress_address(rid=None, unit=None):
     settings = relation_get(rid=rid, unit=unit)
     return (settings.get('ingress-address') or
             settings.get('private-address'))
+
+
+def egress_subnets(rid=None, unit=None):
+    """
+    Retrieve the egress-subnets from a relation.
+
+    This function is to be used on the providing side of the
+    relation, and provides the ranges of addresses that client
+    connections may come from. The result is uninteresting on
+    the consuming side of a relation (unit == local_unit()).
+
+    Returns a stable list of subnets in CIDR format.
+    eg. ['192.168.1.0/24', '2001::F00F/128']
+
+    If egress-subnets is not available, falls back to using the published
+    ingress-address, or finally private-address.
+
+    :param rid: string relation id
+    :param unit: string unit name
+    :side effect: calls relation_get
+    :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
+    """
+    def _to_range(addr):
+        if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
+            addr += '/32'
+        elif ':' in addr and '/' not in addr:  # IPv6
+            addr += '/128'
+        return addr
+
+    settings = relation_get(rid=rid, unit=unit)
+    if 'egress-subnets' in settings:
+        return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
+    if 'ingress-address' in settings:
+        return [_to_range(settings['ingress-address'])]
+    if 'private-address' in settings:
+        return [_to_range(settings['private-address'])]
+    return []  # Should never happen
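Reviewer note: a sketch of the egress_subnets() fallback order on the providing side of a relation; the relation id and unit name are placeholders:

    from charmhelpers.core.hookenv import egress_subnets

    # If the remote unit published egress-subnets, the list is returned as-is:
    #   'egress-subnets: 10.5.0.0/16,2001:db8::/32' -> ['10.5.0.0/16', '2001:db8::/32']
    # Otherwise ingress-address (or private-address) is widened to a host route:
    #   'ingress-address: 10.5.0.23' -> ['10.5.0.23/32']
    allowed = egress_subnets(rid='shared-db:1', unit='remote-unit/0')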
diff --git a/tests/charmhelpers/core/host.py b/tests/charmhelpers/core/host.py
index fd14d60..322ab2a 100644
--- a/tests/charmhelpers/core/host.py
+++ b/tests/charmhelpers/core/host.py
@@ -993,7 +993,7 @@ def updatedb(updatedb_text, new_path):
     return output


-def modulo_distribution(modulo=3, wait=30):
+def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
     """ Modulo distribution

     This helper uses the unit number, a modulo value and a constant wait time
@@ -1015,7 +1015,14 @@ def modulo_distribution(modulo=3, wait=30):

     @param modulo: int The modulo number creates the group distribution
     @param wait: int The constant time wait value
+    @param non_zero_wait: boolean Override unit % modulo == 0,
+                          return modulo * wait. Used to avoid collisions with
+                          leader nodes which are often given priority.
     @return: int Calculated time to wait for unit operation
     """
     unit_number = int(local_unit().split('/')[1])
-    return (unit_number % modulo) * wait
+    calculated_wait_time = (unit_number % modulo) * wait
+    if non_zero_wait and calculated_wait_time == 0:
+        return modulo * wait
+    else:
+        return calculated_wait_time
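Reviewer note: a sketch of the non_zero_wait behaviour added above, assuming modulo=3 and wait=30 and execution inside a hook context (local_unit() must resolve); the unit numbers in the comments are illustrative:

    import time
    from charmhelpers.core.host import modulo_distribution

    # unit/0 -> 90  (0 would collide with a prioritised leader, bumped to modulo * wait)
    # unit/1 -> 30
    # unit/2 -> 60
    # unit/3 -> 90
    delay = modulo_distribution(modulo=3, wait=30, non_zero_wait=True)
    time.sleep(delay)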
diff --git a/tests/charmhelpers/core/services/base.py b/tests/charmhelpers/core/services/base.py
index ca9dc99..179ad4f 100644
--- a/tests/charmhelpers/core/services/base.py
+++ b/tests/charmhelpers/core/services/base.py
@@ -307,23 +307,34 @@ class PortManagerCallback(ManagerCallback):
     """
     def __call__(self, manager, service_name, event_name):
         service = manager.get_service(service_name)
-        new_ports = service.get('ports', [])
+        # turn this generator into a list,
+        # as we'll be going over it multiple times
+        new_ports = list(service.get('ports', []))
         port_file = os.path.join(hookenv.charm_dir(),
                                  '.{}.ports'.format(service_name))
         if os.path.exists(port_file):
             with open(port_file) as fp:
                 old_ports = fp.read().split(',')
             for old_port in old_ports:
-                if bool(old_port):
-                    old_port = int(old_port)
-                    if old_port not in new_ports:
-                        hookenv.close_port(old_port)
+                if bool(old_port) and not self.ports_contains(old_port, new_ports):
+                    hookenv.close_port(old_port)
         with open(port_file, 'w') as fp:
             fp.write(','.join(str(port) for port in new_ports))
         for port in new_ports:
+            # A port is either a number or 'ICMP'
+            protocol = 'TCP'
+            if str(port).upper() == 'ICMP':
+                protocol = 'ICMP'
             if event_name == 'start':
-                hookenv.open_port(port)
+                hookenv.open_port(port, protocol)
             elif event_name == 'stop':
-                hookenv.close_port(port)
+                hookenv.close_port(port, protocol)
+
+    def ports_contains(self, port, ports):
+        if not bool(port):
+            return False
+        if str(port).upper() != 'ICMP':
+            port = int(port)
+        return port in ports


 def service_stop(service_name):
diff --git a/tests/charmhelpers/core/sysctl.py b/tests/charmhelpers/core/sysctl.py
index 6e413e3..1f188d8 100644
--- a/tests/charmhelpers/core/sysctl.py
+++ b/tests/charmhelpers/core/sysctl.py
@@ -31,18 +31,22 @@ __author__ = 'Jorge Niedbalski R. '
 def create(sysctl_dict, sysctl_file):
     """Creates a sysctl.conf file from a YAML associative array

-    :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
+    :param sysctl_dict: a dict or YAML-formatted string of sysctl
+        options eg "{ 'kernel.max_pid': 1337 }"
     :type sysctl_dict: str
     :param sysctl_file: path to the sysctl file to be saved
     :type sysctl_file: str or unicode
     :returns: None
     """
-    try:
-        sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
-    except yaml.YAMLError:
-        log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
-            level=ERROR)
-        return
+    if type(sysctl_dict) is not dict:
+        try:
+            sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
+        except yaml.YAMLError:
+            log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
+                level=ERROR)
+            return
+    else:
+        sysctl_dict_parsed = sysctl_dict

     with open(sysctl_file, "w") as fd:
         for key, value in sysctl_dict_parsed.items():
diff --git a/tests/charmhelpers/core/unitdata.py b/tests/charmhelpers/core/unitdata.py
index 6d7b494..ab55432 100644
--- a/tests/charmhelpers/core/unitdata.py
+++ b/tests/charmhelpers/core/unitdata.py
@@ -166,6 +166,10 @@ class Storage(object):

     To support dicts, lists, integer, floats, and booleans values
     are automatically json encoded/decoded.
+
+    Note: to facilitate unit testing, ':memory:' can be passed as the
+    path parameter which causes sqlite3 to only build the db in memory.
+    This should only be used for testing purposes.
     """
     def __init__(self, path=None):
         self.db_path = path
@@ -175,8 +179,9 @@
         else:
             self.db_path = os.path.join(
                 os.environ.get('CHARM_DIR', ''), '.unit-state.db')
-        with open(self.db_path, 'a') as f:
-            os.fchmod(f.fileno(), 0o600)
+        if self.db_path != ':memory:':
+            with open(self.db_path, 'a') as f:
+                os.fchmod(f.fileno(), 0o600)
         self.conn = sqlite3.connect('%s' % self.db_path)
         self.cursor = self.conn.cursor()
         self.revision = None
diff --git a/tests/dev-basic-bionic-queens b/tests/gate-basic-bionic-queens
similarity index 100%
rename from tests/dev-basic-bionic-queens
rename to tests/gate-basic-bionic-queens
diff --git a/tests/gate-basic-xenial-queens b/tests/gate-basic-xenial-queens
new file mode 100755
index 0000000..9f8a51f
--- /dev/null
+++ b/tests/gate-basic-xenial-queens
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Amulet tests on a basic ceilometer-agent deployment on xenial-queens."""
+
+from basic_deployment import CeiloAgentBasicDeployment
+
+if __name__ == '__main__':
+    deployment = CeiloAgentBasicDeployment(series='xenial',
+                                           openstack='cloud:xenial-queens')
+    deployment.run_tests()
diff --git a/tox.ini b/tox.ini
index 4319064..09ca045 100644
--- a/tox.ini
+++ b/tox.ini
@@ -60,7 +60,7 @@ basepython = python2.7
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
 commands =
-  bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-pike --no-destroy
+  bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-queens --no-destroy

 [testenv:func27-dfs]
 # Charm Functional Test
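Reviewer note: the ':memory:' path accepted by unitdata.Storage in the change above is aimed at unit tests; a sketch of the intended use (the key name is a placeholder):

    from charmhelpers.core import unitdata

    # Build the key/value store entirely in memory so tests leave no
    # .unit-state.db file behind.
    kv = unitdata.Storage(path=':memory:')
    kv.set('pkg-installed', True)
    assert kv.get('pkg-installed') is True
    kv.flush()
    kv.close()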