diff --git a/.gitignore b/.gitignore index 813d956..7947e2f 100644 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,4 @@ bin *.sw[nop] *.py[oc] *.pyc +func-results.json diff --git a/hooks/charmhelpers/contrib/hahelpers/apache.py b/hooks/charmhelpers/contrib/hahelpers/apache.py index 22acb68..605a1be 100644 --- a/hooks/charmhelpers/contrib/hahelpers/apache.py +++ b/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -65,7 +65,8 @@ def get_ca_cert(): if ca_cert is None: log("Inspecting identity-service relations for CA SSL certificate.", level=INFO) - for r_id in relation_ids('identity-service'): + for r_id in (relation_ids('identity-service') + + relation_ids('identity-credentials')): for unit in relation_list(r_id): if ca_cert is None: ca_cert = relation_get('ca_cert', @@ -76,7 +77,7 @@ def get_ca_cert(): def retrieve_ca_cert(cert_file): cert = None if os.path.isfile(cert_file): - with open(cert_file, 'r') as crt: + with open(cert_file, 'rb') as crt: cert = crt.read() return cert diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py index 4207e42..47facd9 100644 --- a/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -371,6 +371,7 @@ def distributed_wait(modulo=None, wait=None, operation_name='operation'): ''' Distribute operations by waiting based on modulo_distribution If modulo and or wait are not set, check config_get for those values. + If config values are not set, default to modulo=3 and wait=30. :param modulo: int The modulo number creates the group distribution :param wait: int The constant time wait value @@ -382,10 +383,17 @@ def distributed_wait(modulo=None, wait=None, operation_name='operation'): :side effect: Calls time.sleep() ''' if modulo is None: - modulo = config_get('modulo-nodes') + modulo = config_get('modulo-nodes') or 3 if wait is None: - wait = config_get('known-wait') - calculated_wait = modulo_distribution(modulo=modulo, wait=wait) + wait = config_get('known-wait') or 30 + if juju_is_leader(): + # The leader should never wait + calculated_wait = 0 + else: + # non_zero_wait=True guarantees the non-leader who gets modulo 0 + # will still wait + calculated_wait = modulo_distribution(modulo=modulo, wait=wait, + non_zero_wait=True) msg = "Waiting {} seconds for {} ...".format(calculated_wait, operation_name) log(msg, DEBUG) diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 5afbbd8..66beeda 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -21,6 +21,9 @@ from collections import OrderedDict from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) +from charmhelpers.contrib.openstack.amulet.utils import ( + OPENSTACK_RELEASES_PAIRS +) DEBUG = logging.DEBUG ERROR = logging.ERROR @@ -271,11 +274,8 @@ class OpenStackAmuletDeployment(AmuletDeployment): release. 
""" # Must be ordered by OpenStack release (not by Ubuntu release): - (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, - self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, - self.yakkety_newton, self.xenial_ocata, self.zesty_ocata, - self.xenial_pike, self.artful_pike, self.xenial_queens, - self.bionic_queens,) = range(13) + for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS): + setattr(self, os_pair, i) releases = { ('trusty', None): self.trusty_icehouse, diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py index d93cff3..84e87f5 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -50,6 +50,13 @@ ERROR = logging.ERROR NOVA_CLIENT_VERSION = "2" +OPENSTACK_RELEASES_PAIRS = [ + 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty', + 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', + 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', + 'xenial_pike', 'artful_pike', 'xenial_queens', + 'bionic_queens'] + class OpenStackAmuletUtils(AmuletUtils): """OpenStack amulet utilities. @@ -63,7 +70,34 @@ class OpenStackAmuletUtils(AmuletUtils): super(OpenStackAmuletUtils, self).__init__(log_level) def validate_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): + public_port, expected, openstack_release=None): + """Validate endpoint data. Pick the correct validator based on + OpenStack release. Expected data should be in the v2 format: + { + 'id': id, + 'region': region, + 'adminurl': adminurl, + 'internalurl': internalurl, + 'publicurl': publicurl, + 'service_id': service_id} + + """ + validation_function = self.validate_v2_endpoint_data + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + validation_function = self.validate_v3_endpoint_data + expected = { + 'id': expected['id'], + 'region': expected['region'], + 'region_id': 'RegionOne', + 'url': self.valid_url, + 'interface': self.not_null, + 'service_id': expected['service_id']} + return validation_function(endpoints, admin_port, internal_port, + public_port, expected) + + def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): """Validate endpoint data. Validate actual endpoint data vs expected endpoint data. The ports @@ -141,7 +175,86 @@ class OpenStackAmuletUtils(AmuletUtils): if len(found) != expected_num_eps: return 'Unexpected number of endpoints found' - def validate_svc_catalog_endpoint_data(self, expected, actual): + def convert_svc_catalog_endpoint_data_to_v3(self, ep_data): + """Convert v2 endpoint data into v3. + + { + 'service_name1': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + 'service_name2': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. 
+ 'publicURL': publicURL, + 'internalURL': internalURL + }], + } + """ + self.log.warn("Endpoint ID and Region ID validation is limited to not " + "null checks after v2 to v3 conversion") + for svc in ep_data.keys(): + assert len(ep_data[svc]) == 1, "Unknown data format" + svc_ep_data = ep_data[svc][0] + ep_data[svc] = [ + { + 'url': svc_ep_data['adminURL'], + 'interface': 'admin', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}, + { + 'url': svc_ep_data['publicURL'], + 'interface': 'public', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}, + { + 'url': svc_ep_data['internalURL'], + 'interface': 'internal', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}] + return ep_data + + def validate_svc_catalog_endpoint_data(self, expected, actual, + openstack_release=None): + """Validate service catalog endpoint data. Pick the correct validator + for the OpenStack version. Expected data should be in the v2 format: + { + 'service_name1': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + 'service_name2': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + } + + """ + validation_function = self.validate_v2_svc_catalog_endpoint_data + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + validation_function = self.validate_v3_svc_catalog_endpoint_data + expected = self.convert_svc_catalog_endpoint_data_to_v3(expected) + return validation_function(expected, actual) + + def validate_v2_svc_catalog_endpoint_data(self, expected, actual): """Validate service catalog endpoint data. Validate a list of actual service catalog endpoints vs a list of @@ -328,7 +441,7 @@ class OpenStackAmuletUtils(AmuletUtils): if rel.get('api_version') != str(api_version): raise Exception("api_version not propagated through relation" " data yet ('{}' != '{}')." - "".format(rel['api_version'], api_version)) + "".format(rel.get('api_version'), api_version)) def keystone_configure_api_version(self, sentry_relation_pairs, deployment, api_version): @@ -350,16 +463,13 @@ class OpenStackAmuletUtils(AmuletUtils): deployment._auto_wait_for_status() self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) - def authenticate_cinder_admin(self, keystone_sentry, username, - password, tenant, api_version=2): + def authenticate_cinder_admin(self, keystone, api_version=2): """Authenticates admin user with cinder.""" - # NOTE(beisner): cinder python client doesn't accept tokens. 
- keystone_ip = keystone_sentry.info['public-address'] - ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) + self.log.debug('Authenticating cinder admin...') _clients = { 1: cinder_client.Client, 2: cinder_clientv2.Client} - return _clients[api_version](username, password, tenant, ept) + return _clients[api_version](session=keystone.session) def authenticate_keystone(self, keystone_ip, username, password, api_version=False, admin_port=False, @@ -367,13 +477,36 @@ class OpenStackAmuletUtils(AmuletUtils): project_domain_name=None, project_name=None): """Authenticate with Keystone""" self.log.debug('Authenticating with keystone...') - port = 5000 - if admin_port: - port = 35357 - base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), - port) - if not api_version or api_version == 2: - ep = base_ep + "/v2.0" + if not api_version: + api_version = 2 + sess, auth = self.get_keystone_session( + keystone_ip=keystone_ip, + username=username, + password=password, + api_version=api_version, + admin_port=admin_port, + user_domain_name=user_domain_name, + domain_name=domain_name, + project_domain_name=project_domain_name, + project_name=project_name + ) + if api_version == 2: + client = keystone_client.Client(session=sess) + else: + client = keystone_client_v3.Client(session=sess) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(sess) + return client + + def get_keystone_session(self, keystone_ip, username, password, + api_version=False, admin_port=False, + user_domain_name=None, domain_name=None, + project_domain_name=None, project_name=None): + """Return a keystone session object""" + ep = self.get_keystone_endpoint(keystone_ip, + api_version=api_version, + admin_port=admin_port) + if api_version == 2: auth = v2.Password( username=username, password=password, @@ -381,12 +514,7 @@ class OpenStackAmuletUtils(AmuletUtils): auth_url=ep ) sess = keystone_session.Session(auth=auth) - client = keystone_client.Client(session=sess) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(sess) - return client else: - ep = base_ep + "/v3" auth = v3.Password( user_domain_name=user_domain_name, username=username, @@ -397,10 +525,57 @@ class OpenStackAmuletUtils(AmuletUtils): auth_url=ep ) sess = keystone_session.Session(auth=auth) - client = keystone_client_v3.Client(session=sess) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(sess) - return client + return (sess, auth) + + def get_keystone_endpoint(self, keystone_ip, api_version=None, + admin_port=False): + """Return keystone endpoint""" + port = 5000 + if admin_port: + port = 35357 + base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), + port) + if api_version == 2: + ep = base_ep + "/v2.0" + else: + ep = base_ep + "/v3" + return ep + + def get_default_keystone_session(self, keystone_sentry, + openstack_release=None): + """Return a keystone session object and client object assuming standard + default settings + + Example call in amulet tests: + self.keystone_session, self.keystone = u.get_default_keystone_session( + self.keystone_sentry, + openstack_release=self._get_openstack_release()) + + The session can then be used to auth other clients: + neutronclient.Client(session=session) + aodh_client.Client(session=session) + eyc + """ + self.log.debug('Authenticating keystone admin...') + api_version = 2 + client_class = keystone_client.Client + # 11 => xenial_queens + if openstack_release and openstack_release >= 11: + 
api_version = 3 + client_class = keystone_client_v3.Client + keystone_ip = keystone_sentry.info['public-address'] + session, auth = self.get_keystone_session( + keystone_ip, + api_version=api_version, + username='admin', + password='openstack', + project_name='admin', + user_domain_name='admin_domain', + project_domain_name='admin_domain') + client = client_class(session=session) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(session) + return session, client def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant=None, api_version=None, diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 36cf32f..2d91f0a 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -384,6 +384,7 @@ class IdentityServiceContext(OSContextGenerator): # so a missing value just indicates keystone needs # upgrading ctxt['admin_tenant_id'] = rdata.get('service_tenant_id') + ctxt['admin_domain_id'] = rdata.get('service_domain_id') return ctxt return {} @@ -796,9 +797,9 @@ class ApacheSSLContext(OSContextGenerator): key_filename = 'key' write_file(path=os.path.join(ssl_dir, cert_filename), - content=b64decode(cert)) + content=b64decode(cert), perms=0o640) write_file(path=os.path.join(ssl_dir, key_filename), - content=b64decode(key)) + content=b64decode(key), perms=0o640) def configure_ca(self): ca_cert = get_ca_cert() @@ -1872,10 +1873,11 @@ class EnsureDirContext(OSContextGenerator): context is needed to do that before rendering a template. ''' - def __init__(self, dirname): + def __init__(self, dirname, **kwargs): '''Used merely to ensure that a given directory exists.''' self.dirname = dirname + self.kwargs = kwargs def __call__(self): - mkdir(self.dirname) + mkdir(self.dirname, **self.kwargs) return {} diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-oslo-middleware b/hooks/charmhelpers/contrib/openstack/templates/section-oslo-middleware new file mode 100644 index 0000000..dd73230 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/templates/section-oslo-middleware @@ -0,0 +1,5 @@ +[oslo_middleware] + +# Bug #1758675 +enable_proxy_headers_parsing = true + diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications b/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications index 5dccd4b..021a3c2 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications +++ b/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications @@ -5,4 +5,7 @@ transport_url = {{ transport_url }} {% if notification_topics -%} topics = {{ notification_topics }} {% endif -%} +{% if notification_format -%} +notification_format = {{ notification_format }} +{% endif -%} {% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index b753275..6184abd 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -182,7 +182,7 @@ SWIFT_CODENAMES = OrderedDict([ ('pike', ['2.13.0', '2.15.0']), ('queens', - ['2.16.0']), + ['2.16.0', '2.17.0']), ]) # >= Liberty version->codename mapping @@ -306,7 +306,7 @@ def get_os_codename_install_source(src): if src.startswith('cloud:'): ca_rel = src.split(':')[1] - ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0] + ca_rel = ca_rel.split('-')[1].split('/')[0] return ca_rel # Best guess match based on deb 
string provided
diff --git a/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/hooks/charmhelpers/contrib/openstack/vaultlocker.py
new file mode 100644
index 0000000..a8e4bf8
--- /dev/null
+++ b/hooks/charmhelpers/contrib/openstack/vaultlocker.py
@@ -0,0 +1,126 @@
+# Copyright 2018 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+
+import charmhelpers.contrib.openstack.alternatives as alternatives
+import charmhelpers.contrib.openstack.context as context
+
+import charmhelpers.core.hookenv as hookenv
+import charmhelpers.core.host as host
+import charmhelpers.core.templating as templating
+import charmhelpers.core.unitdata as unitdata
+
+VAULTLOCKER_BACKEND = 'charm-vaultlocker'
+
+
+class VaultKVContext(context.OSContextGenerator):
+    """Vault KV context for interaction with vault-kv interfaces"""
+    interfaces = ['secrets-storage']
+
+    def __init__(self, secret_backend=None):
+        super(context.OSContextGenerator, self).__init__()
+        self.secret_backend = (
+            secret_backend or 'charm-{}'.format(hookenv.service_name())
+        )
+
+    def __call__(self):
+        db = unitdata.kv()
+        last_token = db.get('last-token')
+        secret_id = db.get('secret-id')
+        for relation_id in hookenv.relation_ids(self.interfaces[0]):
+            for unit in hookenv.related_units(relation_id):
+                data = hookenv.relation_get(unit=unit,
+                                            rid=relation_id)
+                vault_url = data.get('vault_url')
+                role_id = data.get('{}_role_id'.format(hookenv.local_unit()))
+                token = data.get('{}_token'.format(hookenv.local_unit()))
+
+                if all([vault_url, role_id, token]):
+                    token = json.loads(token)
+                    vault_url = json.loads(vault_url)
+
+                    # Tokens may change when secret_id's are being
+                    # reissued - if so use token to get new secret_id
+                    if token != last_token:
+                        secret_id = retrieve_secret_id(
+                            url=vault_url,
+                            token=token
+                        )
+                        db.set('secret-id', secret_id)
+                        db.set('last-token', token)
+                        db.flush()
+
+                    ctxt = {
+                        'vault_url': vault_url,
+                        'role_id': json.loads(role_id),
+                        'secret_id': secret_id,
+                        'secret_backend': self.secret_backend,
+                    }
+                    vault_ca = data.get('vault_ca')
+                    if vault_ca:
+                        ctxt['vault_ca'] = json.loads(vault_ca)
+                    self.complete = True
+                    return ctxt
+        return {}
+
+
+def write_vaultlocker_conf(context, priority=100):
+    """Write vaultlocker configuration to disk and install alternative
+
+    :param context: Dict of data from vault-kv relation
+    :ptype context: dict
+    :param priority: Priority of alternative configuration
+    :ptype priority: int"""
+    charm_vl_path = "/var/lib/charm/{}/vaultlocker.conf".format(
+        hookenv.service_name()
+    )
+    host.mkdir(os.path.dirname(charm_vl_path), perms=0o700)
+    templating.render(source='vaultlocker.conf.j2',
+                      target=charm_vl_path,
+                      context=context, perms=0o600)
+    alternatives.install_alternative('vaultlocker.conf',
+                                     '/etc/vaultlocker/vaultlocker.conf',
+                                     charm_vl_path, priority)
+
+
+def vault_relation_complete(backend=None):
+    """Determine whether vault relation is complete
+
+    :param backend: Name of secrets backend requested
+    :ptype backend: string
+    :returns: whether the relation to vault is complete
+    :rtype: bool"""
+    vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND)
+    vault_kv()
+    return vault_kv.complete
+
+
+# TODO: contribute a high level unwrap method to hvac that works
+def retrieve_secret_id(url, token):
+    """Retrieve a response-wrapped secret_id from Vault
+
+    :param url: URL to Vault Server
+    :ptype url: str
+    :param token: One shot Token to use
+    :ptype token: str
+    :returns: secret_id to use for Vault Access
+    :rtype: str"""
+    import hvac
+    client = hvac.Client(url=url, token=token)
+    response = client._post('/v1/sys/wrapping/unwrap')
+    if response.status_code == 200:
+        data = response.json()
+        return data['data']['secret_id']
diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py
index e13e60a..7682820 100644
--- a/hooks/charmhelpers/contrib/storage/linux/ceph.py
+++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py
@@ -291,7 +291,7 @@ class Pool(object):
 
 class ReplicatedPool(Pool):
     def __init__(self, service, name, pg_num=None, replicas=2,
-                 percent_data=10.0):
+                 percent_data=10.0, app_name=None):
         super(ReplicatedPool, self).__init__(service=service, name=name)
         self.replicas = replicas
         if pg_num:
@@ -301,6 +301,10 @@ class ReplicatedPool(Pool):
             self.pg_num = min(pg_num, max_pgs)
         else:
             self.pg_num = self.get_pgs(self.replicas, percent_data)
+        if app_name:
+            self.app_name = app_name
+        else:
+            self.app_name = 'unknown'
 
     def create(self):
         if not pool_exists(self.service, self.name):
@@ -313,6 +317,12 @@
                 update_pool(client=self.service,
                             pool=self.name,
                             settings={'size': str(self.replicas)})
+                try:
+                    set_app_name_for_pool(client=self.service,
+                                          pool=self.name,
+                                          name=self.app_name)
+                except CalledProcessError:
+                    log('Could not set app name for pool {}'.format(self.name), level=WARNING)
             except CalledProcessError:
                 raise
 
@@ -320,10 +330,14 @@
 # Default jerasure erasure coded pool
 class ErasurePool(Pool):
     def __init__(self, service, name, erasure_code_profile="default",
-                 percent_data=10.0):
+                 percent_data=10.0, app_name=None):
         super(ErasurePool, self).__init__(service=service, name=name)
         self.erasure_code_profile = erasure_code_profile
         self.percent_data = percent_data
+        if app_name:
+            self.app_name = app_name
+        else:
+            self.app_name = 'unknown'
 
     def create(self):
         if not pool_exists(self.service, self.name):
@@ -355,6 +369,12 @@
                    'erasure', self.erasure_code_profile]
             try:
                 check_call(cmd)
+                try:
+                    set_app_name_for_pool(client=self.service,
+                                          pool=self.name,
+                                          name=self.app_name)
+                except CalledProcessError:
+                    log('Could not set app name for pool {}'.format(self.name), level=WARNING)
             except CalledProcessError:
                 raise
 
@@ -778,6 +798,25 @@ def update_pool(client, pool, settings):
     check_call(cmd)
 
 
+def set_app_name_for_pool(client, pool, name):
+    """
+    Calls `osd pool application enable` for the specified pool name
+
+    :param client: Name of the ceph client to use
+    :type client: str
+    :param pool: Pool to set app name for
+    :type pool: str
+    :param name: app name for the specified pool
+    :type name: str
+
+    :raises: CalledProcessError if ceph call fails
+    """
+    if ceph_version() >= '12.0.0':
+        cmd = ['ceph', '--id', client, 'osd', 'pool',
+               'application', 'enable', pool, name]
+        check_call(cmd)
+
+
 def create_pool(service, name, replicas=3, pg_num=None):
     """Create a new RADOS pool."""
     if pool_exists(service, name):
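
[Editorial aside between file diffs: a minimal usage sketch of the new app_name plumbing added to ceph.py above. The client, pool and application names are illustrative assumptions, not values taken from this diff.]

    from charmhelpers.contrib.storage.linux.ceph import ReplicatedPool

    # Create a replicated pool tagged for the 'rbd' application; on
    # Ceph >= 12.0.0 (Luminous) create() now also runs
    # `ceph osd pool application enable <pool> rbd` via
    # set_app_name_for_pool(), and merely logs a warning if that fails.
    pool = ReplicatedPool(service='admin', name='cinder-backup',
                          replicas=3, app_name='rbd')
    pool.create()
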
diff --git a/hooks/charmhelpers/contrib/storage/linux/lvm.py b/hooks/charmhelpers/contrib/storage/linux/lvm.py
index 79a7a24..c8bde69 100644
--- a/hooks/charmhelpers/contrib/storage/linux/lvm.py
+++ b/hooks/charmhelpers/contrib/storage/linux/lvm.py
@@ -151,3 +151,32 @@ def extend_logical_volume_by_device(lv_name, block_device):
     '''
     cmd = ['lvextend', lv_name, block_device]
     check_call(cmd)
+
+
+def create_logical_volume(lv_name, volume_group, size=None):
+    '''
+    Create a new logical volume in an existing volume group
+
+    :param lv_name: str: name of logical volume to be created.
+    :param volume_group: str: Name of volume group to use for the new volume.
+    :param size: str: Size of logical volume to create (100% if not supplied)
+    :raises subprocess.CalledProcessError: in the event that the lvcreate fails.
+    '''
+    if size:
+        check_call([
+            'lvcreate',
+            '--yes',
+            '-L',
+            '{}'.format(size),
+            '-n', lv_name, volume_group
+        ])
+    # create the lv with all the space available, this is needed because the
+    # system call is different for LVM
+    else:
+        check_call([
+            'lvcreate',
+            '--yes',
+            '-l',
+            '100%FREE',
+            '-n', lv_name, volume_group
+        ])
diff --git a/hooks/charmhelpers/contrib/storage/linux/utils.py b/hooks/charmhelpers/contrib/storage/linux/utils.py
index c942889..6f846b0 100644
--- a/hooks/charmhelpers/contrib/storage/linux/utils.py
+++ b/hooks/charmhelpers/contrib/storage/linux/utils.py
@@ -67,3 +67,19 @@ def is_device_mounted(device):
     except Exception:
         return False
     return bool(re.search(r'MOUNTPOINT=".+"', out))
+
+
+def mkfs_xfs(device, force=False):
+    """Format device with XFS filesystem.
+
+    By default this should fail if the device already has a filesystem on it.
+    :param device: Full path to device to format
+    :ptype device: str
+    :param force: Force operation
+    :ptype force: boolean"""
+    cmd = ['mkfs.xfs']
+    if force:
+        cmd.append("-f")
+
+    cmd += ['-i', 'size=1024', device]
+    check_call(cmd)
diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py
index 7ed1cc4..627d8f7 100644
--- a/hooks/charmhelpers/core/hookenv.py
+++ b/hooks/charmhelpers/core/hookenv.py
@@ -27,6 +27,7 @@ import glob
 import os
 import json
 import yaml
+import re
 import subprocess
 import sys
 import errno
@@ -67,7 +68,7 @@ def cached(func):
     @wraps(func)
     def wrapper(*args, **kwargs):
         global cache
-        key = str((func, args, kwargs))
+        key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
         try:
             return cache[key]
         except KeyError:
@@ -289,7 +290,7 @@ class Config(dict):
         self.implicit_save = True
         self._prev_dict = None
         self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
-        if os.path.exists(self.path):
+        if os.path.exists(self.path) and os.stat(self.path).st_size:
             self.load_previous()
         atexit(self._implicit_save)
@@ -309,7 +310,11 @@ class Config(dict):
         """
         self.path = path or self.path
         with open(self.path) as f:
-            self._prev_dict = json.load(f)
+            try:
+                self._prev_dict = json.load(f)
+            except ValueError as e:
+                log('Unable to parse previous config data - {}'.format(str(e)),
+                    level=ERROR)
         for k, v in copy.deepcopy(self._prev_dict).items():
             if k not in self:
                 self[k] = v
@@ -353,22 +358,40 @@ class Config(dict):
         self.save()
 
 
-@cached
+_cache_config = None
+
+
 def config(scope=None):
-    """Juju charm configuration"""
-    config_cmd_line = ['config-get']
-    if scope is not None:
-        config_cmd_line.append(scope)
-    else:
-        config_cmd_line.append('--all')
-    config_cmd_line.append('--format=json')
+    """
+    Get the juju charm configuration (scope==None) or individual key,
+    (scope=str). 
The returned value is a Python data structure loaded as + JSON from the Juju config command. + + :param scope: If set, return the value for the specified key. + :type scope: Optional[str] + :returns: Either the whole config as a Config, or a key from it. + :rtype: Any + """ + global _cache_config + config_cmd_line = ['config-get', '--all', '--format=json'] try: - config_data = json.loads( - subprocess.check_output(config_cmd_line).decode('UTF-8')) + # JSON Decode Exception for Python3.5+ + exc_json = json.decoder.JSONDecodeError + except AttributeError: + # JSON Decode Exception for Python2.7 through Python3.4 + exc_json = ValueError + try: + if _cache_config is None: + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) + _cache_config = Config(config_data) if scope is not None: - return config_data - return Config(config_data) - except ValueError: + return _cache_config.get(scope) + return _cache_config + except (exc_json, UnicodeDecodeError) as e: + log('Unable to parse output from config-get: config_cmd_line="{}" ' + 'message="{}"' + .format(config_cmd_line, str(e)), level=ERROR) return None @@ -1043,7 +1066,6 @@ def juju_version(): universal_newlines=True).strip() -@cached def has_juju_version(minimum_version): """Return True if the Juju version is at least the provided version""" return LooseVersion(juju_version()) >= LooseVersion(minimum_version) @@ -1103,6 +1125,8 @@ def _run_atexit(): @translate_exc(from_exc=OSError, to_exc=NotImplementedError) def network_get_primary_address(binding): ''' + Deprecated since Juju 2.3; use network_get() + Retrieve the primary network address for a named binding :param binding: string. The name of a relation of extra-binding @@ -1123,7 +1147,6 @@ def network_get_primary_address(binding): return response -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) def network_get(endpoint, relation_id=None): """ Retrieve the network details for a relation endpoint @@ -1131,24 +1154,20 @@ def network_get(endpoint, relation_id=None): :param endpoint: string. The name of a relation endpoint :param relation_id: int. The ID of the relation for the current context. :return: dict. The loaded YAML output of the network-get query. - :raise: NotImplementedError if run on Juju < 2.1 + :raise: NotImplementedError if request not supported by the Juju version. """ + if not has_juju_version('2.2'): + raise NotImplementedError(juju_version()) # earlier versions require --primary-address + if relation_id and not has_juju_version('2.3'): + raise NotImplementedError # 2.3 added the -r option + cmd = ['network-get', endpoint, '--format', 'yaml'] if relation_id: cmd.append('-r') cmd.append(relation_id) - try: - response = subprocess.check_output( - cmd, - stderr=subprocess.STDOUT).decode('UTF-8').strip() - except CalledProcessError as e: - # Early versions of Juju 2.0.x required the --primary-address argument. - # We catch that condition here and raise NotImplementedError since - # the requested semantics are not available - the caller can then - # use the network_get_primary_address() method instead. - if '--primary-address is currently required' in e.output.decode('UTF-8'): - raise NotImplementedError - raise + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() return yaml.safe_load(response) @@ -1204,9 +1223,23 @@ def iter_units_for_relation_name(relation_name): def ingress_address(rid=None, unit=None): """ - Retrieve the ingress-address from a relation when available. 
Otherwise, - return the private-address. This function is to be used on the consuming - side of the relation. + Retrieve the ingress-address from a relation when available. + Otherwise, return the private-address. + + When used on the consuming side of the relation (unit is a remote + unit), the ingress-address is the IP address that this unit needs + to use to reach the provided service on the remote unit. + + When used on the providing side of the relation (unit == local_unit()), + the ingress-address is the IP address that is advertised to remote + units on this relation. Remote units need to use this address to + reach the local provided service on this unit. + + Note that charms may document some other method to use in + preference to the ingress_address(), such as an address provided + on a different relation attribute or a service discovery mechanism. + This allows charms to redirect inbound connections to their peers + or different applications such as load balancers. Usage: addresses = [ingress_address(rid=u.rid, unit=u.unit) @@ -1220,3 +1253,40 @@ def ingress_address(rid=None, unit=None): settings = relation_get(rid=rid, unit=unit) return (settings.get('ingress-address') or settings.get('private-address')) + + +def egress_subnets(rid=None, unit=None): + """ + Retrieve the egress-subnets from a relation. + + This function is to be used on the providing side of the + relation, and provides the ranges of addresses that client + connections may come from. The result is uninteresting on + the consuming side of a relation (unit == local_unit()). + + Returns a stable list of subnets in CIDR format. + eg. ['192.168.1.0/24', '2001::F00F/128'] + + If egress-subnets is not available, falls back to using the published + ingress-address, or finally private-address. + + :param rid: string relation id + :param unit: string unit name + :side effect: calls relation_get + :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128'] + """ + def _to_range(addr): + if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None: + addr += '/32' + elif ':' in addr and '/' not in addr: # IPv6 + addr += '/128' + return addr + + settings = relation_get(rid=rid, unit=unit) + if 'egress-subnets' in settings: + return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()] + if 'ingress-address' in settings: + return [_to_range(settings['ingress-address'])] + if 'private-address' in settings: + return [_to_range(settings['private-address'])] + return [] # Should never happen diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index fd14d60..322ab2a 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -993,7 +993,7 @@ def updatedb(updatedb_text, new_path): return output -def modulo_distribution(modulo=3, wait=30): +def modulo_distribution(modulo=3, wait=30, non_zero_wait=False): """ Modulo distribution This helper uses the unit number, a modulo value and a constant wait time @@ -1015,7 +1015,14 @@ def modulo_distribution(modulo=3, wait=30): @param modulo: int The modulo number creates the group distribution @param wait: int The constant time wait value + @param non_zero_wait: boolean Override unit % modulo == 0, + return modulo * wait. Used to avoid collisions with + leader nodes which are often given priority. 
@return: int Calculated time to wait for unit operation """ unit_number = int(local_unit().split('/')[1]) - return (unit_number % modulo) * wait + calculated_wait_time = (unit_number % modulo) * wait + if non_zero_wait and calculated_wait_time == 0: + return modulo * wait + else: + return calculated_wait_time diff --git a/hooks/charmhelpers/core/services/base.py b/hooks/charmhelpers/core/services/base.py index ca9dc99..179ad4f 100644 --- a/hooks/charmhelpers/core/services/base.py +++ b/hooks/charmhelpers/core/services/base.py @@ -307,23 +307,34 @@ class PortManagerCallback(ManagerCallback): """ def __call__(self, manager, service_name, event_name): service = manager.get_service(service_name) - new_ports = service.get('ports', []) + # turn this generator into a list, + # as we'll be going over it multiple times + new_ports = list(service.get('ports', [])) port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) if os.path.exists(port_file): with open(port_file) as fp: old_ports = fp.read().split(',') for old_port in old_ports: - if bool(old_port): - old_port = int(old_port) - if old_port not in new_ports: - hookenv.close_port(old_port) + if bool(old_port) and not self.ports_contains(old_port, new_ports): + hookenv.close_port(old_port) with open(port_file, 'w') as fp: fp.write(','.join(str(port) for port in new_ports)) for port in new_ports: + # A port is either a number or 'ICMP' + protocol = 'TCP' + if str(port).upper() == 'ICMP': + protocol = 'ICMP' if event_name == 'start': - hookenv.open_port(port) + hookenv.open_port(port, protocol) elif event_name == 'stop': - hookenv.close_port(port) + hookenv.close_port(port, protocol) + + def ports_contains(self, port, ports): + if not bool(port): + return False + if str(port).upper() != 'ICMP': + port = int(port) + return port in ports def service_stop(service_name): diff --git a/hooks/charmhelpers/core/sysctl.py b/hooks/charmhelpers/core/sysctl.py index 6e413e3..1f188d8 100644 --- a/hooks/charmhelpers/core/sysctl.py +++ b/hooks/charmhelpers/core/sysctl.py @@ -31,18 +31,22 @@ __author__ = 'Jorge Niedbalski R. ' def create(sysctl_dict, sysctl_file): """Creates a sysctl.conf file from a YAML associative array - :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" + :param sysctl_dict: a dict or YAML-formatted string of sysctl + options eg "{ 'kernel.max_pid': 1337 }" :type sysctl_dict: str :param sysctl_file: path to the sysctl file to be saved :type sysctl_file: str or unicode :returns: None """ - try: - sysctl_dict_parsed = yaml.safe_load(sysctl_dict) - except yaml.YAMLError: - log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), - level=ERROR) - return + if type(sysctl_dict) is not dict: + try: + sysctl_dict_parsed = yaml.safe_load(sysctl_dict) + except yaml.YAMLError: + log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), + level=ERROR) + return + else: + sysctl_dict_parsed = sysctl_dict with open(sysctl_file, "w") as fd: for key, value in sysctl_dict_parsed.items(): diff --git a/hooks/charmhelpers/core/unitdata.py b/hooks/charmhelpers/core/unitdata.py index 6d7b494..ab55432 100644 --- a/hooks/charmhelpers/core/unitdata.py +++ b/hooks/charmhelpers/core/unitdata.py @@ -166,6 +166,10 @@ class Storage(object): To support dicts, lists, integer, floats, and booleans values are automatically json encoded/decoded. + + Note: to facilitate unit testing, ':memory:' can be passed as the + path parameter which causes sqlite3 to only build the db in memory. 
+ This should only be used for testing purposes. """ def __init__(self, path=None): self.db_path = path @@ -175,8 +179,9 @@ class Storage(object): else: self.db_path = os.path.join( os.environ.get('CHARM_DIR', ''), '.unit-state.db') - with open(self.db_path, 'a') as f: - os.fchmod(f.fileno(), 0o600) + if self.db_path != ':memory:': + with open(self.db_path, 'a') as f: + os.fchmod(f.fileno(), 0o600) self.conn = sqlite3.connect('%s' % self.db_path) self.cursor = self.conn.cursor() self.revision = None diff --git a/hooks/charmhelpers/fetch/ubuntu.py b/hooks/charmhelpers/fetch/ubuntu.py index 910e96a..653d58f 100644 --- a/hooks/charmhelpers/fetch/ubuntu.py +++ b/hooks/charmhelpers/fetch/ubuntu.py @@ -44,6 +44,7 @@ ARCH_TO_PROPOSED_POCKET = { 'x86_64': PROPOSED_POCKET, 'ppc64le': PROPOSED_PORTS_POCKET, 'aarch64': PROPOSED_PORTS_POCKET, + 's390x': PROPOSED_PORTS_POCKET, } CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py index 337bc98..88504bc 100644 --- a/tests/basic_deployment.py +++ b/tests/basic_deployment.py @@ -67,7 +67,8 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment): {'name': 'percona-cluster'}, {'name': 'keystone'}, {'name': 'rabbitmq-server'}, - {'name': 'ceph', 'units': 3}, + {'name': 'ceph-mon', 'units': 3}, + {'name': 'ceph-osd', 'units': 3}, {'name': 'cinder'}, {'name': 'cinder-ceph'}, ] @@ -78,8 +79,9 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment): """Add all of the relations for the services.""" relations = { - 'cinder-backup:ceph': 'ceph:client', - 'cinder-ceph:ceph': 'ceph:client', + 'cinder-backup:ceph': 'ceph-mon:client', + 'cinder-ceph:ceph': 'ceph-mon:client', + 'ceph-osd:mon': 'ceph-mon:osd', 'cinder:storage-backend': 'cinder-ceph:storage-backend', 'cinder:backup-backend': 'cinder-backup:backup-backend', 'keystone:shared-db': 'percona-cluster:shared-db', @@ -108,10 +110,16 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment): 'auth-supported': 'none', 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', 'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==', - 'osd-reformat': 'yes', - 'ephemeral-unmount': '/mnt', - 'osd-devices': '/dev/vdb /srv/ceph' } + + # Include a non-existent device as osd-devices is a whitelist, + # and this will catch cases where proposals attempt to change that. 
+ ceph_osd_config = { + 'osd-reformat': True, + 'ephemeral-unmount': '/mnt', + 'osd-devices': '/dev/vdb /srv/ceph /dev/test-non-existent' + } + cinder_ceph_config = { 'ceph-osd-replication-count': '3', } @@ -119,7 +127,8 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment): 'keystone': keystone_config, 'percona-cluster': pxc_config, 'cinder': cinder_config, - 'ceph': ceph_config, + 'ceph-mon': ceph_config, + 'ceph-osd': ceph_osd_config, 'cinder-ceph': cinder_ceph_config, 'cinder-backup': cinder_ceph_config, } @@ -132,9 +141,12 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment): self.keystone_sentry = self.d.sentry['keystone'][0] self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0] self.cinder_sentry = self.d.sentry['cinder'][0] - self.ceph0_sentry = self.d.sentry['ceph'][0] - self.ceph1_sentry = self.d.sentry['ceph'][1] - self.ceph2_sentry = self.d.sentry['ceph'][2] + self.ceph0_sentry = self.d.sentry['ceph-mon'][0] + self.ceph1_sentry = self.d.sentry['ceph-mon'][1] + self.ceph2_sentry = self.d.sentry['ceph-mon'][2] + self.ceph_osd0_sentry = self.d.sentry['ceph-osd'][0] + self.ceph_osd1_sentry = self.d.sentry['ceph-osd'][1] + self.ceph_osd2_sentry = self.d.sentry['ceph-osd'][2] self.cinder_backup_sentry = self.d.sentry['cinder-backup'][0] u.log.debug('openstack release val: {}'.format( self._get_openstack_release())) @@ -142,147 +154,46 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment): self._get_openstack_release_string())) # Authenticate admin with keystone - self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, - user='admin', - password='openstack', - tenant='admin') + self.keystone_session, self.keystone = u.get_default_keystone_session( + self.keystone_sentry, + openstack_release=self._get_openstack_release()) + # Authenticate admin with cinder endpoint - self.cinder = u.authenticate_cinder_admin(self.keystone_sentry, - username='admin', - password='openstack', - tenant='admin') - - # Create a demo tenant/role/user - self.demo_tenant = 'demoTenant' - self.demo_role = 'demoRole' - self.demo_user = 'demoUser' - if not u.tenant_exists(self.keystone, self.demo_tenant): - tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant, - description='demo tenant', - enabled=True) - self.keystone.roles.create(name=self.demo_role) - self.keystone.users.create(name=self.demo_user, - password='password', - tenant_id=tenant.id, - email='demo@demo.com') - - # Authenticate demo user with keystone - self.keystone_demo = u.authenticate_keystone_user(self.keystone, - self.demo_user, - 'password', - self.demo_tenant) - - # Authenticate demo user with nova-api - self.nova_demo = u.authenticate_nova_user(self.keystone, - self.demo_user, - 'password', - self.demo_tenant) - - def test_100_ceph_processes(self): - """Verify that the expected service processes are running - on each ceph unit.""" - - # Process name and quantity of processes to expect on each unit - ceph_processes = { - 'ceph-mon': 1, - 'ceph-osd': 2 - } - - # Units with process names and PID quantities expected - expected_processes = { - self.ceph0_sentry: ceph_processes, - self.ceph1_sentry: ceph_processes, - self.ceph2_sentry: ceph_processes - } - - actual_pids = u.get_unit_process_ids(expected_processes) - ret = u.validate_unit_process_ids(expected_processes, actual_pids) - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) + if self._get_openstack_release() >= self.xenial_pike: + api_version = 2 + else: + api_version = 1 + self.cinder = 
u.authenticate_cinder_admin(self.keystone, api_version) def test_102_services(self): """Verify the expected services are running on the service units.""" + if self._get_openstack_release() >= self.xenial_ocata: + cinder_services = ['apache2', + 'cinder-scheduler', + 'cinder-volume'] + else: + cinder_services = ['cinder-api', + 'cinder-scheduler', + 'cinder-volume'] services = { - self.rabbitmq_sentry: ['rabbitmq-server'], - self.keystone_sentry: ['keystone'], - self.cinder_sentry: ['cinder-api', - 'cinder-scheduler', - 'cinder-volume'], + self.cinder_sentry: cinder_services, } - if self._get_openstack_release() < self.xenial_mitaka: - # For upstart systems only. Ceph services under systemd - # are checked by process name instead. - ceph_services = [ - 'ceph-mon-all', - 'ceph-mon id=`hostname`', - 'ceph-osd-all', - 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)), - 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1)) - ] - services[self.ceph0_sentry] = ceph_services - services[self.ceph1_sentry] = ceph_services - services[self.ceph2_sentry] = ceph_services - - if self._get_openstack_release() >= self.trusty_liberty: - services[self.keystone_sentry] = ['apache2'] - - if self._get_openstack_release() >= self.xenial_ocata: - services[self.cinder_sentry].remove('cinder-api') - ret = u.validate_services_by_name(services) if ret: amulet.raise_status(amulet.FAIL, msg=ret) - def test_110_users(self): - """Verify expected users.""" - u.log.debug('Checking keystone users...') - - if self._get_openstack_release() < self.xenial_pike: - expected = [{ - 'name': 'cinder_cinderv2', - 'enabled': True, - 'tenantId': u.not_null, - 'id': u.not_null, - 'email': 'juju@localhost', - }] - else: - expected = [{ - 'name': 'cinderv2_cinderv3', - 'enabled': True, - 'tenantId': u.not_null, - 'id': u.not_null, - 'email': 'juju@localhost', - }] - - expected.append({ - 'name': 'admin', - 'enabled': True, - 'tenantId': u.not_null, - 'id': u.not_null, - 'email': 'juju@localhost', - }) - - actual = self.keystone.users.list() - ret = u.validate_user_data(expected, actual) - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - def test_112_service_catalog(self): """Verify that the service catalog endpoint data""" u.log.debug('Checking keystone service catalog...') - endpoint_vol = { - 'adminURL': u.valid_url, - 'region': 'RegionOne', - 'publicURL': u.valid_url, - 'internalURL': u.valid_url - } - endpoint_id = { - 'adminURL': u.valid_url, - 'region': 'RegionOne', - 'publicURL': u.valid_url, - 'internalURL': u.valid_url - } + endpoint_vol = {'adminURL': u.valid_url, + 'region': 'RegionOne', + 'publicURL': u.valid_url, + 'internalURL': u.valid_url} + endpoint_id = {'adminURL': u.valid_url, + 'region': 'RegionOne', + 'publicURL': u.valid_url, + 'internalURL': u.valid_url} if self._get_openstack_release() >= self.trusty_icehouse: endpoint_vol['id'] = u.not_null endpoint_id['id'] = u.not_null @@ -295,29 +206,49 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment): # Ocata and prior expected = {'identity': [endpoint_id], 'volume': [endpoint_id]} - actual = self.keystone.service_catalog.get_endpoints() - ret = u.validate_svc_catalog_endpoint_data(expected, actual) + ret = u.validate_svc_catalog_endpoint_data( + expected, + actual, + openstack_release=self._get_openstack_release()) if ret: amulet.raise_status(amulet.FAIL, msg=ret) def test_114_cinder_endpoint(self): """Verify the cinder endpoint data.""" - u.log.debug('Checking cinder api endpoint data...') + u.log.debug('Checking cinder endpoint...') endpoints = 
self.keystone.endpoints.list() admin_port = internal_port = public_port = '8776' - expected = { - 'id': u.not_null, - 'region': 'RegionOne', - 'adminurl': u.valid_url, - 'internalurl': u.valid_url, - 'publicurl': u.valid_url, - 'service_id': u.not_null - } - - ret = u.validate_endpoint_data(endpoints, admin_port, internal_port, - public_port, expected) + if self._get_openstack_release() >= self.xenial_queens: + expected = { + 'id': u.not_null, + 'region': 'RegionOne', + 'region_id': 'RegionOne', + 'url': u.valid_url, + 'interface': u.not_null, + 'service_id': u.not_null} + ret = u.validate_v3_endpoint_data( + endpoints, + admin_port, + internal_port, + public_port, + expected, + 6) + else: + expected = { + 'id': u.not_null, + 'region': 'RegionOne', + 'adminurl': u.valid_url, + 'internalurl': u.valid_url, + 'publicurl': u.valid_url, + 'service_id': u.not_null} + ret = u.validate_v2_endpoint_data( + endpoints, + admin_port, + internal_port, + public_port, + expected) if ret: amulet.raise_status(amulet.FAIL, msg='cinder endpoint: {}'.format(ret)) @@ -342,7 +273,7 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment): client_unit = self.cinder_backup_sentry broker_req = json.loads(client_unit.relation( 'ceph', - 'ceph:client')['broker_req']) + 'ceph-mon:client')['broker_req']) return broker_req def get_broker_response(self): @@ -371,7 +302,7 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment): u.log.debug('Checking cinder-backup:ceph to ceph:client ' 'relation data...') unit = self.cinder_backup_sentry - relation = ['ceph', 'ceph:client'] + relation = ['ceph', 'ceph-mon:client'] req = { "api-version": 1, @@ -385,15 +316,15 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment): } ret = u.validate_relation_data(unit, relation, expected) if ret: - msg = u.relation_error('cinder-backup ceph', ret) + msg = u.relation_error('cinder-backup ceph-mon', ret) amulet.raise_status(amulet.FAIL, msg=msg) ret = self.validate_broker_req(unit, relation, req) if ret: - msg = u.relation_error('cinder-backup ceph', ret) + msg = u.relation_error('cinder-backup ceph-mon', ret) amulet.raise_status(amulet.FAIL, msg=msg) def test_201_ceph_cinderbackup_ceph_relation(self): - u.log.debug('Checking ceph:client to cinder-backup:ceph ' + u.log.debug('Checking ceph-mon:client to cinder-backup:ceph ' 'relation data...') ceph_unit = self.ceph0_sentry relation = ['client', 'cinder-backup:ceph'] @@ -816,17 +747,27 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment): name = "demo-vol" vols = self.cinder.volumes.list() - cinder_vols = [v for v in vols if v.name == name] + try: + cinder_vols = [v for v in vols if v.name == name] + except AttributeError: + cinder_vols = [v for v in vols if v.display_name == name] if not cinder_vols: # NOTE(hopem): it appears that at some point cinder-backup stopped # restoring volume metadata properly so revert to default name if # original is not found name = "restore_backup_%s" % (vol_backup.id) - cinder_vols = [v for v in vols if v.name == name] + try: + cinder_vols = [v for v in vols if v.name == name] + except AttributeError: + cinder_vols = [v for v in vols if v.display_name == name] if not cinder_vols: - msg = ("Could not find restore vol '%s' in %s" % - (name, [v.name for v in vols])) + try: + msg = ("Could not find restore vol '%s' in %s" % + (name, [v.name for v in vols])) + except AttributeError: + msg = ("Could not find restore vol '%s' in %s" % + (name, [v.display_name for v in vols])) u.log.error(msg) amulet.raise_status(amulet.FAIL, msg=msg) 
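
[Editorial aside between file diffs: the three try/except blocks added at the end of tests/basic_deployment.py above all encode the same fallback — the cinderclient v2 API exposes a volume's name as `name`, while the v1 client exposes it as `display_name`. A minimal helper along these lines would collapse that repetition; `volume_name` is a hypothetical name, not part of this diff.]

    def volume_name(volume):
        """Return a volume's name across cinderclient API versions."""
        try:
            return volume.name           # cinderclient v2+
        except AttributeError:
            return volume.display_name   # cinderclient v1

    # e.g. cinder_vols = [v for v in vols if volume_name(v) == name]
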
diff --git a/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 5afbbd8..66beeda 100644 --- a/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -21,6 +21,9 @@ from collections import OrderedDict from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) +from charmhelpers.contrib.openstack.amulet.utils import ( + OPENSTACK_RELEASES_PAIRS +) DEBUG = logging.DEBUG ERROR = logging.ERROR @@ -271,11 +274,8 @@ class OpenStackAmuletDeployment(AmuletDeployment): release. """ # Must be ordered by OpenStack release (not by Ubuntu release): - (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, - self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, - self.yakkety_newton, self.xenial_ocata, self.zesty_ocata, - self.xenial_pike, self.artful_pike, self.xenial_queens, - self.bionic_queens,) = range(13) + for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS): + setattr(self, os_pair, i) releases = { ('trusty', None): self.trusty_icehouse, diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py index d93cff3..84e87f5 100644 --- a/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -50,6 +50,13 @@ ERROR = logging.ERROR NOVA_CLIENT_VERSION = "2" +OPENSTACK_RELEASES_PAIRS = [ + 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty', + 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', + 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', + 'xenial_pike', 'artful_pike', 'xenial_queens', + 'bionic_queens'] + class OpenStackAmuletUtils(AmuletUtils): """OpenStack amulet utilities. @@ -63,7 +70,34 @@ class OpenStackAmuletUtils(AmuletUtils): super(OpenStackAmuletUtils, self).__init__(log_level) def validate_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): + public_port, expected, openstack_release=None): + """Validate endpoint data. Pick the correct validator based on + OpenStack release. Expected data should be in the v2 format: + { + 'id': id, + 'region': region, + 'adminurl': adminurl, + 'internalurl': internalurl, + 'publicurl': publicurl, + 'service_id': service_id} + + """ + validation_function = self.validate_v2_endpoint_data + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + validation_function = self.validate_v3_endpoint_data + expected = { + 'id': expected['id'], + 'region': expected['region'], + 'region_id': 'RegionOne', + 'url': self.valid_url, + 'interface': self.not_null, + 'service_id': expected['service_id']} + return validation_function(endpoints, admin_port, internal_port, + public_port, expected) + + def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): """Validate endpoint data. Validate actual endpoint data vs expected endpoint data. The ports @@ -141,7 +175,86 @@ class OpenStackAmuletUtils(AmuletUtils): if len(found) != expected_num_eps: return 'Unexpected number of endpoints found' - def validate_svc_catalog_endpoint_data(self, expected, actual): + def convert_svc_catalog_endpoint_data_to_v3(self, ep_data): + """Convert v2 endpoint data into v3. + + { + 'service_name1': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. 
+ 'publicURL': publicURL, + 'internalURL': internalURL + }], + 'service_name2': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + } + """ + self.log.warn("Endpoint ID and Region ID validation is limited to not " + "null checks after v2 to v3 conversion") + for svc in ep_data.keys(): + assert len(ep_data[svc]) == 1, "Unknown data format" + svc_ep_data = ep_data[svc][0] + ep_data[svc] = [ + { + 'url': svc_ep_data['adminURL'], + 'interface': 'admin', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}, + { + 'url': svc_ep_data['publicURL'], + 'interface': 'public', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}, + { + 'url': svc_ep_data['internalURL'], + 'interface': 'internal', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}] + return ep_data + + def validate_svc_catalog_endpoint_data(self, expected, actual, + openstack_release=None): + """Validate service catalog endpoint data. Pick the correct validator + for the OpenStack version. Expected data should be in the v2 format: + { + 'service_name1': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + 'service_name2': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + } + + """ + validation_function = self.validate_v2_svc_catalog_endpoint_data + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + validation_function = self.validate_v3_svc_catalog_endpoint_data + expected = self.convert_svc_catalog_endpoint_data_to_v3(expected) + return validation_function(expected, actual) + + def validate_v2_svc_catalog_endpoint_data(self, expected, actual): """Validate service catalog endpoint data. Validate a list of actual service catalog endpoints vs a list of @@ -328,7 +441,7 @@ class OpenStackAmuletUtils(AmuletUtils): if rel.get('api_version') != str(api_version): raise Exception("api_version not propagated through relation" " data yet ('{}' != '{}')." - "".format(rel['api_version'], api_version)) + "".format(rel.get('api_version'), api_version)) def keystone_configure_api_version(self, sentry_relation_pairs, deployment, api_version): @@ -350,16 +463,13 @@ class OpenStackAmuletUtils(AmuletUtils): deployment._auto_wait_for_status() self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) - def authenticate_cinder_admin(self, keystone_sentry, username, - password, tenant, api_version=2): + def authenticate_cinder_admin(self, keystone, api_version=2): """Authenticates admin user with cinder.""" - # NOTE(beisner): cinder python client doesn't accept tokens. 
- keystone_ip = keystone_sentry.info['public-address'] - ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) + self.log.debug('Authenticating cinder admin...') _clients = { 1: cinder_client.Client, 2: cinder_clientv2.Client} - return _clients[api_version](username, password, tenant, ept) + return _clients[api_version](session=keystone.session) def authenticate_keystone(self, keystone_ip, username, password, api_version=False, admin_port=False, @@ -367,13 +477,36 @@ class OpenStackAmuletUtils(AmuletUtils): project_domain_name=None, project_name=None): """Authenticate with Keystone""" self.log.debug('Authenticating with keystone...') - port = 5000 - if admin_port: - port = 35357 - base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), - port) - if not api_version or api_version == 2: - ep = base_ep + "/v2.0" + if not api_version: + api_version = 2 + sess, auth = self.get_keystone_session( + keystone_ip=keystone_ip, + username=username, + password=password, + api_version=api_version, + admin_port=admin_port, + user_domain_name=user_domain_name, + domain_name=domain_name, + project_domain_name=project_domain_name, + project_name=project_name + ) + if api_version == 2: + client = keystone_client.Client(session=sess) + else: + client = keystone_client_v3.Client(session=sess) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(sess) + return client + + def get_keystone_session(self, keystone_ip, username, password, + api_version=False, admin_port=False, + user_domain_name=None, domain_name=None, + project_domain_name=None, project_name=None): + """Return a keystone session object""" + ep = self.get_keystone_endpoint(keystone_ip, + api_version=api_version, + admin_port=admin_port) + if api_version == 2: auth = v2.Password( username=username, password=password, @@ -381,12 +514,7 @@ class OpenStackAmuletUtils(AmuletUtils): auth_url=ep ) sess = keystone_session.Session(auth=auth) - client = keystone_client.Client(session=sess) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(sess) - return client else: - ep = base_ep + "/v3" auth = v3.Password( user_domain_name=user_domain_name, username=username, @@ -397,10 +525,57 @@ class OpenStackAmuletUtils(AmuletUtils): auth_url=ep ) sess = keystone_session.Session(auth=auth) - client = keystone_client_v3.Client(session=sess) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(sess) - return client + return (sess, auth) + + def get_keystone_endpoint(self, keystone_ip, api_version=None, + admin_port=False): + """Return keystone endpoint""" + port = 5000 + if admin_port: + port = 35357 + base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), + port) + if api_version == 2: + ep = base_ep + "/v2.0" + else: + ep = base_ep + "/v3" + return ep + + def get_default_keystone_session(self, keystone_sentry, + openstack_release=None): + """Return a keystone session object and client object assuming standard + default settings + + Example call in amulet tests: + self.keystone_session, self.keystone = u.get_default_keystone_session( + self.keystone_sentry, + openstack_release=self._get_openstack_release()) + + The session can then be used to auth other clients: + neutronclient.Client(session=session) + aodh_client.Client(session=session) + eyc + """ + self.log.debug('Authenticating keystone admin...') + api_version = 2 + client_class = keystone_client.Client + # 11 => xenial_queens + if openstack_release and openstack_release >= 11: + 
+    def get_default_keystone_session(self, keystone_sentry,
+                                     openstack_release=None):
+        """Return a keystone session object and client object assuming standard
+        default settings
+
+        Example call in amulet tests:
+            self.keystone_session, self.keystone = u.get_default_keystone_session(
+                self.keystone_sentry,
+                openstack_release=self._get_openstack_release())
+
+        The session can then be used to auth other clients:
+            neutronclient.Client(session=session)
+            aodh_client.Client(session=session)
+            etc.
+        """
+        self.log.debug('Authenticating keystone admin...')
+        api_version = 2
+        client_class = keystone_client.Client
+        # 11 => xenial_queens
+        if openstack_release and openstack_release >= 11:
+            api_version = 3
+            client_class = keystone_client_v3.Client
+        keystone_ip = keystone_sentry.info['public-address']
+        session, auth = self.get_keystone_session(
+            keystone_ip,
+            api_version=api_version,
+            username='admin',
+            password='openstack',
+            project_name='admin',
+            user_domain_name='admin_domain',
+            project_domain_name='admin_domain')
+        client = client_class(session=session)
+        # This populates the client.service_catalog
+        client.auth_ref = auth.get_access(session)
+        return session, client
 
     def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                     tenant=None, api_version=None,
diff --git a/tests/charmhelpers/core/hookenv.py b/tests/charmhelpers/core/hookenv.py
index 7ed1cc4..627d8f7 100644
--- a/tests/charmhelpers/core/hookenv.py
+++ b/tests/charmhelpers/core/hookenv.py
@@ -27,6 +27,7 @@ import glob
 import os
 import json
 import yaml
+import re
 import subprocess
 import sys
 import errno
@@ -67,7 +68,7 @@ def cached(func):
     @wraps(func)
     def wrapper(*args, **kwargs):
         global cache
-        key = str((func, args, kwargs))
+        key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
         try:
             return cache[key]
         except KeyError:
@@ -289,7 +290,7 @@ class Config(dict):
         self.implicit_save = True
         self._prev_dict = None
         self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
-        if os.path.exists(self.path):
+        if os.path.exists(self.path) and os.stat(self.path).st_size:
             self.load_previous()
         atexit(self._implicit_save)
 
@@ -309,7 +310,11 @@ class Config(dict):
         """
         self.path = path or self.path
         with open(self.path) as f:
-            self._prev_dict = json.load(f)
+            try:
+                self._prev_dict = json.load(f)
+            except ValueError as e:
+                log('Unable to parse previous config data - {}'.format(str(e)),
+                    level=ERROR)
         for k, v in copy.deepcopy(self._prev_dict).items():
             if k not in self:
                 self[k] = v
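The `cached()` change above swaps `str()` for a sorted JSON dump so that memoisation keys are deterministic even when keyword-argument dicts are involved; a minimal illustration of the two properties it relies on:

```python
import json

# Equal keyword arguments always produce the same cache key, regardless
# of the order in which the dict happened to be built:
k1 = json.dumps((str, ('x',), {'b': 2, 'a': 1}), sort_keys=True, default=str)
k2 = json.dumps((str, ('x',), {'a': 1, 'b': 2}), sort_keys=True, default=str)
assert k1 == k2

# default=str renders non-JSON-serialisable parts (like the function
# object itself) into the key instead of raising TypeError.
```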
@@ -353,22 +358,40 @@ class Config(dict):
         self.save()
 
 
-@cached
+_cache_config = None
+
+
 def config(scope=None):
-    """Juju charm configuration"""
-    config_cmd_line = ['config-get']
-    if scope is not None:
-        config_cmd_line.append(scope)
-    else:
-        config_cmd_line.append('--all')
-    config_cmd_line.append('--format=json')
+    """
+    Get the juju charm configuration (scope==None) or an individual key
+    (scope=str). The returned value is a Python data structure loaded as
+    JSON from the Juju config command.
+
+    :param scope: If set, return the value for the specified key.
+    :type scope: Optional[str]
+    :returns: Either the whole config as a Config, or a key from it.
+    :rtype: Any
+    """
+    global _cache_config
+    config_cmd_line = ['config-get', '--all', '--format=json']
     try:
-        config_data = json.loads(
-            subprocess.check_output(config_cmd_line).decode('UTF-8'))
+        # JSON Decode Exception for Python3.5+
+        exc_json = json.decoder.JSONDecodeError
+    except AttributeError:
+        # JSON Decode Exception for Python2.7 through Python3.4
+        exc_json = ValueError
+    try:
+        if _cache_config is None:
+            config_data = json.loads(
+                subprocess.check_output(config_cmd_line).decode('UTF-8'))
+            _cache_config = Config(config_data)
         if scope is not None:
-            return config_data
-        return Config(config_data)
-    except ValueError:
+            return _cache_config.get(scope)
+        return _cache_config
+    except (exc_json, UnicodeDecodeError) as e:
+        log('Unable to parse output from config-get: config_cmd_line="{}" '
+            'message="{}"'
+            .format(config_cmd_line, str(e)), level=ERROR)
         return None
@@ -1043,7 +1066,6 @@ def juju_version():
                             universal_newlines=True).strip()
 
 
-@cached
 def has_juju_version(minimum_version):
     """Return True if the Juju version is at least the provided version"""
     return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
@@ -1103,6 +1125,8 @@ def _run_atexit():
 @translate_exc(from_exc=OSError, to_exc=NotImplementedError)
 def network_get_primary_address(binding):
     '''
+    Deprecated since Juju 2.3; use network_get()
+
     Retrieve the primary network address for a named binding
 
     :param binding: string. The name of a relation of extra-binding
@@ -1123,7 +1147,6 @@ def network_get_primary_address(binding):
     return response
 
 
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
 def network_get(endpoint, relation_id=None):
     """
     Retrieve the network details for a relation endpoint
@@ -1131,24 +1154,20 @@ def network_get(endpoint, relation_id=None):
     :param endpoint: string. The name of a relation endpoint
     :param relation_id: int. The ID of the relation for the current context.
     :return: dict. The loaded YAML output of the network-get query.
-    :raise: NotImplementedError if run on Juju < 2.1
+    :raise: NotImplementedError if request not supported by the Juju version.
     """
+    if not has_juju_version('2.2'):
+        raise NotImplementedError(juju_version())  # earlier versions require --primary-address
+    if relation_id and not has_juju_version('2.3'):
+        raise NotImplementedError  # 2.3 added the -r option
+
     cmd = ['network-get', endpoint, '--format', 'yaml']
     if relation_id:
         cmd.append('-r')
         cmd.append(relation_id)
-    try:
-        response = subprocess.check_output(
-            cmd,
-            stderr=subprocess.STDOUT).decode('UTF-8').strip()
-    except CalledProcessError as e:
-        # Early versions of Juju 2.0.x required the --primary-address argument.
-        # We catch that condition here and raise NotImplementedError since
-        # the requested semantics are not available - the caller can then
-        # use the network_get_primary_address() method instead.
-        if '--primary-address is currently required' in e.output.decode('UTF-8'):
-            raise NotImplementedError
-        raise
+    response = subprocess.check_output(
+        cmd,
+        stderr=subprocess.STDOUT).decode('UTF-8').strip()
    return yaml.safe_load(response)
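With the explicit version gates above, callers of `network_get()` can catch `NotImplementedError` and fall back to the deprecated primary-address call; roughly (the `cluster` binding name and the YAML shape are illustrative — see `network-get --format yaml` on Juju >= 2.2 for the actual structure):

```python
# Sketch: preferring network_get() and coping with older Juju versions.
try:
    info = network_get('cluster')
    address = info['bind-addresses'][0]['addresses'][0]['address']
except NotImplementedError:
    # Juju < 2.2 (or < 2.3 when a relation id is required)
    address = network_get_primary_address('cluster')
```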
@@ -1204,9 +1223,23 @@ def iter_units_for_relation_name(relation_name):
 
 def ingress_address(rid=None, unit=None):
     """
-    Retrieve the ingress-address from a relation when available. Otherwise,
-    return the private-address. This function is to be used on the consuming
-    side of the relation.
+    Retrieve the ingress-address from a relation when available.
+    Otherwise, return the private-address.
+
+    When used on the consuming side of the relation (unit is a remote
+    unit), the ingress-address is the IP address that this unit needs
+    to use to reach the provided service on the remote unit.
+
+    When used on the providing side of the relation (unit == local_unit()),
+    the ingress-address is the IP address that is advertised to remote
+    units on this relation. Remote units need to use this address to
+    reach the local provided service on this unit.
+
+    Note that charms may document some other method to use in
+    preference to the ingress_address(), such as an address provided
+    on a different relation attribute or a service discovery mechanism.
+    This allows charms to redirect inbound connections to their peers
+    or different applications such as load balancers.
 
     Usage:
         addresses = [ingress_address(rid=u.rid, unit=u.unit)
@@ -1220,3 +1253,40 @@ def ingress_address(rid=None, unit=None):
     settings = relation_get(rid=rid, unit=unit)
     return (settings.get('ingress-address') or
             settings.get('private-address'))
+
+
+def egress_subnets(rid=None, unit=None):
+    """
+    Retrieve the egress-subnets from a relation.
+
+    This function is to be used on the providing side of the
+    relation, and provides the ranges of addresses that client
+    connections may come from. The result is uninteresting on
+    the consuming side of a relation (unit == local_unit()).
+
+    Returns a stable list of subnets in CIDR format.
+    eg. ['192.168.1.0/24', '2001::F00F/128']
+
+    If egress-subnets is not available, falls back to using the published
+    ingress-address, or finally private-address.
+
+    :param rid: string relation id
+    :param unit: string unit name
+    :side effect: calls relation_get
+    :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
+    """
+    def _to_range(addr):
+        if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
+            addr += '/32'
+        elif ':' in addr and '/' not in addr:  # IPv6
+            addr += '/128'
+        return addr
+
+    settings = relation_get(rid=rid, unit=unit)
+    if 'egress-subnets' in settings:
+        return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
+    if 'ingress-address' in settings:
+        return [_to_range(settings['ingress-address'])]
+    if 'private-address' in settings:
+        return [_to_range(settings['private-address'])]
+    return []  # Should never happen
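`egress_subnets()` normalises bare addresses into CIDR ranges, which suits provider-side access control; a sketch of such a charm hook, where `grant_access` stands in for a hypothetical ACL helper:

```python
# Sketch: whitelisting client address ranges on the providing side of
# a relation. `grant_access` is a hypothetical helper, e.g. one that
# adds a database ACL entry for the given CIDR.
for u in iter_units_for_relation_name('shared-db'):
    for subnet in egress_subnets(rid=u.rid, unit=u.unit):
        grant_access(subnet)
```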
diff --git a/tests/charmhelpers/core/host.py b/tests/charmhelpers/core/host.py
index fd14d60..322ab2a 100644
--- a/tests/charmhelpers/core/host.py
+++ b/tests/charmhelpers/core/host.py
@@ -993,7 +993,7 @@ def updatedb(updatedb_text, new_path):
     return output
 
 
-def modulo_distribution(modulo=3, wait=30):
+def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
     """ Modulo distribution
 
     This helper uses the unit number, a modulo value and a constant wait time
@@ -1015,7 +1015,14 @@ def modulo_distribution(modulo=3, wait=30):
 
     @param modulo: int The modulo number creates the group distribution
     @param wait: int The constant time wait value
+    @param non_zero_wait: boolean Override unit % modulo == 0,
+                          return modulo * wait. Used to avoid collisions with
+                          leader nodes which are often given priority.
     @return: int Calculated time to wait for unit operation
     """
     unit_number = int(local_unit().split('/')[1])
-    return (unit_number % modulo) * wait
+    calculated_wait_time = (unit_number % modulo) * wait
+    if non_zero_wait and calculated_wait_time == 0:
+        return modulo * wait
+    else:
+        return calculated_wait_time
diff --git a/tests/charmhelpers/core/services/base.py b/tests/charmhelpers/core/services/base.py
index ca9dc99..179ad4f 100644
--- a/tests/charmhelpers/core/services/base.py
+++ b/tests/charmhelpers/core/services/base.py
@@ -307,23 +307,34 @@ class PortManagerCallback(ManagerCallback):
     """
     def __call__(self, manager, service_name, event_name):
         service = manager.get_service(service_name)
-        new_ports = service.get('ports', [])
+        # turn this generator into a list,
+        # as we'll be going over it multiple times
+        new_ports = list(service.get('ports', []))
         port_file = os.path.join(hookenv.charm_dir(),
                                  '.{}.ports'.format(service_name))
         if os.path.exists(port_file):
             with open(port_file) as fp:
                 old_ports = fp.read().split(',')
             for old_port in old_ports:
-                if bool(old_port):
-                    old_port = int(old_port)
-                    if old_port not in new_ports:
-                        hookenv.close_port(old_port)
+                if bool(old_port) and not self.ports_contains(old_port, new_ports):
+                    hookenv.close_port(old_port)
         with open(port_file, 'w') as fp:
             fp.write(','.join(str(port) for port in new_ports))
         for port in new_ports:
+            # A port is either a number or 'ICMP'
+            protocol = 'TCP'
+            if str(port).upper() == 'ICMP':
+                protocol = 'ICMP'
             if event_name == 'start':
-                hookenv.open_port(port)
+                hookenv.open_port(port, protocol)
             elif event_name == 'stop':
-                hookenv.close_port(port)
+                hookenv.close_port(port, protocol)
+
+    def ports_contains(self, port, ports):
+        if not bool(port):
+            return False
+        if str(port).upper() != 'ICMP':
+            port = int(port)
+        return port in ports
 
 
 def service_stop(service_name):
diff --git a/tests/charmhelpers/core/sysctl.py b/tests/charmhelpers/core/sysctl.py
index 6e413e3..1f188d8 100644
--- a/tests/charmhelpers/core/sysctl.py
+++ b/tests/charmhelpers/core/sysctl.py
@@ -31,18 +31,22 @@ __author__ = 'Jorge Niedbalski R. '
 def create(sysctl_dict, sysctl_file):
     """Creates a sysctl.conf file from a YAML associative array
 
-    :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
+    :param sysctl_dict: a dict or YAML-formatted string of sysctl
+        options eg "{ 'kernel.max_pid': 1337 }"
     :type sysctl_dict: str
     :param sysctl_file: path to the sysctl file to be saved
     :type sysctl_file: str or unicode
     :returns: None
     """
-    try:
-        sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
-    except yaml.YAMLError:
-        log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
-            level=ERROR)
-        return
+    if type(sysctl_dict) is not dict:
+        try:
+            sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
+        except yaml.YAMLError:
+            log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
+                level=ERROR)
+            return
+    else:
+        sysctl_dict_parsed = sysctl_dict
 
     with open(sysctl_file, "w") as fd:
         for key, value in sysctl_dict_parsed.items():
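The `non_zero_wait` flag added to `modulo_distribution()` in the host.py hunk above keeps a non-leader whose unit number is an exact multiple of `modulo` from proceeding at the same instant as the leader (which `distributed_wait()` lets through with zero delay). A worked sketch of the arithmetic for the defaults `modulo=3, wait=30`:

```python
# Worked example of the non_zero_wait behaviour above, with the unit
# number passed in directly instead of read from local_unit():
# unit 4 -> (4 % 3) * 30 = 30s; unit 3 -> (3 % 3) * 30 = 0s, which is
# bumped to modulo * wait = 90s when non_zero_wait=True.
def modulo_wait_sketch(unit_number, modulo=3, wait=30, non_zero_wait=False):
    calculated = (unit_number % modulo) * wait
    if non_zero_wait and calculated == 0:
        return modulo * wait
    return calculated

assert modulo_wait_sketch(4) == 30
assert modulo_wait_sketch(3) == 0
assert modulo_wait_sketch(3, non_zero_wait=True) == 90
```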
diff --git a/tests/charmhelpers/core/unitdata.py b/tests/charmhelpers/core/unitdata.py
index 6d7b494..ab55432 100644
--- a/tests/charmhelpers/core/unitdata.py
+++ b/tests/charmhelpers/core/unitdata.py
@@ -166,6 +166,10 @@ class Storage(object):
 
     To support dicts, lists, integer, floats, and booleans values
     are automatically json encoded/decoded.
+
+    Note: to facilitate unit testing, ':memory:' can be passed as the
+    path parameter which causes sqlite3 to only build the db in memory.
+    This should only be used for testing purposes.
     """
     def __init__(self, path=None):
         self.db_path = path
@@ -175,8 +179,9 @@ class Storage(object):
         else:
             self.db_path = os.path.join(
                 os.environ.get('CHARM_DIR', ''), '.unit-state.db')
-        with open(self.db_path, 'a') as f:
-            os.fchmod(f.fileno(), 0o600)
+        if self.db_path != ':memory:':
+            with open(self.db_path, 'a') as f:
+                os.fchmod(f.fileno(), 0o600)
         self.conn = sqlite3.connect('%s' % self.db_path)
         self.cursor = self.conn.cursor()
         self.revision = None
diff --git a/tests/dev-basic-bionic-queens b/tests/gate-basic-bionic-queens
similarity index 100%
rename from tests/dev-basic-bionic-queens
rename to tests/gate-basic-bionic-queens
diff --git a/tests/gate-basic-xenial-queens b/tests/gate-basic-xenial-queens
new file mode 100755
index 0000000..f0e8d5e
--- /dev/null
+++ b/tests/gate-basic-xenial-queens
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Amulet tests on a basic cinder backup deployment on xenial-queens."""
+
+from basic_deployment import CinderBackupBasicDeployment
+
+if __name__ == '__main__':
+    deployment = CinderBackupBasicDeployment(series='xenial',
+                                             openstack='cloud:xenial-queens',
+                                             source='cloud:xenial-updates/queens')
+    deployment.run_tests()
diff --git a/tox.ini b/tox.ini
index 4319064..09ca045 100644
--- a/tox.ini
+++ b/tox.ini
@@ -60,7 +60,7 @@ basepython = python2.7
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
 commands =
-    bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-pike --no-destroy
+    bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-queens --no-destroy
 
 [testenv:func27-dfs]
 # Charm Functional Test
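The `':memory:'` path added to `unitdata.Storage` above lets kv-backed code be unit tested without touching the filesystem; a minimal sketch:

```python
# Sketch: exercising Storage against an in-memory sqlite3 database,
# as permitted by the ':memory:' special case above.
from charmhelpers.core.unitdata import Storage

kv = Storage(path=':memory:')
kv.set('os-release-pair', 'xenial_queens')
assert kv.get('os-release-pair') == 'xenial_queens'
kv.flush()  # commit; no .unit-state.db file is ever created
```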