diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
index b71b2b1..87f364d 100644
--- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
@@ -858,9 +858,12 @@ class OpenStackAmuletUtils(AmuletUtils):
         :returns: List of pool name, object count, kb disk space used
         """
         df = self.get_ceph_df(sentry_unit)
-        pool_name = df['pools'][pool_id]['name']
-        obj_count = df['pools'][pool_id]['stats']['objects']
-        kb_used = df['pools'][pool_id]['stats']['kb_used']
+        for pool in df['pools']:
+            if pool['id'] == pool_id:
+                pool_name = pool['name']
+                obj_count = pool['stats']['objects']
+                kb_used = pool['stats']['kb_used']
+
         self.log.debug('Ceph {} pool (ID {}): {} objects, '
                        '{} kb used'.format(pool_name, pool_id,
                                            obj_count, kb_used))
diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py
index 8a541d4..9e5af34 100644
--- a/hooks/charmhelpers/contrib/openstack/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/utils.py
@@ -392,6 +392,8 @@ def get_swift_codename(version):
         releases = UBUNTU_OPENSTACK_RELEASE
         release = [k for k, v in six.iteritems(releases) if codename in v]
         ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
+        if six.PY3:
+            ret = ret.decode('UTF-8')
         if codename in ret or release[0] in ret:
             return codename
     elif len(codenames) == 1:
diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py
index 3923161..0d9bacf 100644
--- a/hooks/charmhelpers/contrib/storage/linux/ceph.py
+++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py
@@ -377,12 +377,12 @@ def get_mon_map(service):
         try:
             return json.loads(mon_status)
         except ValueError as v:
-            log("Unable to parse mon_status json: {}. Error: {}".format(
-                mon_status, v.message))
+            log("Unable to parse mon_status json: {}. Error: {}"
+                .format(mon_status, str(v)))
             raise
     except CalledProcessError as e:
-        log("mon_status command failed with message: {}".format(
-            e.message))
+        log("mon_status command failed with message: {}"
+            .format(str(e)))
         raise
diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py
index 5cc5c86..fd14d60 100644
--- a/hooks/charmhelpers/core/host.py
+++ b/hooks/charmhelpers/core/host.py
@@ -549,6 +549,8 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
         with open(path, 'wb') as target:
             os.fchown(target.fileno(), uid, gid)
             os.fchmod(target.fileno(), perms)
+            if six.PY3 and isinstance(content, six.string_types):
+                content = content.encode('UTF-8')
             target.write(content)
         return
     # the contents were the same, but we might still need to change the
diff --git a/hooks/charmhelpers/core/host_factory/ubuntu.py b/hooks/charmhelpers/core/host_factory/ubuntu.py
index d8dc378..99451b5 100644
--- a/hooks/charmhelpers/core/host_factory/ubuntu.py
+++ b/hooks/charmhelpers/core/host_factory/ubuntu.py
@@ -20,6 +20,7 @@ UBUNTU_RELEASES = (
     'yakkety',
     'zesty',
     'artful',
+    'bionic',
 )
diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py
index 825a60b..4a3ef17 100644
--- a/tests/basic_deployment.py
+++ b/tests/basic_deployment.py
@@ -65,7 +65,7 @@ class CinderCephBasicDeployment(OpenStackAmuletDeployment):
         # Note: cinder-ceph becomes a cinder subordinate unit.
         this_service = {'name': 'cinder-ceph'}
         other_services = [
-            {'name': 'percona-cluster', 'constraints': {'mem': '3072M'}},
+            {'name': 'percona-cluster'},
             {'name': 'keystone'},
             {'name': 'rabbitmq-server'},
             {'name': 'ceph', 'units': 3},
@@ -100,10 +100,8 @@ class CinderCephBasicDeployment(OpenStackAmuletDeployment):
             'admin-token': 'ubuntutesting'
         }
         pxc_config = {
-            'dataset-size': '25%',
+            'innodb-buffer-pool-size': '256M',
             'max-connections': 1000,
-            'root-password': 'ChangeMe123',
-            'sst-password': 'ChangeMe123',
         }
         cinder_config = {
             'block-device': 'None',
@@ -245,18 +243,32 @@ class CinderCephBasicDeployment(OpenStackAmuletDeployment):
     def test_110_users(self):
         """Verify expected users."""
         u.log.debug('Checking keystone users...')
-        expected = [
-            {'name': 'cinder_cinderv2',
-             'enabled': True,
-             'tenantId': u.not_null,
-             'id': u.not_null,
-             'email': 'juju@localhost'},
-            {'name': 'admin',
-             'enabled': True,
-             'tenantId': u.not_null,
-             'id': u.not_null,
-             'email': 'juju@localhost'}
-        ]
+
+        if self._get_openstack_release() < self.xenial_pike:
+            expected = [{
+                'name': 'cinder_cinderv2',
+                'enabled': True,
+                'tenantId': u.not_null,
+                'id': u.not_null,
+                'email': 'juju@localhost',
+            }]
+        else:
+            expected = [{
+                'name': 'cinderv3_cinderv2',
+                'enabled': True,
+                'tenantId': u.not_null,
+                'id': u.not_null,
+                'email': 'juju@localhost',
+            }]
+
+        expected.append({
+            'name': 'admin',
+            'enabled': True,
+            'tenantId': u.not_null,
+            'id': u.not_null,
+            'email': 'juju@localhost',
+        })
+
         actual = self.keystone.users.list()
         ret = u.validate_user_data(expected, actual)
         if ret:
@@ -265,26 +277,26 @@ class CinderCephBasicDeployment(OpenStackAmuletDeployment):
     def test_112_service_catalog(self):
         """Verify that the service catalog endpoint data"""
         u.log.debug('Checking keystone service catalog...')
-        endpoint_vol = {
-            'adminURL': u.valid_url,
-            'region': 'RegionOne',
-            'publicURL': u.valid_url,
-            'internalURL': u.valid_url
-        }
-        endpoint_id = {
-            'adminURL': u.valid_url,
-            'region': 'RegionOne',
-            'publicURL': u.valid_url,
-            'internalURL': u.valid_url
-        }
+        endpoint_vol = {'adminURL': u.valid_url,
+                        'region': 'RegionOne',
+                        'publicURL': u.valid_url,
+                        'internalURL': u.valid_url}
+        endpoint_id = {'adminURL': u.valid_url,
+                       'region': 'RegionOne',
+                       'publicURL': u.valid_url,
+                       'internalURL': u.valid_url}
         if self._get_openstack_release() >= self.trusty_icehouse:
             endpoint_vol['id'] = u.not_null
             endpoint_id['id'] = u.not_null
-        expected = {
-            'identity': [endpoint_id],
-            'volume': [endpoint_id]
-        }
+        if self._get_openstack_release() >= self.xenial_pike:
+            # Pike and later
+            expected = {'identity': [endpoint_id],
+                        'volumev2': [endpoint_id]}
+        else:
+            # Ocata and prior
+            expected = {'identity': [endpoint_id],
+                        'volume': [endpoint_id]}

         actual = self.keystone.service_catalog.get_endpoints()
         ret = u.validate_svc_catalog_endpoint_data(expected, actual)
@@ -491,10 +503,17 @@ class CinderCephBasicDeployment(OpenStackAmuletDeployment):
             'auth_protocol': 'http',
             'private-address': u.valid_ip,
             'auth_host': u.valid_ip,
-            'service_username': 'cinder_cinderv2',
             'service_tenant_id': u.not_null,
             'service_host': u.valid_ip
         }
+
+        if self._get_openstack_release() < self.xenial_pike:
+            # Ocata and earlier
+            expected['service_username'] = 'cinder_cinderv2'
+        else:
+            # Pike and later
+            expected['service_username'] = 'cinderv3_cinderv2'
+
         ret = u.validate_relation_data(unit, relation, expected)
         if ret:
             msg = u.relation_error('identity-service cinder', ret)
@@ -507,11 +526,6 @@ class CinderCephBasicDeployment(OpenStackAmuletDeployment):
         relation = ['identity-service', 'keystone:identity-service']
         expected = {
-            'cinder_service': 'cinder',
-            'cinder_region': 'RegionOne',
-            'cinder_public_url': u.valid_url,
-            'cinder_internal_url': u.valid_url,
-            'cinder_admin_url': u.valid_url,
             'private-address': u.valid_ip
         }
         ret = u.validate_relation_data(unit, relation, expected)
@@ -654,6 +668,12 @@ class CinderCephBasicDeployment(OpenStackAmuletDeployment):
         if 'cinder-ceph' not in expected_pools:
             expected_pools.append('cinder-ceph')

+        if (self._get_openstack_release() >= self.xenial_ocata and
+                'cinder' in expected_pools):
+            # No cinder after mitaka because we don't use the relation in this
+            # test
+            expected_pools.remove('cinder')
+
         results = []
         sentries = [
             self.ceph0_sentry,
@@ -700,6 +720,8 @@ class CinderCephBasicDeployment(OpenStackAmuletDeployment):

         # Check ceph cinder pool object count, disk space usage and pool name
         u.log.debug('Checking ceph cinder pool original samples...')
+        u.log.debug('cinder-ceph pool: {}'.format(cinder_ceph_pool))
+        u.log.debug('Checking ceph cinder pool original samples...')
         pool_name, obj_count, kb_used = u.get_ceph_pool_sample(
             sentry_unit, cinder_ceph_pool)
@@ -741,11 +763,14 @@ class CinderCephBasicDeployment(OpenStackAmuletDeployment):
         if ret:
             amulet.raise_status(amulet.FAIL, msg=ret)

-        # Validate ceph cinder pool disk space usage samples over time
-        ret = u.validate_ceph_pool_samples(pool_size_samples,
-                                           "cinder pool disk usage")
-        if ret:
-            amulet.raise_status(amulet.FAIL, msg=ret)
+        # Luminous (pike) ceph seems more efficient at disk usage so we
+        # cannot guarantee the ordering of kb_used
+        if self._get_openstack_release() < self.xenial_mitaka:
+            # Validate ceph cinder pool disk space usage samples over time
+            ret = u.validate_ceph_pool_samples(pool_size_samples,
+                                               "cinder pool disk usage")
+            if ret:
+                amulet.raise_status(amulet.FAIL, msg=ret)

     def test_499_ceph_cmds_exit_zero(self):
         """Check basic functionality of ceph cli commands against
diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py
index b71b2b1..87f364d 100644
--- a/tests/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/tests/charmhelpers/contrib/openstack/amulet/utils.py
@@ -858,9 +858,12 @@ class OpenStackAmuletUtils(AmuletUtils):
         :returns: List of pool name, object count, kb disk space used
         """
         df = self.get_ceph_df(sentry_unit)
-        pool_name = df['pools'][pool_id]['name']
-        obj_count = df['pools'][pool_id]['stats']['objects']
-        kb_used = df['pools'][pool_id]['stats']['kb_used']
+        for pool in df['pools']:
+            if pool['id'] == pool_id:
+                pool_name = pool['name']
+                obj_count = pool['stats']['objects']
+                kb_used = pool['stats']['kb_used']
+
         self.log.debug('Ceph {} pool (ID {}): {} objects, '
                        '{} kb used'.format(pool_name, pool_id,
                                            obj_count, kb_used))
diff --git a/tests/charmhelpers/core/host.py b/tests/charmhelpers/core/host.py
index 5cc5c86..fd14d60 100644
--- a/tests/charmhelpers/core/host.py
+++ b/tests/charmhelpers/core/host.py
@@ -549,6 +549,8 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
         with open(path, 'wb') as target:
             os.fchown(target.fileno(), uid, gid)
             os.fchmod(target.fileno(), perms)
+            if six.PY3 and isinstance(content, six.string_types):
+                content = content.encode('UTF-8')
             target.write(content)
         return
     # the contents were the same, but we might still need to change the
diff --git a/tests/charmhelpers/core/host_factory/ubuntu.py b/tests/charmhelpers/core/host_factory/ubuntu.py
index d8dc378..99451b5 100644
--- a/tests/charmhelpers/core/host_factory/ubuntu.py
+++ b/tests/charmhelpers/core/host_factory/ubuntu.py
@@ -20,6 +20,7 @@ UBUNTU_RELEASES = (
     'yakkety',
     'zesty',
     'artful',
+    'bionic',
 )
diff --git a/tests/gate-basic-artful-pike b/tests/gate-basic-artful-pike
index 2def33f..fd3816b 100644
--- a/tests/gate-basic-artful-pike
+++ b/tests/gate-basic-artful-pike
@@ -21,3 +21,6 @@ from basic_deployment import CinderCephBasicDeployment
 if __name__ == '__main__':
     deployment = CinderCephBasicDeployment(series='artful')
     deployment.run_tests()
+
+# NOTE(beisner): Artful target disabled, pending bug:
+# https://bugs.launchpad.net/charm-percona-cluster/+bug/1728132
diff --git a/tests/gate-basic-xenial-pike b/tests/gate-basic-xenial-pike
old mode 100644
new mode 100755
diff --git a/tox.ini b/tox.ini
index 7c2936e..6d44f4b 100644
--- a/tox.ini
+++ b/tox.ini
@@ -60,7 +60,7 @@ basepython = python2.7
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
 commands =
-    bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-mitaka --no-destroy
+    bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-pike --no-destroy

 [testenv:func27-dfs]
 # Charm Functional Test
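
Aside on the get_ceph_pool_sample() hunks above: 'ceph df' reports pools
keyed by their pool id, not by list position, so the old
df['pools'][pool_id] indexing breaks as soon as pool ids and list offsets
diverge (for example, after a pool has been deleted). A minimal standalone
sketch of the same lookup with an explicit guard for a missing id; the
helper name and sample data below are illustrative, not part of the patch:

    def get_pool_sample(df, pool_id):
        """Return (name, objects, kb_used) for the pool whose 'id' matches."""
        for pool in df['pools']:
            if pool['id'] == pool_id:
                return (pool['name'],
                        pool['stats']['objects'],
                        pool['stats']['kb_used'])
        # Without a guard, a loop like the one in the patch leaves its
        # sample variables unbound when no pool id matches.
        raise KeyError('no ceph pool with id {}'.format(pool_id))

    # Usage against a 'ceph df --format=json'-shaped dict (made-up values):
    sample_df = {'pools': [{'id': 2, 'name': 'cinder-ceph',
                            'stats': {'objects': 4, 'kb_used': 128}}]}
    assert get_pool_sample(sample_df, 2) == ('cinder-ceph', 4, 128)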