Switch to using zaza for functional tests

Change-Id: I21c472b579e865078eb58d1576f4c3c3ec845903
Liam Young 2019-05-23 13:01:07 +00:00
parent 75bbcaa2df
commit 691bb92310
25 changed files with 1002 additions and 1230 deletions

.gitignore

@@ -7,3 +7,5 @@ bin
.unit-state.db
.stestr
__pycache__
func-results.json
tests/id_rsa_zaza

test-requirements.txt

@@ -27,3 +27,5 @@ git+https://github.com/juju/charm-helpers.git#egg=charmhelpers
# NOTE: workaround for 14.04 pip/tox
pytz
pyudev # for ceph-* charm unit tests (not mocked?)
git+https://github.com/openstack-charmers/zaza.git@remove-namespaced-tests#egg=zaza
git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack

tests/README.md

@@ -1,8 +1,9 @@
 # Overview
 
-This directory provides Amulet tests to verify basic deployment functionality
-from the perspective of this charm, its requirements and its features, as
-exercised in a subset of the full OpenStack deployment test bundle topology.
+This directory provides Zaza test definitions and bundles to verify basic
+deployment functionality from the perspective of this charm, its requirements
+and its features, as exercised in a subset of the full OpenStack deployment
+test bundle topology.
 
 For full details on functional testing of OpenStack charms please refer to
 the [functional testing](http://docs.openstack.org/developer/charm-guide/testing.html#functional-testing)
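
Zaza drives these bundles from a `tests/tests.yaml` definition that names the bundles to deploy for gate and smoke runs and the test classes to execute. A minimal sketch of that layout (the `charm_name`, `gate_bundles`, `smoke_bundles`, `configure` and `tests` keys are standard zaza; the specific class paths are illustrative assumptions, not taken from this commit):

```yaml
charm_name: cinder
gate_bundles:
  - xenial-mitaka
  - bionic-queens
  - bionic-stein
smoke_bundles:
  - bionic-stein
configure:
  # hypothetical setup step from zaza.openstack
  - zaza.openstack.charm_tests.glance.setup.add_lts_image
tests:
  # hypothetical test class from zaza.openstack
  - zaza.openstack.charm_tests.cinder.tests.CinderTests
```

Each name under `gate_bundles`/`smoke_bundles` refers to a file in `tests/bundles/`, like the ones added below.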

tests/basic_deployment.py

@@ -1,924 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import amulet

from charmhelpers.contrib.openstack.amulet.deployment import (
    OpenStackAmuletDeployment
)

from charmhelpers.contrib.openstack.amulet.utils import (
    OpenStackAmuletUtils,
    DEBUG,
    # ERROR
)

import keystoneclient

# Use DEBUG to turn on debug logging
u = OpenStackAmuletUtils(DEBUG)


class CinderBasicDeployment(OpenStackAmuletDeployment):
    """Amulet tests on a basic lvm-backed cinder deployment. Verify
    relations, service status, users and endpoint service catalog.
    Create, clone, delete volumes. Create volume from glance image.
    Create volume snapshot. Create volume from snapshot."""

    def __init__(self, series=None, openstack=None, source=None,
                 stable=False):
        """Deploy the entire test environment."""
        super(CinderBasicDeployment, self).__init__(series, openstack, source,
                                                    stable)
        self._add_services()
        self._add_relations()
        self._configure_services()
        self._deploy()

        u.log.info('Waiting on extended status checks...')
        exclude_services = []
        self._auto_wait_for_status(exclude_services=exclude_services)

        self.d.sentry.wait()
        self._initialize_tests()

    def _add_services(self):
        """Add services

        Add the services that we're testing, where cinder is local,
        and the rest of the services are from lp branches that are
        compatible with the local charm (e.g. stable or next).
        """
        this_service = {'name': 'cinder'}
        other_services = [
            {'name': 'percona-cluster'},
            {'name': 'rabbitmq-server'},
            {'name': 'keystone'},
            {'name': 'glance'}
        ]
        if self._get_openstack_release() >= self.xenial_pike:
            # Pike and later, `openstack volume list` expects a compute
            # endpoint in the catalog.
            other_services.extend([
                {'name': 'nova-compute'},
                {'name': 'nova-cloud-controller'},
            ])
        super(CinderBasicDeployment, self)._add_services(this_service,
                                                         other_services)

    def _add_relations(self):
        """Add relations for the services."""
        relations = {
            'keystone:shared-db': 'percona-cluster:shared-db',
            'cinder:shared-db': 'percona-cluster:shared-db',
            'cinder:identity-service': 'keystone:identity-service',
            'cinder:amqp': 'rabbitmq-server:amqp',
            'cinder:image-service': 'glance:image-service',
            'glance:identity-service': 'keystone:identity-service',
            'glance:shared-db': 'percona-cluster:shared-db',
            'glance:amqp': 'rabbitmq-server:amqp'
        }
        if self._get_openstack_release() >= self.xenial_pike:
            # Pike and later, `openstack volume list` expects a compute
            # endpoint in the catalog.
            relations.update({
                'nova-compute:image-service': 'glance:image-service',
                'nova-compute:amqp': 'rabbitmq-server:amqp',
                'nova-cloud-controller:shared-db': 'percona-cluster:shared-db',
                'nova-cloud-controller:identity-service': 'keystone:'
                                                          'identity-service',
                'nova-cloud-controller:amqp': 'rabbitmq-server:amqp',
                'nova-cloud-controller:cloud-compute': 'nova-compute:'
                                                       'cloud-compute',
                'nova-cloud-controller:image-service': 'glance:image-service',
            })
        super(CinderBasicDeployment, self)._add_relations(relations)

    def _configure_services(self):
        """Configure all of the services."""
        cinder_config = {'block-device': 'vdb',
                         'glance-api-version': '2',
                         'overwrite': 'true',
                         'ephemeral-unmount': '/mnt'}
        keystone_config = {
            'admin-password': 'openstack',
            'admin-token': 'ubuntutesting'
        }
        pxc_config = {
            'innodb-buffer-pool-size': '256M',
            'max-connections': 1000,
        }
        configs = {
            'cinder': cinder_config,
            'keystone': keystone_config,
            'percona-cluster': pxc_config,
        }
        super(CinderBasicDeployment, self)._configure_services(configs)

    def _initialize_tests(self):
        """Perform final initialization before tests get run."""
        # Access the sentries for inspecting service units
        self.cinder_sentry = self.d.sentry['cinder'][0]
        self.glance_sentry = self.d.sentry['glance'][0]
        self.pxc_sentry = self.d.sentry['percona-cluster'][0]
        self.keystone_sentry = self.d.sentry['keystone'][0]
        self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0]
        u.log.debug('openstack release val: {}'.format(
            self._get_openstack_release()))
        u.log.debug('openstack release str: {}'.format(
            self._get_openstack_release_string()))

        # Authenticate admin with keystone
        self.keystone_session, self.keystone = u.get_default_keystone_session(
            self.keystone_sentry,
            openstack_release=self._get_openstack_release())

        # Authenticate admin with cinder endpoint
        if self._get_openstack_release() >= self.xenial_pike:
            api_version = 2
        else:
            api_version = 1
        self.cinder = u.authenticate_cinder_admin(self.keystone, api_version)

        if self._get_openstack_release() >= self.xenial_queens:
            self.create_users_v3()
            self.keystone_non_admin = u.authenticate_keystone(
                self.keystone_sentry.info['public-address'],
                user_domain_name=self.demo_domain,
                username=self.demo_user_v3,
                password='password',
                api_version=self.keystone_api_version,
                project_domain_name=self.demo_domain,
                project_name=self.demo_project,
            )
        else:
            self.create_users_v2()
            self.keystone_non_admin = u.authenticate_keystone_user(
                self.keystone, user=self.demo_user,
                password='password', tenant=self.demo_tenant)

        force_v1_client = False
        if self._get_openstack_release() == self.trusty_icehouse:
            # Updating image properties (such as arch or hypervisor) using the
            # v2 api in icehouse results in:
            # https://bugs.launchpad.net/python-glanceclient/+bug/1371559
            u.log.debug('Forcing glance to use v1 api')
            force_v1_client = True

        # Authenticate admin with glance endpoint
        self.glance = u.authenticate_glance_admin(
            self.keystone,
            force_v1_client=force_v1_client)
        self.cinder_non_admin = u.authenticate_cinder_admin(
            self.keystone_non_admin, api_version)

    def create_users_v2(self):
        # Create a demo tenant/role/user
        self.demo_tenant = 'demoTenant'
        self.demo_role = 'demoRole'
        self.demo_user = 'demoUser'
        self.keystone_api_version = 2
        if not u.tenant_exists(self.keystone, self.demo_tenant):
            tenant = self.keystone.tenants.create(
                tenant_name=self.demo_tenant,
                description='demo tenant',
                enabled=True)

            self.keystone.roles.create(name=self.demo_role)
            self.keystone.users.create(name=self.demo_user,
                                       password='password',
                                       tenant_id=tenant.id,
                                       email='demo@demo.com')

    def create_users_v3(self):
        # Create a demo tenant/role/user
        self.demo_project = 'demoProject'
        self.demo_user_v3 = 'demoUserV3'
        self.demo_role = 'demoRoleV3'
        self.demo_domain_admin = 'demoDomainAdminV3'
        self.demo_domain = 'demoDomain'
        self.keystone_api_version = 3
        try:
            domain = self.keystone.domains.find(name=self.demo_domain)
        except keystoneclient.exceptions.NotFound:
            domain = self.keystone.domains.create(
                self.demo_domain,
                description='Demo Domain',
                enabled=True
            )

        try:
            self.keystone.projects.find(name=self.demo_project)
        except keystoneclient.exceptions.NotFound:
            self.keystone.projects.create(
                self.demo_project,
                domain,
                description='Demo Project',
                enabled=True,
            )

        try:
            self.keystone.roles.find(name=self.demo_role)
        except keystoneclient.exceptions.NotFound:
            self.keystone.roles.create(name=self.demo_role)

        try:
            self.keystone.roles.find(name='Member')
        except keystoneclient.exceptions.NotFound:
            self.keystone.roles.create(name='Member')

        if not self.find_keystone_v3_user(self.keystone,
                                          self.demo_user_v3,
                                          self.demo_domain):
            user = self.keystone.users.create(
                self.demo_user_v3,
                domain=domain.id,
                project=self.demo_project,
                password='password',
                email='demov3@demo.com',
                description='Demo',
                enabled=True)

            role = self.keystone.roles.find(name='Member')
            u.log.debug("self.keystone.roles.grant('{}', user='{}', "
                        "domain='{}')".format(role.id, user.id, domain.id))
            self.keystone.roles.grant(
                role.id,
                user=user.id,
                project=self.keystone.projects.find(name=self.demo_project).id)

        try:
            self.keystone.roles.find(name='Admin')
        except keystoneclient.exceptions.NotFound:
            self.keystone.roles.create(name='Admin')

        if not self.find_keystone_v3_user(self.keystone,
                                          self.demo_domain_admin,
                                          self.demo_domain):
            user = self.keystone.users.create(
                self.demo_domain_admin,
                domain=domain.id,
                project=self.demo_project,
                password='password',
                email='demoadminv3@demo.com',
                description='Demo Admin',
                enabled=True)

            role = self.keystone.roles.find(name='Admin')
            u.log.debug("self.keystone.roles.grant('{}', user='{}', "
                        "domain='{}')".format(role.id, user.id, domain.id))
            self.keystone.roles.grant(
                role.id,
                user=user.id,
                domain=domain.id)

    def find_keystone_v3_user(self, client, username, domain):
        """Find a user within a specified keystone v3 domain"""
        domain_users = client.users.list(
            domain=client.domains.find(name=domain).id
        )
        for user in domain_users:
            if username.lower() == user.name.lower():
                return user
        return None

    def _extend_cinder_volume(self, vol_id, new_size=2):
        """Extend an existing cinder volume size.

        :param vol_id: existing cinder volume to extend
        :param new_size: new size in gigabytes
        :returns: None if successful; Failure message otherwise
        """
        # Extend existing volume size
        try:
            self.cinder.volumes.extend(vol_id, new_size)
            vol_size_org = self.cinder.volumes.get(vol_id).size
        except Exception as e:
            msg = 'Failed to extend volume: {}'.format(e)
            amulet.raise_status(amulet.FAIL, msg=msg)

        # Confirm that the volume reaches available status.
        ret = u.resource_reaches_status(self.cinder.volumes, vol_id,
                                        expected_stat="available",
                                        msg="Volume status wait")
        if not ret:
            msg = ('Cinder volume failed to reach expected state '
                   'while extending.')
            return msg

        # Validate volume size and status
        u.log.debug('Validating volume attributes...')
        vol_size_ext = self.cinder.volumes.get(vol_id).size
        vol_stat = self.cinder.volumes.get(vol_id).status
        msg_attr = ('Volume attributes - orig size:{} extended size:{} '
                    'stat:{}'.format(vol_size_org, vol_size_ext, vol_stat))

        if vol_size_ext > vol_size_org and vol_stat == 'available':
            u.log.debug(msg_attr)
        else:
            msg = ('Volume validation failed, {}'.format(msg_attr))
            return msg

        return None

    def _snapshot_cinder_volume(self, name='demo-snapshot', vol_id=None):
        """Create a snapshot of an existing cinder volume.

        :param name: display name to assign to snapshot
        :param vol_id: existing cinder volume to snapshot
        :returns: the new snapshot object; raises on failure
        """
        u.log.debug('Creating snapshot of volume ({})...'.format(vol_id))
        # Create snapshot of an existing cinder volume
        try:
            snap_new = self.cinder.volume_snapshots.create(
                volume_id=vol_id, display_name=name)
            snap_id = snap_new.id
        except TypeError:
            snap_new = self.cinder.volume_snapshots.create(
                volume_id=vol_id, name=name)
            snap_id = snap_new.id
        except Exception as e:
            msg = 'Failed to snapshot the volume: {}'.format(e)
            amulet.raise_status(amulet.FAIL, msg=msg)

        # Confirm that the snapshot reaches available status.
        ret = u.resource_reaches_status(self.cinder.volume_snapshots,
                                        snap_id,
                                        expected_stat="available",
                                        msg="Snapshot status wait")
        if not ret:
            msg = ('Cinder snapshot failed to reach expected state '
                   'while snapshotting.')
            amulet.raise_status(amulet.FAIL, msg=msg)

        # Validate snapshot
        u.log.debug('Validating snapshot attributes...')
        snap_name = u._get_cinder_obj_name(
            self.cinder.volume_snapshots.get(snap_id))
        snap_stat = self.cinder.volume_snapshots.get(snap_id).status
        snap_vol_id = self.cinder.volume_snapshots.get(snap_id).volume_id
        msg_attr = ('Snapshot attributes - name:{} status:{} '
                    'vol_id:{}'.format(snap_name, snap_stat, snap_vol_id))

        if snap_name == name and snap_stat == 'available' \
                and snap_vol_id == vol_id:
            u.log.debug(msg_attr)
        else:
            msg = ('Snapshot validation failed, {}'.format(msg_attr))
            amulet.raise_status(amulet.FAIL, msg=msg)

        return snap_new

    def _check_cinder_lvm(self):
        """Inspect lvm on cinder unit, do basic validation against
        cinder volumes and snapshots that exist."""
        u.log.debug('Checking cinder volumes against lvm volumes...')

        # Inspect
        cmd = ('sudo lvs | grep -E \'^\s*(volume|_snap)\' | '
               'grep cinder-volumes | awk \'{ print $1 }\'')
        output, code = self.cinder_sentry.run(cmd)
        u.log.debug('{} `{}` returned '
                    '{}'.format(self.cinder_sentry.info['unit_name'],
                                cmd, code))
        if code != 0:
            return "command `{}` returned {}".format(cmd, str(code))

        vol_list = self.cinder.volumes.list()
        lv_id_list = output.split('\n')
        lv_count = len(lv_id_list)
        vol_count = len(vol_list)
        snap_count = len(self.cinder.volume_snapshots.list())

        # Expect cinder vol + snap count to match lvm log vol count
        u.log.debug('vols:{} snaps:{} lvs:{}'.format(vol_count,
                                                     snap_count,
                                                     lv_count))
        if (vol_count + snap_count) != lv_count:
            msg = ('lvm volume count ({}) != cinder volume + snap count '
                   '({})'.format(lv_count, vol_count + snap_count))
            return msg

        # Expect all cinder vol IDs to exist in the LVM volume list
        for vol_this in vol_list:
            try:
                vol_id = vol_this.id
                vol_name = u._get_cinder_obj_name(vol_this)
                lv_id = 'volume-{}'.format(vol_id)
                _index = lv_id_list.index(lv_id)
                u.log.info('Volume ({}) correlates to lv '
                           '{} ({})'.format(vol_name,
                                            _index,
                                            lv_id))
            except ValueError:
                u.log.error('lvs output: {}'.format(output))
                msg = ('Volume ID {} not found in '
                       'LVM volume list.'.format(vol_this.id))
                return msg

        return None

    def test_100_services(self):
        """Verify that the expected services are running on the
        cinder unit."""
        services = {
            self.cinder_sentry: ['cinder-scheduler',
                                 'cinder-volume'],
        }
        if self._get_openstack_release() < self.xenial_ocata:
            services[self.cinder_sentry].append('cinder-api')

        ret = u.validate_services_by_name(services)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

    def test_110_memcache(self):
        u.validate_memcache(self.cinder_sentry,
                            '/etc/cinder/cinder.conf',
                            self._get_openstack_release(),
                            earliest_release=self.trusty_mitaka)

    def test_110_users(self):
        """Verify expected users."""
        u.log.debug('Checking keystone users...')
        if self._get_openstack_release() >= self.xenial_queens:
            expected = [{
                'name': 'cinderv2_cinderv3',
                'enabled': True,
                'default_project_id': u.not_null,
                'id': u.not_null,
                'email': 'juju@localhost',
            }]
            domain = self.keystone.domains.find(name='service_domain')
            actual = self.keystone.users.list(domain=domain)
            api_version = 3
        elif self._get_openstack_release() >= self.xenial_pike:
            expected = [
                {'name': 'cinderv2_cinderv3',
                 'enabled': True,
                 'tenantId': u.not_null,
                 'id': u.not_null,
                 'email': 'juju@localhost'},
                {'name': 'admin',
                 'enabled': True,
                 'tenantId': u.not_null,
                 'id': u.not_null,
                 'email': 'juju@localhost'}
            ]
            actual = self.keystone.users.list()
            api_version = 2
        else:
            expected = [
                {'name': 'cinder_cinderv2',
                 'enabled': True,
                 'tenantId': u.not_null,
                 'id': u.not_null,
                 'email': 'juju@localhost'},
                {'name': 'admin',
                 'enabled': True,
                 'tenantId': u.not_null,
                 'id': u.not_null,
                 'email': 'juju@localhost'}
            ]
            actual = self.keystone.users.list()
            api_version = 2
        ret = u.validate_user_data(expected, actual, api_version)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

    def test_112_service_catalog(self):
        """Verify the service catalog endpoint data."""
        u.log.debug('Checking keystone service catalog...')
        endpoint_vol = {'adminURL': u.valid_url,
                        'region': 'RegionOne',
                        'publicURL': u.valid_url,
                        'internalURL': u.valid_url}
        endpoint_id = {'adminURL': u.valid_url,
                       'region': 'RegionOne',
                       'publicURL': u.valid_url,
                       'internalURL': u.valid_url}
        if self._get_openstack_release() >= self.trusty_icehouse:
            endpoint_vol['id'] = u.not_null
            endpoint_id['id'] = u.not_null
        if self._get_openstack_release() >= self.xenial_pike:
            # Pike and later
            expected = {'image': [endpoint_id],
                        'identity': [endpoint_id],
                        'volumev2': [endpoint_id]}
        else:
            # Ocata and prior
            expected = {'image': [endpoint_id],
                        'identity': [endpoint_id],
                        'volume': [endpoint_id]}
        actual = self.keystone.service_catalog.get_endpoints()

        ret = u.validate_svc_catalog_endpoint_data(
            expected,
            actual,
            openstack_release=self._get_openstack_release())
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

    def test_114_cinder_endpoint(self):
        """Verify the cinder endpoint data."""
        u.log.debug('Checking cinder endpoint...')
        endpoints = self.keystone.endpoints.list()
        admin_port = internal_port = public_port = '8776'

        if self._get_openstack_release() >= self.xenial_queens:
            expected = {
                'id': u.not_null,
                'region': 'RegionOne',
                'region_id': 'RegionOne',
                'url': u.valid_url,
                'interface': u.not_null,
                'service_id': u.not_null}
            ret = u.validate_v3_endpoint_data(
                endpoints,
                admin_port,
                internal_port,
                public_port,
                expected,
                6)
        else:
            expected = {
                'id': u.not_null,
                'region': 'RegionOne',
                'adminurl': u.valid_url,
                'internalurl': u.valid_url,
                'publicurl': u.valid_url,
                'service_id': u.not_null}
            ret = u.validate_v2_endpoint_data(
                endpoints,
                admin_port,
                internal_port,
                public_port,
                expected)
        if ret:
            amulet.raise_status(amulet.FAIL,
                                msg='cinder endpoint: {}'.format(ret))

    def test_202_cinder_glance_image_service_relation(self):
        """Verify the cinder:glance image-service relation data"""
        u.log.debug('Checking cinder:glance image-service relation data...')
        unit = self.cinder_sentry
        relation = ['image-service', 'glance:image-service']
        expected = {'private-address': u.valid_ip}
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            msg = u.relation_error('cinder image-service', ret)
            amulet.raise_status(amulet.FAIL, msg=msg)

    def test_203_glance_cinder_image_service_relation(self):
        """Verify the glance:cinder image-service relation data"""
        u.log.debug('Checking glance:cinder image-service relation data...')
        unit = self.glance_sentry
        relation = ['image-service', 'cinder:image-service']
        expected = {
            'private-address': u.valid_ip,
            'glance-api-server': u.valid_url
        }
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            msg = u.relation_error('glance image-service', ret)
            amulet.raise_status(amulet.FAIL, msg=msg)

    def test_204_mysql_cinder_db_relation(self):
        """Verify the mysql:cinder shared-db relation data"""
        u.log.debug('Checking mysql:cinder db relation data...')
        unit = self.pxc_sentry
        relation = ['shared-db', 'cinder:shared-db']
        expected = {
            'private-address': u.valid_ip,
            'db_host': u.valid_ip
        }
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            msg = u.relation_error('mysql shared-db', ret)
            amulet.raise_status(amulet.FAIL, msg=msg)

    def test_205_cinder_mysql_db_relation(self):
        """Verify the cinder:mysql shared-db relation data"""
        u.log.debug('Checking cinder:mysql db relation data...')
        unit = self.cinder_sentry
        relation = ['shared-db', 'percona-cluster:shared-db']
        expected = {
            'private-address': u.valid_ip,
            'hostname': u.valid_ip,
            'username': 'cinder',
            'database': 'cinder'
        }
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            msg = u.relation_error('cinder shared-db', ret)
            amulet.raise_status(amulet.FAIL, msg=msg)

    def test_206_keystone_cinder_id_relation(self):
        """Verify the keystone:cinder identity-service relation data"""
        u.log.debug('Checking keystone:cinder id relation data...')
        unit = self.keystone_sentry
        relation = ['identity-service',
                    'cinder:identity-service']
        expected = {
            'service_protocol': 'http',
            'service_tenant': 'services',
            'admin_token': 'ubuntutesting',
            'service_password': u.not_null,
            'service_port': '5000',
            'auth_port': '35357',
            'auth_protocol': 'http',
            'private-address': u.valid_ip,
            'auth_host': u.valid_ip,
            'service_tenant_id': u.not_null,
            'service_host': u.valid_ip
        }
        if self._get_openstack_release() < self.xenial_pike:
            # Ocata and earlier
            expected['service_username'] = 'cinder_cinderv2'
        else:
            # Pike and later
            expected['service_username'] = 'cinderv2_cinderv3'
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            msg = u.relation_error('identity-service cinder', ret)
            amulet.raise_status(amulet.FAIL, msg=msg)

    def test_207_cinder_keystone_id_relation(self):
        """Verify the cinder:keystone identity-service relation data"""
        u.log.debug('Checking cinder:keystone id relation data...')
        unit = self.cinder_sentry
        relation = ['identity-service',
                    'keystone:identity-service']
        expected = {
            'private-address': u.valid_ip
        }
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            msg = u.relation_error('cinder identity-service', ret)
            amulet.raise_status(amulet.FAIL, msg=msg)

    def test_208_rabbitmq_cinder_amqp_relation(self):
        """Verify the rabbitmq-server:cinder amqp relation data"""
        u.log.debug('Checking rmq:cinder amqp relation data...')
        unit = self.rabbitmq_sentry
        relation = ['amqp', 'cinder:amqp']
        expected = {
            'private-address': u.valid_ip,
            'password': u.not_null,
            'hostname': u.valid_ip
        }
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            msg = u.relation_error('amqp cinder', ret)
            amulet.raise_status(amulet.FAIL, msg=msg)

    def test_209_cinder_rabbitmq_amqp_relation(self):
        """Verify the cinder:rabbitmq-server amqp relation data"""
        u.log.debug('Checking cinder:rmq amqp relation data...')
        unit = self.cinder_sentry
        relation = ['amqp', 'rabbitmq-server:amqp']
        expected = {
            'private-address': u.valid_ip,
            'vhost': 'openstack',
            'username': u.not_null
        }
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            msg = u.relation_error('cinder amqp', ret)
            amulet.raise_status(amulet.FAIL, msg=msg)

    def test_301_cinder_logging_config(self):
        """Verify the data in the cinder logging conf file."""
        u.log.debug('Checking cinder logging config file data...')
        unit = self.cinder_sentry
        conf = '/etc/cinder/logging.conf'
        expected = {
            'loggers': {
                'keys': 'root, cinder'
            },
            'logger_cinder': {
                'level': 'INFO',
                'handlers': 'stderr',
                'qualname': 'cinder'
            },
            'logger_root': {
                'level': 'WARNING',
                'handlers': 'null'
            }
        }

        for section, pairs in expected.iteritems():
            ret = u.validate_config_data(unit, conf, section, pairs)
            if ret:
                message = "cinder logging config error: {}".format(ret)
                amulet.raise_status(amulet.FAIL, msg=message)

    def test_303_cinder_rootwrap_config(self):
        """Inspect select config pairs in rootwrap.conf."""
        u.log.debug('Checking cinder rootwrap config file data...')
        unit = self.cinder_sentry
        conf = '/etc/cinder/rootwrap.conf'
        section = 'DEFAULT'
        expected = {
            'filters_path': '/etc/cinder/rootwrap.d,'
                            '/usr/share/cinder/rootwrap',
            'use_syslog': 'False',
        }
        ret = u.validate_config_data(unit, conf, section, expected)
        if ret:
            msg = "cinder rootwrap config error: {}".format(ret)
            amulet.raise_status(amulet.FAIL, msg=msg)

    def test_400_cinder_api_connection(self):
        """Simple api call to check service is up and responding"""
        u.log.debug('Checking basic cinder api functionality...')
        check = list(self.cinder.volumes.list())
        u.log.debug('Cinder api check (volumes.list): {}'.format(check))
        assert(check == [])

    def test_401_create_delete_volume(self):
        """Create a cinder volume and delete it."""
        u.log.debug('Creating, checking and deleting cinder volume...')
        vol_new = u.create_cinder_volume(self.cinder)
        vol_id = vol_new.id
        u.delete_resource(self.cinder.volumes, vol_id, msg="cinder volume")

    def test_402_create_delete_volume_from_image(self):
        """Create a cinder volume from a glance image, and delete it."""
        u.log.debug('Creating, checking and deleting cinder volume '
                    'from glance image...')
        img_new = u.create_cirros_image(self.glance, "cirros-image-1")
        img_id = img_new.id
        vol_new = u.create_cinder_volume(self.cinder,
                                         vol_name="demo-vol-cirros",
                                         img_id=img_id)
        vol_id = vol_new.id
        u.delete_resource(self.glance.images, img_id, msg="glance image")
        u.delete_resource(self.cinder.volumes, vol_id, msg="cinder volume")

    def test_403_volume_snap_clone_extend_inspect(self):
        """Create a cinder volume, clone it, extend its size, create a
        snapshot of the volume, create a volume from a snapshot, check
        status of each, inspect underlying lvm, then delete the resources."""
        u.log.debug('Creating, snapshotting, cloning, extending a '
                    'cinder volume...')
        vols = []

        # Create a 1GB volume
        vol_new = u.create_cinder_volume(self.cinder, vol_size=1)
        vols.append(vol_new)
        vol_id = vol_new.id

        # Snapshot the volume
        snap = self._snapshot_cinder_volume(vol_id=vol_id)
        snap_id = snap.id

        # Create a volume from the snapshot
        vol_from_snap = u.create_cinder_volume(self.cinder,
                                               vol_name="demo-vol-from-snap",
                                               snap_id=snap_id)
        vols.append(vol_from_snap)

        # Clone an existing volume
        vol_clone = u.create_cinder_volume(self.cinder,
                                           vol_name="demo-vol-clone",
                                           src_vol_id=vol_id)
        vols.append(vol_clone)
        vol_clone_id = vol_clone.id

        # Extend the cloned volume and confirm new size
        ret = self._extend_cinder_volume(vol_clone_id, new_size=2)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

        # Inspect logical volumes (lvm) on cinder unit
        ret = self._check_cinder_lvm()
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

        # Cleanup
        u.log.debug('Deleting snapshot {}...'.format(snap_id))
        u.delete_resource(self.cinder.volume_snapshots,
                          snap_id, msg="cinder snapshot")

        for vol in vols:
            u.log.debug('Deleting volume {}...'.format(vol.id))
            u.delete_resource(self.cinder.volumes, vol.id, msg="cinder volume")

    def test_404_admin_force_delete_volume(self):
        """Create a cinder volume and force-delete it as admin."""
        u.log.debug('Creating, checking and force-deleting cinder volume...')
        vol_new = u.create_cinder_volume(self.cinder)
        vol_new.force_delete()

    # TODO: Re-enable this test after we add back support for non-admins
    # force deleting volumes.
    # def test_405_non_admin_force_delete_volume(self):
    #     """Create a cinder volume and delete it."""
    #     os_release = self._get_openstack_release()
    #     if os_release < self.xenial_queens:
    #         u.log.info('Skipping test, {} < queens'.format(os_release))
    #         return
    #     u.log.debug('Creating, checking and deleting cinder volume...')
    #     vol_new = u.create_cinder_volume(self.cinder_non_admin)
    #     vol_new.force_delete()

    def test_500_security_checklist_action(self):
        """Verify expected result on a default install"""
        u.log.debug("Testing security-checklist")
        sentry_unit = self.cinder_sentry

        action_id = u.run_action(sentry_unit, "security-checklist")
        u.wait_on_action(action_id)
        data = amulet.actions.get_action_output(action_id, full_output=True)
        assert data.get(u"status") == "failed", \
            "Security check is expected to not pass by default"

    def test_900_restart_on_config_change(self):
        """Verify that the specified services are restarted when the
        config is changed."""
        sentry = self.cinder_sentry
        juju_service = 'cinder'

        # Expected default and alternate values
        set_default = {'debug': 'False'}
        set_alternate = {'debug': 'True'}

        # Config file affected by juju set config change
        conf_file = '/etc/cinder/cinder.conf'

        # Services which are expected to restart upon config change
        services = {
            'cinder-scheduler': conf_file,
            'cinder-volume': conf_file
        }
        if self._get_openstack_release() >= self.xenial_ocata:
            services['apache2'] = conf_file
        else:
            services['cinder-api'] = conf_file

        # Make config change, check for service restarts
        u.log.debug('Making config change on {}...'.format(juju_service))
        mtime = u.get_sentry_time(sentry)
        self.d.configure(juju_service, set_alternate)

        sleep_time = 40
        for s, conf_file in services.iteritems():
            u.log.debug("Checking that service restarted: {}".format(s))
            if not u.validate_service_config_changed(sentry, mtime, s,
                                                     conf_file,
                                                     retry_count=4,
                                                     retry_sleep_time=20,
                                                     sleep_time=sleep_time):
                self.d.configure(juju_service, set_default)
                msg = "service {} didn't restart after config change".format(s)
                amulet.raise_status(amulet.FAIL, msg=msg)
            sleep_time = 0

        self.d.configure(juju_service, set_default)

    def test_910_pause_and_resume(self):
        """The services can be paused and resumed."""
        u.log.debug('Checking pause and resume actions...')
        unit = self.d.sentry['cinder'][0]
        unit_name = unit.info['unit_name']

        u.log.debug('Checking for active status on {}'.format(unit_name))
        assert u.status_get(unit)[0] == "active"

        u.log.debug('Running pause action on {}'.format(unit_name))
        action_id = u.run_action(unit, "pause")
        u.log.debug('Waiting on action {}'.format(action_id))
        assert u.wait_on_action(action_id), "Pause action failed."
        u.log.debug('Checking for maintenance status on {}'.format(unit_name))
        assert u.status_get(unit)[0] == "maintenance"

        u.log.debug('Running resume action on {}'.format(unit_name))
        action_id = u.run_action(unit, "resume")
        u.log.debug('Waiting on action {}'.format(action_id))
        assert u.wait_on_action(action_id), "Resume action failed."
        u.log.debug('Checking for active status on {}'.format(unit_name))
        assert u.status_get(unit)[0] == "active"
        u.log.debug('OK')
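
Under zaza the topology and configuration that `_add_services`, `_add_relations` and `_configure_services` assembled in Python move into the declarative per-release bundles that follow. As a worked example of the translation, the `cinder_config` dict above maps onto application options of roughly this shape (a sketch; compare the `cinder` stanza in each bundle below):

```yaml
applications:
  cinder:
    options:
      block-device: vdb          # was cinder_config['block-device']
      glance-api-version: 2      # was cinder_config['glance-api-version']
      overwrite: "true"
      ephemeral-unmount: /mnt
```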

tests/bundles/bionic-queens.yaml

@@ -0,0 +1,102 @@
series: bionic
relations:
- - nova-compute:image-service
  - glance:image-service
- - nova-compute:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:shared-db
  - percona-cluster:shared-db
- - nova-cloud-controller:identity-service
  - keystone:identity-service
- - nova-cloud-controller:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
  - nova-compute:cloud-compute
- - nova-cloud-controller:image-service
  - glance:image-service
- - keystone:shared-db
  - percona-cluster:shared-db
- - glance:identity-service
  - keystone:identity-service
- - glance:shared-db
  - percona-cluster:shared-db
- - glance:amqp
  - rabbitmq-server:amqp
- - neutron-gateway:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:quantum-network-service
  - neutron-gateway:quantum-network-service
- - neutron-api:shared-db
  - percona-cluster:shared-db
- - neutron-api:amqp
  - rabbitmq-server:amqp
- - neutron-api:neutron-api
  - nova-cloud-controller:neutron-api
- - neutron-api:identity-service
  - keystone:identity-service
- - nova-compute:neutron-plugin
  - neutron-openvswitch:neutron-plugin
- - rabbitmq-server:amqp
  - neutron-openvswitch:amqp
- - cinder:shared-db
  - percona-cluster:shared-db
- - cinder:identity-service
  - keystone:identity-service
- - cinder:amqp
  - rabbitmq-server:amqp
- - cinder:image-service
  - glance:image-service
applications:
  rabbitmq-server:
    charm: cs:~openstack-charmers-next/rabbitmq-server
    num_units: 1
  percona-cluster:
    charm: cs:~openstack-charmers-next/percona-cluster
    num_units: 1
    options:
      max-connections: 1000
      innodb-buffer-pool-size: 256M
  nova-cloud-controller:
    charm: cs:~openstack-charmers-next/nova-cloud-controller
    num_units: 1
    options:
      network-manager: Neutron
      debug: true
  neutron-api:
    charm: cs:~openstack-charmers-next/neutron-api
    num_units: 1
    options:
      flat-network-providers: physnet1
      neutron-security-groups: true
  keystone:
    charm: cs:~openstack-charmers-next/keystone
    num_units: 1
  neutron-gateway:
    charm: cs:~openstack-charmers-next/neutron-gateway
    num_units: 1
    options:
      bridge-mappings: physnet1:br-ex
  glance:
    charm: cs:~openstack-charmers-next/glance
    num_units: 1
  neutron-openvswitch:
    charm: cs:~openstack-charmers-next/neutron-openvswitch
  nova-compute:
    charm: cs:~openstack-charmers-next/nova-compute
    num_units: 1
    constraints: mem=4G cores=4
    options:
      config-flags: auto_assign_floating_ip=False
      enable-live-migration: false
      aa-profile-mode: enforce
      ephemeral-device: /dev/vdb
      ephemeral-unmount: /mnt
      debug: true
  cinder:
    series: bionic
    num_units: 1
    options:
      block-device: /dev/vdb
      glance-api-version: 2
      overwrite: "true"
      ephemeral-unmount: /mnt

tests/bundles/bionic-rocky.yaml

@@ -0,0 +1,110 @@
series: bionic
relations:
- - nova-compute:image-service
  - glance:image-service
- - nova-compute:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:shared-db
  - percona-cluster:shared-db
- - nova-cloud-controller:identity-service
  - keystone:identity-service
- - nova-cloud-controller:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
  - nova-compute:cloud-compute
- - nova-cloud-controller:image-service
  - glance:image-service
- - keystone:shared-db
  - percona-cluster:shared-db
- - glance:identity-service
  - keystone:identity-service
- - glance:shared-db
  - percona-cluster:shared-db
- - glance:amqp
  - rabbitmq-server:amqp
- - neutron-gateway:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:quantum-network-service
  - neutron-gateway:quantum-network-service
- - neutron-api:shared-db
  - percona-cluster:shared-db
- - neutron-api:amqp
  - rabbitmq-server:amqp
- - neutron-api:neutron-api
  - nova-cloud-controller:neutron-api
- - neutron-api:identity-service
  - keystone:identity-service
- - nova-compute:neutron-plugin
  - neutron-openvswitch:neutron-plugin
- - rabbitmq-server:amqp
  - neutron-openvswitch:amqp
- - cinder:shared-db
  - percona-cluster:shared-db
- - cinder:identity-service
  - keystone:identity-service
- - cinder:amqp
  - rabbitmq-server:amqp
- - cinder:image-service
  - glance:image-service
applications:
  rabbitmq-server:
    charm: cs:~openstack-charmers-next/rabbitmq-server
    num_units: 1
  percona-cluster:
    charm: cs:~openstack-charmers-next/percona-cluster
    num_units: 1
    options:
      max-connections: 1000
      innodb-buffer-pool-size: 256M
  nova-cloud-controller:
    charm: cs:~openstack-charmers-next/nova-cloud-controller
    num_units: 1
    options:
      openstack-origin: cloud:bionic-rocky
      network-manager: Neutron
      debug: true
  neutron-api:
    charm: cs:~openstack-charmers-next/neutron-api
    num_units: 1
    options:
      openstack-origin: cloud:bionic-rocky
      flat-network-providers: physnet1
      neutron-security-groups: true
  keystone:
    charm: cs:~openstack-charmers-next/keystone
    num_units: 1
    options:
      openstack-origin: cloud:bionic-rocky
  neutron-gateway:
    charm: cs:~openstack-charmers-next/neutron-gateway
    num_units: 1
    options:
      openstack-origin: cloud:bionic-rocky
      bridge-mappings: physnet1:br-ex
  glance:
    charm: cs:~openstack-charmers-next/glance
    num_units: 1
    options:
      openstack-origin: cloud:bionic-rocky
  neutron-openvswitch:
    charm: cs:~openstack-charmers-next/neutron-openvswitch
  nova-compute:
    charm: cs:~openstack-charmers-next/nova-compute
    num_units: 1
    constraints: mem=4G cores=4
    options:
      openstack-origin: cloud:bionic-rocky
      config-flags: auto_assign_floating_ip=False
      enable-live-migration: false
      aa-profile-mode: enforce
      ephemeral-device: /dev/vdb
      ephemeral-unmount: /mnt
      debug: true
  cinder:
    series: bionic
    num_units: 1
    options:
      block-device: /dev/vdb
      glance-api-version: 2
      overwrite: "true"
      ephemeral-unmount: /mnt

tests/bundles/bionic-stein.yaml

@@ -0,0 +1,110 @@
series: bionic
relations:
- - nova-compute:image-service
  - glance:image-service
- - nova-compute:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:shared-db
  - percona-cluster:shared-db
- - nova-cloud-controller:identity-service
  - keystone:identity-service
- - nova-cloud-controller:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
  - nova-compute:cloud-compute
- - nova-cloud-controller:image-service
  - glance:image-service
- - keystone:shared-db
  - percona-cluster:shared-db
- - glance:identity-service
  - keystone:identity-service
- - glance:shared-db
  - percona-cluster:shared-db
- - glance:amqp
  - rabbitmq-server:amqp
- - neutron-gateway:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:quantum-network-service
  - neutron-gateway:quantum-network-service
- - neutron-api:shared-db
  - percona-cluster:shared-db
- - neutron-api:amqp
  - rabbitmq-server:amqp
- - neutron-api:neutron-api
  - nova-cloud-controller:neutron-api
- - neutron-api:identity-service
  - keystone:identity-service
- - nova-compute:neutron-plugin
  - neutron-openvswitch:neutron-plugin
- - rabbitmq-server:amqp
  - neutron-openvswitch:amqp
- - cinder:shared-db
  - percona-cluster:shared-db
- - cinder:identity-service
  - keystone:identity-service
- - cinder:amqp
  - rabbitmq-server:amqp
- - cinder:image-service
  - glance:image-service
applications:
  rabbitmq-server:
    charm: cs:~openstack-charmers-next/rabbitmq-server
    num_units: 1
  percona-cluster:
    charm: cs:~openstack-charmers-next/percona-cluster
    num_units: 1
    options:
      max-connections: 1000
      innodb-buffer-pool-size: 256M
  nova-cloud-controller:
    charm: cs:~openstack-charmers-next/nova-cloud-controller
    num_units: 1
    options:
      openstack-origin: cloud:bionic-stein
      network-manager: Neutron
      debug: true
  neutron-api:
    charm: cs:~openstack-charmers-next/neutron-api
    num_units: 1
    options:
      openstack-origin: cloud:bionic-stein
      flat-network-providers: physnet1
      neutron-security-groups: true
  keystone:
    charm: cs:~openstack-charmers-next/keystone
    num_units: 1
    options:
      openstack-origin: cloud:bionic-stein
  neutron-gateway:
    charm: cs:~openstack-charmers-next/neutron-gateway
    num_units: 1
    options:
      openstack-origin: cloud:bionic-stein
      bridge-mappings: physnet1:br-ex
  glance:
    charm: cs:~openstack-charmers-next/glance
    num_units: 1
    options:
      openstack-origin: cloud:bionic-stein
  neutron-openvswitch:
    charm: cs:~openstack-charmers-next/neutron-openvswitch
  nova-compute:
    charm: cs:~openstack-charmers-next/nova-compute
    num_units: 1
    constraints: mem=4G cores=4
    options:
      openstack-origin: cloud:bionic-stein
      config-flags: auto_assign_floating_ip=False
      enable-live-migration: false
      aa-profile-mode: enforce
      ephemeral-device: /dev/vdb
      ephemeral-unmount: /mnt
      debug: true
  cinder:
    series: bionic
    num_units: 1
    options:
      block-device: /dev/vdb
      glance-api-version: 2
      overwrite: "true"
      ephemeral-unmount: /mnt

tests/bundles/trusty-icehouse.yaml

@@ -0,0 +1,92 @@
series: trusty
relations:
- - nova-compute:image-service
  - glance:image-service
- - nova-compute:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:shared-db
  - percona-cluster:shared-db
- - nova-cloud-controller:identity-service
  - keystone:identity-service
- - nova-cloud-controller:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
  - nova-compute:cloud-compute
- - nova-cloud-controller:image-service
  - glance:image-service
- - keystone:shared-db
  - percona-cluster:shared-db
- - glance:identity-service
  - keystone:identity-service
- - glance:shared-db
  - percona-cluster:shared-db
- - glance:amqp
  - rabbitmq-server:amqp
- - neutron-gateway:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:quantum-network-service
  - neutron-gateway:quantum-network-service
- - neutron-api:shared-db
  - percona-cluster:shared-db
- - neutron-api:amqp
  - rabbitmq-server:amqp
- - neutron-api:neutron-api
  - nova-cloud-controller:neutron-api
- - neutron-api:identity-service
  - keystone:identity-service
- - nova-compute:neutron-plugin
  - neutron-openvswitch:neutron-plugin
- - rabbitmq-server:amqp
  - neutron-openvswitch:amqp
applications:
  rabbitmq-server:
    charm: cs:~openstack-charmers-next/rabbitmq-server
    num_units: 1
  percona-cluster:
    charm: cs:~openstack-charmers-next/percona-cluster
    num_units: 1
    options:
      max-connections: 1000
      innodb-buffer-pool-size: 256M
  nova-cloud-controller:
    charm: cs:~openstack-charmers-next/nova-cloud-controller
    num_units: 1
    options:
      openstack-origin: cloud:trusty-icehouse
      debug: true
  neutron-api:
    charm: cs:~openstack-charmers-next/neutron-api
    num_units: 1
    options:
      openstack-origin: cloud:trusty-icehouse
      flat-network-providers: physnet1
      neutron-security-groups: true
  keystone:
    charm: cs:~openstack-charmers-next/keystone
    num_units: 1
    options:
      openstack-origin: cloud:trusty-icehouse
  neutron-gateway:
    charm: cs:~openstack-charmers-next/neutron-gateway
    num_units: 1
    options:
      openstack-origin: cloud:trusty-icehouse
      bridge-mappings: physnet1:br-ex
  glance:
    charm: cs:~openstack-charmers-next/glance
    num_units: 1
    options:
      openstack-origin: cloud:trusty-icehouse
  neutron-openvswitch:
    charm: cs:~openstack-charmers-next/neutron-openvswitch
  nova-compute:
    series: trusty
    charm: ../../../nova-compute
    num_units: 1
    constraints: mem=4G cores=4
    options:
      openstack-origin: cloud:trusty-icehouse
      config-flags: auto_assign_floating_ip=False
      enable-live-migration: false
      aa-profile-mode: enforce
      debug: true

tests/bundles/trusty-mitaka.yaml

@@ -0,0 +1,109 @@
series: trusty
relations:
- - nova-compute:image-service
  - glance:image-service
- - nova-compute:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:shared-db
  - percona-cluster:shared-db
- - nova-cloud-controller:identity-service
  - keystone:identity-service
- - nova-cloud-controller:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
  - nova-compute:cloud-compute
- - nova-cloud-controller:image-service
  - glance:image-service
- - keystone:shared-db
  - percona-cluster:shared-db
- - glance:identity-service
  - keystone:identity-service
- - glance:shared-db
  - percona-cluster:shared-db
- - glance:amqp
  - rabbitmq-server:amqp
- - neutron-gateway:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:quantum-network-service
  - neutron-gateway:quantum-network-service
- - neutron-api:shared-db
  - percona-cluster:shared-db
- - neutron-api:amqp
  - rabbitmq-server:amqp
- - neutron-api:neutron-api
  - nova-cloud-controller:neutron-api
- - neutron-api:identity-service
  - keystone:identity-service
- - nova-compute:neutron-plugin
  - neutron-openvswitch:neutron-plugin
- - rabbitmq-server:amqp
  - neutron-openvswitch:amqp
- - cinder:shared-db
  - percona-cluster:shared-db
- - cinder:identity-service
  - keystone:identity-service
- - cinder:amqp
  - rabbitmq-server:amqp
- - cinder:image-service
  - glance:image-service
applications:
  rabbitmq-server:
    charm: cs:~openstack-charmers-next/rabbitmq-server
    num_units: 1
  percona-cluster:
    charm: cs:~openstack-charmers-next/percona-cluster
    num_units: 1
    options:
      max-connections: 1000
      innodb-buffer-pool-size: 256M
  nova-cloud-controller:
    charm: cs:~openstack-charmers-next/nova-cloud-controller
    num_units: 1
    options:
      openstack-origin: cloud:trusty-mitaka
      network-manager: Neutron
      debug: true
  neutron-api:
    charm: cs:~openstack-charmers-next/neutron-api
    num_units: 1
    options:
      openstack-origin: cloud:trusty-mitaka
      flat-network-providers: physnet1
      neutron-security-groups: true
  keystone:
    charm: cs:~openstack-charmers-next/keystone
    num_units: 1
    options:
      openstack-origin: cloud:trusty-mitaka
  neutron-gateway:
    charm: cs:~openstack-charmers-next/neutron-gateway
    num_units: 1
    options:
      openstack-origin: cloud:trusty-mitaka
      bridge-mappings: physnet1:br-ex
  glance:
    charm: cs:~openstack-charmers-next/glance
    num_units: 1
    options:
      openstack-origin: cloud:trusty-mitaka
  neutron-openvswitch:
    charm: cs:~openstack-charmers-next/neutron-openvswitch
  nova-compute:
    charm: cs:~openstack-charmers-next/nova-compute
    num_units: 1
    constraints: mem=4G cores=4
    options:
      openstack-origin: cloud:trusty-mitaka
      config-flags: auto_assign_floating_ip=False
      enable-live-migration: false
      aa-profile-mode: enforce
      debug: true
  cinder:
    series: trusty
    num_units: 1
    options:
      openstack-origin: cloud:trusty-mitaka
      block-device: /dev/vdb
      glance-api-version: 2
      overwrite: "true"
      ephemeral-unmount: /mnt

tests/bundles/xenial-mitaka.yaml

@@ -0,0 +1,102 @@
series: xenial
relations:
- - nova-compute:image-service
  - glance:image-service
- - nova-compute:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:shared-db
  - percona-cluster:shared-db
- - nova-cloud-controller:identity-service
  - keystone:identity-service
- - nova-cloud-controller:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
  - nova-compute:cloud-compute
- - nova-cloud-controller:image-service
  - glance:image-service
- - keystone:shared-db
  - percona-cluster:shared-db
- - glance:identity-service
  - keystone:identity-service
- - glance:shared-db
  - percona-cluster:shared-db
- - glance:amqp
  - rabbitmq-server:amqp
- - neutron-gateway:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:quantum-network-service
  - neutron-gateway:quantum-network-service
- - neutron-api:shared-db
  - percona-cluster:shared-db
- - neutron-api:amqp
  - rabbitmq-server:amqp
- - neutron-api:neutron-api
  - nova-cloud-controller:neutron-api
- - neutron-api:identity-service
  - keystone:identity-service
- - nova-compute:neutron-plugin
  - neutron-openvswitch:neutron-plugin
- - rabbitmq-server:amqp
  - neutron-openvswitch:amqp
- - cinder:shared-db
  - percona-cluster:shared-db
- - cinder:identity-service
  - keystone:identity-service
- - cinder:amqp
  - rabbitmq-server:amqp
- - cinder:image-service
  - glance:image-service
applications:
  rabbitmq-server:
    charm: cs:~openstack-charmers-next/rabbitmq-server
    num_units: 1
  percona-cluster:
    charm: cs:~openstack-charmers-next/percona-cluster
    num_units: 1
    options:
      max-connections: 1000
      innodb-buffer-pool-size: 256M
  nova-cloud-controller:
    charm: cs:~openstack-charmers-next/nova-cloud-controller
    num_units: 1
    options:
      network-manager: Neutron
      debug: true
  neutron-api:
    charm: cs:~openstack-charmers-next/neutron-api
    num_units: 1
    options:
      flat-network-providers: physnet1
      neutron-security-groups: true
  keystone:
    charm: cs:~openstack-charmers-next/keystone
    num_units: 1
  neutron-gateway:
    charm: cs:~openstack-charmers-next/neutron-gateway
    num_units: 1
    options:
      bridge-mappings: physnet1:br-ex
  glance:
    charm: cs:~openstack-charmers-next/glance
    num_units: 1
  neutron-openvswitch:
    charm: cs:~openstack-charmers-next/neutron-openvswitch
  nova-compute:
    charm: cs:~openstack-charmers-next/nova-compute
    num_units: 1
    constraints: mem=4G cores=4
    options:
      config-flags: auto_assign_floating_ip=False
      enable-live-migration: false
      aa-profile-mode: enforce
      ephemeral-device: /dev/vdb
      ephemeral-unmount: /mnt
      debug: true
  cinder:
    series: xenial
    num_units: 1
    options:
      block-device: /dev/vdb
      glance-api-version: 2
      overwrite: "true"
      ephemeral-unmount: /mnt

tests/bundles/xenial-ocata.yaml

@@ -0,0 +1,110 @@
series: xenial
relations:
- - nova-compute:image-service
  - glance:image-service
- - nova-compute:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:shared-db
  - percona-cluster:shared-db
- - nova-cloud-controller:identity-service
  - keystone:identity-service
- - nova-cloud-controller:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
  - nova-compute:cloud-compute
- - nova-cloud-controller:image-service
  - glance:image-service
- - keystone:shared-db
  - percona-cluster:shared-db
- - glance:identity-service
  - keystone:identity-service
- - glance:shared-db
  - percona-cluster:shared-db
- - glance:amqp
  - rabbitmq-server:amqp
- - neutron-gateway:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:quantum-network-service
  - neutron-gateway:quantum-network-service
- - neutron-api:shared-db
  - percona-cluster:shared-db
- - neutron-api:amqp
  - rabbitmq-server:amqp
- - neutron-api:neutron-api
  - nova-cloud-controller:neutron-api
- - neutron-api:identity-service
  - keystone:identity-service
- - nova-compute:neutron-plugin
  - neutron-openvswitch:neutron-plugin
- - rabbitmq-server:amqp
  - neutron-openvswitch:amqp
- - cinder:shared-db
  - percona-cluster:shared-db
- - cinder:identity-service
  - keystone:identity-service
- - cinder:amqp
  - rabbitmq-server:amqp
- - cinder:image-service
  - glance:image-service
applications:
  rabbitmq-server:
    charm: cs:~openstack-charmers-next/rabbitmq-server
    num_units: 1
  percona-cluster:
    charm: cs:~openstack-charmers-next/percona-cluster
    num_units: 1
    options:
      max-connections: 1000
      innodb-buffer-pool-size: 256M
  nova-cloud-controller:
    charm: cs:~openstack-charmers-next/nova-cloud-controller
    num_units: 1
    options:
      openstack-origin: cloud:xenial-ocata
      network-manager: Neutron
      debug: true
  neutron-api:
    charm: cs:~openstack-charmers-next/neutron-api
    num_units: 1
    options:
      openstack-origin: cloud:xenial-ocata
      flat-network-providers: physnet1
      neutron-security-groups: true
  keystone:
    charm: cs:~openstack-charmers-next/keystone
    num_units: 1
    options:
      openstack-origin: cloud:xenial-ocata
  neutron-gateway:
    charm: cs:~openstack-charmers-next/neutron-gateway
    num_units: 1
    options:
      openstack-origin: cloud:xenial-ocata
      bridge-mappings: physnet1:br-ex
  glance:
    charm: cs:~openstack-charmers-next/glance
    num_units: 1
    options:
      openstack-origin: cloud:xenial-ocata
  neutron-openvswitch:
    charm: cs:~openstack-charmers-next/neutron-openvswitch
  nova-compute:
    charm: cs:~openstack-charmers-next/nova-compute
    num_units: 1
    constraints: mem=4G cores=4
    options:
      openstack-origin: cloud:xenial-ocata
      config-flags: auto_assign_floating_ip=False
      enable-live-migration: false
      aa-profile-mode: enforce
      ephemeral-device: /dev/vdb
      ephemeral-unmount: /mnt
      debug: true
  cinder:
    series: xenial
    num_units: 1
    options:
      block-device: /dev/vdb
      glance-api-version: 2
      overwrite: "true"
      ephemeral-unmount: /mnt

tests/bundles/xenial-pike.yaml

@@ -0,0 +1,110 @@
series: xenial
relations:
- - nova-compute:image-service
  - glance:image-service
- - nova-compute:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:shared-db
  - percona-cluster:shared-db
- - nova-cloud-controller:identity-service
  - keystone:identity-service
- - nova-cloud-controller:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
  - nova-compute:cloud-compute
- - nova-cloud-controller:image-service
  - glance:image-service
- - keystone:shared-db
  - percona-cluster:shared-db
- - glance:identity-service
  - keystone:identity-service
- - glance:shared-db
  - percona-cluster:shared-db
- - glance:amqp
  - rabbitmq-server:amqp
- - neutron-gateway:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:quantum-network-service
  - neutron-gateway:quantum-network-service
- - neutron-api:shared-db
  - percona-cluster:shared-db
- - neutron-api:amqp
  - rabbitmq-server:amqp
- - neutron-api:neutron-api
  - nova-cloud-controller:neutron-api
- - neutron-api:identity-service
  - keystone:identity-service
- - nova-compute:neutron-plugin
  - neutron-openvswitch:neutron-plugin
- - rabbitmq-server:amqp
  - neutron-openvswitch:amqp
- - cinder:shared-db
  - percona-cluster:shared-db
- - cinder:identity-service
  - keystone:identity-service
- - cinder:amqp
  - rabbitmq-server:amqp
- - cinder:image-service
  - glance:image-service
applications:
  rabbitmq-server:
    charm: cs:~openstack-charmers-next/rabbitmq-server
    num_units: 1
  percona-cluster:
    charm: cs:~openstack-charmers-next/percona-cluster
    num_units: 1
    options:
      max-connections: 1000
      innodb-buffer-pool-size: 256M
  nova-cloud-controller:
    charm: cs:~openstack-charmers-next/nova-cloud-controller
    num_units: 1
    options:
      openstack-origin: cloud:xenial-pike
      network-manager: Neutron
      debug: true
  neutron-api:
    charm: cs:~openstack-charmers-next/neutron-api
    num_units: 1
    options:
      openstack-origin: cloud:xenial-pike
      flat-network-providers: physnet1
      neutron-security-groups: true
  keystone:
    charm: cs:~openstack-charmers-next/keystone
    num_units: 1
    options:
      openstack-origin: cloud:xenial-pike
  neutron-gateway:
    charm: cs:~openstack-charmers-next/neutron-gateway
    num_units: 1
    options:
      openstack-origin: cloud:xenial-pike
      bridge-mappings: physnet1:br-ex
  glance:
    charm: cs:~openstack-charmers-next/glance
    num_units: 1
    options:
      openstack-origin: cloud:xenial-pike
  neutron-openvswitch:
    charm: cs:~openstack-charmers-next/neutron-openvswitch
  nova-compute:
    charm: cs:~openstack-charmers-next/nova-compute
    num_units: 1
    constraints: mem=4G cores=4
    options:
      openstack-origin: cloud:xenial-pike
      config-flags: auto_assign_floating_ip=False
      enable-live-migration: false
      aa-profile-mode: enforce
      ephemeral-device: /dev/vdb
      ephemeral-unmount: /mnt
      debug: true
  cinder:
    series: xenial
    num_units: 1
    options:
      block-device: /dev/vdb
      glance-api-version: 2
      overwrite: "true"
      ephemeral-unmount: /mnt

tests/bundles/xenial-queens.yaml

@@ -0,0 +1,110 @@
series: xenial
relations:
- - nova-compute:image-service
  - glance:image-service
- - nova-compute:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:shared-db
  - percona-cluster:shared-db
- - nova-cloud-controller:identity-service
  - keystone:identity-service
- - nova-cloud-controller:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
  - nova-compute:cloud-compute
- - nova-cloud-controller:image-service
  - glance:image-service
- - keystone:shared-db
  - percona-cluster:shared-db
- - glance:identity-service
  - keystone:identity-service
- - glance:shared-db
  - percona-cluster:shared-db
- - glance:amqp
  - rabbitmq-server:amqp
- - neutron-gateway:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:quantum-network-service
  - neutron-gateway:quantum-network-service
- - neutron-api:shared-db
  - percona-cluster:shared-db
- - neutron-api:amqp
  - rabbitmq-server:amqp
- - neutron-api:neutron-api
  - nova-cloud-controller:neutron-api
- - neutron-api:identity-service
  - keystone:identity-service
- - nova-compute:neutron-plugin
  - neutron-openvswitch:neutron-plugin
- - rabbitmq-server:amqp
  - neutron-openvswitch:amqp
- - cinder:shared-db
  - percona-cluster:shared-db
- - cinder:identity-service
  - keystone:identity-service
- - cinder:amqp
  - rabbitmq-server:amqp
- - cinder:image-service
  - glance:image-service
applications:
  rabbitmq-server:
    charm: cs:~openstack-charmers-next/rabbitmq-server
    num_units: 1
  percona-cluster:
    charm: cs:~openstack-charmers-next/percona-cluster
    num_units: 1
    options:
      max-connections: 1000
      innodb-buffer-pool-size: 256M
  nova-cloud-controller:
    charm: cs:~openstack-charmers-next/nova-cloud-controller
    num_units: 1
    options:
      openstack-origin: cloud:xenial-queens
      network-manager: Neutron
      debug: true
  neutron-api:
    charm: cs:~openstack-charmers-next/neutron-api
    num_units: 1
    options:
      openstack-origin: cloud:xenial-queens
      flat-network-providers: physnet1
      neutron-security-groups: true
  keystone:
    charm: cs:~openstack-charmers-next/keystone
    num_units: 1
    options:
      openstack-origin: cloud:xenial-queens
  neutron-gateway:
    charm: cs:~openstack-charmers-next/neutron-gateway
    num_units: 1
    options:
      openstack-origin: cloud:xenial-queens
      bridge-mappings: physnet1:br-ex
  glance:
    charm: cs:~openstack-charmers-next/glance
    num_units: 1
    options:
      openstack-origin: cloud:xenial-queens
  neutron-openvswitch:
    charm: cs:~openstack-charmers-next/neutron-openvswitch
  nova-compute:
    charm: cs:~openstack-charmers-next/nova-compute
    num_units: 1
    constraints: mem=4G cores=4
    options:
      openstack-origin: cloud:xenial-queens
      config-flags: auto_assign_floating_ip=False
      enable-live-migration: false
      aa-profile-mode: enforce
      ephemeral-device: /dev/vdb
      ephemeral-unmount: /mnt
      debug: true
  cinder:
    series: xenial
    num_units: 1
    options:
      block-device: /dev/vdb
      glance-api-version: 2
      overwrite: "true"
      ephemeral-unmount: /mnt

tests/gate-basic-cosmic-rocky

@@ -1,23 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic Cinder deployment on cosmic-rocky."""
from basic_deployment import CinderBasicDeployment
if __name__ == '__main__':
deployment = CinderBasicDeployment(series='cosmic')
deployment.run_tests()

tests/gate-basic-disco-stein

@@ -1,23 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic Cinder deployment on disco-stein."""
from basic_deployment import CinderBasicDeployment
if __name__ == '__main__':
deployment = CinderBasicDeployment(series='disco')
deployment.run_tests()

tests/gate-basic-bionic-queens

@@ -1,23 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic Cinder deployment on bionic-queens."""
from basic_deployment import CinderBasicDeployment
if __name__ == '__main__':
deployment = CinderBasicDeployment(series='bionic')
deployment.run_tests()

tests/gate-basic-bionic-rocky

@@ -1,25 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic cinder deployment on bionic-rocky."""
from basic_deployment import CinderBasicDeployment
if __name__ == '__main__':
deployment = CinderBasicDeployment(series='bionic',
openstack='cloud:bionic-rocky',
source='cloud:bionic-updates/rocky')
deployment.run_tests()


@@ -1,25 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic cinder deployment on bionic-stein."""
from basic_deployment import CinderBasicDeployment
if __name__ == '__main__':
deployment = CinderBasicDeployment(series='bionic',
openstack='cloud:bionic-stein',
source='cloud:bionic-stein')
deployment.run_tests()


@@ -1,25 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic cinder deployment on trusty-mitaka."""
from basic_deployment import CinderBasicDeployment
if __name__ == '__main__':
deployment = CinderBasicDeployment(series='trusty',
openstack='cloud:trusty-mitaka',
source='cloud:trusty-updates/mitaka')
deployment.run_tests()


@@ -1,23 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic Cinder deployment on xenial-mitaka."""
from basic_deployment import CinderBasicDeployment
if __name__ == '__main__':
deployment = CinderBasicDeployment(series='xenial')
deployment.run_tests()


@@ -1,25 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic cinder deployment on xenial-ocata."""
from basic_deployment import CinderBasicDeployment
if __name__ == '__main__':
deployment = CinderBasicDeployment(series='xenial',
openstack='cloud:xenial-ocata',
source='cloud:xenial-updates/ocata')
deployment.run_tests()


@@ -1,25 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic cinder deployment on xenial-pike."""
from basic_deployment import CinderBasicDeployment
if __name__ == '__main__':
deployment = CinderBasicDeployment(series='xenial',
openstack='cloud:xenial-pike',
source='cloud:xenial-updates/pike')
deployment.run_tests()


@@ -1,25 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic cinder deployment on xenial-queens."""
from basic_deployment import CinderBasicDeployment
if __name__ == '__main__':
deployment = CinderBasicDeployment(series='xenial',
openstack='cloud:xenial-queens',
source='cloud:xenial-updates/queens')
deployment.run_tests()


@@ -1,18 +1,25 @@
# Bootstrap the model if necessary.
bootstrap: True
# Re-use bootstrap node.
reset: True
# Use tox/requirements to drive the venv instead of bundletester's venv feature.
virtualenv: False
# Leave makefile empty, otherwise unit/lint tests will rerun ahead of amulet.
makefile: []
# Do not specify juju PPA sources. Juju is presumed to be pre-installed
# and configured in all test runner environments.
#sources:
# Do not specify or rely on system packages.
#packages:
# Do not specify python packages here. Use test-requirements.txt
# and tox instead, i.e. the venv is constructed before bundletester
# is invoked.
#python-packages:
reset_timeout: 600
charm_name: cinder
smoke_bundles:
- bionic-rocky
gate_bundles:
- bionic-stein
- bionic-rocky
- bionic-queens
- xenial-queens
- xenial-ocata
- xenial-pike
- xenial-mitaka
- trusty-mitaka
dev_bundles:
- cosmic-rocky
- disco-stein
configure:
- zaza.openstack.charm_tests.glance.setup.add_cirros_image
- zaza.openstack.charm_tests.glance.setup.add_lts_image
- zaza.openstack.charm_tests.keystone.setup.add_demo_user
- zaza.openstack.charm_tests.neutron.setup.basic_overcloud_network
- zaza.openstack.charm_tests.nova.setup.create_flavors
- zaza.openstack.charm_tests.nova.setup.manage_ssh_key
tests:
- zaza.openstack.charm_tests.cinder.tests.CinderTests
- zaza.openstack.charm_tests.cinder.tests.SecurityTests
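
For orientation, the classes referenced under tests: above are ordinary
unittest-style test cases that zaza discovers and runs against the deployed
model. A minimal illustrative sketch of such a test follows; the real
CinderTests and SecurityTests live in zaza.openstack.charm_tests.cinder.tests,
and the service name checked here is an assumption:

    import unittest

    import zaza.model


    class CinderServicesTest(unittest.TestCase):
        """Illustrative zaza-style check that a cinder service is running."""

        def test_scheduler_running(self):
            # run_on_unit executes a command on the unit via juju and
            # returns a dict that includes the exit 'Code' and 'Stdout'.
            unit = zaza.model.get_lead_unit_name('cinder')
            result = zaza.model.run_on_unit(
                unit, 'systemctl is-active cinder-scheduler')
            self.assertEqual(result['Code'], '0')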

57
tox.ini

@@ -2,7 +2,7 @@
# This file is managed centrally by release-tools and should not be modified
# within individual charm repos.
[tox]
envlist = pep8,py3{5,6}
envlist = pep8,py35,py36
skipsdist = True
[testenv]
@@ -12,14 +12,14 @@ setenv = VIRTUAL_ENV={envdir}
AMULET_SETUP_TIMEOUT=5400
install_command =
pip install {opts} {packages}
commands = stestr run {posargs}
commands = stestr run --slowest {posargs}
whitelist_externals = juju
passenv = HOME TERM AMULET_* CS_API_*
passenv = HOME TERM AMULET_* CS_API_* OS_*
deps = -r{toxinidir}/test-requirements.txt
[testenv:py27]
basepython = python2.7
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
deps =
commands = /bin/true
[testenv:py35]
@@ -71,49 +71,20 @@ omit =
basepython = python3
commands = {posargs}
[testenv:func27-noop]
# DRY RUN - For Debug
basepython = python2.7
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
[testenv:func]
basepython = python3
commands =
bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" -n --no-destroy
functest-run-suite --keep-model
[testenv:func27]
# Charm Functional Test
# Run all gate tests which are +x (expected to always pass)
basepython = python2.7
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
[testenv:func-smoke]
basepython = python3
commands =
bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" --no-destroy
functest-run-suite --keep-model --smoke
[testenv:func27-smoke]
# Charm Functional Test
# Run a specific test as an Amulet smoke test (expected to always pass)
basepython = python2.7
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
[testenv:func-dev]
basepython = python3
commands =
bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-rocky --no-destroy
[testenv:func27-dfs]
# Charm Functional Test
# Run all deploy-from-source tests which are +x (may not always pass!)
basepython = python2.7
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dfs-*" --no-destroy
[testenv:func27-dev]
# Charm Functional Test
# Run all development test targets which are +x (may not always pass!)
basepython = python2.7
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dev-*" --no-destroy
functest-run-suite --keep-model --dev
[flake8]
ignore = E402,E226
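
The three new func targets above all drive the same zaza runner behind the
functest-run-suite console script. As a rough Python equivalent of
tox -e func-smoke (assuming the runner exposes keyword arguments mirroring
the CLI flags; the exact signature is an assumption):

    # Deploy the smoke_bundles entry from tests/tests.yaml, run the
    # configure callables and test classes, and keep the model afterwards.
    import zaza.charm_lifecycle.func_test_runner as func_test_runner

    func_test_runner.func_test_runner(keep_model=True, smoke=True)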