Migrate CephFS tests to Zaza

Change-Id: I36d3d58d8f5c15475460997cce20fe442978eeed
Closes-Bug: #1828424
Chris MacNaughton 2019-12-02 13:49:08 +01:00
parent 39089a27d6
commit 4dfe62d344
25 changed files with 1094 additions and 564 deletions


@@ -7,27 +7,7 @@ mock>=1.2
flake8>=2.2.4,<=2.4.1
stestr>=2.2.0
requests>=2.18.4
# BEGIN: Amulet OpenStack Charm Helper Requirements
# Liberty client lower constraints
amulet>=1.14.3,<2.0;python_version=='2.7'
bundletester>=0.6.1,<1.0;python_version=='2.7'
aodhclient>=0.1.0
gnocchiclient>=3.1.0,<3.2.0
python-barbicanclient>=4.0.1
python-ceilometerclient>=1.5.0
python-cinderclient>=1.4.0,<5.0.0
python-designateclient>=1.5
python-glanceclient>=1.1.0
python-heatclient>=0.8.0
python-keystoneclient>=1.7.1
python-manilaclient>=1.8.1
python-neutronclient>=3.1.0
python-novaclient>=2.30.1
python-openstackclient>=1.7.0
python-swiftclient>=2.6.0
pika>=0.10.0,<1.0
distro-info
git+https://github.com/juju/charm-helpers.git#egg=charmhelpers
# END: Amulet OpenStack Charm Helper Requirements
git+https://github.com/openstack-charmers/zaza.git#egg=zaza
git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack
pytz # workaround for 14.04 pip/tox
pyudev # for ceph-* charm unit tests (not mocked?)


@@ -1,9 +0,0 @@
# Overview
This directory provides Amulet tests to verify basic deployment functionality
from the perspective of this charm, its requirements and its features, as
exercised in a subset of the full OpenStack deployment test bundle topology.
For full details on functional testing of OpenStack charms please refer to
the [functional testing](http://docs.openstack.org/developer/charm-guide/testing.html#functional-testing)
section of the OpenStack Charm Guide.


@@ -1,277 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import amulet
from charmhelpers.contrib.openstack.amulet.deployment import (
OpenStackAmuletDeployment
)
from charmhelpers.contrib.openstack.amulet.utils import ( # noqa
OpenStackAmuletUtils,
DEBUG,
)
# Use DEBUG to turn on debug logging
u = OpenStackAmuletUtils(DEBUG)
class CephFsBasicDeployment(OpenStackAmuletDeployment):
"""Amulet tests on a basic ceph deployment."""
def __init__(self, series=None, openstack=None, source=None, stable=False):
"""Deploy the entire test environment."""
super(CephFsBasicDeployment, self).__init__(series,
openstack,
source,
stable)
self._add_services()
self._add_relations()
self._configure_services()
self._deploy()
u.log.info('Waiting on extended status checks...')
exclude_services = []
# Wait for deployment ready msgs, except exclusions
self._auto_wait_for_status(exclude_services=exclude_services)
self.d.sentry.wait()
self._initialize_tests()
def _add_services(self, **kwargs):
"""Add services
Add the services that we're testing, where cephfs is local,
and the rest of the service are from lp branches that are
compatible with the local charm (e.g. stable or next).
:param **kwargs:
"""
no_origin = ['ceph-fs']
this_service = {'name': 'ceph-fs', 'units': 1}
other_services = [
{'name': 'ceph-mon', 'units': 3},
{'name': 'ceph-osd', 'units': 3,
'storage': {'osd-devices': 'cinder,10G'}},
]
super(CephFsBasicDeployment, self)._add_services(this_service,
other_services,
no_origin=no_origin)
def _add_relations(self, **kwargs):
"""Add all of the relations for the services.
:param **kwargs:
"""
relations = {
'ceph-osd:mon': 'ceph-mon:osd',
'ceph-fs:ceph-mds': 'ceph-mon:mds',
}
super(CephFsBasicDeployment, self)._add_relations(relations)
def _configure_services(self, **kwargs):
"""Configure all of the services.
:param **kwargs:
"""
ceph_fs_config = {
'source': self.source,
}
ceph_mon_config = {
'source': self.source,
}
# Include a non-existent device as osd-devices is a whitelist,
# and this will catch cases where proposals attempt to change that.
ceph_osd_config = {
'source': self.source,
'osd-devices': '/srv/ceph /dev/test-non-existent',
}
configs = {
'ceph-mon': ceph_mon_config,
'ceph-osd': ceph_osd_config,
'ceph-fs': ceph_fs_config,
}
super(CephFsBasicDeployment, self)._configure_services(configs)
def _initialize_tests(self):
"""Perform final initialization before tests get run."""
# Access the sentries for inspecting service units
self.ceph_osd0_sentry = self.d.sentry['ceph-osd'][0]
self.ceph_osd1_sentry = self.d.sentry['ceph-osd'][1]
self.ceph_osd2_sentry = self.d.sentry['ceph-osd'][2]
self.ceph_mon0_sentry = self.d.sentry['ceph-mon'][0]
self.ceph_mon1_sentry = self.d.sentry['ceph-mon'][1]
self.ceph_mon2_sentry = self.d.sentry['ceph-mon'][2]
self.ceph_mds_sentry = self.d.sentry['ceph-fs'][0]
def test_100_ceph_processes(self):
"""Verify that the expected service processes are running
on each ceph unit."""
# Process name and quantity of processes to expect on each unit
ceph_mon_processes = {
'ceph-mon': 1
}
ceph_osd_processes = {
'ceph-osd': 2
}
# Pre-nautilus (ie. pre-train) we had a directory backed OSD and
# a disk backed OSD, but at nautilus (and beyond) we only have
# the disk backed OSD.
if self._get_openstack_release() >= self.bionic_train:
ceph_osd_processes['ceph-osd'] = 1
ceph_mds_processes = {
'ceph-mds': 1
}
# Units with process names and PID quantities expected
expected_processes = {
self.ceph_mon0_sentry: ceph_mon_processes,
self.ceph_mon1_sentry: ceph_mon_processes,
self.ceph_mon2_sentry: ceph_mon_processes,
self.ceph_osd0_sentry: ceph_osd_processes,
self.ceph_osd1_sentry: ceph_osd_processes,
self.ceph_osd2_sentry: ceph_osd_processes,
self.ceph_mds_sentry: ceph_mds_processes
}
actual_pids = u.get_unit_process_ids(expected_processes)
ret = u.validate_unit_process_ids(expected_processes, actual_pids)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_102_services(self):
"""Verify the expected services are running on the service units."""
services = {}
if self._get_openstack_release() < self.xenial_mitaka:
# For upstart systems only. Ceph services under systemd
# are checked by process name instead.
ceph_services = [
'ceph-mon-all',
'ceph-mon id=`hostname`'
]
services[self.ceph_mon0_sentry] = ceph_services
services[self.ceph_mon1_sentry] = ceph_services
services[self.ceph_mon2_sentry] = ceph_services
ceph_osd_services = [
'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)),
]
services[self.ceph_osd0_sentry] = ceph_osd_services
ret = u.validate_services_by_name(services)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_300_ceph_config(self):
"""Verify the data in the ceph config file."""
u.log.debug('Checking ceph config file data...')
unit = self.ceph_mon0_sentry
conf = '/etc/ceph/ceph.conf'
(fsid, _) = unit.run('leader-get fsid')
expected = {
'global': {
'fsid': fsid,
'log to syslog': 'false',
'err to syslog': 'false',
'clog to syslog': 'false',
'mon cluster log to syslog': 'false',
'auth cluster required': 'cephx',
'auth service required': 'cephx',
'auth client required': 'cephx'
},
'mon': {
'keyring': '/var/lib/ceph/mon/$cluster-$id/keyring'
},
'mds': {
'keyring': '/var/lib/ceph/mds/$cluster-$id/keyring'
},
}
for section, pairs in expected.iteritems():
ret = u.validate_config_data(unit, conf, section, pairs)
if ret:
message = "ceph config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_400_ceph_check_osd_pools(self):
"""Check osd pools on all ceph units, expect them to be
identical, and expect specific pools to be present."""
u.log.debug('Checking pools on ceph units...')
if self._get_openstack_release() >= self.xenial_pike:
expected_pools = ['ceph-fs_data', 'ceph-fs_metadata']
else:
expected_pools = ['rbd', 'ceph-fs_data', 'ceph-fs_metadata']
results = []
sentries = [
self.ceph_mon0_sentry,
self.ceph_mon1_sentry,
self.ceph_mon2_sentry
]
# Check for presence of expected pools on each unit
u.log.debug('Expected pools: {}'.format(expected_pools))
for sentry_unit in sentries:
pools = u.get_ceph_pools(sentry_unit)
results.append(pools)
for expected_pool in expected_pools:
if expected_pool not in pools:
msg = ('{} does not have pool: '
'{}'.format(sentry_unit.info['unit_name'],
expected_pool))
amulet.raise_status(amulet.FAIL, msg=msg)
u.log.debug('{} has (at least) the expected '
'pools.'.format(sentry_unit.info['unit_name']))
# Check that all units returned the same pool name:id data
ret = u.validate_list_of_identical_dicts(results)
if ret:
u.log.debug('Pool list results: {}'.format(results))
msg = ('{}; Pool list results are not identical on all '
'ceph units.'.format(ret))
amulet.raise_status(amulet.FAIL, msg=msg)
else:
u.log.debug('Pool list on all ceph units produced the '
'same results (OK).')
def test_499_ceph_cmds_exit_zero(self):
"""Check basic functionality of ceph cli commands against
all ceph units."""
sentry_units = [
self.ceph_mon0_sentry,
self.ceph_mon1_sentry,
self.ceph_mon2_sentry
]
commands = [
'sudo ceph health',
'sudo ceph mds stat',
'sudo ceph pg stat',
'sudo ceph osd stat',
'sudo ceph mon stat',
'sudo ceph fs ls',
]
ret = u.check_commands_on_units(commands, sentry_units)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
# FYI: No restart check as ceph services do not restart
# when charm config changes, unless monitor count increases.


@@ -0,0 +1,95 @@
series: bionic
applications:
ceph-fs:
charm: ceph-fs
series: bionic
num_units: 1
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/srv/ceph /dev/test-non-existent'
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
monitor-count: '3'
auth-supported: 'none'
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
dataset-size: 25%
max-connections: 1000
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
keystone:
expose: True
charm: cs:~openstack-charmers-next/keystone
num_units: 1
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
glance:
expose: True
charm: cs:~openstack-charmers-next/glance
num_units: 1
cinder:
expose: True
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
block-device: 'None'
glance-api-version: '2'
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
nova-cloud-controller:
expose: True
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
relations:
- - ceph-mon:mds
- ceph-fs:ceph-mds
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-compute:image-service
- glance:image-service
- - nova-compute:ceph
- ceph-mon:client
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:amqp
- rabbitmq-server:amqp
- - glance:ceph
- ceph-mon:client
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - cinder:image-service
- glance:image-service
- - cinder-ceph:storage-backend
- cinder:storage-backend
- - cinder-ceph:ceph
- ceph-mon:client
- - ceph-osd:mon
- ceph-mon:osd
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service


@@ -0,0 +1,111 @@
series: bionic
applications:
ceph-fs:
charm: ceph-fs
series: bionic
num_units: 1
options:
source: cloud:bionic-rocky
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/srv/ceph /dev/test-non-existent'
source: cloud:bionic-rocky
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
monitor-count: '3'
auth-supported: 'none'
source: cloud:bionic-rocky
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
dataset-size: 25%
max-connections: 1000
source: cloud:bionic-rocky
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
options:
source: cloud:bionic-rocky
keystone:
expose: True
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: cloud:bionic-rocky
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
options:
openstack-origin: cloud:bionic-rocky
glance:
expose: True
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: cloud:bionic-rocky
cinder:
expose: True
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
block-device: 'None'
glance-api-version: '2'
openstack-origin: cloud:bionic-rocky
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
nova-cloud-controller:
expose: True
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
options:
openstack-origin: cloud:bionic-rocky
relations:
- - ceph-mon:mds
- ceph-fs:ceph-mds
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-compute:image-service
- glance:image-service
- - nova-compute:ceph
- ceph-mon:client
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:amqp
- rabbitmq-server:amqp
- - glance:ceph
- ceph-mon:client
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - cinder:image-service
- glance:image-service
- - cinder-ceph:storage-backend
- cinder:storage-backend
- - cinder-ceph:ceph
- ceph-mon:client
- - ceph-osd:mon
- ceph-mon:osd
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service


@@ -0,0 +1,111 @@
series: bionic
applications:
ceph-fs:
charm: ceph-fs
series: bionic
num_units: 1
options:
source: cloud:bionic-stein
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/srv/ceph /dev/test-non-existent'
source: cloud:bionic-stein
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
monitor-count: '3'
auth-supported: 'none'
source: cloud:bionic-stein
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
dataset-size: 25%
max-connections: 1000
source: cloud:bionic-stein
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
options:
source: cloud:bionic-stein
keystone:
expose: True
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: cloud:bionic-stein
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
options:
openstack-origin: cloud:bionic-stein
glance:
expose: True
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: cloud:bionic-stein
cinder:
expose: True
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
block-device: 'None'
glance-api-version: '2'
openstack-origin: cloud:bionic-stein
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
nova-cloud-controller:
expose: True
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
options:
openstack-origin: cloud:bionic-stein
relations:
- - ceph-mon:mds
- ceph-fs:ceph-mds
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-compute:image-service
- glance:image-service
- - nova-compute:ceph
- ceph-mon:client
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:amqp
- rabbitmq-server:amqp
- - glance:ceph
- ceph-mon:client
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - cinder:image-service
- glance:image-service
- - cinder-ceph:storage-backend
- cinder:storage-backend
- - cinder-ceph:ceph
- ceph-mon:client
- - ceph-osd:mon
- ceph-mon:osd
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service


@@ -0,0 +1,121 @@
series: bionic
applications:
ceph-fs:
charm: ceph-fs
series: bionic
num_units: 1
options:
source: cloud:bionic-train/proposed
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/dev/test-non-existent'
source: cloud:bionic-train/proposed
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
monitor-count: '3'
source: cloud:bionic-train/proposed
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
dataset-size: 25%
max-connections: 1000
source: cloud:bionic-train/proposed
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
options:
source: cloud:bionic-train/proposed
keystone:
expose: True
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: cloud:bionic-train/proposed
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
options:
openstack-origin: cloud:bionic-train/proposed
glance:
expose: True
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: cloud:bionic-train/proposed
cinder:
expose: True
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
block-device: 'None'
glance-api-version: '2'
openstack-origin: cloud:bionic-train/proposed
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
nova-cloud-controller:
expose: True
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
options:
openstack-origin: cloud:bionic-train/proposed
placement:
charm: cs:~openstack-charmers-next/placement
num_units: 1
options:
openstack-origin: cloud:bionic-train
relations:
- - ceph-mon:mds
- ceph-fs:ceph-mds
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-compute:image-service
- glance:image-service
- - nova-compute:ceph
- ceph-mon:client
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:amqp
- rabbitmq-server:amqp
- - glance:ceph
- ceph-mon:client
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - cinder:image-service
- glance:image-service
- - cinder-ceph:storage-backend
- cinder:storage-backend
- - cinder-ceph:ceph
- ceph-mon:client
- - ceph-osd:mon
- ceph-mon:osd
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service
- - placement
- percona-cluster
- - placement
- keystone
- - placement
- nova-cloud-controller


@@ -0,0 +1,95 @@
series: cosmic
applications:
ceph-fs:
charm: ceph-fs
series: cosmic
num_units: 1
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/srv/ceph /dev/test-non-existent'
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
monitor-count: '3'
auth-supported: 'none'
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
dataset-size: 25%
max-connections: 1000
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
keystone:
expose: True
charm: cs:~openstack-charmers-next/keystone
num_units: 1
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
glance:
expose: True
charm: cs:~openstack-charmers-next/glance
num_units: 1
cinder:
expose: True
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
block-device: 'None'
glance-api-version: '2'
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
nova-cloud-controller:
expose: True
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
relations:
- - ceph-mon:mds
- ceph-fs:ceph-mds
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-compute:image-service
- glance:image-service
- - nova-compute:ceph
- ceph-mon:client
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:amqp
- rabbitmq-server:amqp
- - glance:ceph
- ceph-mon:client
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - cinder:image-service
- glance:image-service
- - cinder-ceph:storage-backend
- cinder:storage-backend
- - cinder-ceph:ceph
- ceph-mon:client
- - ceph-osd:mon
- ceph-mon:osd
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service


@@ -0,0 +1,95 @@
series: disco
applications:
ceph-fs:
charm: ceph-fs
series: disco
num_units: 1
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/srv/ceph /dev/test-non-existent'
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
monitor-count: '3'
auth-supported: 'none'
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
dataset-size: 25%
max-connections: 1000
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
keystone:
expose: True
charm: cs:~openstack-charmers-next/keystone
num_units: 1
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
glance:
expose: True
charm: cs:~openstack-charmers-next/glance
num_units: 1
cinder:
expose: True
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
block-device: 'None'
glance-api-version: '2'
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
nova-cloud-controller:
expose: True
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
relations:
- - ceph-mon:mds
- ceph-fs:ceph-mds
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-compute:image-service
- glance:image-service
- - nova-compute:ceph
- ceph-mon:client
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:amqp
- rabbitmq-server:amqp
- - glance:ceph
- ceph-mon:client
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - cinder:image-service
- glance:image-service
- - cinder-ceph:storage-backend
- cinder:storage-backend
- - cinder-ceph:ceph
- ceph-mon:client
- - ceph-osd:mon
- ceph-mon:osd
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service


@@ -0,0 +1,95 @@
series: xenial
applications:
ceph-fs:
charm: ceph-fs
series: xenial
num_units: 1
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/srv/ceph /dev/test-non-existent'
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
monitor-count: '3'
auth-supported: 'none'
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
dataset-size: 25%
max-connections: 1000
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
keystone:
expose: True
charm: cs:~openstack-charmers-next/keystone
num_units: 1
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
glance:
expose: True
charm: cs:~openstack-charmers-next/glance
num_units: 1
cinder:
expose: True
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
block-device: 'None'
glance-api-version: '2'
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
nova-cloud-controller:
expose: True
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
relations:
- - ceph-mon:mds
- ceph-fs:ceph-mds
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-compute:image-service
- glance:image-service
- - nova-compute:ceph
- ceph-mon:client
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:amqp
- rabbitmq-server:amqp
- - glance:ceph
- ceph-mon:client
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - cinder:image-service
- glance:image-service
- - cinder-ceph:storage-backend
- cinder:storage-backend
- - cinder-ceph:ceph
- ceph-mon:client
- - ceph-osd:mon
- ceph-mon:osd
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service


@@ -0,0 +1,111 @@
series: xenial
applications:
ceph-fs:
charm: ceph-fs
series: xenial
num_units: 1
options:
source: cloud:xenial-ocata
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/srv/ceph /dev/test-non-existent'
source: cloud:xenial-ocata
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
monitor-count: '3'
auth-supported: 'none'
source: cloud:xenial-ocata
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
dataset-size: 25%
max-connections: 1000
source: cloud:xenial-ocata
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
options:
source: cloud:xenial-ocata
keystone:
expose: True
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: cloud:xenial-ocata
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
options:
openstack-origin: cloud:xenial-ocata
glance:
expose: True
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: cloud:xenial-ocata
cinder:
expose: True
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
block-device: 'None'
glance-api-version: '2'
openstack-origin: cloud:xenial-ocata
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
nova-cloud-controller:
expose: True
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
options:
openstack-origin: cloud:xenial-ocata
relations:
- - ceph-mon:mds
- ceph-fs:ceph-mds
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-compute:image-service
- glance:image-service
- - nova-compute:ceph
- ceph-mon:client
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:amqp
- rabbitmq-server:amqp
- - glance:ceph
- ceph-mon:client
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - cinder:image-service
- glance:image-service
- - cinder-ceph:storage-backend
- cinder:storage-backend
- - cinder-ceph:ceph
- ceph-mon:client
- - ceph-osd:mon
- ceph-mon:osd
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service


@@ -0,0 +1,111 @@
series: xenial
applications:
ceph-fs:
charm: ceph-fs
series: xenial
num_units: 1
options:
source: cloud:xenial-pike
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/srv/ceph /dev/test-non-existent'
source: cloud:xenial-pike
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
monitor-count: '3'
auth-supported: 'none'
source: cloud:xenial-pike
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
dataset-size: 25%
max-connections: 1000
source: cloud:xenial-pike
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
options:
source: cloud:xenial-pike
keystone:
expose: True
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: cloud:xenial-pike
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
options:
openstack-origin: cloud:xenial-pike
glance:
expose: True
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: cloud:xenial-pike
cinder:
expose: True
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
block-device: 'None'
glance-api-version: '2'
openstack-origin: cloud:xenial-pike
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
nova-cloud-controller:
expose: True
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
options:
openstack-origin: cloud:xenial-pike
relations:
- - ceph-mon:mds
- ceph-fs:ceph-mds
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-compute:image-service
- glance:image-service
- - nova-compute:ceph
- ceph-mon:client
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:amqp
- rabbitmq-server:amqp
- - glance:ceph
- ceph-mon:client
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - cinder:image-service
- glance:image-service
- - cinder-ceph:storage-backend
- cinder:storage-backend
- - cinder-ceph:ceph
- ceph-mon:client
- - ceph-osd:mon
- ceph-mon:osd
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service


@@ -0,0 +1,111 @@
series: xenial
applications:
ceph-fs:
charm: ceph-fs
series: xenial
num_units: 1
options:
source: cloud:xenial-queens
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/srv/ceph /dev/test-non-existent'
source: cloud:xenial-queens
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
monitor-count: '3'
auth-supported: 'none'
source: cloud:xenial-queens
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
dataset-size: 25%
max-connections: 1000
source: cloud:xenial-queens
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
options:
source: cloud:xenial-queens
keystone:
expose: True
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: cloud:xenial-queens
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
options:
openstack-origin: cloud:xenial-queens
glance:
expose: True
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: cloud:xenial-queens
cinder:
expose: True
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
block-device: 'None'
glance-api-version: '2'
openstack-origin: cloud:xenial-queens
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
nova-cloud-controller:
expose: True
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
options:
openstack-origin: cloud:xenial-queens
relations:
- - ceph-mon:mds
- ceph-fs:ceph-mds
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-compute:image-service
- glance:image-service
- - nova-compute:ceph
- ceph-mon:client
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:amqp
- rabbitmq-server:amqp
- - glance:ceph
- ceph-mon:client
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - cinder:image-service
- glance:image-service
- - cinder-ceph:storage-backend
- cinder:storage-backend
- - cinder-ceph:ceph
- ceph-mon:client
- - ceph-osd:mon
- ceph-mon:osd
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service


@@ -1,23 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic ceph deployment on bionic-queens."""
from basic_deployment import CephFsBasicDeployment
if __name__ == '__main__':
deployment = CephFsBasicDeployment(series='bionic')
deployment.run_tests()


@@ -1,25 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic ceph deployment on bionic-rocky."""
from basic_deployment import CephFsBasicDeployment
if __name__ == '__main__':
deployment = CephFsBasicDeployment(series='bionic',
openstack='cloud:bionic-rocky',
source='cloud:bionic-updates/rocky')
deployment.run_tests()


@@ -1,25 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic ceph deployment on bionic-stein."""
from basic_deployment import CephFsBasicDeployment
if __name__ == '__main__':
deployment = CephFsBasicDeployment(series='bionic',
openstack='cloud:bionic-stein',
source='cloud:bionic-stein')
deployment.run_tests()


@@ -1,25 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2019 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic ceph deployment on bionic-train."""
from basic_deployment import CephFsBasicDeployment
if __name__ == '__main__':
deployment = CephFsBasicDeployment(series='bionic',
openstack='cloud:bionic-train',
source='cloud:bionic-train')
deployment.run_tests()


@@ -1,23 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic ceph deployment on disco-stein."""
from basic_deployment import CephFsBasicDeployment
if __name__ == '__main__':
deployment = CephFsBasicDeployment(series='disco')
deployment.run_tests()


@@ -1,23 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic ceph deployment on xenial-mitaka."""
from basic_deployment import CephFsBasicDeployment
if __name__ == '__main__':
deployment = CephFsBasicDeployment(series='xenial')
deployment.run_tests()


@@ -1,25 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic ceph deployment on xenial-ocata."""
from basic_deployment import CephFsBasicDeployment
if __name__ == '__main__':
deployment = CephFsBasicDeployment(series='xenial',
openstack='cloud:xenial-ocata',
source='cloud:xenial-updates/ocata')
deployment.run_tests()


@@ -1,25 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic ceph deployment on xenial-pike."""
from basic_deployment import CephFsBasicDeployment
if __name__ == '__main__':
deployment = CephFsBasicDeployment(series='xenial',
openstack='cloud:xenial-pike',
source='cloud:xenial-updates/pike')
deployment.run_tests()


@@ -1,25 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic ceph deployment on xenial-queens."""
from basic_deployment import CephFsBasicDeployment
if __name__ == '__main__':
deployment = CephFsBasicDeployment(series='xenial',
openstack='cloud:xenial-queens',
source='cloud:xenial-updates/queens')
deployment.run_tests()


@@ -1,18 +1,22 @@
# Bootstrap the model if necessary.
bootstrap: True
# Re-use bootstrap node instead of destroying/re-bootstrapping.
reset: True
# Use tox/requirements to drive the venv instead of bundletester's venv feature.
virtualenv: False
# Leave makefile empty, otherwise unit/lint tests will rerun ahead of amulet.
makefile: []
# Do not specify juju PPA sources. Juju is presumed to be pre-installed
# and configured in all test runner environments.
#sources:
# Do not specify or rely on system packages.
#packages:
# Do not specify python packages here. Use test-requirements.txt
# and tox instead. ie. The venv is constructed before bundletester
# is invoked.
#python-packages:
reset_timeout: 600
charm_name: ceph-fs
gate_bundles:
- bionic-train
- bionic-stein
- bionic-rocky
- bionic-queens
- xenial-queens
- xenial-pike
- xenial-ocata
- xenial-mitaka
smoke_bundles:
- bionic-stein
dev_bundles:
- cosmic-rocky
- disco-stein
configure:
- zaza.openstack.charm_tests.glance.setup.add_lts_image
tests:
- zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest
- zaza.openstack.charm_tests.ceph.tests.CephRelationTest
- zaza.openstack.charm_tests.ceph.tests.CephTest
- zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest
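For orientation, here is a minimal sketch of what a test class referenced in the new tests.yaml above roughly looks like. It is not the charm's actual gate test (those live in zaza.openstack.charm_tests.ceph); it uses plain unittest rather than the zaza.openstack base classes, and it assumes the zaza.model helpers shown (get_units, run_on_unit) behave as described.

import unittest

import zaza.model


class CephFsSmokeSketch(unittest.TestCase):
    """Hypothetical check that each ceph-mon unit can report the MDS map."""

    def test_mds_stat(self):
        # Run a read-only ceph command on every monitor; zaza returns the
        # command result as a dict (the 'Code' key name is an assumption
        # based on common zaza usage).
        for unit in zaza.model.get_units('ceph-mon'):
            result = zaza.model.run_on_unit(unit.entity_id,
                                            'sudo ceph mds stat')
            self.assertEqual(int(result.get('Code', 1)), 0)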


@@ -1,4 +1,4 @@
# Source charm (with amulet): ./src/tox.ini
# Source charm (with zaza): ./src/tox.ini
# This file is managed centrally by release-tools and should not be modified
# within individual charm repos. See the 'global' dir contents for available
# choices of tox.ini for OpenStack Charms:
@@ -15,41 +15,36 @@ skip_missing_interpreters = False
[testenv]
setenv = VIRTUAL_ENV={envdir}
PYTHONHASHSEED=0
CHARM_DIR={envdir}
AMULET_SETUP_TIMEOUT=5400
whitelist_externals = juju
passenv = HOME TERM AMULET_* CS_* OS_* TEST_*
passenv = HOME TERM CS_* OS_* TEST_*
deps = -r{toxinidir}/test-requirements.txt
install_command =
pip install {opts} {packages}
[testenv:pep8]
basepython = python3
deps=charm-tools
commands = charm-proof
[testenv:func-noop]
# DRY RUN - For Debug
basepython = python2.7
basepython = python3
commands =
bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" -n --no-destroy
functest-run-suite --help
[testenv:func]
# Run all gate tests which are +x (expected to always pass)
basepython = python2.7
basepython = python3
commands =
bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" --no-destroy
functest-run-suite --keep-model
[testenv:func-smoke]
# Run a specific test as an Amulet smoke test (expected to always pass)
basepython = python2.7
basepython = python3
commands =
bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-train --no-destroy
functest-run-suite --keep-model --smoke
[testenv:func-dev]
# Run all development test targets which are +x (may not always pass!)
basepython = python2.7
[testenv:func-target]
basepython = python3
commands =
bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dev-*" --no-destroy
functest-run-suite --keep-model --bundle {posargs}
[testenv:venv]
commands = {posargs}
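As a rough illustration of how these targets relate to the new tests.yaml (this is not zaza's implementation): func runs every entry in gate_bundles, func-smoke runs smoke_bundles, and func-target deploys the single bundle name passed as {posargs}. The sketch below, with the tests/ paths assumed for the example, simply cross-checks that every bundle named in tests.yaml has a corresponding bundle file.

import os
import yaml

# Paths assumed for illustration; adjust to wherever the charm keeps its
# zaza config and bundle definitions.
with open('tests/tests.yaml') as f:
    spec = yaml.safe_load(f)

for target, key in [('func', 'gate_bundles'),
                    ('func-smoke', 'smoke_bundles'),
                    ('dev', 'dev_bundles')]:
    for bundle in spec.get(key, []):
        path = os.path.join('tests', 'bundles', '{}.yaml'.format(bundle))
        print('{:10} {:14} -> exists: {}'.format(target, bundle,
                                                 os.path.isfile(path)))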


@@ -89,4 +89,4 @@ commands = {posargs}
[flake8]
# E402 ignore necessary for path append before sys module import in actions
ignore = E402,W504