amulet tests - update test coverage, enable vivid, prep for wily

add basic functional checks
sync tests/charmhelpers
Ryan Beisner 2015-06-22 17:36:31 +00:00
parent ca864fbac9
commit 6202362477
9 changed files with 769 additions and 69 deletions


@@ -2,17 +2,17 @@
PYTHON := /usr/bin/env python
lint:
@flake8 --exclude hooks/charmhelpers hooks tests unit_tests
@flake8 --exclude hooks/charmhelpers,tests/charmhelpers \
hooks tests unit_tests
@charm proof
unit_test:
test:
@# Bundletester expects unit tests here.
@echo Starting unit tests...
@$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
test:
functional_test:
@echo Starting Amulet tests...
# coreycb note: The -v should only be temporary until Amulet sends
# raise_status() messages to stderr:
# https://bugs.launchpad.net/amulet/+bug/1320357
@juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700
bin/charm_helpers_sync.py:


@@ -7,7 +7,7 @@ description: |
.
This charm provides the RADOS HTTP gateway supporting S3 and Swift protocols
for object storage.
categories:
tags:
- misc
requires:
mon:


@@ -5,6 +5,9 @@ set -ex
sudo add-apt-repository --yes ppa:juju/stable
sudo apt-get update --yes
sudo apt-get install --yes python-amulet \
python-cinderclient \
python-distro-info \
python-keystoneclient \
python-glanceclient \
python-novaclient
python-novaclient \
python-swiftclient

tests/017-basic-trusty-kilo Normal file → Executable file

tests/019-basic-vivid-kilo Normal file → Executable file


@@ -1,13 +1,14 @@
#!/usr/bin/python
import amulet
import time
from charmhelpers.contrib.openstack.amulet.deployment import (
OpenStackAmuletDeployment
)
from charmhelpers.contrib.openstack.amulet.utils import ( # noqa
from charmhelpers.contrib.openstack.amulet.utils import (
OpenStackAmuletUtils,
DEBUG,
ERROR
#ERROR
)
# Use DEBUG to turn on debug logging
@@ -35,9 +36,12 @@ class CephRadosGwBasicDeployment(OpenStackAmuletDeployment):
compatible with the local charm (e.g. stable or next).
"""
this_service = {'name': 'ceph-radosgw'}
other_services = [{'name': 'ceph', 'units': 3}, {'name': 'mysql'},
{'name': 'keystone'}, {'name': 'rabbitmq-server'},
{'name': 'nova-compute'}, {'name': 'glance'},
other_services = [{'name': 'ceph', 'units': 3},
{'name': 'mysql'},
{'name': 'keystone'},
{'name': 'rabbitmq-server'},
{'name': 'nova-compute'},
{'name': 'glance'},
{'name': 'cinder'}]
super(CephRadosGwBasicDeployment, self)._add_services(this_service,
other_services)
@@ -92,13 +96,20 @@ class CephRadosGwBasicDeployment(OpenStackAmuletDeployment):
self.mysql_sentry = self.d.sentry.unit['mysql/0']
self.keystone_sentry = self.d.sentry.unit['keystone/0']
self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0']
self.nova_compute_sentry = self.d.sentry.unit['nova-compute/0']
self.nova_sentry = self.d.sentry.unit['nova-compute/0']
self.glance_sentry = self.d.sentry.unit['glance/0']
self.cinder_sentry = self.d.sentry.unit['cinder/0']
self.ceph0_sentry = self.d.sentry.unit['ceph/0']
self.ceph1_sentry = self.d.sentry.unit['ceph/1']
self.ceph2_sentry = self.d.sentry.unit['ceph/2']
self.ceph_radosgw_sentry = self.d.sentry.unit['ceph-radosgw/0']
u.log.debug('openstack release val: {}'.format(
self._get_openstack_release()))
u.log.debug('openstack release str: {}'.format(
self._get_openstack_release_string()))
# Let things settle a bit before moving forward
time.sleep(30)
# Authenticate admin with keystone
self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
@@ -135,39 +146,76 @@ class CephRadosGwBasicDeployment(OpenStackAmuletDeployment):
'password',
self.demo_tenant)
def _ceph_osd_id(self, index):
"""Produce a shell command that will return a ceph-osd id."""
return "`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}' | grep -o '[0-9]*'`".format(index + 1) # noqa
# Authenticate radosgw user using swift api
ks_obj_rel = self.keystone_sentry.relation('identity-service',
'ceph-radosgw:identity-service')
self.swift = u.authenticate_swift_user(self.keystone,
user=ks_obj_rel['service_username'],
password=ks_obj_rel['service_password'],
tenant=ks_obj_rel['service_tenant'])
def test_services(self):
"""Verify the expected services are running on the service units."""
ceph_services = ['status ceph-mon-all',
'status ceph-mon id=`hostname`']
commands = {
self.mysql_sentry: ['status mysql'],
self.rabbitmq_sentry: ['sudo service rabbitmq-server status'],
self.nova_compute_sentry: ['status nova-compute'],
self.keystone_sentry: ['status keystone'],
self.glance_sentry: ['status glance-registry',
'status glance-api'],
self.cinder_sentry: ['status cinder-api',
'status cinder-scheduler',
'status cinder-volume'],
self.ceph_radosgw_sentry: ['status radosgw-all']
def test_100_ceph_processes(self):
"""Verify that the expected service processes are running
on each ceph unit."""
# Process name and quantity of processes to expect on each unit
ceph_processes = {
'ceph-mon': 1,
'ceph-osd': 2
}
ceph_osd0 = 'status ceph-osd id={}'.format(self._ceph_osd_id(0))
ceph_osd1 = 'status ceph-osd id={}'.format(self._ceph_osd_id(1))
ceph_services.extend([ceph_osd0, ceph_osd1, 'status ceph-osd-all'])
commands[self.ceph0_sentry] = ceph_services
commands[self.ceph1_sentry] = ceph_services
commands[self.ceph2_sentry] = ceph_services
ret = u.validate_services(commands)
# Units with process names and PID quantities expected
expected_processes = {
self.ceph_radosgw_sentry: {'radosgw': 1},
self.ceph0_sentry: ceph_processes,
self.ceph1_sentry: ceph_processes,
self.ceph2_sentry: ceph_processes
}
actual_pids = u.get_unit_process_ids(expected_processes)
ret = u.validate_unit_process_ids(expected_processes, actual_pids)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_ceph_radosgw_ceph_relation(self):
def test_102_services(self):
"""Verify the expected services are running on the service units."""
services = {
self.mysql_sentry: ['mysql'],
self.rabbitmq_sentry: ['rabbitmq-server'],
self.nova_sentry: ['nova-compute'],
self.keystone_sentry: ['keystone'],
self.glance_sentry: ['glance-registry',
'glance-api'],
self.cinder_sentry: ['cinder-api',
'cinder-scheduler',
'cinder-volume'],
}
if self._get_openstack_release() < self.vivid_kilo:
# For upstart systems only. Ceph services under systemd
# are checked by process name instead.
ceph_services = [
'ceph-mon-all',
'ceph-mon id=`hostname`',
'ceph-osd-all',
'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)),
'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1))
]
services[self.ceph0_sentry] = ceph_services
services[self.ceph1_sentry] = ceph_services
services[self.ceph2_sentry] = ceph_services
services[self.ceph_radosgw_sentry] = ['radosgw-all']
ret = u.validate_services_by_name(services)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_200_ceph_radosgw_ceph_relation(self):
"""Verify the ceph-radosgw to ceph relation data."""
u.log.debug('Checking ceph-radosgw:mon to ceph:radosgw '
'relation data...')
unit = self.ceph_radosgw_sentry
relation = ['mon', 'ceph:radosgw']
expected = {
@@ -179,8 +227,9 @@ class CephRadosGwBasicDeployment(OpenStackAmuletDeployment):
message = u.relation_error('ceph-radosgw to ceph', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_ceph0_ceph_radosgw_relation(self):
def test_201_ceph0_ceph_radosgw_relation(self):
"""Verify the ceph0 to ceph-radosgw relation data."""
u.log.debug('Checking ceph0:radosgw radosgw:mon relation data...')
unit = self.ceph0_sentry
relation = ['radosgw', 'ceph-radosgw:mon']
expected = {
@@ -196,8 +245,9 @@ class CephRadosGwBasicDeployment(OpenStackAmuletDeployment):
message = u.relation_error('ceph0 to ceph-radosgw', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_ceph1_ceph_radosgw_relation(self):
def test_202_ceph1_ceph_radosgw_relation(self):
"""Verify the ceph1 to ceph-radosgw relation data."""
u.log.debug('Checking ceph1:radosgw ceph-radosgw:mon relation data...')
unit = self.ceph1_sentry
relation = ['radosgw', 'ceph-radosgw:mon']
expected = {
@@ -213,8 +263,9 @@ class CephRadosGwBasicDeployment(OpenStackAmuletDeployment):
message = u.relation_error('ceph1 to ceph-radosgw', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_ceph2_ceph_radosgw_relation(self):
def test_203_ceph2_ceph_radosgw_relation(self):
"""Verify the ceph2 to ceph-radosgw relation data."""
u.log.debug('Checking ceph2:radosgw ceph-radosgw:mon relation data...')
unit = self.ceph2_sentry
relation = ['radosgw', 'ceph-radosgw:mon']
expected = {
@@ -230,8 +281,10 @@ class CephRadosGwBasicDeployment(OpenStackAmuletDeployment):
message = u.relation_error('ceph2 to ceph-radosgw', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_ceph_radosgw_keystone_relation(self):
def test_204_ceph_radosgw_keystone_relation(self):
"""Verify the ceph-radosgw to keystone relation data."""
u.log.debug('Checking ceph-radosgw to keystone id service '
'relation data...')
unit = self.ceph_radosgw_sentry
relation = ['identity-service', 'keystone:identity-service']
expected = {
@@ -249,8 +302,10 @@ class CephRadosGwBasicDeployment(OpenStackAmuletDeployment):
message = u.relation_error('ceph-radosgw to keystone', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_keystone_ceph_radosgw_relation(self):
def test_205_keystone_ceph_radosgw_relation(self):
"""Verify the keystone to ceph-radosgw relation data."""
u.log.debug('Checking keystone to ceph-radosgw id service '
'relation data...')
unit = self.keystone_sentry
relation = ['identity-service', 'ceph-radosgw:identity-service']
expected = {
@@ -273,8 +328,9 @@ class CephRadosGwBasicDeployment(OpenStackAmuletDeployment):
message = u.relation_error('keystone to ceph-radosgw', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_ceph_config(self):
def test_300_ceph_radosgw_config(self):
"""Verify the data in the ceph config file."""
u.log.debug('Checking ceph config file data...')
unit = self.ceph_radosgw_sentry
conf = '/etc/ceph/ceph.conf'
keystone_sentry = self.keystone_sentry
@@ -309,11 +365,153 @@ class CephRadosGwBasicDeployment(OpenStackAmuletDeployment):
message = "ceph config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_restart_on_config_change(self):
"""Verify the specified services are restarted on config change."""
# NOTE(coreycb): Test not implemented but should it be? ceph-radosgw
# svcs aren't restarted by charm after config change
# Should they be restarted?
if self._get_openstack_release() >= self.precise_essex:
u.log.error("Test not implemented")
return
def test_302_cinder_rbd_config(self):
"""Verify the cinder config file data regarding ceph."""
u.log.debug('Checking cinder (rbd) config file data...')
unit = self.cinder_sentry
conf = '/etc/cinder/cinder.conf'
expected = {
'DEFAULT': {
'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver'
}
}
for section, pairs in expected.iteritems():
ret = u.validate_config_data(unit, conf, section, pairs)
if ret:
message = "cinder (rbd) config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_304_glance_rbd_config(self):
"""Verify the glance config file data regarding ceph."""
u.log.debug('Checking glance (rbd) config file data...')
unit = self.glance_sentry
conf = '/etc/glance/glance-api.conf'
config = {
'default_store': 'rbd',
'rbd_store_ceph_conf': '/etc/ceph/ceph.conf',
'rbd_store_user': 'glance',
'rbd_store_pool': 'glance',
'rbd_store_chunk_size': '8'
}
if self._get_openstack_release() >= self.trusty_kilo:
# Kilo or later
config['stores'] = ('glance.store.filesystem.Store,'
'glance.store.http.Store,'
'glance.store.rbd.Store')
section = 'glance_store'
else:
# Juno or earlier
section = 'DEFAULT'
expected = {section: config}
for section, pairs in expected.iteritems():
ret = u.validate_config_data(unit, conf, section, pairs)
if ret:
message = "glance (rbd) config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_306_nova_rbd_config(self):
"""Verify the nova config file data regarding ceph."""
u.log.debug('Checking nova (rbd) config file data...')
unit = self.nova_sentry
conf = '/etc/nova/nova.conf'
expected = {
'libvirt': {
'rbd_pool': 'nova',
'rbd_user': 'nova-compute',
'rbd_secret_uuid': u.not_null
}
}
for section, pairs in expected.iteritems():
ret = u.validate_config_data(unit, conf, section, pairs)
if ret:
message = "nova (rbd) config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_400_ceph_check_osd_pools(self):
"""Check osd pools on all ceph units, expect them to be
identical, and expect specific pools to be present."""
u.log.debug('Checking pools on ceph units...')
expected_pools = self.get_ceph_expected_pools(radosgw=True)
results = []
sentries = [
self.ceph_radosgw_sentry,
self.ceph0_sentry,
self.ceph1_sentry,
self.ceph2_sentry
]
# Check for presence of expected pools on each unit
u.log.debug('Expected pools: {}'.format(expected_pools))
for sentry_unit in sentries:
pools = u.get_ceph_pools(sentry_unit)
results.append(pools)
for expected_pool in expected_pools:
if expected_pool not in pools:
msg = ('{} does not have pool: '
'{}'.format(sentry_unit.info['unit_name'],
expected_pool))
amulet.raise_status(amulet.FAIL, msg=msg)
u.log.debug('{} has (at least) the expected '
'pools.'.format(sentry_unit.info['unit_name']))
# Check that all units returned the same pool name:id data
ret = u.validate_list_of_identical_dicts(results)
if ret:
u.log.debug('Pool list results: {}'.format(results))
msg = ('{}; Pool list results are not identical on all '
'ceph units.'.format(ret))
amulet.raise_status(amulet.FAIL, msg=msg)
else:
u.log.debug('Pool list on all ceph units produced the '
'same results (OK).')
def test_402_swift_api_connection(self):
"""Simple api call to confirm basic service functionality"""
u.log.debug('Checking basic radosgw functionality via swift api...')
headers, containers = self.swift.get_account()
assert('content-type' in headers.keys())
assert(containers == [])
def test_498_radosgw_cmds_exit_zero(self):
"""Check basic functionality of radosgw cli commands against
the ceph_radosgw unit."""
sentry_units = [self.ceph_radosgw_sentry]
commands = [
'sudo radosgw-admin regions list',
'sudo radosgw-admin bucket list',
'sudo radosgw-admin zone list',
'sudo radosgw-admin metadata list',
'sudo radosgw-admin gc list'
]
ret = u.check_commands_on_units(commands, sentry_units)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_499_ceph_cmds_exit_zero(self):
"""Check basic functionality of ceph cli commands against
all ceph units."""
sentry_units = [
self.ceph_radosgw_sentry,
self.ceph0_sentry,
self.ceph1_sentry,
self.ceph2_sentry
]
commands = [
'sudo ceph health',
'sudo ceph mds stat',
'sudo ceph pg stat',
'sudo ceph osd stat',
'sudo ceph mon stat',
]
ret = u.check_commands_on_units(commands, sentry_units)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
# Note(beisner): need to add basic object store functional checks.
# FYI: No restart check as ceph services do not restart
# when charm config changes, unless monitor count increases.


@@ -15,13 +15,15 @@
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import ConfigParser
import distro_info
import io
import logging
import os
import re
import six
import sys
import time
import six
import urlparse
class AmuletUtils(object):
@@ -33,6 +35,7 @@ class AmuletUtils(object):
def __init__(self, log_level=logging.ERROR):
self.log = self.get_logger(level=log_level)
self.ubuntu_releases = self.get_ubuntu_releases()
def get_logger(self, name="amulet-logger", level=logging.DEBUG):
"""Get a logger object that will log to stdout."""
@@ -70,12 +73,44 @@ class AmuletUtils(object):
else:
return False
def validate_services(self, commands):
"""Validate services.
def get_ubuntu_release_from_sentry(self, sentry_unit):
"""Get Ubuntu release codename from sentry unit.
Verify the specified services are running on the corresponding
:param sentry_unit: amulet sentry/service unit pointer
:returns: two strings - release codename, failure message (None on success)
"""
msg = None
cmd = 'lsb_release -cs'
release, code = sentry_unit.run(cmd)
if code == 0:
self.log.debug('{} lsb_release: {}'.format(
sentry_unit.info['unit_name'], release))
else:
msg = ('{} `{}` returned {} '
'{}'.format(sentry_unit.info['unit_name'],
cmd, release, code))
if release not in self.ubuntu_releases:
msg = ("Release ({}) not found in Ubuntu releases "
"({})".format(release, self.ubuntu_releases))
return release, msg
def validate_services(self, commands):
"""Validate that lists of commands succeed on service units. Can be
used to verify system services are running on the corresponding
service units.
"""
:param commands: dict with sentry keys and arbitrary command list vals
:returns: None if successful, Failure string message otherwise
"""
self.log.debug('Checking status of system services...')
# /!\ DEPRECATION WARNING (beisner):
# New and existing tests should be rewritten to use
# validate_services_by_name() as it is aware of init systems.
self.log.warn('/!\\ DEPRECATION WARNING: use '
'validate_services_by_name instead of validate_services '
'due to init system differences.')
for k, v in six.iteritems(commands):
for cmd in v:
output, code = k.run(cmd)
@@ -86,6 +121,41 @@ class AmuletUtils(object):
return "command `{}` returned {}".format(cmd, str(code))
return None
def validate_services_by_name(self, sentry_services):
"""Validate system service status by service name, automatically
detecting init system based on Ubuntu release codename.
:param sentry_services: dict with sentry keys and svc list values
:returns: None if successful, Failure string message otherwise
"""
self.log.debug('Checking status of system services...')
# Point at which systemd became a thing
systemd_switch = self.ubuntu_releases.index('vivid')
for sentry_unit, services_list in six.iteritems(sentry_services):
# Get lsb_release codename from unit
release, ret = self.get_ubuntu_release_from_sentry(sentry_unit)
if ret:
return ret
for service_name in services_list:
if (self.ubuntu_releases.index(release) >= systemd_switch or
service_name == "rabbitmq-server"):
# init is systemd
cmd = 'sudo service {} status'.format(service_name)
elif self.ubuntu_releases.index(release) < systemd_switch:
# init is upstart
cmd = 'sudo status {}'.format(service_name)
output, code = sentry_unit.run(cmd)
self.log.debug('{} `{}` returned '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code))
if code != 0:
return "command `{}` returned {}".format(cmd, str(code))
return None
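A usage sketch, mirroring test_102_services in this commit (assume `u` is the shared AmuletUtils instance and the sentry handles come from a deployed amulet test):

    services = {
        mysql_sentry: ['mysql'],
        ceph_radosgw_sentry: ['radosgw-all'],
    }
    ret = u.validate_services_by_name(services)
    if ret:
        amulet.raise_status(amulet.FAIL, msg=ret)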
def _get_config(self, unit, filename):
"""Get a ConfigParser object for parsing a unit's config file."""
file_contents = unit.file_contents(filename)
@@ -104,6 +174,9 @@ class AmuletUtils(object):
Verify that the specified section of the config file contains
the expected option key:value pairs.
"""
self.log.debug('Validating config file data ({} in {} on {})'
'...'.format(section, config_file,
sentry_unit.info['unit_name']))
config = self._get_config(sentry_unit, config_file)
if section != 'DEFAULT' and not config.has_section(section):
@@ -112,10 +185,23 @@ class AmuletUtils(object):
for k in expected.keys():
if not config.has_option(section, k):
return "section [{}] is missing option {}".format(section, k)
if config.get(section, k) != expected[k]:
return "section [{}] {}:{} != expected {}:{}".format(
section, k, config.get(section, k), k, expected[k])
return None
actual = config.get(section, k)
v = expected[k]
if (isinstance(v, six.string_types) or
isinstance(v, bool) or
isinstance(v, six.integer_types)):
# handle explicit values
if actual != v:
return "section [{}] {}:{} != expected {}:{}".format(
section, k, actual, k, expected[k])
else:
# handle not_null, valid_ip boolean comparison methods, etc.
# Do not return early when a callable check passes; keep
# validating the remaining expected keys.
if not v(actual):
return "section [{}] {}:{} != expected {}:{}".format(
section, k, actual, k, expected[k])
return None
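A hedged sketch of the two comparison modes, modeled on test_306_nova_rbd_config in this commit; `u.not_null` is the existing AmuletUtils comparison helper:

    expected = {
        'libvirt': {
            'rbd_pool': 'nova',             # literal value, compared directly
            'rbd_secret_uuid': u.not_null,  # callable, invoked on the actual value
        }
    }
    for section, pairs in expected.iteritems():
        ret = u.validate_config_data(nova_sentry, '/etc/nova/nova.conf',
                                     section, pairs)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)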
def _validate_dict_data(self, expected, actual):
"""Validate dictionary data.
@@ -321,3 +407,135 @@ class AmuletUtils(object):
def endpoint_error(self, name, data):
return 'unexpected endpoint data in {} - {}'.format(name, data)
def get_ubuntu_releases(self):
"""Return a list of all Ubuntu releases in order of release."""
_d = distro_info.UbuntuDistroInfo()
_release_list = _d.all
self.log.debug('Ubuntu release list: {}'.format(_release_list))
return _release_list
def file_to_url(self, file_rel_path):
"""Convert a relative file path to a file URL."""
_abs_path = os.path.abspath(file_rel_path)
return urlparse.urlparse(_abs_path, scheme='file').geturl()
def check_commands_on_units(self, commands, sentry_units):
"""Check that all commands in a list exit zero on all
sentry units in a list.
:param commands: list of bash commands
:param sentry_units: list of sentry unit pointers
:returns: None if successful; Failure message otherwise
"""
self.log.debug('Checking exit codes for {} commands on {} '
'sentry units...'.format(len(commands),
len(sentry_units)))
for sentry_unit in sentry_units:
for cmd in commands:
output, code = sentry_unit.run(cmd)
if code == 0:
msg = ('{} `{}` returned {} '
'(OK)'.format(sentry_unit.info['unit_name'],
cmd, code))
self.log.debug(msg)
else:
msg = ('{} `{}` returned {} '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code, output))
return msg
return None
def get_process_id_list(self, sentry_unit, process_name):
"""Get a list of process ID(s) from a single sentry juju unit
for a single process name.
:param sentry_unit: Pointer to amulet sentry instance (juju unit)
:param process_name: Process name
:returns: List of process IDs
"""
cmd = 'pidof {}'.format(process_name)
output, code = sentry_unit.run(cmd)
if code != 0:
msg = ('{} `{}` returned {} '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code, output))
raise RuntimeError(msg)
return str(output).split()
def get_unit_process_ids(self, unit_processes):
"""Construct a dict containing unit sentries, process names, and
process IDs."""
pid_dict = {}
for sentry_unit, process_list in unit_processes.iteritems():
pid_dict[sentry_unit] = {}
for process in process_list:
pids = self.get_process_id_list(sentry_unit, process)
pid_dict[sentry_unit].update({process: pids})
return pid_dict
def validate_unit_process_ids(self, expected, actual):
"""Validate process id quantities for services on units."""
self.log.debug('Checking units for running processes...')
self.log.debug('Expected PIDs: {}'.format(expected))
self.log.debug('Actual PIDs: {}'.format(actual))
if len(actual) != len(expected):
msg = ('Unit count mismatch. expected, actual: {}, '
'{} '.format(len(expected), len(actual)))
return msg
for (e_sentry, e_proc_names) in expected.iteritems():
e_sentry_name = e_sentry.info['unit_name']
if e_sentry in actual.keys():
a_proc_names = actual[e_sentry]
else:
msg = ('Expected sentry ({}) not found in actual dict data.'
'{}'.format(e_sentry_name, e_sentry))
return msg
if len(e_proc_names.keys()) != len(a_proc_names.keys()):
msg = ('Process name count mismatch. expected, actual: {}, '
'{}'.format(len(e_proc_names), len(a_proc_names)))
return msg
for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \
zip(sorted(e_proc_names.items()), sorted(a_proc_names.items())):
if e_proc_name != a_proc_name:
msg = ('Process name mismatch. expected, actual: {}, '
'{}'.format(e_proc_name, a_proc_name))
return msg
a_pids_length = len(a_pids)
if e_pids_length != a_pids_length:
msg = ('PID count mismatch. {} ({}) expected, actual: {}, '
'{} ({})'.format(e_sentry_name,
e_proc_name,
e_pids_length,
a_pids_length,
a_pids))
return msg
else:
msg = ('PID check OK: {} {} {}: '
'{}'.format(e_sentry_name,
e_proc_name,
e_pids_length,
a_pids))
self.log.debug(msg)
return None
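For clarity, the data shapes involved, as built by test_100_ceph_processes in this commit (PID values hypothetical):

    expected = {ceph0_sentry: {'ceph-mon': 1, 'ceph-osd': 2}}
    actual = u.get_unit_process_ids(expected)
    # e.g. {ceph0_sentry: {'ceph-mon': ['1234'], 'ceph-osd': ['2345', '2346']}}
    ret = u.validate_unit_process_ids(expected, actual)
    if ret:
        amulet.raise_status(amulet.FAIL, msg=ret)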
def validate_list_of_identical_dicts(self, list_of_dicts):
"""Check that all dicts within a list are identical."""
hashes = []
for _dict in list_of_dicts:
hashes.append(hash(frozenset(_dict.items())))
self.log.debug('Hashes: {}'.format(hashes))
if len(set(hashes)) == 1:
msg = 'Dicts within list are identical'
self.log.debug(msg)
else:
msg = 'Dicts within list are not identical'
return msg
return None
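The check reduces each dict to hash(frozenset(dict.items())), so any differing key or value changes the hash. A minimal illustration with hypothetical pool maps:

    pools_a = {'rbd': 2, 'cinder': 3}
    pools_b = {'rbd': 2, 'cinder': 3}
    pools_c = {'rbd': 2, 'cinder': 4}
    assert u.validate_list_of_identical_dicts([pools_a, pools_b]) is None
    assert u.validate_list_of_identical_dicts([pools_a, pools_c]) is not None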


@@ -110,7 +110,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
(self.precise_essex, self.precise_folsom, self.precise_grizzly,
self.precise_havana, self.precise_icehouse,
self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
self.trusty_kilo, self.vivid_kilo) = range(10)
self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
self.wily_liberty) = range(12)
releases = {
('precise', None): self.precise_essex,
@@ -121,8 +122,10 @@ class OpenStackAmuletDeployment(AmuletDeployment):
('trusty', None): self.trusty_icehouse,
('trusty', 'cloud:trusty-juno'): self.trusty_juno,
('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
('utopic', None): self.utopic_juno,
('vivid', None): self.vivid_kilo}
('vivid', None): self.vivid_kilo,
('wily', None): self.wily_liberty}
return releases[(self.series, self.openstack)]
def _get_openstack_release_string(self):
@@ -138,9 +141,42 @@ class OpenStackAmuletDeployment(AmuletDeployment):
('trusty', 'icehouse'),
('utopic', 'juno'),
('vivid', 'kilo'),
('wily', 'liberty'),
])
if self.openstack:
os_origin = self.openstack.split(':')[1]
return os_origin.split('%s-' % self.series)[1].split('/')[0]
else:
return releases[self.series]
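A worked example of the origin parsing above, with hypothetical values:

    series, openstack = 'trusty', 'cloud:trusty-kilo/updates'
    os_origin = openstack.split(':')[1]                         # 'trusty-kilo/updates'
    release = os_origin.split('%s-' % series)[1].split('/')[0]  # 'kilo'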
def get_ceph_expected_pools(self, radosgw=False):
"""Return a list of expected ceph pools based on Ubuntu-OpenStack
release and whether ceph radosgw is flagged as present or not."""
if self._get_openstack_release() >= self.trusty_kilo:
# Kilo or later
pools = [
'rbd',
'cinder',
'glance'
]
else:
# Juno or earlier
pools = [
'data',
'metadata',
'rbd',
'cinder',
'glance'
]
if radosgw:
pools.extend([
'.rgw.root',
'.rgw.control',
'.rgw',
'.rgw.gc',
'.users.uid'
])
return pools
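A sketch of what the two branches above yield when radosgw is present:

    pools = self.get_ceph_expected_pools(radosgw=True)
    # Juno or earlier: ['data', 'metadata', 'rbd', 'cinder', 'glance',
    #                   '.rgw.root', '.rgw.control', '.rgw', '.rgw.gc', '.users.uid']
    # Kilo or later:   the same list without the legacy 'data' and 'metadata' pools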


@@ -14,16 +14,19 @@
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import json
import logging
import os
import six
import time
import urllib
import cinderclient.v1.client as cinder_client
import glanceclient.v1.client as glance_client
import heatclient.v1.client as heat_client
import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client
import six
import swiftclient
from charmhelpers.contrib.amulet.utils import (
AmuletUtils
@@ -37,7 +40,7 @@ class OpenStackAmuletUtils(AmuletUtils):
"""OpenStack amulet utilities.
This class inherits from AmuletUtils and has additional support
that is specifically for use by OpenStack charms.
that is specifically for use by OpenStack charm tests.
"""
def __init__(self, log_level=ERROR):
@@ -51,6 +54,8 @@ class OpenStackAmuletUtils(AmuletUtils):
Validate actual endpoint data vs expected endpoint data. The ports
are used to find the matching endpoint.
"""
self.log.debug('Validating endpoint data...')
self.log.debug('actual: {}'.format(repr(endpoints)))
found = False
for ep in endpoints:
self.log.debug('endpoint: {}'.format(repr(ep)))
@@ -77,6 +82,7 @@ class OpenStackAmuletUtils(AmuletUtils):
Validate a list of actual service catalog endpoints vs a list of
expected service catalog endpoints.
"""
self.log.debug('Validating service catalog endpoint data...')
self.log.debug('actual: {}'.format(repr(actual)))
for k, v in six.iteritems(expected):
if k in actual:
@@ -93,6 +99,7 @@ class OpenStackAmuletUtils(AmuletUtils):
Validate a list of actual tenant data vs list of expected tenant
data.
"""
self.log.debug('Validating tenant data...')
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
@@ -114,6 +121,7 @@ class OpenStackAmuletUtils(AmuletUtils):
Validate a list of actual role data vs a list of expected role
data.
"""
self.log.debug('Validating role data...')
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
@@ -134,6 +142,7 @@ class OpenStackAmuletUtils(AmuletUtils):
Validate a list of actual user data vs a list of expected user
data.
"""
self.log.debug('Validating user data...')
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
@@ -155,17 +164,29 @@ class OpenStackAmuletUtils(AmuletUtils):
Validate a list of actual flavors vs a list of expected flavors.
"""
self.log.debug('Validating flavor data...')
self.log.debug('actual: {}'.format(repr(actual)))
act = [a.name for a in actual]
return self._validate_list_data(expected, act)
def tenant_exists(self, keystone, tenant):
"""Return True if tenant exists."""
self.log.debug('Checking if tenant exists ({})...'.format(tenant))
return tenant in [t.name for t in keystone.tenants.list()]
def authenticate_cinder_admin(self, keystone_sentry, username,
password, tenant):
"""Authenticates admin user with cinder."""
service_ip = \
keystone_sentry.relation('shared-db',
'mysql:shared-db')['private-address']
ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
return cinder_client.Client(username, password, tenant, ept)
def authenticate_keystone_admin(self, keystone_sentry, user, password,
tenant):
"""Authenticates admin user with the keystone admin endpoint."""
self.log.debug('Authenticating keystone admin...')
unit = keystone_sentry
service_ip = unit.relation('shared-db',
'mysql:shared-db')['private-address']
@@ -175,6 +196,7 @@ class OpenStackAmuletUtils(AmuletUtils):
def authenticate_keystone_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with the keystone public endpoint."""
self.log.debug('Authenticating keystone user ({})...'.format(user))
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
return keystone_client.Client(username=user, password=password,
@@ -182,19 +204,40 @@ class OpenStackAmuletUtils(AmuletUtils):
def authenticate_glance_admin(self, keystone):
"""Authenticates admin user with glance."""
self.log.debug('Authenticating glance admin...')
ep = keystone.service_catalog.url_for(service_type='image',
endpoint_type='adminURL')
return glance_client.Client(ep, token=keystone.auth_token)
def authenticate_heat_admin(self, keystone):
"""Authenticates the admin user with heat."""
self.log.debug('Authenticating heat admin...')
ep = keystone.service_catalog.url_for(service_type='orchestration',
endpoint_type='publicURL')
return heat_client.Client(endpoint=ep, token=keystone.auth_token)
def authenticate_nova_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with nova-api."""
self.log.debug('Authenticating nova user ({})...'.format(user))
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
return nova_client.Client(username=user, api_key=password,
project_id=tenant, auth_url=ep)
def authenticate_swift_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with swift api."""
self.log.debug('Authenticating swift user ({})...'.format(user))
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
return swiftclient.Connection(authurl=ep,
user=user,
key=password,
tenant_name=tenant,
auth_version='2.0')
def create_cirros_image(self, glance, image_name):
"""Download the latest cirros image and upload it to glance."""
self.log.debug('Creating glance image ({})...'.format(image_name))
http_proxy = os.getenv('AMULET_HTTP_PROXY')
self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
if http_proxy:
@@ -235,6 +278,11 @@ class OpenStackAmuletUtils(AmuletUtils):
def delete_image(self, glance, image):
"""Delete the specified image."""
# /!\ DEPRECATION WARNING
self.log.warn('/!\\ DEPRECATION WARNING: use '
'delete_resource instead of delete_image.')
self.log.debug('Deleting glance image ({})...'.format(image))
num_before = len(list(glance.images.list()))
glance.images.delete(image)
@@ -254,6 +302,8 @@ class OpenStackAmuletUtils(AmuletUtils):
def create_instance(self, nova, image_name, instance_name, flavor):
"""Create the specified instance."""
self.log.debug('Creating instance '
'({}|{}|{})'.format(instance_name, image_name, flavor))
image = nova.images.find(name=image_name)
flavor = nova.flavors.find(name=flavor)
instance = nova.servers.create(name=instance_name, image=image,
@@ -276,6 +326,11 @@ class OpenStackAmuletUtils(AmuletUtils):
def delete_instance(self, nova, instance):
"""Delete the specified instance."""
# /!\ DEPRECATION WARNING
self.log.warn('/!\\ DEPRECATION WARNING: use '
'delete_resource instead of delete_instance.')
self.log.debug('Deleting instance ({})...'.format(instance))
num_before = len(list(nova.servers.list()))
nova.servers.delete(instance)
@@ -292,3 +347,193 @@ class OpenStackAmuletUtils(AmuletUtils):
return False
return True
def create_or_get_keypair(self, nova, keypair_name="testkey"):
"""Create a new keypair, or return pointer if it already exists."""
try:
_keypair = nova.keypairs.get(keypair_name)
self.log.debug('Keypair ({}) already exists, '
'using it.'.format(keypair_name))
return _keypair
except Exception:
self.log.debug('Keypair ({}) does not exist, '
'creating it.'.format(keypair_name))
_keypair = nova.keypairs.create(name=keypair_name)
return _keypair
def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1):
"""Add and confirm a new volume, 1GB by default."""
self.log.debug('Creating volume ({}|{}GB)'.format(vol_name, vol_size))
vol_new = cinder.volumes.create(display_name=vol_name, size=vol_size)
vol_id = vol_new.id
ret = self.resource_reaches_status(cinder.volumes, vol_id,
expected_stat="available",
msg="Create volume status wait")
if ret:
return vol_new
else:
self.log.error('Failed to create volume.')
return None
def delete_resource(self, resource, resource_id,
msg="resource", max_wait=120):
"""Delete one openstack resource, such as one instance, keypair,
image, volume, stack, etc., and confirm deletion within max wait time.
:param resource: pointer to os resource type, ex:glance_client.images
:param resource_id: unique name or id for the openstack resource
:param msg: text to identify purpose in logging
:param max_wait: maximum wait time in seconds
:returns: True if successful, otherwise False
"""
self.log.debug('Deleting OpenStack resource '
'{} ({})'.format(resource_id, msg))
num_before = len(list(resource.list()))
resource.delete(resource_id)
tries = 0
num_after = len(list(resource.list()))
while num_after != (num_before - 1) and tries < (max_wait / 4):
self.log.debug('{} delete check: '
'{} [{}:{}] {}'.format(msg, tries,
num_before,
num_after,
resource_id))
time.sleep(4)
num_after = len(list(resource.list()))
tries += 1
self.log.debug('{}: expected, actual count = {}, '
'{}'.format(msg, num_before - 1, num_after))
if num_after == (num_before - 1):
return True
else:
self.log.error('{} delete timed out'.format(msg))
return False
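A usage sketch (hypothetical nova client and instance handles):

    ret = u.delete_resource(nova.servers, instance.id, msg='nova instance')
    if not ret:
        amulet.raise_status(amulet.FAIL, msg='nova instance deletion failed')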
def resource_reaches_status(self, resource, resource_id,
expected_stat='available',
msg='resource', max_wait=120):
"""Wait for an openstack resources status to reach an
expected status within a specified time. Useful to confirm that
nova instances, cinder vols, snapshots, glance images, heat stacks
and other resources eventually reach the expected status.
:param resource: pointer to os resource type, ex: heat_client.stacks
:param resource_id: unique id for the openstack resource
:param expected_stat: status to expect resource to reach
:param msg: text to identify purpose in logging
:param max_wait: maximum wait time in seconds
:returns: True if successful, False if status is not reached
"""
tries = 0
resource_stat = resource.get(resource_id).status
while resource_stat != expected_stat and tries < (max_wait / 4):
self.log.debug('{} status check: '
'{} [{}:{}] {}'.format(msg, tries,
resource_stat,
expected_stat,
resource_id))
time.sleep(4)
resource_stat = resource.get(resource_id).status
tries += 1
self.log.debug('{}: expected, actual status = {}, '
'{}'.format(msg, expected_stat, resource_stat))
if resource_stat == expected_stat:
return True
else:
self.log.debug('{} never reached expected status: '
'{}'.format(resource_id, expected_stat))
return False
def get_ceph_osd_id_cmd(self, index):
"""Produce a shell command that will return a ceph-osd id."""
cmd = ("`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}'"
" | grep -o '[0-9]*'`".format(index + 1))
return cmd
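The returned string is intended to be embedded in an upstart service name and expanded by the remote shell, as in test_102_services in this commit:

    cmd = 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0))
    # -> "ceph-osd id=`initctl list | grep 'ceph-osd ' |
    #     awk 'NR==1 { print $2 }' | grep -o '[0-9]*'`"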
def get_ceph_pools(self, sentry_unit):
"""Return a dict of ceph pools from a single ceph unit, with
pool name as keys, pool id as vals."""
pools = {}
cmd = 'sudo ceph osd lspools'
output, code = sentry_unit.run(cmd)
if code != 0:
msg = ('{} `{}` returned {} '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code, output))
raise RuntimeError(msg)
# Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
for pool in str(output).split(','):
pool_id_name = pool.split(' ')
if len(pool_id_name) == 2:
pool_id = pool_id_name[0]
pool_name = pool_id_name[1]
pools[pool_name] = int(pool_id)
self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
pools))
return pools
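Given the example lspools output in the comment above, the returned dict would be:

    pools = u.get_ceph_pools(ceph0_sentry)
    # {'data': 0, 'metadata': 1, 'rbd': 2, 'cinder': 3, 'glance': 4}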
def get_ceph_df(self, sentry_unit):
"""Return dict of ceph df json output, including ceph pool state.
:param sentry_unit: Pointer to amulet sentry instance (juju unit)
:returns: Dict of ceph df output
"""
cmd = 'sudo ceph df --format=json'
output, code = sentry_unit.run(cmd)
if code != 0:
msg = ('{} `{}` returned {} '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code, output))
raise RuntimeError(msg)
return json.loads(output)
def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
"""Take a sample of attributes of a ceph pool, returning ceph
pool name, object count and disk space used for the specified
pool ID number.
:param sentry_unit: Pointer to amulet sentry instance (juju unit)
:param pool_id: Ceph pool ID
:returns: List of pool name, object count, kb disk space used
"""
df = self.get_ceph_df(sentry_unit)
pool_name = df['pools'][pool_id]['name']
obj_count = df['pools'][pool_id]['stats']['objects']
kb_used = df['pools'][pool_id]['stats']['kb_used']
self.log.debug('Ceph {} pool (ID {}): {} objects, '
'{} kb used'.format(pool_name,
pool_id,
obj_count,
kb_used))
return pool_name, obj_count, kb_used
def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
"""Validate ceph pool samples taken over time, such as pool
object counts or pool kb used, before adding, after adding, and
after deleting items which affect those pool attributes. The
2nd element is expected to be greater than the 1st; 3rd is expected
to be less than the 2nd.
:param samples: List containing 3 data samples
:param sample_type: String for logging and usage context
:returns: None if successful, Failure message otherwise
"""
original, created, deleted = range(3)
if samples[created] <= samples[original] or \
samples[deleted] >= samples[created]:
msg = ('Ceph {} samples ({}) '
'unexpected.'.format(sample_type, samples))
return msg
else:
self.log.debug('Ceph {} samples (OK): '
'{}'.format(sample_type, samples))
return None
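A sketch of the intended sampling pattern (counts hypothetical): sample once before creating a resource, once after creating it, and once after deleting it, then validate the rise-then-fall shape:

    samples = [10, 15, 11]  # original, created, deleted object counts
    ret = u.validate_ceph_pool_samples(samples, 'rbd pool object count')
    if ret:
        amulet.raise_status(amulet.FAIL, msg=ret)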