Carry over amulet tests from ceilometer as a baseline; add subordinate relation, service catalog, endpoint, and nova-ceilometer config checks.

Fix lint in unit test (unused import).

Resolve grizzly-override assumptions re: bug 1469241.

Remove accidental templates/ceilometer.conf.~1~ from rev23 circa 2013.

Update tags for consistency with other os-charms.
james.page@ubuntu.com 2015-07-07 14:56:22 +01:00
commit bf3c2df6d8
42 changed files with 2938 additions and 191 deletions

View File

@ -2,16 +2,27 @@
PYTHON := /usr/bin/env python
lint:
@flake8 --exclude hooks/charmhelpers hooks
@flake8 --exclude hooks/charmhelpers,tests/charmhelpers \
hooks tests unit_tests
@charm proof
test:
@# Bundletester expects unit tests here.
@$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
functional_test:
@echo Starting Amulet tests...
@juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700
bin/charm_helpers_sync.py:
@mkdir -p bin
@bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
> bin/charm_helpers_sync.py
sync: bin/charm_helpers_sync.py
@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers.yaml
@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
unit_test:
@$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
publish: lint test
@bzr push lp:charms/ceilometer-agent
@bzr push lp:charms/trusty/ceilometer-agent

5
charm-helpers-tests.yaml Normal file
View File

@ -0,0 +1,5 @@
branch: lp:charm-helpers
destination: tests/charmhelpers
include:
- contrib.amulet
- contrib.openstack.amulet

View File

@ -15,7 +15,6 @@ from charmhelpers.core.hookenv import (
)
from charmhelpers.core.host import (
restart_on_change,
lsb_release,
)
from charmhelpers.contrib.openstack.utils import (
configure_installation_source,
@ -38,9 +37,6 @@ CONFIGS = register_configs()
@hooks.hook()
def install():
origin = config('openstack-origin')
if (lsb_release()['DISTRIB_CODENAME'] == 'precise'
and origin == 'distro'):
origin = 'cloud:precise-grizzly'
configure_installation_source(origin)
apt_update(fatal=True)
apt_install(

View File

@ -66,7 +66,7 @@ def register_configs():
# just default to earliest supported release. configs dont get touched
# till post-install, anyway.
release = get_os_codename_package('ceilometer-common', fatal=False) \
or 'grizzly'
or 'icehouse'
configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
openstack_release=release)

View File

@ -44,6 +44,7 @@ from charmhelpers.core.hookenv import (
ERROR,
WARNING,
unit_get,
is_leader as juju_is_leader
)
from charmhelpers.core.decorators import (
retry_on_exception,
@ -52,6 +53,8 @@ from charmhelpers.core.strutils import (
bool_from_string,
)
DC_RESOURCE_NAME = 'DC'
class HAIncompleteConfig(Exception):
pass
@ -61,17 +64,30 @@ class CRMResourceNotFound(Exception):
pass
class CRMDCNotFound(Exception):
pass
def is_elected_leader(resource):
"""
Returns True if the charm executing this is the elected cluster leader.
It relies on two mechanisms to determine leadership:
1. If the charm is part of a corosync cluster, call corosync to
1. If juju is sufficiently new and leadership election is supported,
the is_leader command will be used.
2. If the charm is part of a corosync cluster, call corosync to
determine leadership.
2. If the charm is not part of a corosync cluster, the leader is
3. If the charm is not part of a corosync cluster, the leader is
determined as being "the alive unit with the lowest unit numer". In
other words, the oldest surviving unit.
"""
try:
return juju_is_leader()
except NotImplementedError:
log('Juju leadership election feature not enabled'
', using fallback support',
level=WARNING)
if is_clustered():
if not is_crm_leader(resource):
log('Deferring action to CRM leader.', level=INFO)
@ -95,7 +111,33 @@ def is_clustered():
return False
@retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound)
def is_crm_dc():
"""
Determine leadership by querying the pacemaker Designated Controller
"""
cmd = ['crm', 'status']
try:
status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
if not isinstance(status, six.text_type):
status = six.text_type(status, "utf-8")
except subprocess.CalledProcessError as ex:
raise CRMDCNotFound(str(ex))
current_dc = ''
for line in status.split('\n'):
if line.startswith('Current DC'):
# Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
current_dc = line.split(':')[1].split()[0]
if current_dc == get_unit_hostname():
return True
elif current_dc == 'NONE':
raise CRMDCNotFound('Current DC: NONE')
return False
@retry_on_exception(5, base_delay=2,
exc_type=(CRMResourceNotFound, CRMDCNotFound))
def is_crm_leader(resource, retry=False):
"""
Returns True if the charm calling this is the elected corosync leader,
@ -104,6 +146,8 @@ def is_crm_leader(resource, retry=False):
We allow this operation to be retried to avoid the possibility of getting a
false negative. See LP #1396246 for more info.
"""
if resource == DC_RESOURCE_NAME:
return is_crm_dc()
cmd = ['crm', 'resource', 'show', resource]
try:
status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
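The new leadership logic above falls back from Juju's native is-leader to the corosync Designated Controller and finally to the oldest-surviving-unit heuristic. A minimal sketch of how a hook might consume it, assuming a hook execution context and a charm that treats the pacemaker DC as its leadership resource; db_migrate is a hypothetical helper:

# Sketch only: run a one-off action on a single elected unit.
from charmhelpers.contrib.hahelpers.cluster import (
    DC_RESOURCE_NAME,
    is_elected_leader,
)
from charmhelpers.core.hookenv import log


def db_migrate():
    # Hypothetical placeholder for the real cluster-wide action.
    log('Running database migration')


def maybe_migrate():
    # On Juju >= 1.23 this resolves via `is-leader`; older environments
    # fall back to corosync (or lowest unit number when unclustered).
    if is_elected_leader(DC_RESOURCE_NAME):
        db_migrate()
    else:
        log('Not the elected leader; deferring migration')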

View File

@ -110,7 +110,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
(self.precise_essex, self.precise_folsom, self.precise_grizzly,
self.precise_havana, self.precise_icehouse,
self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
self.trusty_kilo, self.vivid_kilo) = range(10)
self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
self.wily_liberty) = range(12)
releases = {
('precise', None): self.precise_essex,
@ -121,8 +122,10 @@ class OpenStackAmuletDeployment(AmuletDeployment):
('trusty', None): self.trusty_icehouse,
('trusty', 'cloud:trusty-juno'): self.trusty_juno,
('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
('utopic', None): self.utopic_juno,
('vivid', None): self.vivid_kilo}
('vivid', None): self.vivid_kilo,
('wily', None): self.wily_liberty}
return releases[(self.series, self.openstack)]
def _get_openstack_release_string(self):
@ -138,6 +141,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
('trusty', 'icehouse'),
('utopic', 'juno'),
('vivid', 'kilo'),
('wily', 'liberty'),
])
if self.openstack:
os_origin = self.openstack.split(':')[1]
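The extended release map above is what lets individual tests gate on the deployed OpenStack release. A minimal, illustrative sketch of a subclass using the ordered indices (the class and method are examples, not part of this commit):

# Sketch: compare release indices to skip release-specific assertions.
from charmhelpers.contrib.openstack.amulet.deployment import (
    OpenStackAmuletDeployment,
)


class ExampleDeployment(OpenStackAmuletDeployment):
    """Illustrative subclass; the real tests live in tests/basic_deployment.py."""

    def is_liberty_or_newer(self):
        # Indices in the tuple above run oldest to newest, so a simple
        # comparison identifies liberty-era (and later) deployments.
        return self._get_openstack_release() >= self.trusty_liberty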

View File

@ -16,15 +16,15 @@
import logging
import os
import six
import time
import urllib
import glanceclient.v1.client as glance_client
import heatclient.v1.client as heat_client
import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client
import six
from charmhelpers.contrib.amulet.utils import (
AmuletUtils
)
@ -37,7 +37,7 @@ class OpenStackAmuletUtils(AmuletUtils):
"""OpenStack amulet utilities.
This class inherits from AmuletUtils and has additional support
that is specifically for use by OpenStack charms.
that is specifically for use by OpenStack charm tests.
"""
def __init__(self, log_level=ERROR):
@ -51,6 +51,8 @@ class OpenStackAmuletUtils(AmuletUtils):
Validate actual endpoint data vs expected endpoint data. The ports
are used to find the matching endpoint.
"""
self.log.debug('Validating endpoint data...')
self.log.debug('actual: {}'.format(repr(endpoints)))
found = False
for ep in endpoints:
self.log.debug('endpoint: {}'.format(repr(ep)))
@ -77,6 +79,7 @@ class OpenStackAmuletUtils(AmuletUtils):
Validate a list of actual service catalog endpoints vs a list of
expected service catalog endpoints.
"""
self.log.debug('Validating service catalog endpoint data...')
self.log.debug('actual: {}'.format(repr(actual)))
for k, v in six.iteritems(expected):
if k in actual:
@ -93,6 +96,7 @@ class OpenStackAmuletUtils(AmuletUtils):
Validate a list of actual tenant data vs list of expected tenant
data.
"""
self.log.debug('Validating tenant data...')
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
@ -114,6 +118,7 @@ class OpenStackAmuletUtils(AmuletUtils):
Validate a list of actual role data vs a list of expected role
data.
"""
self.log.debug('Validating role data...')
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
@ -134,6 +139,7 @@ class OpenStackAmuletUtils(AmuletUtils):
Validate a list of actual user data vs a list of expected user
data.
"""
self.log.debug('Validating user data...')
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
@ -155,17 +161,20 @@ class OpenStackAmuletUtils(AmuletUtils):
Validate a list of actual flavors vs a list of expected flavors.
"""
self.log.debug('Validating flavor data...')
self.log.debug('actual: {}'.format(repr(actual)))
act = [a.name for a in actual]
return self._validate_list_data(expected, act)
def tenant_exists(self, keystone, tenant):
"""Return True if tenant exists."""
self.log.debug('Checking if tenant exists ({})...'.format(tenant))
return tenant in [t.name for t in keystone.tenants.list()]
def authenticate_keystone_admin(self, keystone_sentry, user, password,
tenant):
"""Authenticates admin user with the keystone admin endpoint."""
self.log.debug('Authenticating keystone admin...')
unit = keystone_sentry
service_ip = unit.relation('shared-db',
'mysql:shared-db')['private-address']
@ -175,6 +184,7 @@ class OpenStackAmuletUtils(AmuletUtils):
def authenticate_keystone_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with the keystone public endpoint."""
self.log.debug('Authenticating keystone user ({})...'.format(user))
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
return keystone_client.Client(username=user, password=password,
@ -182,12 +192,21 @@ class OpenStackAmuletUtils(AmuletUtils):
def authenticate_glance_admin(self, keystone):
"""Authenticates admin user with glance."""
self.log.debug('Authenticating glance admin...')
ep = keystone.service_catalog.url_for(service_type='image',
endpoint_type='adminURL')
return glance_client.Client(ep, token=keystone.auth_token)
def authenticate_heat_admin(self, keystone):
"""Authenticates the admin user with heat."""
self.log.debug('Authenticating heat admin...')
ep = keystone.service_catalog.url_for(service_type='orchestration',
endpoint_type='publicURL')
return heat_client.Client(endpoint=ep, token=keystone.auth_token)
def authenticate_nova_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with nova-api."""
self.log.debug('Authenticating nova user ({})...'.format(user))
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
return nova_client.Client(username=user, api_key=password,
@ -195,6 +214,7 @@ class OpenStackAmuletUtils(AmuletUtils):
def create_cirros_image(self, glance, image_name):
"""Download the latest cirros image and upload it to glance."""
self.log.debug('Creating glance image ({})...'.format(image_name))
http_proxy = os.getenv('AMULET_HTTP_PROXY')
self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
if http_proxy:
@ -235,6 +255,11 @@ class OpenStackAmuletUtils(AmuletUtils):
def delete_image(self, glance, image):
"""Delete the specified image."""
# /!\ DEPRECATION WARNING
self.log.warn('/!\\ DEPRECATION WARNING: use '
'delete_resource instead of delete_image.')
self.log.debug('Deleting glance image ({})...'.format(image))
num_before = len(list(glance.images.list()))
glance.images.delete(image)
@ -254,6 +279,8 @@ class OpenStackAmuletUtils(AmuletUtils):
def create_instance(self, nova, image_name, instance_name, flavor):
"""Create the specified instance."""
self.log.debug('Creating instance '
'({}|{}|{})'.format(instance_name, image_name, flavor))
image = nova.images.find(name=image_name)
flavor = nova.flavors.find(name=flavor)
instance = nova.servers.create(name=instance_name, image=image,
@ -276,6 +303,11 @@ class OpenStackAmuletUtils(AmuletUtils):
def delete_instance(self, nova, instance):
"""Delete the specified instance."""
# /!\ DEPRECATION WARNING
self.log.warn('/!\\ DEPRECATION WARNING: use '
'delete_resource instead of delete_instance.')
self.log.debug('Deleting instance ({})...'.format(instance))
num_before = len(list(nova.servers.list()))
nova.servers.delete(instance)
@ -292,3 +324,90 @@ class OpenStackAmuletUtils(AmuletUtils):
return False
return True
def create_or_get_keypair(self, nova, keypair_name="testkey"):
"""Create a new keypair, or return pointer if it already exists."""
try:
_keypair = nova.keypairs.get(keypair_name)
self.log.debug('Keypair ({}) already exists, '
'using it.'.format(keypair_name))
return _keypair
except:
self.log.debug('Keypair ({}) does not exist, '
'creating it.'.format(keypair_name))
_keypair = nova.keypairs.create(name=keypair_name)
return _keypair
def delete_resource(self, resource, resource_id,
msg="resource", max_wait=120):
"""Delete one openstack resource, such as one instance, keypair,
image, volume, stack, etc., and confirm deletion within max wait time.
:param resource: pointer to os resource type, ex:glance_client.images
:param resource_id: unique name or id for the openstack resource
:param msg: text to identify purpose in logging
:param max_wait: maximum wait time in seconds
:returns: True if successful, otherwise False
"""
num_before = len(list(resource.list()))
resource.delete(resource_id)
tries = 0
num_after = len(list(resource.list()))
while num_after != (num_before - 1) and tries < (max_wait / 4):
self.log.debug('{} delete check: '
'{} [{}:{}] {}'.format(msg, tries,
num_before,
num_after,
resource_id))
time.sleep(4)
num_after = len(list(resource.list()))
tries += 1
self.log.debug('{}: expected, actual count = {}, '
'{}'.format(msg, num_before - 1, num_after))
if num_after == (num_before - 1):
return True
else:
self.log.error('{} delete timed out'.format(msg))
return False
def resource_reaches_status(self, resource, resource_id,
expected_stat='available',
msg='resource', max_wait=120):
"""Wait for an openstack resources status to reach an
expected status within a specified time. Useful to confirm that
nova instances, cinder vols, snapshots, glance images, heat stacks
and other resources eventually reach the expected status.
:param resource: pointer to os resource type, ex: heat_client.stacks
:param resource_id: unique id for the openstack resource
:param expected_stat: status to expect resource to reach
:param msg: text to identify purpose in logging
:param max_wait: maximum wait time in seconds
:returns: True if successful, False if status is not reached
"""
tries = 0
resource_stat = resource.get(resource_id).status
while resource_stat != expected_stat and tries < (max_wait / 4):
self.log.debug('{} status check: '
'{} [{}:{}] {}'.format(msg, tries,
resource_stat,
expected_stat,
resource_id))
time.sleep(4)
resource_stat = resource.get(resource_id).status
tries += 1
self.log.debug('{}: expected, actual status = {}, '
'{}'.format(msg, resource_stat, expected_stat))
if resource_stat == expected_stat:
return True
else:
self.log.debug('{} never reached expected status: '
'{}'.format(resource_id, expected_stat))
return False
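delete_resource and resource_reaches_status are the generic replacements for the now-deprecated delete_image/delete_instance helpers. A minimal usage sketch, assuming clients obtained from the authenticate_* helpers and an existing image and instance:

# Sketch: wait for an instance to become ACTIVE, then clean both up.
from charmhelpers.contrib.openstack.amulet.utils import (
    DEBUG,
    OpenStackAmuletUtils,
)

u = OpenStackAmuletUtils(DEBUG)


def wait_and_cleanup(glance, nova, image, instance):
    # Poll every 4s until the status matches or max_wait (120s) expires.
    u.resource_reaches_status(nova.servers, instance.id,
                              expected_stat='ACTIVE', msg='instance')
    # Delete and confirm removal within the default wait window.
    u.delete_resource(nova.servers, instance.id, msg='nova instance')
    u.delete_resource(glance.images, image.id, msg='glance image')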

View File

@ -240,7 +240,7 @@ class SharedDBContext(OSContextGenerator):
if self.relation_prefix:
password_setting = self.relation_prefix + '_password'
for rid in relation_ids('shared-db'):
for rid in relation_ids(self.interfaces[0]):
for unit in related_units(rid):
rdata = relation_get(rid=rid, unit=unit)
host = rdata.get('db_host')

View File

@ -17,6 +17,7 @@
from charmhelpers.core.hookenv import (
config,
unit_get,
service_name,
)
from charmhelpers.contrib.network.ip import (
get_address_in_network,
@ -26,8 +27,6 @@ from charmhelpers.contrib.network.ip import (
)
from charmhelpers.contrib.hahelpers.cluster import is_clustered
from functools import partial
PUBLIC = 'public'
INTERNAL = 'int'
ADMIN = 'admin'
@ -35,15 +34,18 @@ ADMIN = 'admin'
ADDRESS_MAP = {
PUBLIC: {
'config': 'os-public-network',
'fallback': 'public-address'
'fallback': 'public-address',
'override': 'os-public-hostname',
},
INTERNAL: {
'config': 'os-internal-network',
'fallback': 'private-address'
'fallback': 'private-address',
'override': 'os-internal-hostname',
},
ADMIN: {
'config': 'os-admin-network',
'fallback': 'private-address'
'fallback': 'private-address',
'override': 'os-admin-hostname',
}
}
@ -57,15 +59,50 @@ def canonical_url(configs, endpoint_type=PUBLIC):
:param endpoint_type: str endpoint type to resolve.
:param returns: str base URL for services on the current service unit.
"""
scheme = 'http'
if 'https' in configs.complete_contexts():
scheme = 'https'
scheme = _get_scheme(configs)
address = resolve_address(endpoint_type)
if is_ipv6(address):
address = "[{}]".format(address)
return '%s://%s' % (scheme, address)
def _get_scheme(configs):
"""Returns the scheme to use for the url (either http or https)
depending upon whether https is in the configs value.
:param configs: OSTemplateRenderer config templating object to inspect
for a complete https context.
:returns: either 'http' or 'https' depending on whether https is
configured within the configs context.
"""
scheme = 'http'
if configs and 'https' in configs.complete_contexts():
scheme = 'https'
return scheme
def _get_address_override(endpoint_type=PUBLIC):
"""Returns any address overrides that the user has defined based on the
endpoint type.
Note: this function allows for the service name to be inserted into the
address if the user specifies {service_name}.somehost.org.
:param endpoint_type: the type of endpoint to retrieve the override
value for.
:returns: any endpoint address or hostname that the user has overridden
or None if an override is not present.
"""
override_key = ADDRESS_MAP[endpoint_type]['override']
addr_override = config(override_key)
if not addr_override:
return None
else:
return addr_override.format(service_name=service_name())
def resolve_address(endpoint_type=PUBLIC):
"""Return unit address depending on net config.
@ -77,7 +114,10 @@ def resolve_address(endpoint_type=PUBLIC):
:param endpoint_type: Network endpoing type
"""
resolved_address = None
resolved_address = _get_address_override(endpoint_type)
if resolved_address:
return resolved_address
vips = config('vip')
if vips:
vips = vips.split()
@ -109,38 +149,3 @@ def resolve_address(endpoint_type=PUBLIC):
"clustered=%s)" % (net_type, clustered))
return resolved_address
def endpoint_url(configs, url_template, port, endpoint_type=PUBLIC,
override=None):
"""Returns the correct endpoint URL to advertise to Keystone.
This method provides the correct endpoint URL which should be advertised to
the keystone charm for endpoint creation. This method allows for the url to
be overridden to force a keystone endpoint to have specific URL for any of
the defined scopes (admin, internal, public).
:param configs: OSTemplateRenderer config templating object to inspect
for a complete https context.
:param url_template: str format string for creating the url template. Only
two values will be passed - the scheme+hostname
returned by the canonical_url and the port.
:param endpoint_type: str endpoint type to resolve.
:param override: str the name of the config option which overrides the
endpoint URL defined by the charm itself. None will
disable any overrides (default).
"""
if override:
# Return any user-defined overrides for the keystone endpoint URL.
user_value = config(override)
if user_value:
return user_value.strip()
return url_template % (canonical_url(configs, endpoint_type), port)
public_endpoint = partial(endpoint_url, endpoint_type=PUBLIC)
internal_endpoint = partial(endpoint_url, endpoint_type=INTERNAL)
admin_endpoint = partial(endpoint_url, endpoint_type=ADMIN)
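The os-public/internal/admin-hostname overrides added above short-circuit resolve_address before any VIP or network lookup, and may embed the service name. A minimal sketch, assuming a hook context where os-public-hostname is set to something like '{service_name}.example.com' (an illustrative value):

# Sketch: advertise the public endpoint address, honouring any override.
from charmhelpers.contrib.openstack.ip import PUBLIC, resolve_address
from charmhelpers.core.hookenv import log


def public_address():
    # With os-public-hostname = '{service_name}.example.com' this returns
    # e.g. 'ceilometer-agent.example.com'; with no override it falls back
    # to the VIP / network-config resolution further down resolve_address.
    addr = resolve_address(endpoint_type=PUBLIC)
    log('Advertising public endpoint address: {}'.format(addr))
    return addr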

View File

@ -172,14 +172,16 @@ def neutron_plugins():
'services': ['calico-felix',
'bird',
'neutron-dhcp-agent',
'nova-api-metadata'],
'nova-api-metadata',
'etcd'],
'packages': [[headers_package()] + determine_dkms_package(),
['calico-compute',
'bird',
'neutron-dhcp-agent',
'nova-api-metadata']],
'server_packages': ['neutron-server', 'calico-control'],
'server_services': ['neutron-server']
'nova-api-metadata',
'etcd']],
'server_packages': ['neutron-server', 'calico-control', 'etcd'],
'server_services': ['neutron-server', 'etcd']
},
'vsp': {
'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
@ -256,11 +258,14 @@ def network_manager():
def parse_mappings(mappings):
parsed = {}
if mappings:
mappings = mappings.split(' ')
mappings = mappings.split()
for m in mappings:
p = m.partition(':')
if p[1] == ':':
parsed[p[0].strip()] = p[2].strip()
key = p[0].strip()
if p[1]:
parsed[key] = p[2].strip()
else:
parsed[key] = ''
return parsed
@ -283,13 +288,13 @@ def parse_data_port_mappings(mappings, default_bridge='br-data'):
Returns dict of the form {bridge:port}.
"""
_mappings = parse_mappings(mappings)
if not _mappings:
if not _mappings or list(_mappings.values()) == ['']:
if not mappings:
return {}
# For backwards-compatibility we need to support port-only provided in
# config.
_mappings = {default_bridge: mappings.split(' ')[0]}
_mappings = {default_bridge: mappings.split()[0]}
bridges = _mappings.keys()
ports = _mappings.values()
@ -309,6 +314,8 @@ def parse_vlan_range_mappings(mappings):
Mappings must be a space-delimited list of provider:start:end mappings.
The start:end range is optional and may be omitted.
Returns dict of the form {provider: (start, end)}.
"""
_mappings = parse_mappings(mappings)
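A short illustration of the parse_mappings behaviour changed above; the split now tolerates arbitrary whitespace, and keys without values map to an empty string (the values here are examples only):

# Sketch: expected parse_mappings results after this change.
from charmhelpers.contrib.openstack.neutron import parse_mappings

print(parse_mappings('physnet1:br-ex  physnet2:br-data'))
# -> {'physnet1': 'br-ex', 'physnet2': 'br-data'}

print(parse_mappings('physnet1:'))
# -> {'physnet1': ''}  (a bare key, with or without ':', maps to '')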

View File

@ -53,9 +53,13 @@ from charmhelpers.contrib.network.ip import (
get_ipv6_addr
)
from charmhelpers.contrib.python.packages import (
pip_create_virtualenv,
pip_install,
)
from charmhelpers.core.host import lsb_release, mounts, umount
from charmhelpers.fetch import apt_install, apt_cache, install_remote
from charmhelpers.contrib.python.packages import pip_install
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
@ -75,6 +79,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('trusty', 'icehouse'),
('utopic', 'juno'),
('vivid', 'kilo'),
('wily', 'liberty'),
])
@ -87,6 +92,7 @@ OPENSTACK_CODENAMES = OrderedDict([
('2014.1', 'icehouse'),
('2014.2', 'juno'),
('2015.1', 'kilo'),
('2015.2', 'liberty'),
])
# The ugly duckling
@ -109,6 +115,7 @@ SWIFT_CODENAMES = OrderedDict([
('2.2.0', 'juno'),
('2.2.1', 'kilo'),
('2.2.2', 'kilo'),
('2.3.0', 'liberty'),
])
DEFAULT_LOOPBACK_SIZE = '5G'
@ -317,6 +324,9 @@ def configure_installation_source(rel):
'kilo': 'trusty-updates/kilo',
'kilo/updates': 'trusty-updates/kilo',
'kilo/proposed': 'trusty-proposed/kilo',
'liberty': 'trusty-updates/liberty',
'liberty/updates': 'trusty-updates/liberty',
'liberty/proposed': 'trusty-proposed/liberty',
}
try:
@ -497,7 +507,17 @@ def git_install_requested():
requirements_dir = None
def git_clone_and_install(projects_yaml, core_project):
def _git_yaml_load(projects_yaml):
"""
Load the specified yaml into a dictionary.
"""
if not projects_yaml:
return None
return yaml.load(projects_yaml)
def git_clone_and_install(projects_yaml, core_project, depth=1):
"""
Clone/install all specified OpenStack repositories.
@ -510,23 +530,22 @@ def git_clone_and_install(projects_yaml, core_project):
repository: 'git://git.openstack.org/openstack/requirements.git',
branch: 'stable/icehouse'}
directory: /mnt/openstack-git
http_proxy: http://squid.internal:3128
https_proxy: https://squid.internal:3128
http_proxy: squid-proxy-url
https_proxy: squid-proxy-url
The directory, http_proxy, and https_proxy keys are optional.
"""
global requirements_dir
parent_dir = '/mnt/openstack-git'
http_proxy = None
if not projects_yaml:
return
projects = yaml.load(projects_yaml)
projects = _git_yaml_load(projects_yaml)
_git_validate_projects_yaml(projects, core_project)
old_environ = dict(os.environ)
if 'http_proxy' in projects.keys():
http_proxy = projects['http_proxy']
os.environ['http_proxy'] = projects['http_proxy']
if 'https_proxy' in projects.keys():
os.environ['https_proxy'] = projects['https_proxy']
@ -534,15 +553,24 @@ def git_clone_and_install(projects_yaml, core_project):
if 'directory' in projects.keys():
parent_dir = projects['directory']
pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
# Upgrade setuptools from default virtualenv version. The default version
# in trusty breaks update.py in global requirements master branch.
pip_install('setuptools', upgrade=True, proxy=http_proxy,
venv=os.path.join(parent_dir, 'venv'))
for p in projects['repositories']:
repo = p['repository']
branch = p['branch']
if p['name'] == 'requirements':
repo_dir = _git_clone_and_install_single(repo, branch, parent_dir,
repo_dir = _git_clone_and_install_single(repo, branch, depth,
parent_dir, http_proxy,
update_requirements=False)
requirements_dir = repo_dir
else:
repo_dir = _git_clone_and_install_single(repo, branch, parent_dir,
repo_dir = _git_clone_and_install_single(repo, branch, depth,
parent_dir, http_proxy,
update_requirements=True)
os.environ = old_environ
@ -574,7 +602,8 @@ def _git_ensure_key_exists(key, keys):
error_out('openstack-origin-git key \'{}\' is missing'.format(key))
def _git_clone_and_install_single(repo, branch, parent_dir, update_requirements):
def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
update_requirements):
"""
Clone and install a single git repository.
"""
@ -587,23 +616,29 @@ def _git_clone_and_install_single(repo, branch, parent_dir, update_requirements)
if not os.path.exists(dest_dir):
juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
repo_dir = install_remote(repo, dest=parent_dir, branch=branch)
repo_dir = install_remote(repo, dest=parent_dir, branch=branch,
depth=depth)
else:
repo_dir = dest_dir
venv = os.path.join(parent_dir, 'venv')
if update_requirements:
if not requirements_dir:
error_out('requirements repo must be cloned before '
'updating from global requirements.')
_git_update_requirements(repo_dir, requirements_dir)
_git_update_requirements(venv, repo_dir, requirements_dir)
juju_log('Installing git repo from dir: {}'.format(repo_dir))
pip_install(repo_dir)
if http_proxy:
pip_install(repo_dir, proxy=http_proxy, venv=venv)
else:
pip_install(repo_dir, venv=venv)
return repo_dir
def _git_update_requirements(package_dir, reqs_dir):
def _git_update_requirements(venv, package_dir, reqs_dir):
"""
Update from global requirements.
@ -612,25 +647,38 @@ def _git_update_requirements(package_dir, reqs_dir):
"""
orig_dir = os.getcwd()
os.chdir(reqs_dir)
cmd = ['python', 'update.py', package_dir]
python = os.path.join(venv, 'bin/python')
cmd = [python, 'update.py', package_dir]
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
package = os.path.basename(package_dir)
error_out("Error updating {} from global-requirements.txt".format(package))
error_out("Error updating {} from "
"global-requirements.txt".format(package))
os.chdir(orig_dir)
def git_pip_venv_dir(projects_yaml):
"""
Return the pip virtualenv path.
"""
parent_dir = '/mnt/openstack-git'
projects = _git_yaml_load(projects_yaml)
if 'directory' in projects.keys():
parent_dir = projects['directory']
return os.path.join(parent_dir, 'venv')
def git_src_dir(projects_yaml, project):
"""
Return the directory where the specified project's source is located.
"""
parent_dir = '/mnt/openstack-git'
if not projects_yaml:
return
projects = yaml.load(projects_yaml)
projects = _git_yaml_load(projects_yaml)
if 'directory' in projects.keys():
parent_dir = projects['directory']
@ -640,3 +688,15 @@ def git_src_dir(projects_yaml, project):
return os.path.join(parent_dir, os.path.basename(p['repository']))
return None
def git_yaml_value(projects_yaml, key):
"""
Return the value in projects_yaml for the specified key.
"""
projects = _git_yaml_load(projects_yaml)
if key in projects.keys():
return projects[key]
return None
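The new _git_yaml_load-based helpers give charms a single way to read back openstack-origin-git settings. A minimal sketch, assuming a hook context and that the charm exposes the usual openstack-origin-git option; the logged keys are illustrative:

# Sketch: read deploy-from-source settings via the new helpers.
from charmhelpers.contrib.openstack.utils import (
    git_install_requested,
    git_pip_venv_dir,
    git_yaml_value,
)
from charmhelpers.core.hookenv import config, log


def log_git_settings():
    if not git_install_requested():
        return
    projects_yaml = config('openstack-origin-git')
    # Virtualenv created by git_clone_and_install() for this deployment.
    log('git venv: {}'.format(git_pip_venv_dir(projects_yaml)))
    # Any top-level key (e.g. http_proxy) can be read back directly.
    log('git http_proxy: {}'.format(git_yaml_value(projects_yaml,
                                                   'http_proxy')))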

View File

@ -17,8 +17,11 @@
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import os
import subprocess
from charmhelpers.fetch import apt_install, apt_update
from charmhelpers.core.hookenv import log
from charmhelpers.core.hookenv import charm_dir, log
try:
from pip import main as pip_execute
@ -33,6 +36,8 @@ __author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
def parse_options(given, available):
"""Given a set of options, check if available"""
for key, value in sorted(given.items()):
if not value:
continue
if key in available:
yield "--{0}={1}".format(key, value)
@ -51,11 +56,15 @@ def pip_install_requirements(requirements, **options):
pip_execute(command)
def pip_install(package, fatal=False, upgrade=False, **options):
def pip_install(package, fatal=False, upgrade=False, venv=None, **options):
"""Install a python package"""
command = ["install"]
if venv:
venv_python = os.path.join(venv, 'bin/pip')
command = [venv_python, "install"]
else:
command = ["install"]
available_options = ('proxy', 'src', 'log', "index-url", )
available_options = ('proxy', 'src', 'log', 'index-url', )
for option in parse_options(options, available_options):
command.append(option)
@ -69,7 +78,10 @@ def pip_install(package, fatal=False, upgrade=False, **options):
log("Installing {} package with options: {}".format(package,
command))
pip_execute(command)
if venv:
subprocess.check_call(command)
else:
pip_execute(command)
def pip_uninstall(package, **options):
@ -94,3 +106,16 @@ def pip_list():
"""Returns the list of current python installed packages
"""
return pip_execute(["list"])
def pip_create_virtualenv(path=None):
"""Create an isolated Python environment."""
apt_install('python-virtualenv')
if path:
venv_path = path
else:
venv_path = os.path.join(charm_dir(), 'venv')
if not os.path.exists(venv_path):
subprocess.check_call(['virtualenv', venv_path])
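Together, pip_create_virtualenv and the new venv/proxy handling in pip_install let a charm build an isolated environment and populate it, which is exactly what git_clone_and_install now does. A minimal sketch (the path and proxy are illustrative):

# Sketch: create a virtualenv and install a package into it.
from charmhelpers.contrib.python.packages import (
    pip_create_virtualenv,
    pip_install,
)

VENV = '/mnt/openstack-git/venv'  # example path


def build_venv(proxy=None):
    # Installs python-virtualenv and creates the venv if it is missing.
    pip_create_virtualenv(VENV)
    # Runs <venv>/bin/pip rather than the system pip; proxy=None is
    # simply skipped by parse_options().
    pip_install('setuptools', upgrade=True, proxy=proxy, venv=VENV)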

View File

@ -21,12 +21,16 @@
# Charm Helpers Developers <juju@lists.ubuntu.com>
from __future__ import print_function
from distutils.version import LooseVersion
from functools import wraps
import glob
import os
import json
import yaml
import subprocess
import sys
import errno
import tempfile
from subprocess import CalledProcessError
import six
@ -58,15 +62,17 @@ def cached(func):
will cache the result of unit_get + 'test' for future calls.
"""
@wraps(func)
def wrapper(*args, **kwargs):
global cache
key = str((func, args, kwargs))
try:
return cache[key]
except KeyError:
res = func(*args, **kwargs)
cache[key] = res
return res
pass # Drop out of the exception handler scope.
res = func(*args, **kwargs)
cache[key] = res
return res
return wrapper
@ -178,7 +184,7 @@ def local_unit():
def remote_unit():
"""The remote unit for the current relation hook"""
return os.environ['JUJU_REMOTE_UNIT']
return os.environ.get('JUJU_REMOTE_UNIT', None)
def service_name():
@ -238,23 +244,7 @@ class Config(dict):
self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
if os.path.exists(self.path):
self.load_previous()
def __getitem__(self, key):
"""For regular dict lookups, check the current juju config first,
then the previous (saved) copy. This ensures that user-saved values
will be returned by a dict lookup.
"""
try:
return dict.__getitem__(self, key)
except KeyError:
return (self._prev_dict or {})[key]
def keys(self):
prev_keys = []
if self._prev_dict is not None:
prev_keys = self._prev_dict.keys()
return list(set(prev_keys + list(dict.keys(self))))
atexit(self._implicit_save)
def load_previous(self, path=None):
"""Load previous copy of config from disk.
@ -273,6 +263,9 @@ class Config(dict):
self.path = path or self.path
with open(self.path) as f:
self._prev_dict = json.load(f)
for k, v in self._prev_dict.items():
if k not in self:
self[k] = v
def changed(self, key):
"""Return True if the current value for this key is different from
@ -304,13 +297,13 @@ class Config(dict):
instance.
"""
if self._prev_dict:
for k, v in six.iteritems(self._prev_dict):
if k not in self:
self[k] = v
with open(self.path, 'w') as f:
json.dump(self, f)
def _implicit_save(self):
if self.implicit_save:
self.save()
@cached
def config(scope=None):
@ -353,18 +346,49 @@ def relation_set(relation_id=None, relation_settings=None, **kwargs):
"""Set relation information for the current unit"""
relation_settings = relation_settings if relation_settings else {}
relation_cmd_line = ['relation-set']
accepts_file = "--file" in subprocess.check_output(
relation_cmd_line + ["--help"], universal_newlines=True)
if relation_id is not None:
relation_cmd_line.extend(('-r', relation_id))
for k, v in (list(relation_settings.items()) + list(kwargs.items())):
if v is None:
relation_cmd_line.append('{}='.format(k))
else:
relation_cmd_line.append('{}={}'.format(k, v))
subprocess.check_call(relation_cmd_line)
settings = relation_settings.copy()
settings.update(kwargs)
for key, value in settings.items():
# Force value to be a string: it always should, but some call
# sites pass in things like dicts or numbers.
if value is not None:
settings[key] = "{}".format(value)
if accepts_file:
# --file was introduced in Juju 1.23.2. Use it by default if
# available, since otherwise we'll break if the relation data is
# too big. Ideally we should tell relation-set to read the data from
# stdin, but that feature is broken in 1.23.2: Bug #1454678.
with tempfile.NamedTemporaryFile(delete=False) as settings_file:
settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
subprocess.check_call(
relation_cmd_line + ["--file", settings_file.name])
os.remove(settings_file.name)
else:
for key, value in settings.items():
if value is None:
relation_cmd_line.append('{}='.format(key))
else:
relation_cmd_line.append('{}={}'.format(key, value))
subprocess.check_call(relation_cmd_line)
# Flush cache of any relation-gets for local unit
flush(local_unit())
def relation_clear(r_id=None):
''' Clears any relation data already set on relation r_id '''
settings = relation_get(rid=r_id,
unit=local_unit())
for setting in settings:
if setting not in ['public-address', 'private-address']:
settings[setting] = None
relation_set(relation_id=r_id,
**settings)
@cached
def relation_ids(reltype=None):
"""A list of relation_ids"""
@ -509,6 +533,11 @@ def unit_get(attribute):
return None
def unit_public_ip():
"""Get this unit's public IP address"""
return unit_get('public-address')
def unit_private_ip():
"""Get this unit's private IP address"""
return unit_get('private-address')
@ -541,10 +570,14 @@ class Hooks(object):
hooks.execute(sys.argv)
"""
def __init__(self, config_save=True):
def __init__(self, config_save=None):
super(Hooks, self).__init__()
self._hooks = {}
self._config_save = config_save
# For unknown reasons, we allow the Hooks constructor to override
# config().implicit_save.
if config_save is not None:
config().implicit_save = config_save
def register(self, name, function):
"""Register a hook"""
@ -552,13 +585,16 @@ class Hooks(object):
def execute(self, args):
"""Execute a registered hook based on args[0]"""
_run_atstart()
hook_name = os.path.basename(args[0])
if hook_name in self._hooks:
self._hooks[hook_name]()
if self._config_save:
cfg = config()
if cfg.implicit_save:
cfg.save()
try:
self._hooks[hook_name]()
except SystemExit as x:
if x.code is None or x.code == 0:
_run_atexit()
raise
_run_atexit()
else:
raise UnregisteredHookError(hook_name)
@ -605,3 +641,160 @@ def action_fail(message):
The results set by action_set are preserved."""
subprocess.check_call(['action-fail', message])
def status_set(workload_state, message):
"""Set the workload state with a message
Use status-set to set the workload state with a message which is visible
to the user via juju status. If the status-set command is not found then
assume this is juju < 1.23 and juju-log the message unstead.
workload_state -- valid juju workload state.
message -- status update message
"""
valid_states = ['maintenance', 'blocked', 'waiting', 'active']
if workload_state not in valid_states:
raise ValueError(
'{!r} is not a valid workload state'.format(workload_state)
)
cmd = ['status-set', workload_state, message]
try:
ret = subprocess.call(cmd)
if ret == 0:
return
except OSError as e:
if e.errno != errno.ENOENT:
raise
log_message = 'status-set failed: {} {}'.format(workload_state,
message)
log(log_message, level='INFO')
def status_get():
"""Retrieve the previously set juju workload state
If the status-set command is not found then assume this is juju < 1.23 and
return 'unknown'
"""
cmd = ['status-get']
try:
raw_status = subprocess.check_output(cmd, universal_newlines=True)
status = raw_status.rstrip()
return status
except OSError as e:
if e.errno == errno.ENOENT:
return 'unknown'
else:
raise
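# Illustrative sketch (not part of this diff): status_set falls back to
# juju-log on Juju < 1.23 and status_get returns 'unknown' there, so a
# hook can report workload state unconditionally. The messages and the
# configs_complete flag below are hypothetical.
from charmhelpers.core.hookenv import log, status_get, status_set


def report_status(configs_complete):
    if not configs_complete:
        status_set('blocked', 'Missing relations: ceilometer-service')
        return
    status_set('maintenance', 'Rendering configuration')
    # ... render configs and restart services here ...
    status_set('active', 'Unit is ready')
    log('Previous workload state: {}'.format(status_get()))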
def translate_exc(from_exc, to_exc):
def inner_translate_exc1(f):
def inner_translate_exc2(*args, **kwargs):
try:
return f(*args, **kwargs)
except from_exc:
raise to_exc
return inner_translate_exc2
return inner_translate_exc1
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def is_leader():
"""Does the current unit hold the juju leadership
Uses juju to determine whether the current unit is the leader of its peers
"""
cmd = ['is-leader', '--format=json']
return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def leader_get(attribute=None):
"""Juju leader get value(s)"""
cmd = ['leader-get', '--format=json'] + [attribute or '-']
return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def leader_set(settings=None, **kwargs):
"""Juju leader set value(s)"""
# Don't log secrets.
# log("Juju leader-set '%s'" % (settings), level=DEBUG)
cmd = ['leader-set']
settings = settings or {}
settings.update(kwargs)
for k, v in settings.items():
if v is None:
cmd.append('{}='.format(k))
else:
cmd.append('{}={}'.format(k, v))
subprocess.check_call(cmd)
@cached
def juju_version():
"""Full version string (eg. '1.23.3.1-trusty-amd64')"""
# Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
return subprocess.check_output([jujud, 'version'],
universal_newlines=True).strip()
@cached
def has_juju_version(minimum_version):
"""Return True if the Juju version is at least the provided version"""
return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
_atexit = []
_atstart = []
def atstart(callback, *args, **kwargs):
'''Schedule a callback to run before the main hook.
Callbacks are run in the order they were added.
This is useful for modules and classes to perform initialization
and inject behavior. In particular:
- Run common code before all of your hooks, such as logging
the hook name or interesting relation data.
- Defer object or module initialization that requires a hook
context until we know there actually is a hook context,
making testing easier.
- Rather than requiring charm authors to include boilerplate to
invoke your helper's behavior, have it run automatically if
your object is instantiated or module imported.
This is not at all useful after your hook framework as been launched.
'''
global _atstart
_atstart.append((callback, args, kwargs))
def atexit(callback, *args, **kwargs):
'''Schedule a callback to run on successful hook completion.
Callbacks are run in the reverse order that they were added.'''
_atexit.append((callback, args, kwargs))
def _run_atstart():
'''Hook frameworks must invoke this before running the main hook body.'''
global _atstart
for callback, args, kwargs in _atstart:
callback(*args, **kwargs)
del _atstart[:]
def _run_atexit():
'''Hook frameworks must invoke this after the main hook body has
successfully completed. Do not invoke it if the hook fails.'''
global _atexit
for callback, args, kwargs in reversed(_atexit):
callback(*args, **kwargs)
del _atexit[:]
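atstart/atexit let helper modules hook into the framework lifecycle without per-charm boilerplate; Hooks.execute now drives both. A minimal sketch of a hook script registering callbacks, assuming a hook execution context (the log messages are illustrative):

# Sketch: run callbacks before and after the main hook body.
import sys

from charmhelpers.core.hookenv import (
    Hooks,
    atexit,
    atstart,
    hook_name,
    log,
)

hooks = Hooks()

# Invoked by _run_atstart() at the top of Hooks.execute().
atstart(log, 'Starting hook: {}'.format(hook_name()))

# Invoked by _run_atexit() only if the hook completes successfully.
atexit(log, 'Hook finished cleanly')


@hooks.hook('config-changed')
def config_changed():
    pass


if __name__ == '__main__':
    hooks.execute(sys.argv)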

View File

@ -24,6 +24,7 @@
import os
import re
import pwd
import glob
import grp
import random
import string
@ -90,7 +91,7 @@ def service_available(service_name):
['service', service_name, 'status'],
stderr=subprocess.STDOUT).decode('UTF-8')
except subprocess.CalledProcessError as e:
return 'unrecognized service' not in e.output
return b'unrecognized service' not in e.output
else:
return True
@ -269,6 +270,21 @@ def file_hash(path, hash_type='md5'):
return None
def path_hash(path):
"""
Generate a hash checksum of all files matching 'path'. Standard wildcards
like '*' and '?' are supported, see documentation for the 'glob' module for
more information.
:return: dict: A { filename: hash } dictionary for all matched files.
Empty if none found.
"""
return {
filename: file_hash(filename)
for filename in glob.iglob(path)
}
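# Illustrative sketch (not part of this diff): path_hash() is what gives
# restart_on_change its new wildcard support, since hashing a glob also
# notices files that appear or disappear. The path below is an example.
from charmhelpers.core.host import path_hash

before = path_hash('/etc/apache2/sites-enabled/*')
# ... create, modify or remove matching files here ...
after = path_hash('/etc/apache2/sites-enabled/*')
needs_restart = (before != after)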
def check_hash(path, checksum, hash_type='md5'):
"""
Validate a file using a cryptographic checksum.
@ -296,23 +312,25 @@ def restart_on_change(restart_map, stopstart=False):
@restart_on_change({
'/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
'/etc/apache/sites-enabled/*': [ 'apache2' ]
})
def ceph_client_changed():
def config_changed():
pass # your code here
In this example, the cinder-api and cinder-volume services
would be restarted if /etc/ceph/ceph.conf is changed by the
ceph_client_changed function.
ceph_client_changed function. The apache2 service would be
restarted if any file matching the pattern got changed, created
or removed. Standard wildcards are supported, see documentation
for the 'glob' module for more information.
"""
def wrap(f):
def wrapped_f(*args, **kwargs):
checksums = {}
for path in restart_map:
checksums[path] = file_hash(path)
checksums = {path: path_hash(path) for path in restart_map}
f(*args, **kwargs)
restarts = []
for path in restart_map:
if checksums[path] != file_hash(path):
if path_hash(path) != checksums[path]:
restarts += restart_map[path]
services_list = list(OrderedDict.fromkeys(restarts))
if not stopstart:

View File

@ -15,9 +15,9 @@
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import json
from collections import Iterable
from inspect import getargspec
from collections import Iterable, OrderedDict
from charmhelpers.core import host
from charmhelpers.core import hookenv
@ -119,7 +119,7 @@ class ServiceManager(object):
"""
self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
self._ready = None
self.services = {}
self.services = OrderedDict()
for service in services or []:
service_name = service['service']
self.services[service_name] = service
@ -128,15 +128,18 @@ class ServiceManager(object):
"""
Handle the current hook by doing The Right Thing with the registered services.
"""
hook_name = hookenv.hook_name()
if hook_name == 'stop':
self.stop_services()
else:
self.provide_data()
self.reconfigure_services()
cfg = hookenv.config()
if cfg.implicit_save:
cfg.save()
hookenv._run_atstart()
try:
hook_name = hookenv.hook_name()
if hook_name == 'stop':
self.stop_services()
else:
self.reconfigure_services()
self.provide_data()
except SystemExit as x:
if x.code is None or x.code == 0:
hookenv._run_atexit()
hookenv._run_atexit()
def provide_data(self):
"""
@ -145,15 +148,36 @@ class ServiceManager(object):
A provider must have a `name` attribute, which indicates which relation
to set data on, and a `provide_data()` method, which returns a dict of
data to set.
The `provide_data()` method can optionally accept two parameters:
* ``remote_service`` The name of the remote service that the data will
be provided to. The `provide_data()` method will be called once
for each connected service (not unit). This allows the method to
tailor its data to the given service.
* ``service_ready`` Whether or not the service definition had all of
its requirements met, and thus the ``data_ready`` callbacks run.
Note that the ``provided_data`` methods are now called **after** the
``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks
a chance to generate any data necessary for the providing to the remote
services.
"""
hook_name = hookenv.hook_name()
for service in self.services.values():
for service_name, service in self.services.items():
service_ready = self.is_ready(service_name)
for provider in service.get('provided_data', []):
if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
data = provider.provide_data()
_ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data
if _ready:
hookenv.relation_set(None, data)
for relid in hookenv.relation_ids(provider.name):
units = hookenv.related_units(relid)
if not units:
continue
remote_service = units[0].split('/')[0]
argspec = getargspec(provider.provide_data)
if len(argspec.args) > 1:
data = provider.provide_data(remote_service, service_ready)
else:
data = provider.provide_data()
if data:
hookenv.relation_set(relid, data)
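# Illustrative sketch (not part of this diff): a data provider using the
# extended provide_data() signature described above. The class name and
# relation data are hypothetical.
class ExampleProvider(object):
    # Matched against '<name>-relation-(joined|changed)' hook names.
    name = 'example'

    def provide_data(self, remote_service, service_ready):
        # Called once per connected service; falsy data means no
        # relation_set for that relation id.
        if not service_ready:
            return {}
        return {'allowed-client': remote_service}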
def reconfigure_services(self, *service_names):
"""

View File

@ -158,7 +158,7 @@ def filter_installed_packages(packages):
def apt_cache(in_memory=True):
"""Build and return an apt cache"""
import apt_pkg
from apt import apt_pkg
apt_pkg.init()
if in_memory:
apt_pkg.config.set("Dir::Cache::pkgcache", "")

View File

@ -45,14 +45,16 @@ class GitUrlFetchHandler(BaseFetchHandler):
else:
return True
def clone(self, source, dest, branch):
def clone(self, source, dest, branch, depth=None):
if not self.can_handle(source):
raise UnhandledSource("Cannot handle {}".format(source))
repo = Repo.clone_from(source, dest)
repo.git.checkout(branch)
if depth:
Repo.clone_from(source, dest, branch=branch, depth=depth)
else:
Repo.clone_from(source, dest, branch=branch)
def install(self, source, branch="master", dest=None):
def install(self, source, branch="master", dest=None, depth=None):
url_parts = self.parse_url(source)
branch_name = url_parts.path.strip("/").split("/")[-1]
if dest:
@ -63,7 +65,7 @@ class GitUrlFetchHandler(BaseFetchHandler):
if not os.path.exists(dest_dir):
mkdir(dest_dir, perms=0o755)
try:
self.clone(source, dest_dir, branch)
self.clone(source, dest_dir, branch, depth)
except GitCommandError as e:
raise UnhandledSource(e.message)
except OSError as e:
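With depth plumbed through clone() and install(), callers such as git_clone_and_install can request shallow clones. A minimal sketch using install_remote directly (the branch and destination are illustrative; the repository URL comes from the docstring example earlier in this commit):

# Sketch: shallow-clone a git source to speed up deploy-from-source.
from charmhelpers.fetch import install_remote

repo_dir = install_remote(
    'git://git.openstack.org/openstack/requirements.git',
    dest='/mnt/openstack-git',
    branch='stable/icehouse',
    depth=1)
print('Cloned into {}'.format(repo_dir))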

View File

@ -11,9 +11,10 @@ description: |
.
This charm should be used in conjunction with the ceilometer and nova charm to collect
Openstack measures.
categories:
- miscellaneous
tags:
- openstack
- telemetry
- misc
provides:
nrpe-external-master:
interface: nrpe-external-master

View File

@ -1,21 +0,0 @@
[DEFAULT]
debug = {{ debug }}
verbose = {{ verbose }}
metering_secret = {{ metering_secret }}
rabbit_host = {{ rabbitmq_host }}
rabbit_userid = {{ rabbitmq_user }}
rabbit_password = {{ rabbitmq_password }}
rabbit_virtual_host = {{ rabbitmq_virtual_host }}
os_auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/v2.0
os_tenant_name = {{ admin_tenant_name }}
os_username = {{ admin_user }}
os_password = {{ admin_password }}
logdir = /var/log/ceilometer
host = {{ ceilometer_host }}
# from socket import gethostname as get_host_name

15
tests/00-setup Executable file
View File

@ -0,0 +1,15 @@
#!/bin/bash
set -ex
sudo add-apt-repository --yes ppa:juju/stable
sudo apt-get update --yes
sudo apt-get install --yes python-amulet \
python-ceilometerclient \
python-cinderclient \
python-distro-info \
python-glanceclient \
python-heatclient \
python-keystoneclient \
python-novaclient \
python-swiftclient

View File

@ -0,0 +1,11 @@
#!/usr/bin/python
"""Amulet tests on a basic ceilometer-agent deployment on precise-icehouse."""
from basic_deployment import CeiloAgentBasicDeployment
if __name__ == '__main__':
deployment = CeiloAgentBasicDeployment(series='precise',
openstack='cloud:precise-icehouse',
source='cloud:precise-updates/icehouse')
deployment.run_tests()

View File

@ -0,0 +1,9 @@
#!/usr/bin/python
"""Amulet tests on a basic ceilometer-agent deployment on trusty-icehouse."""
from basic_deployment import CeiloAgentBasicDeployment
if __name__ == '__main__':
deployment = CeiloAgentBasicDeployment(series='trusty')
deployment.run_tests()

11
tests/016-basic-trusty-juno Executable file
View File

@ -0,0 +1,11 @@
#!/usr/bin/python
"""Amulet tests on a basic ceilometer-agent deployment on trusty-juno."""
from basic_deployment import CeiloAgentBasicDeployment
if __name__ == '__main__':
deployment = CeiloAgentBasicDeployment(series='trusty',
openstack='cloud:trusty-juno',
source='cloud:trusty-updates/juno')
deployment.run_tests()

11
tests/017-basic-trusty-kilo Executable file
View File

@ -0,0 +1,11 @@
#!/usr/bin/python
"""Amulet tests on a basic ceilometer-agent deployment on trusty-kilo."""
from basic_deployment import CeiloAgentBasicDeployment
if __name__ == '__main__':
deployment = CeiloAgentBasicDeployment(series='trusty',
openstack='cloud:trusty-kilo',
source='cloud:trusty-updates/kilo')
deployment.run_tests()

9
tests/018-basic-utopic-juno Executable file
View File

@ -0,0 +1,9 @@
#!/usr/bin/python
"""Amulet tests on a basic ceilometer-agent deployment on utopic-juno."""
from basic_deployment import CeiloAgentBasicDeployment
if __name__ == '__main__':
deployment = CeiloAgentBasicDeployment(series='utopic')
deployment.run_tests()

9
tests/019-basic-vivid-kilo Executable file
View File

@ -0,0 +1,9 @@
#!/usr/bin/python
"""Amulet tests on a basic ceilometer-agent deployment on vivid-kilo."""
from basic_deployment import CeiloAgentBasicDeployment
if __name__ == '__main__':
deployment = CeiloAgentBasicDeployment(series='vivid')
deployment.run_tests()

View File

@ -0,0 +1,11 @@
#!/usr/bin/python
"""Amulet tests on a basic ceilometer-agent deployment on trusty-liberty."""
from basic_deployment import CeiloAgentBasicDeployment
if __name__ == '__main__':
deployment = CeiloAgentBasicDeployment(series='trusty',
openstack='cloud:trusty-liberty',
source='cloud:trusty-updates/liberty')
deployment.run_tests()

View File

@ -0,0 +1,9 @@
#!/usr/bin/python
"""Amulet tests on a basic ceilometer-agent deployment on wily-liberty."""
from basic_deployment import CeiloAgentBasicDeployment
if __name__ == '__main__':
deployment = CeiloAgentBasicDeployment(series='wily')
deployment.run_tests()

62
tests/README Normal file
View File

@ -0,0 +1,62 @@
This directory provides Amulet tests that focus on verification of
ceilometer-agent deployments.
test_* methods are called in lexical sort order.
Test name convention to ensure desired test order:
1xx service and endpoint checks
2xx relation checks
3xx config checks
4xx functional checks
9xx restarts and other final checks
In order to run tests, you'll need charm-tools installed (in addition to
juju, of course):
sudo add-apt-repository ppa:juju/stable
sudo apt-get update
sudo apt-get install charm-tools
If you use a web proxy server to access the web, you'll need to set the
AMULET_HTTP_PROXY environment variable to the http URL of the proxy server.
The following examples demonstrate different ways that tests can be executed.
All examples are run from the charm's root directory.
* To run all tests (starting with 00-setup):
make test
* To run a specific test module (or modules):
juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
* To run a specific test module (or modules), and keep the environment
deployed after a failure:
juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
* To re-run a test module against an already deployed environment (one
that was deployed by a previous call to 'juju test --set-e'):
./tests/15-basic-trusty-icehouse
For debugging and test development purposes, all code should be idempotent.
In other words, the code should have the ability to be re-run without changing
the results beyond the initial run. This enables editing and re-running of a
test module against an already deployed environment, as described above.
Manual debugging tips:
* Set the following env vars before using the OpenStack CLI as admin:
export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=openstack
export OS_REGION_NAME=RegionOne
* Set the following env vars before using the OpenStack CLI as demoUser:
export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
export OS_TENANT_NAME=demoTenant
export OS_USERNAME=demoUser
export OS_PASSWORD=password
export OS_REGION_NAME=RegionOne

567
tests/basic_deployment.py Normal file
View File

@ -0,0 +1,567 @@
#!/usr/bin/python
"""
Basic ceilometer-agent functional tests.
"""
import amulet
import time
from ceilometerclient.v2 import client as ceilclient
from charmhelpers.contrib.openstack.amulet.deployment import (
OpenStackAmuletDeployment
)
from charmhelpers.contrib.openstack.amulet.utils import (
OpenStackAmuletUtils,
DEBUG,
#ERROR
)
# Use DEBUG to turn on debug logging
u = OpenStackAmuletUtils(DEBUG)
class CeiloAgentBasicDeployment(OpenStackAmuletDeployment):
"""Amulet tests on a basic ceilometer-agent deployment."""
def __init__(self, series, openstack=None, source=None, stable=False):
"""Deploy the entire test environment."""
super(CeiloAgentBasicDeployment, self).__init__(series, openstack,
source, stable)
self._add_services()
self._add_relations()
self._configure_services()
self._deploy()
self._initialize_tests()
def _add_services(self):
"""Add services
Add the services that we're testing, where ceilometer is local,
and the rest of the service are from lp branches that are
compatible with the local charm (e.g. stable or next).
"""
# Note: ceilometer-agent becomes a subordinate of nova-compute
this_service = {'name': 'ceilometer-agent'}
other_services = [{'name': 'mysql'},
{'name': 'rabbitmq-server'},
{'name': 'keystone'},
{'name': 'mongodb'},
{'name': 'ceilometer'},
{'name': 'nova-compute'}]
super(CeiloAgentBasicDeployment, self)._add_services(this_service,
other_services)
def _add_relations(self):
"""Add all of the relations for the services."""
relations = {
'ceilometer:shared-db': 'mongodb:database',
'ceilometer:amqp': 'rabbitmq-server:amqp',
'ceilometer:identity-service': 'keystone:identity-service',
'ceilometer:identity-notifications': 'keystone:'
'identity-notifications',
'keystone:shared-db': 'mysql:shared-db',
'ceilometer:ceilometer-service': 'ceilometer-agent:'
'ceilometer-service',
'nova-compute:nova-ceilometer': 'ceilometer-agent:nova-ceilometer',
'nova-compute:shared-db': 'mysql:shared-db',
'nova-compute:amqp': 'rabbitmq-server:amqp'
}
super(CeiloAgentBasicDeployment, self)._add_relations(relations)
def _configure_services(self):
"""Configure all of the services."""
keystone_config = {'admin-password': 'openstack',
'admin-token': 'ubuntutesting'}
configs = {'keystone': keystone_config}
super(CeiloAgentBasicDeployment, self)._configure_services(configs)
def _get_token(self):
return self.keystone.service_catalog.catalog['token']['id']
def _initialize_tests(self):
"""Perform final initialization before tests get run."""
# Access the sentries for inspecting service units
self.ceil_agent_sentry = self.d.sentry.unit['ceilometer-agent/0']
self.ceil_sentry = self.d.sentry.unit['ceilometer/0']
self.mysql_sentry = self.d.sentry.unit['mysql/0']
self.keystone_sentry = self.d.sentry.unit['keystone/0']
self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0']
self.mongodb_sentry = self.d.sentry.unit['mongodb/0']
self.nova_sentry = self.d.sentry.unit['nova-compute/0']
u.log.debug('openstack release val: {}'.format(
self._get_openstack_release()))
u.log.debug('openstack release str: {}'.format(
self._get_openstack_release_string()))
# Let things settle a bit before moving forward
time.sleep(30)
# Authenticate admin with keystone endpoint
self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
user='admin',
password='openstack',
tenant='admin')
# Authenticate admin with ceilometer endpoint
ep = self.keystone.service_catalog.url_for(service_type='metering',
endpoint_type='publicURL')
self.ceil = ceilclient.Client(endpoint=ep, token=self._get_token)
def test_100_services(self):
"""Verify the expected services are running on the corresponding
service units."""
ceilometer_svcs = [
'ceilometer-collector',
'ceilometer-api',
'ceilometer-alarm-evaluator',
'ceilometer-alarm-notifier',
'ceilometer-agent-notification',
]
service_names = {
self.ceil_sentry: ceilometer_svcs,
self.mysql_sentry: ['mysql'],
self.keystone_sentry: ['keystone'],
self.rabbitmq_sentry: ['rabbitmq-server'],
self.mongodb_sentry: ['mongodb'],
}
ret = u.validate_services_by_name(service_names)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_110_service_catalog(self):
"""Verify that the service catalog endpoint data is valid."""
endpoint_check = {
'adminURL': u.valid_url,
'id': u.not_null,
'region': 'RegionOne',
'publicURL': u.valid_url,
'internalURL': u.valid_url
}
expected = {
'metering': [endpoint_check],
'identity': [endpoint_check]
}
actual = self.keystone.service_catalog.get_endpoints()
ret = u.validate_svc_catalog_endpoint_data(expected, actual)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_112_keystone_api_endpoint(self):
"""Verify the ceilometer api endpoint data."""
endpoints = self.keystone.endpoints.list()
u.log.debug(endpoints)
internal_port = public_port = '5000'
admin_port = '35357'
expected = {'id': u.not_null,
'region': 'RegionOne',
'adminurl': u.valid_url,
'internalurl': u.valid_url,
'publicurl': u.valid_url,
'service_id': u.not_null}
ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
public_port, expected)
if ret:
message = 'Keystone endpoint: {}'.format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_114_ceilometer_api_endpoint(self):
"""Verify the ceilometer api endpoint data."""
endpoints = self.keystone.endpoints.list()
u.log.debug(endpoints)
admin_port = internal_port = public_port = '8777'
expected = {'id': u.not_null,
'region': 'RegionOne',
'adminurl': u.valid_url,
'internalurl': u.valid_url,
'publicurl': u.valid_url,
'service_id': u.not_null}
ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
public_port, expected)
if ret:
message = 'Ceilometer endpoint: {}'.format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_200_ceilometer_identity_relation(self):
"""Verify the ceilometer to keystone identity-service relation data"""
u.log.debug('Checking ceilometer:keystone identity-service '
'relation data...')
unit = self.ceil_sentry
relation = ['identity-service', 'keystone:identity-service']
ceil_ip = unit.relation('identity-service',
'keystone:identity-service')['private-address']
ceil_endpoint = "http://%s:8777" % (ceil_ip)
expected = {
'admin_url': ceil_endpoint,
'internal_url': ceil_endpoint,
'private-address': ceil_ip,
'public_url': ceil_endpoint,
'region': 'RegionOne',
'requested_roles': 'ResellerAdmin',
'service': 'ceilometer',
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('ceilometer identity-service', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_201_keystone_ceilometer_identity_relation(self):
"""Verify the keystone to ceilometer identity-service relation data"""
u.log.debug('Checking keystone:ceilometer identity relation data...')
unit = self.keystone_sentry
relation = ['identity-service', 'ceilometer:identity-service']
id_relation = unit.relation('identity-service',
'ceilometer:identity-service')
id_ip = id_relation['private-address']
expected = {
'admin_token': 'ubuntutesting',
'auth_host': id_ip,
'auth_port': "35357",
'auth_protocol': 'http',
'private-address': id_ip,
'service_host': id_ip,
'service_password': u.not_null,
'service_port': "5000",
'service_protocol': 'http',
'service_tenant': 'services',
'service_tenant_id': u.not_null,
'service_username': 'ceilometer',
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('keystone identity-service', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_202_keystone_ceilometer_identity_notes_relation(self):
"""Verify ceilometer to keystone identity-notifications relation"""
u.log.debug('Checking keystone:ceilometer '
'identity-notifications relation data...')
unit = self.keystone_sentry
relation = ['identity-service', 'ceilometer:identity-notifications']
expected = {
'ceilometer-endpoint-changed': u.not_null,
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('keystone identity-notifications', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_203_ceilometer_amqp_relation(self):
"""Verify the ceilometer to rabbitmq-server amqp relation data"""
u.log.debug('Checking ceilometer:rabbitmq amqp relation data...')
unit = self.ceil_sentry
relation = ['amqp', 'rabbitmq-server:amqp']
expected = {
'username': 'ceilometer',
'private-address': u.valid_ip,
'vhost': 'openstack'
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('ceilometer amqp', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_204_amqp_ceilometer_relation(self):
"""Verify the rabbitmq-server to ceilometer amqp relation data"""
u.log.debug('Checking rabbitmq:ceilometer amqp relation data...')
unit = self.rabbitmq_sentry
relation = ['amqp', 'ceilometer:amqp']
expected = {
'hostname': u.valid_ip,
'private-address': u.valid_ip,
'password': u.not_null,
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('rabbitmq amqp', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_205_ceilometer_to_mongodb_relation(self):
"""Verify the ceilometer to mongodb relation data"""
u.log.debug('Checking ceilometer:mongodb relation data...')
unit = self.ceil_sentry
relation = ['shared-db', 'mongodb:database']
expected = {
'ceilometer_database': 'ceilometer',
'private-address': u.valid_ip,
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('ceilometer shared-db', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_206_mongodb_to_ceilometer_relation(self):
"""Verify the mongodb to ceilometer relation data"""
u.log.debug('Checking mongodb:ceilometer relation data...')
unit = self.mongodb_sentry
relation = ['database', 'ceilometer:shared-db']
expected = {
'hostname': u.valid_ip,
'port': '27017',
'private-address': u.valid_ip,
'type': 'database',
}
if self._get_openstack_release() == self.precise_icehouse:
expected['replset'] = 'myset'
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('mongodb database', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_207_ceilometer_ceilometer_agent_relation(self):
"""Verify the ceilometer to ceilometer-agent relation data"""
u.log.debug('Checking ceilometer:ceilometer-agent relation data...')
unit = self.ceil_sentry
relation = ['ceilometer-service',
'ceilometer-agent:ceilometer-service']
expected = {
'rabbitmq_user': 'ceilometer',
'verbose': 'False',
'rabbitmq_host': u.valid_ip,
'service_ports': "{'ceilometer_api': [8777, 8767]}",
'use_syslog': 'False',
'metering_secret': u.not_null,
'rabbitmq_virtual_host': 'openstack',
'db_port': '27017',
'private-address': u.valid_ip,
'db_name': 'ceilometer',
'db_host': u.valid_ip,
'debug': 'False',
'rabbitmq_password': u.not_null,
'port': '8767'
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('ceilometer-service', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_208_ceilometer_agent_ceilometer_relation(self):
"""Verify the ceilometer-agent to ceilometer relation data"""
u.log.debug('Checking ceilometer-agent:ceilometer relation data...')
unit = self.ceil_agent_sentry
relation = ['ceilometer-service', 'ceilometer:ceilometer-service']
expected = {'private-address': u.valid_ip}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('ceilometer-service', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_209_nova_compute_ceilometer_agent_relation(self):
"""Verify the nova-compute to ceilometer relation data"""
u.log.debug('Checking nova-compute:ceilometer relation data...')
unit = self.nova_sentry
relation = ['nova-ceilometer', 'ceilometer-agent:nova-ceilometer']
expected = {'private-address': u.valid_ip}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('nova-compute nova-ceilometer', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_210_ceilometer_agent_nova_compute_relation(self):
"""Verify the ceilometer to nova-compute relation data"""
u.log.debug('Checking ceilometer:nova-compute relation data...')
unit = self.ceil_agent_sentry
relation = ['nova-ceilometer', 'nova-compute:nova-ceilometer']
sub = ('{"nova": {"/etc/nova/nova.conf": {"sections": {"DEFAULT": '
'[["instance_usage_audit", "True"], '
'["instance_usage_audit_period", "hour"], '
'["notify_on_state_change", "vm_and_task_state"], '
'["notification_driver", "ceilometer.compute.nova_notifier"], '
'["notification_driver", '
'"nova.openstack.common.notifier.rpc_notifier"]]}}}}')
expected = {
'subordinate_configuration': sub,
'private-address': u.valid_ip
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('ceilometer-agent nova-ceilometer', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_300_ceilometer_config(self):
"""Verify the data in the ceilometer config file."""
u.log.debug('Checking ceilometer config file data...')
unit = self.ceil_sentry
rmq_rel = self.rabbitmq_sentry.relation('amqp',
'ceilometer:amqp')
ks_rel = self.keystone_sentry.relation('identity-service',
'ceilometer:identity-service')
auth_uri = '%s://%s:%s/' % (ks_rel['service_protocol'],
ks_rel['service_host'],
ks_rel['service_port'])
db_relation = self.mongodb_sentry.relation('database',
'ceilometer:shared-db')
db_conn = 'mongodb://%s:%s/ceilometer' % (db_relation['hostname'],
db_relation['port'])
conf = '/etc/ceilometer/ceilometer.conf'
expected = {
'DEFAULT': {
'verbose': 'False',
'debug': 'False',
'use_syslog': 'False',
'rabbit_userid': 'ceilometer',
'rabbit_virtual_host': 'openstack',
'rabbit_password': rmq_rel['password'],
'rabbit_host': rmq_rel['hostname'],
},
'api': {
'port': '8767',
},
'service_credentials': {
'os_auth_url': auth_uri + 'v2.0',
'os_tenant_name': 'services',
'os_username': 'ceilometer',
'os_password': ks_rel['service_password'],
},
'database': {
'connection': db_conn,
},
'keystone_authtoken': {
'auth_uri': auth_uri,
'auth_host': ks_rel['auth_host'],
'auth_port': ks_rel['auth_port'],
'auth_protocol': ks_rel['auth_protocol'],
'admin_tenant_name': 'services',
'admin_user': 'ceilometer',
'admin_password': ks_rel['service_password'],
},
}
for section, pairs in expected.iteritems():
ret = u.validate_config_data(unit, conf, section, pairs)
if ret:
message = "ceilometer config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_301_nova_config(self):
"""Verify data in the nova compute nova config file"""
u.log.debug('Checking nova compute config file...')
unit = self.nova_sentry
conf = '/etc/nova/nova.conf'
expected = {
'DEFAULT': {
'verbose': 'False',
'debug': 'False',
'use_syslog': 'False',
'my_ip': u.valid_ip,
'dhcpbridge_flagfile': '/etc/nova/nova.conf',
'dhcpbridge': '/usr/bin/nova-dhcpbridge',
'logdir': '/var/log/nova',
'state_path': '/var/lib/nova',
'api_paste_config': '/etc/nova/api-paste.ini',
'enabled_apis': 'ec2,osapi_compute,metadata',
'auth_strategy': 'keystone',
'instance_usage_audit': 'True',
'instance_usage_audit_period': 'hour',
'notify_on_state_change': 'vm_and_task_state',
}
}
# NOTE(beisner): notification_driver is not checked like the
# others, as configparser does not support duplicate config
# options, and dicts cant have duplicate keys.
for section, pairs in expected.iteritems():
ret = u.validate_config_data(unit, conf, section, pairs)
if ret:
message = "ceilometer config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
# Check notification_driver existence via simple grep cmd
lines = [('notification_driver = '
'ceilometer.compute.nova_notifier'),
('notification_driver = '
'nova.openstack.common.notifier.rpc_notifier')]
sentry_units = [unit]
cmds = []
for line in lines:
cmds.append('grep "{}" {}'.format(line, conf))
ret = u.check_commands_on_units(cmds, sentry_units)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_302_nova_ceilometer_config(self):
"""Verify data in the ceilometer config file on the
nova-compute (ceilometer-agent) unit."""
u.log.debug('Checking nova ceilometer config file...')
unit = self.nova_sentry
conf = '/etc/ceilometer/ceilometer.conf'
expected = {
'DEFAULT': {
'logdir': '/var/log/ceilometer'
},
'database': {
'backend': 'sqlalchemy',
'connection': 'sqlite:////var/lib/ceilometer/$sqlite_db'
}
}
for section, pairs in expected.iteritems():
ret = u.validate_config_data(unit, conf, section, pairs)
if ret:
message = "ceilometer config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_400_api_connection(self):
"""Simple api calls to check service is up and responding"""
u.log.debug('Checking api functionality...')
assert(self.ceil.samples.list() == [])
assert(self.ceil.meters.list() == [])
# NOTE(beisner): need to add more functional tests
def test_900_restart_on_config_change(self):
"""Verify that the specified services are restarted when the config
is changed.
"""
sentry = self.ceil_sentry
juju_service = 'ceilometer'
# Expected default and alternate values
set_default = {'debug': 'False'}
set_alternate = {'debug': 'True'}
# Config file affected by juju set config change
conf_file = '/etc/ceilometer/ceilometer.conf'
# Services which are expected to restart upon config change
services = [
'ceilometer-agent-central',
'ceilometer-collector',
'ceilometer-api',
'ceilometer-alarm-evaluator',
'ceilometer-alarm-notifier',
'ceilometer-agent-notification',
]
# Make config change, check for service restarts
u.log.debug('Making config change on {}...'.format(juju_service))
self.d.configure(juju_service, set_alternate)
sleep_time = 40
for s in services:
u.log.debug("Checking that service restarted: {}".format(s))
if not u.service_restarted(sentry, s,
conf_file, sleep_time=sleep_time,
pgrep_full=True):
self.d.configure(juju_service, set_default)
msg = "service {} didn't restart after config change".format(s)
amulet.raise_status(amulet.FAIL, msg=msg)
sleep_time = 0
self.d.configure(juju_service, set_default)

View File

@ -0,0 +1,38 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
# Bootstrap charm-helpers, installing its dependencies if necessary using
# only standard libraries.
import subprocess
import sys
try:
import six # flake8: noqa
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
import six # flake8: noqa
try:
import yaml # flake8: noqa
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
import yaml # flake8: noqa

View File

@ -0,0 +1,15 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.

View File

@ -0,0 +1,15 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.

View File

@ -0,0 +1,93 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import amulet
import os
import six
class AmuletDeployment(object):
"""Amulet deployment.
This class provides generic Amulet deployment and test runner
methods.
"""
def __init__(self, series=None):
"""Initialize the deployment environment."""
self.series = None
if series:
self.series = series
self.d = amulet.Deployment(series=self.series)
else:
self.d = amulet.Deployment()
def _add_services(self, this_service, other_services):
"""Add services.
Add services to the deployment where this_service is the local charm
that we're testing and other_services are the other services that
are being used in the local amulet tests.
"""
if this_service['name'] != os.path.basename(os.getcwd()):
s = this_service['name']
msg = "The charm's root directory name needs to be {}".format(s)
amulet.raise_status(amulet.FAIL, msg=msg)
if 'units' not in this_service:
this_service['units'] = 1
self.d.add(this_service['name'], units=this_service['units'])
for svc in other_services:
if 'location' in svc:
branch_location = svc['location']
elif self.series:
branch_location = 'cs:{}/{}'.format(self.series, svc['name'])
else:
branch_location = None
if 'units' not in svc:
svc['units'] = 1
self.d.add(svc['name'], charm=branch_location, units=svc['units'])
def _add_relations(self, relations):
"""Add all of the relations for the services."""
for k, v in six.iteritems(relations):
self.d.relate(k, v)
def _configure_services(self, configs):
"""Configure all of the services."""
for service, config in six.iteritems(configs):
self.d.configure(service, config)
def _deploy(self):
"""Deploy environment and wait for all hooks to finish executing."""
try:
self.d.setup(timeout=900)
self.d.sentry.wait(timeout=900)
except amulet.helpers.TimeoutError:
amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
except Exception:
raise
def run_tests(self):
"""Run all of the methods that are prefixed with 'test_'."""
for test in dir(self):
if test.startswith('test_'):
getattr(self, test)()
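# Usage sketch: a charm's amulet test would typically subclass
# AmuletDeployment along the lines below. The 'example-charm' service, its
# mysql relation and the 'debug' option are hypothetical placeholders, not
# part of charm-helpers or of this charm.
class ExampleBasicDeployment(AmuletDeployment):
    """Hypothetical deployment illustrating the helpers defined above."""

    def __init__(self, series='trusty'):
        super(ExampleBasicDeployment, self).__init__(series)
        # Local charm under test plus the supporting services it needs.
        self._add_services({'name': 'example-charm'},
                           [{'name': 'mysql'}])
        self._add_relations({'example-charm:shared-db': 'mysql:shared-db'})
        self._configure_services({'example-charm': {'debug': 'True'}})
        self._deploy()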

View File

@ -0,0 +1,533 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import amulet
import ConfigParser
import distro_info
import io
import logging
import os
import re
import six
import sys
import time
import urlparse
class AmuletUtils(object):
"""Amulet utilities.
This class provides common utility functions that are used by Amulet
tests.
"""
def __init__(self, log_level=logging.ERROR):
self.log = self.get_logger(level=log_level)
self.ubuntu_releases = self.get_ubuntu_releases()
def get_logger(self, name="amulet-logger", level=logging.DEBUG):
"""Get a logger object that will log to stdout."""
log = logging
logger = log.getLogger(name)
fmt = log.Formatter("%(asctime)s %(funcName)s "
"%(levelname)s: %(message)s")
handler = log.StreamHandler(stream=sys.stdout)
handler.setLevel(level)
handler.setFormatter(fmt)
logger.addHandler(handler)
logger.setLevel(level)
return logger
def valid_ip(self, ip):
if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
return True
else:
return False
def valid_url(self, url):
p = re.compile(
r'^(?:http|ftp)s?://'
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa
r'localhost|'
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
r'(?::\d+)?'
r'(?:/?|[/?]\S+)$',
re.IGNORECASE)
if p.match(url):
return True
else:
return False
def get_ubuntu_release_from_sentry(self, sentry_unit):
"""Get Ubuntu release codename from sentry unit.
:param sentry_unit: amulet sentry/service unit pointer
:returns: list of strings - release codename, failure message
"""
msg = None
cmd = 'lsb_release -cs'
release, code = sentry_unit.run(cmd)
if code == 0:
self.log.debug('{} lsb_release: {}'.format(
sentry_unit.info['unit_name'], release))
else:
msg = ('{} `{}` returned {} '
'{}'.format(sentry_unit.info['unit_name'],
cmd, release, code))
if release not in self.ubuntu_releases:
msg = ("Release ({}) not found in Ubuntu releases "
"({})".format(release, self.ubuntu_releases))
return release, msg
def validate_services(self, commands):
"""Validate that lists of commands succeed on service units. Can be
used to verify system services are running on the corresponding
service units.
:param commands: dict with sentry keys and arbitrary command list vals
:returns: None if successful, Failure string message otherwise
"""
self.log.debug('Checking status of system services...')
# /!\ DEPRECATION WARNING (beisner):
# New and existing tests should be rewritten to use
# validate_services_by_name() as it is aware of init systems.
self.log.warn('/!\\ DEPRECATION WARNING: use '
'validate_services_by_name instead of validate_services '
'due to init system differences.')
for k, v in six.iteritems(commands):
for cmd in v:
output, code = k.run(cmd)
self.log.debug('{} `{}` returned '
'{}'.format(k.info['unit_name'],
cmd, code))
if code != 0:
return "command `{}` returned {}".format(cmd, str(code))
return None
def validate_services_by_name(self, sentry_services):
"""Validate system service status by service name, automatically
detecting init system based on Ubuntu release codename.
:param sentry_services: dict with sentry keys and svc list values
:returns: None if successful, Failure string message otherwise
"""
self.log.debug('Checking status of system services...')
# Point at which systemd became a thing
systemd_switch = self.ubuntu_releases.index('vivid')
for sentry_unit, services_list in six.iteritems(sentry_services):
# Get lsb_release codename from unit
release, ret = self.get_ubuntu_release_from_sentry(sentry_unit)
if ret:
return ret
for service_name in services_list:
if (self.ubuntu_releases.index(release) >= systemd_switch or
service_name == "rabbitmq-server"):
# init is systemd
cmd = 'sudo service {} status'.format(service_name)
elif self.ubuntu_releases.index(release) < systemd_switch:
# init is upstart
cmd = 'sudo status {}'.format(service_name)
output, code = sentry_unit.run(cmd)
self.log.debug('{} `{}` returned '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code))
if code != 0:
return "command `{}` returned {}".format(cmd, str(code))
return None
def _get_config(self, unit, filename):
"""Get a ConfigParser object for parsing a unit's config file."""
file_contents = unit.file_contents(filename)
# NOTE(beisner): by default, ConfigParser does not handle options
# with no value, such as the flags used in the mysql my.cnf file.
# https://bugs.python.org/issue7005
config = ConfigParser.ConfigParser(allow_no_value=True)
config.readfp(io.StringIO(file_contents))
return config
def validate_config_data(self, sentry_unit, config_file, section,
expected):
"""Validate config file data.
Verify that the specified section of the config file contains
the expected option key:value pairs.
Compare expected dictionary data vs actual dictionary data.
The values in the 'expected' dictionary can be strings, bools, ints,
longs, or can be a function that evaluates a variable and returns a
bool.
"""
self.log.debug('Validating config file data ({} in {} on {})'
'...'.format(section, config_file,
sentry_unit.info['unit_name']))
config = self._get_config(sentry_unit, config_file)
if section != 'DEFAULT' and not config.has_section(section):
return "section [{}] does not exist".format(section)
for k in expected.keys():
if not config.has_option(section, k):
return "section [{}] is missing option {}".format(section, k)
actual = config.get(section, k)
v = expected[k]
if (isinstance(v, six.string_types) or
isinstance(v, bool) or
isinstance(v, six.integer_types)):
# handle explicit values
if actual != v:
return "section [{}] {}:{} != expected {}:{}".format(
section, k, actual, k, expected[k])
# handle function pointers, such as not_null or valid_ip
elif not v(actual):
return "section [{}] {}:{} != expected {}:{}".format(
section, k, actual, k, expected[k])
return None
def _validate_dict_data(self, expected, actual):
"""Validate dictionary data.
Compare expected dictionary data vs actual dictionary data.
The values in the 'expected' dictionary can be strings, bools, ints,
longs, or can be a function that evaluates a variable and returns a
bool.
"""
self.log.debug('actual: {}'.format(repr(actual)))
self.log.debug('expected: {}'.format(repr(expected)))
for k, v in six.iteritems(expected):
if k in actual:
if (isinstance(v, six.string_types) or
isinstance(v, bool) or
isinstance(v, six.integer_types)):
# handle explicit values
if v != actual[k]:
return "{}:{}".format(k, actual[k])
# handle function pointers, such as not_null or valid_ip
elif not v(actual[k]):
return "{}:{}".format(k, actual[k])
else:
return "key '{}' does not exist".format(k)
return None
def validate_relation_data(self, sentry_unit, relation, expected):
"""Validate actual relation data based on expected relation data."""
actual = sentry_unit.relation(relation[0], relation[1])
return self._validate_dict_data(expected, actual)
def _validate_list_data(self, expected, actual):
"""Compare expected list vs actual list data."""
for e in expected:
if e not in actual:
return "expected item {} not found in actual list".format(e)
return None
def not_null(self, string):
if string is not None:
return True
else:
return False
def _get_file_mtime(self, sentry_unit, filename):
"""Get last modification time of file."""
return sentry_unit.file_stat(filename)['mtime']
def _get_dir_mtime(self, sentry_unit, directory):
"""Get last modification time of directory."""
return sentry_unit.directory_stat(directory)['mtime']
def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
"""Get process' start time.
Determine start time of the process based on the last modification
time of the /proc/pid directory. If pgrep_full is True, the process
name is matched against the full command line.
"""
if pgrep_full:
cmd = 'pgrep -o -f {}'.format(service)
else:
cmd = 'pgrep -o {}'.format(service)
cmd = cmd + ' | grep -v pgrep || exit 0'
cmd_out = sentry_unit.run(cmd)
self.log.debug('CMDout: ' + str(cmd_out))
if cmd_out[0]:
self.log.debug('Pid for %s %s' % (service, str(cmd_out[0])))
proc_dir = '/proc/{}'.format(cmd_out[0].strip())
return self._get_dir_mtime(sentry_unit, proc_dir)
def service_restarted(self, sentry_unit, service, filename,
pgrep_full=False, sleep_time=20):
"""Check if service was restarted.
Compare a service's start time vs a file's last modification time
(such as a config file for that service) to determine if the service
has been restarted.
"""
time.sleep(sleep_time)
if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
self._get_file_mtime(sentry_unit, filename)):
return True
else:
return False
def service_restarted_since(self, sentry_unit, mtime, service,
pgrep_full=False, sleep_time=20,
retry_count=2):
"""Check if service was been started after a given time.
Args:
sentry_unit (sentry): The sentry unit to check for the service on
mtime (float): The epoch time to check against
service (string): service name to look for in process table
pgrep_full (boolean): Use full command line search mode with pgrep
sleep_time (int): Seconds to sleep before looking for process
retry_count (int): If service is not found, how many times to retry
Returns:
bool: True if service found and its start time is newer than mtime,
False if service is older than mtime or if service was
not found.
"""
self.log.debug('Checking %s restarted since %s' % (service, mtime))
time.sleep(sleep_time)
proc_start_time = self._get_proc_start_time(sentry_unit, service,
pgrep_full)
while retry_count > 0 and not proc_start_time:
self.log.debug('No pid file found for service %s, will retry %i '
'more times' % (service, retry_count))
time.sleep(30)
proc_start_time = self._get_proc_start_time(sentry_unit, service,
pgrep_full)
retry_count = retry_count - 1
if not proc_start_time:
self.log.warn('No proc start time found, assuming service did '
'not start')
return False
if proc_start_time >= mtime:
self.log.debug('proc start time is newer than provided mtime'
'(%s >= %s)' % (proc_start_time, mtime))
return True
else:
self.log.warn('proc start time (%s) is older than provided mtime '
'(%s), service did not restart' % (proc_start_time,
mtime))
return False
def config_updated_since(self, sentry_unit, filename, mtime,
sleep_time=20):
"""Check if file was modified after a given time.
Args:
sentry_unit (sentry): The sentry unit to check the file mtime on
filename (string): The file to check mtime of
mtime (float): The epoch time to check against
sleep_time (int): Seconds to sleep before looking for process
Returns:
bool: True if file was modified more recently than mtime, False if
file was modified before mtime.
"""
self.log.debug('Checking %s updated since %s' % (filename, mtime))
time.sleep(sleep_time)
file_mtime = self._get_file_mtime(sentry_unit, filename)
if file_mtime >= mtime:
self.log.debug('File mtime is newer than provided mtime '
'(%s >= %s)' % (file_mtime, mtime))
return True
else:
self.log.warn('File mtime %s is older than provided mtime %s'
% (file_mtime, mtime))
return False
def validate_service_config_changed(self, sentry_unit, mtime, service,
filename, pgrep_full=False,
sleep_time=20, retry_count=2):
"""Check service and file were updated after mtime
Args:
sentry_unit (sentry): The sentry unit to check for the service on
mtime (float): The epoch time to check against
service (string): service name to look for in process table
filename (string): The file to check mtime of
pgrep_full (boolean): Use full command line search mode with pgrep
sleep_time (int): Seconds to sleep before looking for process
retry_count (int): If service is not found, how many times to retry
Typical Usage:
u = OpenStackAmuletUtils(ERROR)
...
mtime = u.get_sentry_time(self.cinder_sentry)
self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'})
if not u.validate_service_config_changed(self.cinder_sentry,
mtime,
'cinder-api',
'/etc/cinder/cinder.conf'):
amulet.raise_status(amulet.FAIL, msg='update failed')
Returns:
bool: True if both service and file were updated/restarted after
mtime, False if service is older than mtime or if service was
not found or if filename was modified before mtime.
"""
self.log.debug('Checking %s restarted since %s' % (service, mtime))
time.sleep(sleep_time)
service_restart = self.service_restarted_since(sentry_unit, mtime,
service,
pgrep_full=pgrep_full,
sleep_time=0,
retry_count=retry_count)
config_update = self.config_updated_since(sentry_unit, filename, mtime,
sleep_time=0)
return service_restart and config_update
def get_sentry_time(self, sentry_unit):
"""Return current epoch time on a sentry"""
cmd = "date +'%s'"
return float(sentry_unit.run(cmd)[0])
def relation_error(self, name, data):
return 'unexpected relation data in {} - {}'.format(name, data)
def endpoint_error(self, name, data):
return 'unexpected endpoint data in {} - {}'.format(name, data)
def get_ubuntu_releases(self):
"""Return a list of all Ubuntu releases in order of release."""
_d = distro_info.UbuntuDistroInfo()
_release_list = _d.all
self.log.debug('Ubuntu release list: {}'.format(_release_list))
return _release_list
def file_to_url(self, file_rel_path):
"""Convert a relative file path to a file URL."""
_abs_path = os.path.abspath(file_rel_path)
return urlparse.urlparse(_abs_path, scheme='file').geturl()
def check_commands_on_units(self, commands, sentry_units):
"""Check that all commands in a list exit zero on all
sentry units in a list.
:param commands: list of bash commands
:param sentry_units: list of sentry unit pointers
:returns: None if successful; Failure message otherwise
"""
self.log.debug('Checking exit codes for {} commands on {} '
'sentry units...'.format(len(commands),
len(sentry_units)))
for sentry_unit in sentry_units:
for cmd in commands:
output, code = sentry_unit.run(cmd)
if code == 0:
self.log.debug('{} `{}` returned {} '
'(OK)'.format(sentry_unit.info['unit_name'],
cmd, code))
else:
return ('{} `{}` returned {} '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code, output))
return None
def get_process_id_list(self, sentry_unit, process_name):
"""Get a list of process ID(s) from a single sentry juju unit
for a single process name.
:param sentry_unit: Pointer to amulet sentry instance (juju unit)
:param process_name: Process name
:returns: List of process IDs
"""
cmd = 'pidof {}'.format(process_name)
output, code = sentry_unit.run(cmd)
if code != 0:
msg = ('{} `{}` returned {} '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code, output))
amulet.raise_status(amulet.FAIL, msg=msg)
return str(output).split()
def get_unit_process_ids(self, unit_processes):
"""Construct a dict containing unit sentries, process names, and
process IDs."""
pid_dict = {}
for sentry_unit, process_list in unit_processes.iteritems():
pid_dict[sentry_unit] = {}
for process in process_list:
pids = self.get_process_id_list(sentry_unit, process)
pid_dict[sentry_unit].update({process: pids})
return pid_dict
def validate_unit_process_ids(self, expected, actual):
"""Validate process id quantities for services on units."""
self.log.debug('Checking units for running processes...')
self.log.debug('Expected PIDs: {}'.format(expected))
self.log.debug('Actual PIDs: {}'.format(actual))
if len(actual) != len(expected):
return ('Unit count mismatch. expected, actual: {}, '
'{} '.format(len(expected), len(actual)))
for (e_sentry, e_proc_names) in expected.iteritems():
e_sentry_name = e_sentry.info['unit_name']
if e_sentry in actual.keys():
a_proc_names = actual[e_sentry]
else:
return ('Expected sentry ({}) not found in actual dict data.'
'{}'.format(e_sentry_name, e_sentry))
if len(e_proc_names.keys()) != len(a_proc_names.keys()):
return ('Process name count mismatch. expected, actual: {}, '
'{}'.format(len(e_proc_names), len(a_proc_names)))
for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \
zip(e_proc_names.items(), a_proc_names.items()):
if e_proc_name != a_proc_name:
return ('Process name mismatch. expected, actual: {}, '
'{}'.format(e_proc_name, a_proc_name))
a_pids_length = len(a_pids)
if e_pids_length != a_pids_length:
return ('PID count mismatch. {} ({}) expected, actual: '
'{}, {} ({})'.format(e_sentry_name, e_proc_name,
e_pids_length, a_pids_length,
a_pids))
else:
self.log.debug('PID check OK: {} {} {}: '
'{}'.format(e_sentry_name, e_proc_name,
e_pids_length, a_pids))
return None
def validate_list_of_identical_dicts(self, list_of_dicts):
"""Check that all dicts within a list are identical."""
hashes = []
for _dict in list_of_dicts:
hashes.append(hash(frozenset(_dict.items())))
self.log.debug('Hashes: {}'.format(hashes))
if len(set(hashes)) == 1:
self.log.debug('Dicts within list are identical')
else:
return 'Dicts within list are not identical'
return None
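# Usage sketch: the process helpers above are intended to be used together.
# The function below is a hypothetical example (unit and process names are
# placeholders) showing expected counts validated against actual PIDs.
def example_check_process_count(utils, sentry_unit):
    """Hypothetical helper: verify exactly one ceilometer-api process."""
    # Expected: {sentry: {process name: pid count}}
    expected = {sentry_unit: {'ceilometer-api': 1}}
    # Actual: {sentry: {process name: [pids]}}
    actual = utils.get_unit_process_ids({sentry_unit: ['ceilometer-api']})
    # Returns None on success, or a failure message string.
    return utils.validate_unit_process_ids(expected, actual)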

View File

@ -0,0 +1,15 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.

View File

@ -0,0 +1,15 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.

View File

@ -0,0 +1,183 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import six
from collections import OrderedDict
from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment
)
class OpenStackAmuletDeployment(AmuletDeployment):
"""OpenStack amulet deployment.
This class inherits from AmuletDeployment and has additional support
that is specifically for use by OpenStack charms.
"""
def __init__(self, series=None, openstack=None, source=None, stable=True):
"""Initialize the deployment environment."""
super(OpenStackAmuletDeployment, self).__init__(series)
self.openstack = openstack
self.source = source
self.stable = stable
# Note(coreycb): this needs to be changed when new next branches come
# out.
self.current_next = "trusty"
def _determine_branch_locations(self, other_services):
"""Determine the branch locations for the other services.
Determine if the local branch being tested is derived from its
stable or next (dev) branch, and based on this, use the corresponding
stable or next branches for the other_services."""
base_charms = ['mysql', 'mongodb']
if self.series in ['precise', 'trusty']:
base_series = self.series
else:
base_series = self.current_next
if self.stable:
for svc in other_services:
temp = 'lp:charms/{}/{}'
svc['location'] = temp.format(base_series,
svc['name'])
else:
for svc in other_services:
if svc['name'] in base_charms:
temp = 'lp:charms/{}/{}'
svc['location'] = temp.format(base_series,
svc['name'])
else:
temp = 'lp:~openstack-charmers/charms/{}/{}/next'
svc['location'] = temp.format(self.current_next,
svc['name'])
return other_services
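# For illustration (hypothetical inputs): with series='trusty' and
# stable=True, {'name': 'rabbitmq-server'} resolves to
# 'lp:charms/trusty/rabbitmq-server'; with stable=False, non-base charms
# resolve to 'lp:~openstack-charmers/charms/trusty/<name>/next' instead.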
def _add_services(self, this_service, other_services):
"""Add services to the deployment and set openstack-origin/source."""
other_services = self._determine_branch_locations(other_services)
super(OpenStackAmuletDeployment, self)._add_services(this_service,
other_services)
services = other_services
services.append(this_service)
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
'ceph-osd', 'ceph-radosgw']
# Most OpenStack subordinate charms do not expose an origin option
# as that is controlled by the principal charm.
ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch']
if self.openstack:
for svc in services:
if svc['name'] not in use_source + ignore:
config = {'openstack-origin': self.openstack}
self.d.configure(svc['name'], config)
if self.source:
for svc in services:
if svc['name'] in use_source and svc['name'] not in ignore:
config = {'source': self.source}
self.d.configure(svc['name'], config)
def _configure_services(self, configs):
"""Configure all of the services."""
for service, config in six.iteritems(configs):
self.d.configure(service, config)
def _get_openstack_release(self):
"""Get openstack release.
Return an integer representing the enum value of the openstack
release.
"""
# Must be ordered by OpenStack release (not by Ubuntu release):
(self.precise_essex, self.precise_folsom, self.precise_grizzly,
self.precise_havana, self.precise_icehouse,
self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
self.wily_liberty) = range(12)
releases = {
('precise', None): self.precise_essex,
('precise', 'cloud:precise-folsom'): self.precise_folsom,
('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
('precise', 'cloud:precise-havana'): self.precise_havana,
('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
('trusty', None): self.trusty_icehouse,
('trusty', 'cloud:trusty-juno'): self.trusty_juno,
('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
('utopic', None): self.utopic_juno,
('vivid', None): self.vivid_kilo,
('wily', None): self.wily_liberty}
return releases[(self.series, self.openstack)]
def _get_openstack_release_string(self):
"""Get openstack release string.
Return a string representing the openstack release.
"""
releases = OrderedDict([
('precise', 'essex'),
('quantal', 'folsom'),
('raring', 'grizzly'),
('saucy', 'havana'),
('trusty', 'icehouse'),
('utopic', 'juno'),
('vivid', 'kilo'),
('wily', 'liberty'),
])
if self.openstack:
os_origin = self.openstack.split(':')[1]
return os_origin.split('%s-' % self.series)[1].split('/')[0]
else:
return releases[self.series]
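# For example: (series='trusty', openstack=None) yields 'icehouse', while
# (series='trusty', openstack='cloud:trusty-kilo') yields 'kilo'.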
def get_ceph_expected_pools(self, radosgw=False):
"""Return a list of expected ceph pools in a ceph + cinder + glance
test scenario, based on OpenStack release and whether ceph radosgw
is flagged as present or not."""
if self._get_openstack_release() >= self.trusty_kilo:
# Kilo or later
pools = [
'rbd',
'cinder',
'glance'
]
else:
# Juno or earlier
pools = [
'data',
'metadata',
'rbd',
'cinder',
'glance'
]
if radosgw:
pools.extend([
'.rgw.root',
'.rgw.control',
'.rgw',
'.rgw.gc',
'.users.uid'
])
return pools

View File

@ -0,0 +1,604 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import amulet
import json
import logging
import os
import six
import time
import urllib
import cinderclient.v1.client as cinder_client
import glanceclient.v1.client as glance_client
import heatclient.v1.client as heat_client
import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client
import swiftclient
from charmhelpers.contrib.amulet.utils import (
AmuletUtils
)
DEBUG = logging.DEBUG
ERROR = logging.ERROR
class OpenStackAmuletUtils(AmuletUtils):
"""OpenStack amulet utilities.
This class inherits from AmuletUtils and has additional support
that is specifically for use by OpenStack charm tests.
"""
def __init__(self, log_level=ERROR):
"""Initialize the deployment environment."""
super(OpenStackAmuletUtils, self).__init__(log_level)
def validate_endpoint_data(self, endpoints, admin_port, internal_port,
public_port, expected):
"""Validate endpoint data.
Validate actual endpoint data vs expected endpoint data. The ports
are used to find the matching endpoint.
"""
self.log.debug('Validating endpoint data...')
self.log.debug('actual: {}'.format(repr(endpoints)))
found = False
for ep in endpoints:
self.log.debug('endpoint: {}'.format(repr(ep)))
if (admin_port in ep.adminurl and
internal_port in ep.internalurl and
public_port in ep.publicurl):
found = True
actual = {'id': ep.id,
'region': ep.region,
'adminurl': ep.adminurl,
'internalurl': ep.internalurl,
'publicurl': ep.publicurl,
'service_id': ep.service_id}
ret = self._validate_dict_data(expected, actual)
if ret:
return 'unexpected endpoint data - {}'.format(ret)
if not found:
return 'endpoint not found'
def validate_svc_catalog_endpoint_data(self, expected, actual):
"""Validate service catalog endpoint data.
Validate a list of actual service catalog endpoints vs a list of
expected service catalog endpoints.
"""
self.log.debug('Validating service catalog endpoint data...')
self.log.debug('actual: {}'.format(repr(actual)))
for k, v in six.iteritems(expected):
if k in actual:
ret = self._validate_dict_data(expected[k][0], actual[k][0])
if ret:
return self.endpoint_error(k, ret)
else:
return "endpoint {} does not exist".format(k)
return ret
def validate_tenant_data(self, expected, actual):
"""Validate tenant data.
Validate a list of actual tenant data vs list of expected tenant
data.
"""
self.log.debug('Validating tenant data...')
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
for act in actual:
a = {'enabled': act.enabled, 'description': act.description,
'name': act.name, 'id': act.id}
if e['name'] == a['name']:
found = True
ret = self._validate_dict_data(e, a)
if ret:
return "unexpected tenant data - {}".format(ret)
if not found:
return "tenant {} does not exist".format(e['name'])
return ret
def validate_role_data(self, expected, actual):
"""Validate role data.
Validate a list of actual role data vs a list of expected role
data.
"""
self.log.debug('Validating role data...')
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
for act in actual:
a = {'name': act.name, 'id': act.id}
if e['name'] == a['name']:
found = True
ret = self._validate_dict_data(e, a)
if ret:
return "unexpected role data - {}".format(ret)
if not found:
return "role {} does not exist".format(e['name'])
return ret
def validate_user_data(self, expected, actual):
"""Validate user data.
Validate a list of actual user data vs a list of expected user
data.
"""
self.log.debug('Validating user data...')
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
for act in actual:
a = {'enabled': act.enabled, 'name': act.name,
'email': act.email, 'tenantId': act.tenantId,
'id': act.id}
if e['name'] == a['name']:
found = True
ret = self._validate_dict_data(e, a)
if ret:
return "unexpected user data - {}".format(ret)
if not found:
return "user {} does not exist".format(e['name'])
return ret
def validate_flavor_data(self, expected, actual):
"""Validate flavor data.
Validate a list of actual flavors vs a list of expected flavors.
"""
self.log.debug('Validating flavor data...')
self.log.debug('actual: {}'.format(repr(actual)))
act = [a.name for a in actual]
return self._validate_list_data(expected, act)
def tenant_exists(self, keystone, tenant):
"""Return True if tenant exists."""
self.log.debug('Checking if tenant exists ({})...'.format(tenant))
return tenant in [t.name for t in keystone.tenants.list()]
def authenticate_cinder_admin(self, keystone_sentry, username,
password, tenant):
"""Authenticates admin user with cinder."""
# NOTE(beisner): cinder python client doesn't accept tokens.
service_ip = \
keystone_sentry.relation('shared-db',
'mysql:shared-db')['private-address']
ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
return cinder_client.Client(username, password, tenant, ept)
def authenticate_keystone_admin(self, keystone_sentry, user, password,
tenant):
"""Authenticates admin user with the keystone admin endpoint."""
self.log.debug('Authenticating keystone admin...')
unit = keystone_sentry
service_ip = unit.relation('shared-db',
'mysql:shared-db')['private-address']
ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
return keystone_client.Client(username=user, password=password,
tenant_name=tenant, auth_url=ep)
def authenticate_keystone_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with the keystone public endpoint."""
self.log.debug('Authenticating keystone user ({})...'.format(user))
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
return keystone_client.Client(username=user, password=password,
tenant_name=tenant, auth_url=ep)
def authenticate_glance_admin(self, keystone):
"""Authenticates admin user with glance."""
self.log.debug('Authenticating glance admin...')
ep = keystone.service_catalog.url_for(service_type='image',
endpoint_type='adminURL')
return glance_client.Client(ep, token=keystone.auth_token)
def authenticate_heat_admin(self, keystone):
"""Authenticates the admin user with heat."""
self.log.debug('Authenticating heat admin...')
ep = keystone.service_catalog.url_for(service_type='orchestration',
endpoint_type='publicURL')
return heat_client.Client(endpoint=ep, token=keystone.auth_token)
def authenticate_nova_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with nova-api."""
self.log.debug('Authenticating nova user ({})...'.format(user))
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
return nova_client.Client(username=user, api_key=password,
project_id=tenant, auth_url=ep)
def authenticate_swift_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with swift api."""
self.log.debug('Authenticating swift user ({})...'.format(user))
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
return swiftclient.Connection(authurl=ep,
user=user,
key=password,
tenant_name=tenant,
auth_version='2.0')
def create_cirros_image(self, glance, image_name):
"""Download the latest cirros image and upload it to glance,
validate and return a resource pointer.
:param glance: pointer to authenticated glance connection
:param image_name: display name for new image
:returns: glance image pointer
"""
self.log.debug('Creating glance cirros image '
'({})...'.format(image_name))
# Download cirros image
http_proxy = os.getenv('AMULET_HTTP_PROXY')
self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
if http_proxy:
proxies = {'http': http_proxy}
opener = urllib.FancyURLopener(proxies)
else:
opener = urllib.FancyURLopener()
f = opener.open('http://download.cirros-cloud.net/version/released')
version = f.read().strip()
cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
local_path = os.path.join('tests', cirros_img)
if not os.path.exists(local_path):
cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
version, cirros_img)
opener.retrieve(cirros_url, local_path)
f.close()
# Create glance image
with open(local_path) as f:
image = glance.images.create(name=image_name, is_public=True,
disk_format='qcow2',
container_format='bare', data=f)
# Wait for image to reach active status
img_id = image.id
ret = self.resource_reaches_status(glance.images, img_id,
expected_stat='active',
msg='Image status wait')
if not ret:
msg = 'Glance image failed to reach expected state.'
amulet.raise_status(amulet.FAIL, msg=msg)
# Re-validate new image
self.log.debug('Validating image attributes...')
val_img_name = glance.images.get(img_id).name
val_img_stat = glance.images.get(img_id).status
val_img_pub = glance.images.get(img_id).is_public
val_img_cfmt = glance.images.get(img_id).container_format
val_img_dfmt = glance.images.get(img_id).disk_format
msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
'container fmt:{} disk fmt:{}'.format(
val_img_name, val_img_pub, img_id,
val_img_stat, val_img_cfmt, val_img_dfmt))
if val_img_name == image_name and val_img_stat == 'active' \
and val_img_pub is True and val_img_cfmt == 'bare' \
and val_img_dfmt == 'qcow2':
self.log.debug(msg_attr)
else:
msg = ('Image validation failed, {}'.format(msg_attr))
amulet.raise_status(amulet.FAIL, msg=msg)
return image
def delete_image(self, glance, image):
"""Delete the specified image."""
# /!\ DEPRECATION WARNING
self.log.warn('/!\\ DEPRECATION WARNING: use '
'delete_resource instead of delete_image.')
self.log.debug('Deleting glance image ({})...'.format(image))
return self.delete_resource(glance.images, image, msg='glance image')
def create_instance(self, nova, image_name, instance_name, flavor):
"""Create the specified instance."""
self.log.debug('Creating instance '
'({}|{}|{})'.format(instance_name, image_name, flavor))
image = nova.images.find(name=image_name)
flavor = nova.flavors.find(name=flavor)
instance = nova.servers.create(name=instance_name, image=image,
flavor=flavor)
count = 1
status = instance.status
while status != 'ACTIVE' and count < 60:
time.sleep(3)
instance = nova.servers.get(instance.id)
status = instance.status
self.log.debug('instance status: {}'.format(status))
count += 1
if status != 'ACTIVE':
self.log.error('instance creation timed out')
return None
return instance
def delete_instance(self, nova, instance):
"""Delete the specified instance."""
# /!\ DEPRECATION WARNING
self.log.warn('/!\\ DEPRECATION WARNING: use '
'delete_resource instead of delete_instance.')
self.log.debug('Deleting instance ({})...'.format(instance))
return self.delete_resource(nova.servers, instance,
msg='nova instance')
def create_or_get_keypair(self, nova, keypair_name="testkey"):
"""Create a new keypair, or return pointer if it already exists."""
try:
_keypair = nova.keypairs.get(keypair_name)
self.log.debug('Keypair ({}) already exists, '
'using it.'.format(keypair_name))
return _keypair
except:
self.log.debug('Keypair ({}) does not exist, '
'creating it.'.format(keypair_name))
_keypair = nova.keypairs.create(name=keypair_name)
return _keypair
def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
img_id=None, src_vol_id=None, snap_id=None):
"""Create cinder volume, optionally from a glance image, OR
optionally as a clone of an existing volume, OR optionally
from a snapshot. Wait for the new volume status to reach
the expected status, validate and return a resource pointer.
:param vol_name: cinder volume display name
:param vol_size: size in gigabytes
:param img_id: optional glance image id
:param src_vol_id: optional source volume id to clone
:param snap_id: optional snapshot id to use
:returns: cinder volume pointer
"""
# Handle parameter input and avoid impossible combinations
if img_id and not src_vol_id and not snap_id:
# Create volume from image
self.log.debug('Creating cinder volume from glance image...')
bootable = 'true'
elif src_vol_id and not img_id and not snap_id:
# Clone an existing volume
self.log.debug('Cloning cinder volume...')
bootable = cinder.volumes.get(src_vol_id).bootable
elif snap_id and not src_vol_id and not img_id:
# Create volume from snapshot
self.log.debug('Creating cinder volume from snapshot...')
snap = cinder.volume_snapshots.find(id=snap_id)
vol_size = snap.size
snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
bootable = cinder.volumes.get(snap_vol_id).bootable
elif not img_id and not src_vol_id and not snap_id:
# Create volume
self.log.debug('Creating cinder volume...')
bootable = 'false'
else:
# Impossible combination of parameters
msg = ('Invalid method use - name:{} size:{} img_id:{} '
'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
img_id, src_vol_id,
snap_id))
amulet.raise_status(amulet.FAIL, msg=msg)
# Create new volume
try:
vol_new = cinder.volumes.create(display_name=vol_name,
imageRef=img_id,
size=vol_size,
source_volid=src_vol_id,
snapshot_id=snap_id)
vol_id = vol_new.id
except Exception as e:
msg = 'Failed to create volume: {}'.format(e)
amulet.raise_status(amulet.FAIL, msg=msg)
# Wait for volume to reach available status
ret = self.resource_reaches_status(cinder.volumes, vol_id,
expected_stat="available",
msg="Volume status wait")
if not ret:
msg = 'Cinder volume failed to reach expected state.'
amulet.raise_status(amulet.FAIL, msg=msg)
# Re-validate new volume
self.log.debug('Validating volume attributes...')
val_vol_name = cinder.volumes.get(vol_id).display_name
val_vol_boot = cinder.volumes.get(vol_id).bootable
val_vol_stat = cinder.volumes.get(vol_id).status
val_vol_size = cinder.volumes.get(vol_id).size
msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
'{} size:{}'.format(val_vol_name, vol_id,
val_vol_stat, val_vol_boot,
val_vol_size))
if val_vol_boot == bootable and val_vol_stat == 'available' \
and val_vol_name == vol_name and val_vol_size == vol_size:
self.log.debug(msg_attr)
else:
msg = ('Volume validation failed, {}'.format(msg_attr))
amulet.raise_status(amulet.FAIL, msg=msg)
return vol_new
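
# Illustrative sketch (not part of this change): a volume round trip built
# from the helpers above. 'u' is the utils instance, 'cinder' an
# authenticated cinder client and 'image_id' a pre-uploaded glance image id,
# all assumed to be provided by the calling test.
def example_volume_round_trip(u, cinder, image_id):
    """Create a bootable volume from an image, then clean it up again."""
    vol = u.create_cinder_volume(cinder, vol_name='demo-vol', vol_size=1,
                                 img_id=image_id)
    return u.delete_resource(cinder.volumes, vol.id, msg='cinder volume')
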
def delete_resource(self, resource, resource_id,
msg="resource", max_wait=120):
"""Delete one openstack resource, such as one instance, keypair,
image, volume, stack, etc., and confirm deletion within max wait time.
:param resource: pointer to os resource type, ex:glance_client.images
:param resource_id: unique name or id for the openstack resource
:param msg: text to identify purpose in logging
:param max_wait: maximum wait time in seconds
:returns: True if successful, otherwise False
"""
self.log.debug('Deleting OpenStack resource '
'{} ({})'.format(resource_id, msg))
num_before = len(list(resource.list()))
resource.delete(resource_id)
tries = 0
num_after = len(list(resource.list()))
while num_after != (num_before - 1) and tries < (max_wait / 4):
self.log.debug('{} delete check: '
'{} [{}:{}] {}'.format(msg, tries,
num_before,
num_after,
resource_id))
time.sleep(4)
num_after = len(list(resource.list()))
tries += 1
self.log.debug('{}: expected, actual count = {}, '
'{}'.format(msg, num_before - 1, num_after))
if num_after == (num_before - 1):
return True
else:
self.log.error('{} delete timed out'.format(msg))
return False
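
# Illustrative sketch (not part of this change): delete_resource() works for
# any client manager exposing list() and delete(), not just nova servers.
# 'glance' is assumed to be an authenticated glance client held by the test.
def example_delete_image(u, glance, image_id):
    """Remove a test image and confirm it disappears within two minutes."""
    return u.delete_resource(glance.images, image_id,
                             msg='glance image', max_wait=120)
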
def resource_reaches_status(self, resource, resource_id,
expected_stat='available',
msg='resource', max_wait=120):
"""Wait for an openstack resources status to reach an
expected status within a specified time. Useful to confirm that
nova instances, cinder vols, snapshots, glance images, heat stacks
and other resources eventually reach the expected status.
:param resource: pointer to os resource type, ex: heat_client.stacks
:param resource_id: unique id for the openstack resource
:param expected_stat: status to expect resource to reach
:param msg: text to identify purpose in logging
:param max_wait: maximum wait time in seconds
:returns: True if successful, False if status is not reached
"""
tries = 0
resource_stat = resource.get(resource_id).status
while resource_stat != expected_stat and tries < (max_wait / 4):
self.log.debug('{} status check: '
'{} [{}:{}] {}'.format(msg, tries,
resource_stat,
expected_stat,
resource_id))
time.sleep(4)
resource_stat = resource.get(resource_id).status
tries += 1
self.log.debug('{}: expected, actual status = {}, '
'{}'.format(msg, expected_stat, resource_stat))
if resource_stat == expected_stat:
return True
else:
self.log.debug('{} never reached expected status: '
'{}'.format(resource_id, expected_stat))
return False
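
# Illustrative sketch (not part of this change): waiting on a cinder
# snapshot with resource_reaches_status(). 'cinder' is assumed to be an
# authenticated cinder client and 'snap_id' a snapshot created earlier.
def example_wait_for_snapshot(u, cinder, snap_id):
    """Block until a cinder snapshot becomes available, or time out."""
    return u.resource_reaches_status(cinder.volume_snapshots, snap_id,
                                     expected_stat='available',
                                     msg='cinder snapshot', max_wait=300)
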
def get_ceph_osd_id_cmd(self, index):
"""Produce a shell command that will return a ceph-osd id."""
return ("`initctl list | grep 'ceph-osd ' | "
"awk 'NR=={} {{ print $2 }}' | "
"grep -o '[0-9]*'`".format(index + 1))
def get_ceph_pools(self, sentry_unit):
"""Return a dict of ceph pools from a single ceph unit, with
pool name as keys, pool id as vals."""
pools = {}
cmd = 'sudo ceph osd lspools'
output, code = sentry_unit.run(cmd)
if code != 0:
msg = ('{} `{}` returned {} '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code, output))
amulet.raise_status(amulet.FAIL, msg=msg)
# Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
for pool in str(output).split(','):
pool_id_name = pool.split(' ')
if len(pool_id_name) == 2:
pool_id = pool_id_name[0]
pool_name = pool_id_name[1]
pools[pool_name] = int(pool_id)
self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
pools))
return pools
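
# Illustrative sketch (not part of this change): resolving a pool name to
# its id before sampling it. The 'cinder' pool name is only an example and
# may differ between deployments.
def example_lookup_pool_id(u, sentry_unit, pool_name='cinder'):
    """Return the numeric id of a named ceph pool, or None if absent."""
    pools = u.get_ceph_pools(sentry_unit)
    return pools.get(pool_name)
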
def get_ceph_df(self, sentry_unit):
"""Return dict of ceph df json output, including ceph pool state.
:param sentry_unit: Pointer to amulet sentry instance (juju unit)
:returns: Dict of ceph df output
"""
cmd = 'sudo ceph df --format=json'
output, code = sentry_unit.run(cmd)
if code != 0:
msg = ('{} `{}` returned {} '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code, output))
amulet.raise_status(amulet.FAIL, msg=msg)
return json.loads(output)
def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
"""Take a sample of attributes of a ceph pool, returning ceph
pool name, object count and disk space used for the specified
pool ID number.
:param sentry_unit: Pointer to amulet sentry instance (juju unit)
:param pool_id: Ceph pool ID
:returns: List of pool name, object count, kb disk space used
"""
df = self.get_ceph_df(sentry_unit)
pool_name = df['pools'][pool_id]['name']
obj_count = df['pools'][pool_id]['stats']['objects']
kb_used = df['pools'][pool_id]['stats']['kb_used']
self.log.debug('Ceph {} pool (ID {}): {} objects, '
'{} kb used'.format(pool_name, pool_id,
obj_count, kb_used))
return pool_name, obj_count, kb_used
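
# Illustrative sketch (not part of this change): collecting one object-count
# sample for later comparison. Pool id 0 is simply the first pool reported
# by 'ceph df' and is an assumption of this example.
def example_sample_object_count(u, sentry_unit, pool_id=0):
    """Return the current object count for the given pool id."""
    _, obj_count, _ = u.get_ceph_pool_sample(sentry_unit, pool_id)
    return obj_count
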
def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
"""Validate ceph pool samples taken over time, such as pool
object counts or pool kb used, before adding, after adding, and
after deleting items which affect those pool attributes. The
2nd element is expected to be greater than the 1st; 3rd is expected
to be less than the 2nd.
:param samples: List containing 3 data samples
:param sample_type: String for logging and usage context
:returns: None if successful, Failure message otherwise
"""
original, created, deleted = range(3)
if samples[created] <= samples[original] or \
samples[deleted] >= samples[created]:
return ('Ceph {} samples ({}) '
'unexpected.'.format(sample_type, samples))
else:
self.log.debug('Ceph {} samples (OK): '
'{}'.format(sample_type, samples))
return None
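
# Illustrative sketch (not part of this change): the intended
# before/after/after-delete sampling pattern around a pool-touching action.
# 'make_object' and 'remove_object' are hypothetical callables standing in
# for whatever test step adds and removes data in the pool.
def example_check_pool_growth(u, sentry_unit, make_object, remove_object,
                              pool_id=0):
    """Sample a pool around a create/delete cycle and validate the trend."""
    samples = [u.get_ceph_pool_sample(sentry_unit, pool_id)[1]]
    make_object()
    samples.append(u.get_ceph_pool_sample(sentry_unit, pool_id)[1])
    remove_object()
    samples.append(u.get_ceph_pool_sample(sentry_unit, pool_id)[1])
    return u.validate_ceph_pool_samples(samples, 'pool object count')
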

19
tests/tests.yaml Normal file
View File

@ -0,0 +1,19 @@
bootstrap: true
reset: true
virtualenv: true
makefile:
- lint
- test
sources:
- ppa:juju/stable
packages:
- amulet
- python-amulet
- python-ceilometerclient
- python-cinderclient
- python-distro-info
- python-glanceclient
- python-heatclient
- python-keystoneclient
- python-novaclient
- python-swiftclient

View File

@ -1,4 +1,4 @@
from mock import patch, call, MagicMock
from mock import call, MagicMock
import ceilometer_utils as utils