[corey.bryant,r=gnuoy] Add Amulet basic tests

Liam Young 2014-09-17 07:36:31 +01:00
commit f40ff9e6c0
29 changed files with 1609 additions and 28 deletions


@@ -1,11 +1,25 @@
 #!/usr/bin/make
+PYTHON := /usr/bin/env python
 
 lint:
-	@flake8 --exclude hooks/charmhelpers hooks
+	@flake8 --exclude hooks/charmhelpers hooks tests
 	@charm proof
 
-sync:
-	@charm-helper-sync -c charm-helpers-sync.yaml
+test:
+	@echo Starting Amulet tests...
+	# coreycb note: The -v should only be temporary until Amulet sends
+	# raise_status() messages to stderr:
+	#   https://bugs.launchpad.net/amulet/+bug/1320357
+	@juju test -v -p AMULET_HTTP_PROXY
+
+bin/charm_helpers_sync.py:
+	@mkdir -p bin
+	@bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
+	        > bin/charm_helpers_sync.py
+
+sync: bin/charm_helpers_sync.py
+	$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
+	$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
 
 publish: lint
 	bzr push lp:charms/ceph

charm-helpers-tests.yaml Normal file

@@ -0,0 +1,5 @@
branch: lp:charm-helpers
destination: tests/charmhelpers
include:
- contrib.amulet
- contrib.openstack.amulet


@@ -4,7 +4,7 @@ from functools import partial
 
 from charmhelpers.fetch import apt_install
 from charmhelpers.core.hookenv import (
-    ERROR, log,
+    ERROR, log, config,
 )
 
 try:
@@ -154,3 +154,21 @@ def _get_for_address(address, key):
 
 get_iface_for_address = partial(_get_for_address, key='iface')
 get_netmask_for_address = partial(_get_for_address, key='netmask')
+
+
+def get_ipv6_addr(iface="eth0"):
+    try:
+        iface_addrs = netifaces.ifaddresses(iface)
+        if netifaces.AF_INET6 not in iface_addrs:
+            raise Exception("Interface '%s' doesn't have an ipv6 address." % iface)
+
+        addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6]
+        ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80')
+                     and config('vip') != a['addr']]
+        if not ipv6_addr:
+            raise Exception("Interface '%s' doesn't have global ipv6 address." % iface)
+
+        return ipv6_addr[0]
+
+    except ValueError:
+        raise ValueError("Invalid interface '%s'" % iface)

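The address-selection rule that get_ipv6_addr() adds — skip link-local ('fe80...') addresses and the configured VIP, then take the first remaining address — can be sketched in isolation with canned netifaces-style data (the addresses below are invented for illustration):

# Canned AF_INET6 entries as netifaces would report them (invented values).
addresses = [{'addr': 'fe80::f816:3eff:fe2a:1'},  # link-local: skipped
             {'addr': '2001:db8::10'},            # matches the VIP: skipped
             {'addr': '2001:db8::2'}]             # first global address: chosen
vip = '2001:db8::10'

ipv6_addr = [a['addr'] for a in addresses
             if not a['addr'].startswith('fe80') and vip != a['addr']]
assert ipv6_addr[0] == '2001:db8::2'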

@@ -46,5 +46,8 @@ def is_device_mounted(device):
     :returns: boolean: True if the path represents a mounted device, False if
         it doesn't.
     '''
+    is_partition = bool(re.search(r".*[0-9]+\b", device))
     out = check_output(['mount'])
+    if is_partition:
+        return bool(re.search(device + r"\b", out))
     return bool(re.search(device + r"[0-9]+\b", out))

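The new partition test changes which regex is applied; the same matching logic can be exercised standalone against a canned mount listing (paths invented):

import re

# Stand-in for check_output(['mount']) (invented listing).
out = "/dev/vda1 on / type ext4 (rw)\n/dev/vdb1 on /mnt type ext4 (rw)\n"

def is_device_mounted(device):
    # A trailing digit means 'device' is itself a partition (/dev/vdb1),
    # so it must appear verbatim; a bare disk (/dev/vdb) counts as mounted
    # when any of its numbered partitions does.
    is_partition = bool(re.search(r".*[0-9]+\b", device))
    if is_partition:
        return bool(re.search(device + r"\b", out))
    return bool(re.search(device + r"[0-9]+\b", out))

assert is_device_mounted('/dev/vdb1')      # partition, matched directly
assert is_device_mounted('/dev/vdb')       # disk, matched via /dev/vdb1
assert not is_device_mounted('/dev/vdc')   # nothing from this disk mounted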

@@ -285,8 +285,9 @@ def relation_get(attribute=None, unit=None, rid=None):
         raise
 
 
-def relation_set(relation_id=None, relation_settings={}, **kwargs):
+def relation_set(relation_id=None, relation_settings=None, **kwargs):
     """Set relation information for the current unit"""
+    relation_settings = relation_settings if relation_settings else {}
     relation_cmd_line = ['relation-set']
     if relation_id is not None:
         relation_cmd_line.extend(('-r', relation_id))

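The relation_set() signature change is the standard fix for Python's mutable-default-argument trap; a self-contained sketch of the difference:

def with_mutable_default(settings={}):
    # One dict object is created at def time and shared by every call.
    settings['calls'] = settings.get('calls', 0) + 1
    return settings['calls']

def with_none_default(settings=None):
    # A fresh dict per call, exactly as relation_set() now does.
    settings = settings if settings else {}
    settings['calls'] = settings.get('calls', 0) + 1
    return settings['calls']

assert with_mutable_default() == 1
assert with_mutable_default() == 2  # state leaked across calls
assert with_none_default() == 1
assert with_none_default() == 1     # no leakage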

@@ -12,6 +12,8 @@ import random
 import string
 import subprocess
 import hashlib
+import shutil
+from contextlib import contextmanager
 
 from collections import OrderedDict
@@ -52,7 +54,7 @@ def service(action, service_name):
 def service_running(service):
     """Determine whether a system service is running"""
     try:
-        output = subprocess.check_output(['service', service, 'status'])
+        output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)
     except subprocess.CalledProcessError:
         return False
     else:
@@ -62,6 +64,16 @@ def service_running(service):
         return False
 
 
+def service_available(service_name):
+    """Determine whether a system service is available"""
+    try:
+        subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
+    except subprocess.CalledProcessError:
+        return False
+    else:
+        return True
+
+
 def adduser(username, password=None, shell='/bin/bash', system_user=False):
     """Add a user to the system"""
     try:
@@ -329,3 +341,24 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
         pkgcache = apt_pkg.Cache()
     pkg = pkgcache[package]
     return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
+
+
+@contextmanager
+def chdir(d):
+    cur = os.getcwd()
+    try:
+        yield os.chdir(d)
+    finally:
+        os.chdir(cur)
+
+
+def chownr(path, owner, group):
+    uid = pwd.getpwnam(owner).pw_uid
+    gid = grp.getgrnam(group).gr_gid
+
+    for root, dirs, files in os.walk(path):
+        for name in dirs + files:
+            full = os.path.join(root, name)
+            broken_symlink = os.path.lexists(full) and not os.path.exists(full)
+            if not broken_symlink:
+                os.chown(full, uid, gid)

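A short usage sketch for the new chdir() helper (chownr() is analogous but needs ownership privileges, so it is only described); the scratch directory here is created on the fly:

import os
import tempfile
from contextlib import contextmanager

@contextmanager
def chdir(d):  # same shape as the helper added above
    cur = os.getcwd()
    try:
        yield os.chdir(d)
    finally:
        os.chdir(cur)

workdir = tempfile.mkdtemp()
with chdir(workdir):
    # Relative paths now resolve inside workdir.
    open('scratch.txt', 'w').close()
assert os.path.exists(os.path.join(workdir, 'scratch.txt'))  # cwd restored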

@@ -0,0 +1,2 @@
from .base import *
from .helpers import *


@@ -0,0 +1,310 @@
import os
import re
import json
from collections import Iterable
from charmhelpers.core import host
from charmhelpers.core import hookenv
__all__ = ['ServiceManager', 'ManagerCallback',
'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
'service_restart', 'service_stop']
class ServiceManager(object):
def __init__(self, services=None):
"""
Register a list of services, given their definitions.
Service definitions are dicts in the following formats (all keys except
'service' are optional)::
{
"service": <service name>,
"required_data": <list of required data contexts>,
"provided_data": <list of provided data contexts>,
"data_ready": <one or more callbacks>,
"data_lost": <one or more callbacks>,
"start": <one or more callbacks>,
"stop": <one or more callbacks>,
"ports": <list of ports to manage>,
}
The 'required_data' list should contain dicts of required data (or
dependency managers that act like dicts and know how to collect the data).
Only when all items in the 'required_data' list are populated are the
'data_ready' and 'start' callbacks executed. See `is_ready()` for more
information.
The 'provided_data' list should contain relation data providers, most likely
a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
that will indicate a set of data to set on a given relation.
The 'data_ready' value should be either a single callback, or a list of
callbacks, to be called when all items in 'required_data' pass `is_ready()`.
Each callback will be called with the service name as the only parameter.
After all of the 'data_ready' callbacks are called, the 'start' callbacks
are fired.
The 'data_lost' value should be either a single callback, or a list of
callbacks, to be called when a 'required_data' item no longer passes
`is_ready()`. Each callback will be called with the service name as the
only parameter. After all of the 'data_lost' callbacks are called,
the 'stop' callbacks are fired.
The 'start' value should be either a single callback, or a list of
callbacks, to be called when starting the service, after the 'data_ready'
callbacks are complete. Each callback will be called with the service
name as the only parameter. This defaults to
`[host.service_start, services.open_ports]`.
The 'stop' value should be either a single callback, or a list of
callbacks, to be called when stopping the service. If the service is
being stopped because it no longer has all of its 'required_data', this
will be called after all of the 'data_lost' callbacks are complete.
Each callback will be called with the service name as the only parameter.
This defaults to `[services.close_ports, host.service_stop]`.
The 'ports' value should be a list of ports to manage. The default
'start' handler will open the ports after the service is started,
and the default 'stop' handler will close the ports prior to stopping
the service.
Examples:
The following registers an Upstart service called bingod that depends on
a mongodb relation and which runs a custom `db_migrate` function prior to
restarting the service, and a Runit service called spadesd::
manager = services.ServiceManager([
{
'service': 'bingod',
'ports': [80, 443],
'required_data': [MongoRelation(), config(), {'my': 'data'}],
'data_ready': [
services.template(source='bingod.conf'),
services.template(source='bingod.ini',
target='/etc/bingod.ini',
owner='bingo', perms=0400),
],
},
{
'service': 'spadesd',
'data_ready': services.template(source='spadesd_run.j2',
target='/etc/sv/spadesd/run',
perms=0555),
'start': runit_start,
'stop': runit_stop,
},
])
manager.manage()
"""
self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
self._ready = None
self.services = {}
for service in services or []:
service_name = service['service']
self.services[service_name] = service
def manage(self):
"""
Handle the current hook by doing The Right Thing with the registered services.
"""
hook_name = hookenv.hook_name()
if hook_name == 'stop':
self.stop_services()
else:
self.provide_data()
self.reconfigure_services()
def provide_data(self):
"""
Set the relation data for each provider in the ``provided_data`` list.
A provider must have a `name` attribute, which indicates which relation
to set data on, and a `provide_data()` method, which returns a dict of
data to set.
"""
hook_name = hookenv.hook_name()
for service in self.services.values():
for provider in service.get('provided_data', []):
if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
data = provider.provide_data()
_ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data
if _ready:
hookenv.relation_set(None, data)
def reconfigure_services(self, *service_names):
"""
Update all files for one or more registered services, and,
if ready, optionally restart them.
If no service names are given, reconfigures all registered services.
"""
for service_name in service_names or self.services.keys():
if self.is_ready(service_name):
self.fire_event('data_ready', service_name)
self.fire_event('start', service_name, default=[
service_restart,
manage_ports])
self.save_ready(service_name)
else:
if self.was_ready(service_name):
self.fire_event('data_lost', service_name)
self.fire_event('stop', service_name, default=[
manage_ports,
service_stop])
self.save_lost(service_name)
def stop_services(self, *service_names):
"""
Stop one or more registered services, by name.
If no service names are given, stops all registered services.
"""
for service_name in service_names or self.services.keys():
self.fire_event('stop', service_name, default=[
manage_ports,
service_stop])
def get_service(self, service_name):
"""
Given the name of a registered service, return its service definition.
"""
service = self.services.get(service_name)
if not service:
raise KeyError('Service not registered: %s' % service_name)
return service
def fire_event(self, event_name, service_name, default=None):
"""
Fire a data_ready, data_lost, start, or stop event on a given service.
"""
service = self.get_service(service_name)
callbacks = service.get(event_name, default)
if not callbacks:
return
if not isinstance(callbacks, Iterable):
callbacks = [callbacks]
for callback in callbacks:
if isinstance(callback, ManagerCallback):
callback(self, service_name, event_name)
else:
callback(service_name)
def is_ready(self, service_name):
"""
Determine if a registered service is ready, by checking its 'required_data'.
A 'required_data' item can be any mapping type, and is considered ready
if `bool(item)` evaluates as True.
"""
service = self.get_service(service_name)
reqs = service.get('required_data', [])
return all(bool(req) for req in reqs)
def _load_ready_file(self):
if self._ready is not None:
return
if os.path.exists(self._ready_file):
with open(self._ready_file) as fp:
self._ready = set(json.load(fp))
else:
self._ready = set()
def _save_ready_file(self):
if self._ready is None:
return
with open(self._ready_file, 'w') as fp:
json.dump(list(self._ready), fp)
def save_ready(self, service_name):
"""
Save an indicator that the given service is now data_ready.
"""
self._load_ready_file()
self._ready.add(service_name)
self._save_ready_file()
def save_lost(self, service_name):
"""
Save an indicator that the given service is no longer data_ready.
"""
self._load_ready_file()
self._ready.discard(service_name)
self._save_ready_file()
def was_ready(self, service_name):
"""
Determine if the given service was previously data_ready.
"""
self._load_ready_file()
return service_name in self._ready
class ManagerCallback(object):
"""
Special case of a callback that takes the `ServiceManager` instance
in addition to the service name.
Subclasses should implement `__call__` which should accept three parameters:
* `manager` The `ServiceManager` instance
* `service_name` The name of the service it's being triggered for
* `event_name` The name of the event that this callback is handling
"""
def __call__(self, manager, service_name, event_name):
raise NotImplementedError()
class PortManagerCallback(ManagerCallback):
"""
Callback class that will open or close ports, for use as either
a start or stop action.
"""
def __call__(self, manager, service_name, event_name):
service = manager.get_service(service_name)
new_ports = service.get('ports', [])
port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
if os.path.exists(port_file):
with open(port_file) as fp:
old_ports = fp.read().split(',')
for old_port in old_ports:
if bool(old_port):
old_port = int(old_port)
if old_port not in new_ports:
hookenv.close_port(old_port)
with open(port_file, 'w') as fp:
fp.write(','.join(str(port) for port in new_ports))
for port in new_ports:
if event_name == 'start':
hookenv.open_port(port)
elif event_name == 'stop':
hookenv.close_port(port)
def service_stop(service_name):
"""
Wrapper around host.service_stop to prevent spurious "unknown service"
messages in the logs.
"""
if host.service_running(service_name):
host.service_stop(service_name)
def service_restart(service_name):
"""
Wrapper around host.service_restart to prevent spurious "unknown service"
messages in the logs.
"""
if host.service_available(service_name):
if host.service_running(service_name):
host.service_restart(service_name)
else:
host.service_start(service_name)
# Convenience aliases
open_ports = close_ports = manage_ports = PortManagerCallback()
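Putting services/base.py to work: a charm can symlink all of its hooks to one file that builds a ServiceManager and calls manage(). A minimal hypothetical sketch (the service name, template, and required-data dict are invented, and it only runs inside a Juju hook environment):

#!/usr/bin/python
from charmhelpers.core.services.base import ServiceManager
from charmhelpers.core.services import helpers

manager = ServiceManager([
    {
        'service': 'bingod',                # invented Upstart job name
        'ports': [80],
        # Any dict-like object works; a truthy one means "ready".
        'required_data': [{'my': 'data'}],
        'data_ready': helpers.template(source='bingod.conf',
                                       target='/etc/bingod.conf'),
    },
])
manager.manage()  # the 'stop' hook stops services; all others reconfigure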


@@ -0,0 +1,125 @@
from charmhelpers.core import hookenv
from charmhelpers.core import templating
from charmhelpers.core.services.base import ManagerCallback
__all__ = ['RelationContext', 'TemplateCallback',
'render_template', 'template']
class RelationContext(dict):
"""
Base class for a context generator that gets relation data from juju.
Subclasses must provide the attributes `name`, which is the name of the
interface of interest, `interface`, which is the type of the interface of
interest, and `required_keys`, which is the set of keys required for the
relation to be considered complete. The data for all interfaces matching
the `name` attribute that are complete will be used to populate the dictionary
values (see `get_data`, below).
The generated context will be namespaced under the relation `name`, to prevent
potential naming conflicts.
"""
name = None
interface = None
required_keys = []
def __init__(self, *args, **kwargs):
super(RelationContext, self).__init__(*args, **kwargs)
self.get_data()
def __bool__(self):
"""
Returns True if all of the required_keys are available.
"""
return self.is_ready()
__nonzero__ = __bool__
def __repr__(self):
return super(RelationContext, self).__repr__()
def is_ready(self):
"""
Returns True if all of the `required_keys` are available from any units.
"""
ready = len(self.get(self.name, [])) > 0
if not ready:
hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
return ready
def _is_ready(self, unit_data):
"""
Helper method that tests a set of relation data and returns True if
all of the `required_keys` are present.
"""
return set(unit_data.keys()).issuperset(set(self.required_keys))
def get_data(self):
"""
Retrieve the relation data for each unit involved in a relation and,
if complete, store it in a list under `self[self.name]`. This
is automatically called when the RelationContext is instantiated.
The units are sorted lexicographically first by the service ID, then by
the unit ID. Thus, if an interface has two other services, 'db:1'
and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
set of data, the relation data for the units will be stored in the
order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
If you only care about a single unit on the relation, you can just
access it as `{{ interface[0]['key'] }}`. However, if you can at all
support multiple units on a relation, you should iterate over the list,
like::
{% for unit in interface -%}
{{ unit['key'] }}{% if not loop.last %},{% endif %}
{%- endfor %}
Note that since all sets of relation data from all related services and
units are in a single list, if you need to know which service or unit a
set of data came from, you'll need to extend this class to preserve
that information.
"""
if not hookenv.relation_ids(self.name):
return
ns = self.setdefault(self.name, [])
for rid in sorted(hookenv.relation_ids(self.name)):
for unit in sorted(hookenv.related_units(rid)):
reldata = hookenv.relation_get(rid=rid, unit=unit)
if self._is_ready(reldata):
ns.append(reldata)
def provide_data(self):
"""
Return data to be relation_set for this interface.
"""
return {}
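To make the RelationContext contract concrete, here is a hedged sketch of a subclass (the interface and keys are invented) and the namespaced structure get_data() would build inside a hook:

from charmhelpers.core.services.helpers import RelationContext

class DatabaseRelation(RelationContext):
    name = 'db'                      # relation name from metadata.yaml
    interface = 'mysql'              # interface type of interest
    required_keys = ['host', 'port']

# Instantiated inside a hook, get_data() runs automatically; with two
# complete remote units the context would look roughly like:
#   {'db': [{'host': '10.0.0.1', 'port': '3306'},
#           {'host': '10.0.0.2', 'port': '3306'}]}
# and bool(ctx) is True, so ServiceManager treats the service as ready.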
class TemplateCallback(ManagerCallback):
"""
Callback class that will render a template, for use as a ready action.
"""
def __init__(self, source, target, owner='root', group='root', perms=0444):
self.source = source
self.target = target
self.owner = owner
self.group = group
self.perms = perms
def __call__(self, manager, service_name, event_name):
service = manager.get_service(service_name)
context = {}
for ctx in service.get('required_data', []):
context.update(ctx)
templating.render(self.source, self.target, context,
self.owner, self.group, self.perms)
# Convenience aliases for templates
render_template = template = TemplateCallback


@@ -0,0 +1,51 @@
import os
from charmhelpers.core import host
from charmhelpers.core import hookenv
def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
"""
Render a template.
The `source` path, if not absolute, is relative to the `templates_dir`.
The `target` path should be absolute.
The context should be a dict containing the values to be replaced in the
template.
The `owner`, `group`, and `perms` options will be passed to `write_file`.
If omitted, `templates_dir` defaults to the `templates` folder in the charm.
Note: Using this requires python-jinja2; if it is not installed, calling
this will attempt to use charmhelpers.fetch.apt_install to install it.
"""
try:
from jinja2 import FileSystemLoader, Environment, exceptions
except ImportError:
try:
from charmhelpers.fetch import apt_install
except ImportError:
hookenv.log('Could not import jinja2, and could not import '
'charmhelpers.fetch to install it',
level=hookenv.ERROR)
raise
apt_install('python-jinja2', fatal=True)
from jinja2 import FileSystemLoader, Environment, exceptions
if templates_dir is None:
templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
loader = Environment(loader=FileSystemLoader(templates_dir))
try:
source = source
template = loader.get_template(source)
except exceptions.TemplateNotFound as e:
hookenv.log('Could not load template %s from %s.' %
(source, templates_dir),
level=hookenv.ERROR)
raise e
content = template.render(context)
host.mkdir(os.path.dirname(target))
host.write_file(target, content, owner, group, perms)

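A hedged example of invoking the renderer (the template and target names are invented; it needs python-jinja2, or a hook environment where apt_install can fetch it, plus permission to write the target):

from charmhelpers.core.templating import render

# Renders templates/myapp.conf.j2 with this context and writes the result
# to the target as root:root 0444 (the defaults in the signature above).
render(source='myapp.conf.j2',
       target='/etc/myapp/myapp.conf',
       context={'listen_port': 8080, 'workers': 4})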

@@ -1,4 +1,5 @@
 import importlib
+from tempfile import NamedTemporaryFile
 import time
 from yaml import safe_load
 from charmhelpers.core.host import (
@@ -122,6 +123,7 @@ def filter_installed_packages(packages):
     # Tell apt to build an in-memory cache to prevent race conditions (if
     # another process is already building the cache).
     apt_pkg.config.set("Dir::Cache::pkgcache", "")
+    apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
 
     cache = apt_pkg.Cache()
     _pkgs = []
@@ -201,6 +203,27 @@ def apt_hold(packages, fatal=False):
 def add_source(source, key=None):
+    """Add a package source to this system.
+
+    @param source: a URL or sources.list entry, as supported by
+    add-apt-repository(1). Examples:
+        ppa:charmers/example
+        deb https://stub:key@private.example.com/ubuntu trusty main
+
+    In addition:
+        'proposed:' may be used to enable the standard 'proposed'
+        pocket for the release.
+        'cloud:' may be used to activate official cloud archive pockets,
+        such as 'cloud:icehouse'
+
+    @param key: A key to be added to the system's APT keyring and used
+    to verify the signatures on packages. Ideally, this should be an
+    ASCII format GPG public key including the block headers. A GPG key
+    id may also be used, but be aware that only insecure protocols are
+    available to retrieve the actual public key from a public keyserver
+    placing your Juju environment at risk. ppa and cloud archive keys
+    are securely added automatically, so should not be provided.
+    """
     if source is None:
         log('Source is not present. Skipping')
         return
@@ -225,10 +248,23 @@ def add_source(source, key=None):
         release = lsb_release()['DISTRIB_CODENAME']
         with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
             apt.write(PROPOSED_POCKET.format(release))
+    else:
+        raise SourceConfigError("Unknown source: {!r}".format(source))
+
     if key:
-        subprocess.check_call(['apt-key', 'adv', '--keyserver',
-                               'hkp://keyserver.ubuntu.com:80', '--recv',
-                               key])
+        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
+            with NamedTemporaryFile() as key_file:
+                key_file.write(key)
+                key_file.flush()
+                key_file.seek(0)
+                subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
+        else:
+            # Note that hkp: is in no way a secure protocol. Using a
+            # GPG key id is pointless from a security POV unless you
+            # absolutely trust your network and DNS.
+            subprocess.check_call(['apt-key', 'adv', '--keyserver',
+                                   'hkp://keyserver.ubuntu.com:80', '--recv',
+                                   key])
 def configure_sources(update=False,
@@ -238,7 +274,8 @@ def configure_sources(update=False,
     Configure multiple sources from charm configuration.
 
     The lists are encoded as yaml fragments in the configuration.
-    The fragment needs to be included as a string.
+    The fragment needs to be included as a string. Sources and their
+    corresponding keys are of the types supported by add_source().
 
     Example config:
         install_sources: |

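For reference, a few hedged example calls to the updated add_source() (the values are placeholders, and the calls need root and network access to actually run):

from charmhelpers.fetch import add_source

add_source('ppa:charmers/example')             # PPA; key handled securely
add_source('cloud:precise-updates/icehouse')   # cloud archive pocket
add_source('proposed:')                        # enable the proposed pocket
# Explicit sources.list line plus an ASCII-armoured key (truncated placeholder):
add_source('deb http://private.example.com/ubuntu trusty main',
           key='-----BEGIN PGP PUBLIC KEY BLOCK-----\n...')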

@@ -1,35 +1,35 @@
 [global]
 {% if old_auth %}
- auth supported = {{ auth_supported }}
+auth supported = {{ auth_supported }}
 {% else %}
- auth cluster required = {{ auth_supported }}
- auth service required = {{ auth_supported }}
- auth client required = {{ auth_supported }}
+auth cluster required = {{ auth_supported }}
+auth service required = {{ auth_supported }}
+auth client required = {{ auth_supported }}
 {% endif %}
- keyring = /etc/ceph/$cluster.$name.keyring
- mon host = {{ mon_hosts }}
- fsid = {{ fsid }}
+keyring = /etc/ceph/$cluster.$name.keyring
+mon host = {{ mon_hosts }}
+fsid = {{ fsid }}
- log to syslog = {{ use_syslog }}
- err to syslog = {{ use_syslog }}
- clog to syslog = {{ use_syslog }}
- mon cluster log to syslog = {{ use_syslog }}
+log to syslog = {{ use_syslog }}
+err to syslog = {{ use_syslog }}
+clog to syslog = {{ use_syslog }}
+mon cluster log to syslog = {{ use_syslog }}
 {%- if ceph_public_network is string %}
- public network = {{ ceph_public_network }}
+public network = {{ ceph_public_network }}
 {%- endif %}
 {%- if ceph_cluster_network is string %}
- cluster network = {{ ceph_cluster_network }}
+cluster network = {{ ceph_cluster_network }}
 {%- endif %}
 
 [mon]
- keyring = /var/lib/ceph/mon/$cluster-$id/keyring
+keyring = /var/lib/ceph/mon/$cluster-$id/keyring
 
 [mds]
- keyring = /var/lib/ceph/mds/$cluster-$id/keyring
+keyring = /var/lib/ceph/mds/$cluster-$id/keyring
 
 [osd]
- keyring = /var/lib/ceph/osd/$cluster-$id/keyring
- osd journal size = {{ osd_journal_size }}
- filestore xattr use omap = true
+keyring = /var/lib/ceph/osd/$cluster-$id/keyring
+osd journal size = {{ osd_journal_size }}
+filestore xattr use omap = true

tests/00-setup Executable file

@@ -0,0 +1,8 @@
#!/bin/bash
set -ex
sudo add-apt-repository --yes ppa:juju/stable
sudo apt-get update --yes
sudo apt-get install --yes python-amulet
sudo apt-get install --yes python-keystoneclient

tests/12-basic-precise-grizzly Executable file

@@ -0,0 +1,11 @@
#!/usr/bin/python
"""Amulet tests on a basic ceph deployment on precise-grizzly."""
from basic_deployment import CephBasicDeployment
if __name__ == '__main__':
deployment = CephBasicDeployment(series='precise',
openstack='cloud:precise-grizzly',
source='cloud:precise-updates/grizzly')
deployment.run_tests()

tests/13-basic-precise-havana Executable file

@@ -0,0 +1,11 @@
#!/usr/bin/python
"""Amulet tests on a basic ceph deployment on precise-havana."""
from basic_deployment import CephBasicDeployment
if __name__ == '__main__':
deployment = CephBasicDeployment(series='precise',
openstack='cloud:precise-havana',
source='cloud:precise-updates/havana')
deployment.run_tests()

tests/14-basic-precise-icehouse Executable file

@@ -0,0 +1,11 @@
#!/usr/bin/python
"""Amulet tests on a basic ceph deployment on precise-icehouse."""
from basic_deployment import CephBasicDeployment
if __name__ == '__main__':
deployment = CephBasicDeployment(series='precise',
openstack='cloud:precise-icehouse',
source='cloud:precise-updates/icehouse')
deployment.run_tests()

tests/15-basic-trusty-icehouse Executable file

@@ -0,0 +1,9 @@
#!/usr/bin/python
"""Amulet tests on a basic ceph deployment on trusty-icehouse."""
from basic_deployment import CephBasicDeployment
if __name__ == '__main__':
deployment = CephBasicDeployment(series='trusty')
deployment.run_tests()

tests/README Normal file

@@ -0,0 +1,47 @@
This directory provides Amulet tests that focus on verification of ceph
deployments.
If you use a web proxy server to access the web, you'll need to set the
AMULET_HTTP_PROXY environment variable to the http URL of the proxy server.
The following examples demonstrate different ways that tests can be executed.
All examples are run from the charm's root directory.
* To run all tests (starting with 00-setup):
make test
* To run a specific test module (or modules):
juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
* To run a specific test module (or modules), and keep the environment
deployed after a failure:
juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
* To re-run a test module against an already deployed environment (one
that was deployed by a previous call to 'juju test --set-e'):
./tests/15-basic-trusty-icehouse
For debugging and test development purposes, all code should be idempotent.
In other words, the code should have the ability to be re-run without changing
the results beyond the initial run. This enables editing and re-running of a
test module against an already deployed environment, as described above.
Manual debugging tips:
* Set the following env vars before using the OpenStack CLI as admin:
export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=openstack
export OS_REGION_NAME=RegionOne
* Set the following env vars before using the OpenStack CLI as demoUser:
export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
export OS_TENANT_NAME=demoTenant
export OS_USERNAME=demoUser
export OS_PASSWORD=password
export OS_REGION_NAME=RegionOne

tests/basic_deployment.py Normal file

@@ -0,0 +1,302 @@
#!/usr/bin/python
import amulet
from charmhelpers.contrib.openstack.amulet.deployment import (
OpenStackAmuletDeployment
)
from charmhelpers.contrib.openstack.amulet.utils import ( # noqa
OpenStackAmuletUtils,
DEBUG,
ERROR
)
# Use DEBUG to turn on debug logging
u = OpenStackAmuletUtils(ERROR)
class CephBasicDeployment(OpenStackAmuletDeployment):
"""Amulet tests on a basic ceph deployment."""
def __init__(self, series=None, openstack=None, source=None):
"""Deploy the entire test environment."""
super(CephBasicDeployment, self).__init__(series, openstack, source)
self._add_services()
self._add_relations()
self._configure_services()
self._deploy()
self._initialize_tests()
def _add_services(self):
"""Add services
Add the services that we're testing, including the number of units,
where ceph is local, and mysql and cinder are from the charm
store.
"""
this_service = ('ceph', 3)
other_services = [('mysql', 1), ('keystone', 1),
('rabbitmq-server', 1), ('nova-compute', 1),
('glance', 1), ('cinder', 1)]
super(CephBasicDeployment, self)._add_services(this_service,
other_services)
def _add_relations(self):
"""Add all of the relations for the services."""
relations = {
'nova-compute:shared-db': 'mysql:shared-db',
'nova-compute:amqp': 'rabbitmq-server:amqp',
'nova-compute:image-service': 'glance:image-service',
'nova-compute:ceph': 'ceph:client',
'keystone:shared-db': 'mysql:shared-db',
'glance:shared-db': 'mysql:shared-db',
'glance:identity-service': 'keystone:identity-service',
'glance:amqp': 'rabbitmq-server:amqp',
'glance:ceph': 'ceph:client',
'cinder:shared-db': 'mysql:shared-db',
'cinder:identity-service': 'keystone:identity-service',
'cinder:amqp': 'rabbitmq-server:amqp',
'cinder:image-service': 'glance:image-service',
'cinder:ceph': 'ceph:client'
}
super(CephBasicDeployment, self)._add_relations(relations)
def _configure_services(self):
"""Configure all of the services."""
keystone_config = {'admin-password': 'openstack',
'admin-token': 'ubuntutesting'}
mysql_config = {'dataset-size': '50%'}
cinder_config = {'block-device': 'None', 'glance-api-version': '2'}
ceph_config = {
'monitor-count': '3',
'auth-supported': 'none',
'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc',
'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==',
'osd-reformat': 'yes',
'ephemeral-unmount': '/mnt'
}
if self._get_openstack_release() >= self.precise_grizzly:
ceph_config['osd-devices'] = '/dev/vdb /srv/ceph'
else:
ceph_config['osd-devices'] = '/dev/vdb'
configs = {'keystone': keystone_config,
'mysql': mysql_config,
'cinder': cinder_config,
'ceph': ceph_config}
super(CephBasicDeployment, self)._configure_services(configs)
def _initialize_tests(self):
"""Perform final initialization before tests get run."""
# Access the sentries for inspecting service units
self.mysql_sentry = self.d.sentry.unit['mysql/0']
self.keystone_sentry = self.d.sentry.unit['keystone/0']
self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0']
self.nova_compute_sentry = self.d.sentry.unit['nova-compute/0']
self.glance_sentry = self.d.sentry.unit['glance/0']
self.cinder_sentry = self.d.sentry.unit['cinder/0']
self.ceph0_sentry = self.d.sentry.unit['ceph/0']
self.ceph1_sentry = self.d.sentry.unit['ceph/1']
self.ceph2_sentry = self.d.sentry.unit['ceph/2']
# Authenticate admin with keystone
self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
user='admin',
password='openstack',
tenant='admin')
# Authenticate admin with glance endpoint
self.glance = u.authenticate_glance_admin(self.keystone)
# Create a demo tenant/role/user
self.demo_tenant = 'demoTenant'
self.demo_role = 'demoRole'
self.demo_user = 'demoUser'
if not u.tenant_exists(self.keystone, self.demo_tenant):
tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant,
description='demo tenant',
enabled=True)
self.keystone.roles.create(name=self.demo_role)
self.keystone.users.create(name=self.demo_user,
password='password',
tenant_id=tenant.id,
email='demo@demo.com')
# Authenticate demo user with keystone
self.keystone_demo = u.authenticate_keystone_user(self.keystone,
self.demo_user,
'password',
self.demo_tenant)
# Authenticate demo user with nova-api
self.nova_demo = u.authenticate_nova_user(self.keystone,
self.demo_user,
'password',
self.demo_tenant)
def _ceph_osd_id(self, index):
"""Produce a shell command that will return a ceph-osd id."""
return "`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}' | grep -o '[0-9]*'`".format(index + 1) # noqa
def test_services(self):
"""Verify the expected services are running on the service units."""
ceph_services = ['status ceph-mon-all',
'status ceph-mon id=`hostname`']
commands = {
self.mysql_sentry: ['status mysql'],
self.rabbitmq_sentry: ['sudo service rabbitmq-server status'],
self.nova_compute_sentry: ['status nova-compute'],
self.keystone_sentry: ['status keystone'],
self.glance_sentry: ['status glance-registry',
'status glance-api'],
self.cinder_sentry: ['status cinder-api',
'status cinder-scheduler',
'status cinder-volume']
}
if self._get_openstack_release() >= self.precise_grizzly:
ceph_osd0 = 'status ceph-osd id={}'.format(self._ceph_osd_id(0))
ceph_osd1 = 'status ceph-osd id={}'.format(self._ceph_osd_id(1))
ceph_services.extend([ceph_osd0, ceph_osd1, 'status ceph-osd-all'])
commands[self.ceph0_sentry] = ceph_services
commands[self.ceph1_sentry] = ceph_services
commands[self.ceph2_sentry] = ceph_services
else:
ceph_osd0 = 'status ceph-osd id={}'.format(self._ceph_osd_id(0))
ceph_services.append(ceph_osd0)
commands[self.ceph0_sentry] = ceph_services
commands[self.ceph1_sentry] = ceph_services
commands[self.ceph2_sentry] = ceph_services
ret = u.validate_services(commands)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_ceph_nova_client_relation(self):
"""Verify the ceph to nova ceph-client relation data."""
unit = self.ceph0_sentry
relation = ['client', 'nova-compute:ceph']
expected = {
'private-address': u.valid_ip,
'auth': 'none',
'key': u.not_null
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('ceph to nova ceph-client', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_nova_ceph_client_relation(self):
"""Verify the nova to ceph ceph-client relation data."""
unit = self.nova_compute_sentry
relation = ['ceph', 'ceph:client']
expected = {
'private-address': u.valid_ip
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('nova to ceph ceph-client', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_ceph_glance_client_relation(self):
"""Verify the ceph to glance ceph-client relation data."""
unit = self.ceph1_sentry
relation = ['client', 'glance:ceph']
expected = {
'private-address': u.valid_ip,
'auth': 'none',
'key': u.not_null
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('ceph to glance ceph-client', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_glance_ceph_client_relation(self):
"""Verify the glance to ceph ceph-client relation data."""
unit = self.glance_sentry
relation = ['ceph', 'ceph:client']
expected = {
'private-address': u.valid_ip
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('glance to ceph ceph-client', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_ceph_cinder_client_relation(self):
"""Verify the ceph to cinder ceph-client relation data."""
unit = self.ceph2_sentry
relation = ['client', 'cinder:ceph']
expected = {
'private-address': u.valid_ip,
'auth': 'none',
'key': u.not_null
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('ceph to cinder ceph-client', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_cinder_ceph_client_relation(self):
"""Verify the cinder to ceph ceph-client relation data."""
unit = self.cinder_sentry
relation = ['ceph', 'ceph:client']
expected = {
'private-address': u.valid_ip
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('cinder to ceph ceph-client', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_ceph_config(self):
"""Verify the data in the ceph config file."""
unit = self.ceph0_sentry
conf = '/etc/ceph/ceph.conf'
expected = {
'global': {
'keyring': '/etc/ceph/$cluster.$name.keyring',
'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc',
'log to syslog': 'false',
'err to syslog': 'false',
'clog to syslog': 'false',
'mon cluster log to syslog': 'false'
},
'mon': {
'keyring': '/var/lib/ceph/mon/$cluster-$id/keyring'
},
'mds': {
'keyring': '/var/lib/ceph/mds/$cluster-$id/keyring'
},
'osd': {
'keyring': '/var/lib/ceph/osd/$cluster-$id/keyring',
'osd journal size': '1024',
'filestore xattr use omap': 'true'
},
}
if self._get_openstack_release() >= self.precise_grizzly:
expected['global']['auth cluster required'] = 'none'
expected['global']['auth service required'] = 'none'
expected['global']['auth client required'] = 'none'
else:
expected['global']['auth supported'] = 'none'
for section, pairs in expected.iteritems():
ret = u.validate_config_data(unit, conf, section, pairs)
if ret:
message = "ceph config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_restart_on_config_change(self):
"""Verify the specified services are restarted on config change."""
# NOTE(coreycb): Test not implemented but should it be? ceph services
# aren't restarted by charm after config change. Should
# they be restarted?
if self._get_openstack_release() >= self.precise_essex:
u.log.error("Test not implemented")
return


@@ -0,0 +1,71 @@
import amulet
import os
class AmuletDeployment(object):
"""Amulet deployment.
This class provides generic Amulet deployment and test runner
methods.
"""
def __init__(self, series=None):
"""Initialize the deployment environment."""
self.series = None
if series:
self.series = series
self.d = amulet.Deployment(series=self.series)
else:
self.d = amulet.Deployment()
def _add_services(self, this_service, other_services):
"""Add services.
Add services to the deployment where this_service is the local charm
that we're focused on testing and other_services are the other
charms that come from the charm store.
"""
name, units = range(2)
if this_service[name] != os.path.basename(os.getcwd()):
s = this_service[name]
msg = "The charm's root directory name needs to be {}".format(s)
amulet.raise_status(amulet.FAIL, msg=msg)
self.d.add(this_service[name], units=this_service[units])
for svc in other_services:
if self.series:
self.d.add(svc[name],
charm='cs:{}/{}'.format(self.series, svc[name]),
units=svc[units])
else:
self.d.add(svc[name], units=svc[units])
def _add_relations(self, relations):
"""Add all of the relations for the services."""
for k, v in relations.iteritems():
self.d.relate(k, v)
def _configure_services(self, configs):
"""Configure all of the services."""
for service, config in configs.iteritems():
self.d.configure(service, config)
def _deploy(self):
"""Deploy environment and wait for all hooks to finish executing."""
try:
self.d.setup()
self.d.sentry.wait(timeout=900)
except amulet.helpers.TimeoutError:
amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
except Exception:
raise
def run_tests(self):
"""Run all of the methods that are prefixed with 'test_'."""
for test in dir(self):
if test.startswith('test_'):
getattr(self, test)()


@@ -0,0 +1,176 @@
import ConfigParser
import io
import logging
import re
import sys
import time
class AmuletUtils(object):
"""Amulet utilities.
This class provides common utility functions that are used by Amulet
tests.
"""
def __init__(self, log_level=logging.ERROR):
self.log = self.get_logger(level=log_level)
def get_logger(self, name="amulet-logger", level=logging.DEBUG):
"""Get a logger object that will log to stdout."""
log = logging
logger = log.getLogger(name)
fmt = log.Formatter("%(asctime)s %(funcName)s "
"%(levelname)s: %(message)s")
handler = log.StreamHandler(stream=sys.stdout)
handler.setLevel(level)
handler.setFormatter(fmt)
logger.addHandler(handler)
logger.setLevel(level)
return logger
def valid_ip(self, ip):
if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
return True
else:
return False
def valid_url(self, url):
p = re.compile(
r'^(?:http|ftp)s?://'
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa
r'localhost|'
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
r'(?::\d+)?'
r'(?:/?|[/?]\S+)$',
re.IGNORECASE)
if p.match(url):
return True
else:
return False
def validate_services(self, commands):
"""Validate services.
Verify the specified services are running on the corresponding
service units.
"""
for k, v in commands.iteritems():
for cmd in v:
output, code = k.run(cmd)
if code != 0:
return "command `{}` returned {}".format(cmd, str(code))
return None
def _get_config(self, unit, filename):
"""Get a ConfigParser object for parsing a unit's config file."""
file_contents = unit.file_contents(filename)
config = ConfigParser.ConfigParser()
config.readfp(io.StringIO(file_contents))
return config
def validate_config_data(self, sentry_unit, config_file, section,
expected):
"""Validate config file data.
Verify that the specified section of the config file contains
the expected option key:value pairs.
"""
config = self._get_config(sentry_unit, config_file)
if section != 'DEFAULT' and not config.has_section(section):
return "section [{}] does not exist".format(section)
for k in expected.keys():
if not config.has_option(section, k):
return "section [{}] is missing option {}".format(section, k)
if config.get(section, k) != expected[k]:
return "section [{}] {}:{} != expected {}:{}".format(
section, k, config.get(section, k), k, expected[k])
return None
def _validate_dict_data(self, expected, actual):
"""Validate dictionary data.
Compare expected dictionary data vs actual dictionary data.
The values in the 'expected' dictionary can be strings, bools, ints,
longs, or can be a function that evaluates a variable and returns a
bool.
"""
for k, v in expected.iteritems():
if k in actual:
if (isinstance(v, basestring) or
isinstance(v, bool) or
isinstance(v, (int, long))):
if v != actual[k]:
return "{}:{}".format(k, actual[k])
elif not v(actual[k]):
return "{}:{}".format(k, actual[k])
else:
return "key '{}' does not exist".format(k)
return None
def validate_relation_data(self, sentry_unit, relation, expected):
"""Validate actual relation data based on expected relation data."""
actual = sentry_unit.relation(relation[0], relation[1])
self.log.debug('actual: {}'.format(repr(actual)))
return self._validate_dict_data(expected, actual)
def _validate_list_data(self, expected, actual):
"""Compare expected list vs actual list data."""
for e in expected:
if e not in actual:
return "expected item {} not found in actual list".format(e)
return None
def not_null(self, string):
if string is not None:
return True
else:
return False
def _get_file_mtime(self, sentry_unit, filename):
"""Get last modification time of file."""
return sentry_unit.file_stat(filename)['mtime']
def _get_dir_mtime(self, sentry_unit, directory):
"""Get last modification time of directory."""
return sentry_unit.directory_stat(directory)['mtime']
def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
"""Get process' start time.
Determine start time of the process based on the last modification
time of the /proc/pid directory. If pgrep_full is True, the process
name is matched against the full command line.
"""
if pgrep_full:
cmd = 'pgrep -o -f {}'.format(service)
else:
cmd = 'pgrep -o {}'.format(service)
proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip())
return self._get_dir_mtime(sentry_unit, proc_dir)
def service_restarted(self, sentry_unit, service, filename,
pgrep_full=False, sleep_time=20):
"""Check if service was restarted.
Compare a service's start time vs a file's last modification time
(such as a config file for that service) to determine if the service
has been restarted.
"""
time.sleep(sleep_time)
if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
self._get_file_mtime(sentry_unit, filename)):
return True
else:
return False
def relation_error(self, name, data):
return 'unexpected relation data in {} - {}'.format(name, data)
def endpoint_error(self, name, data):
return 'unexpected endpoint data in {} - {}'.format(name, data)


@@ -0,0 +1,61 @@
from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment
)
class OpenStackAmuletDeployment(AmuletDeployment):
"""OpenStack amulet deployment.
This class inherits from AmuletDeployment and has additional support
that is specifically for use by OpenStack charms.
"""
def __init__(self, series=None, openstack=None, source=None):
"""Initialize the deployment environment."""
super(OpenStackAmuletDeployment, self).__init__(series)
self.openstack = openstack
self.source = source
def _add_services(self, this_service, other_services):
"""Add services to the deployment and set openstack-origin."""
super(OpenStackAmuletDeployment, self)._add_services(this_service,
other_services)
name = 0
services = other_services
services.append(this_service)
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
if self.openstack:
for svc in services:
if svc[name] not in use_source:
config = {'openstack-origin': self.openstack}
self.d.configure(svc[name], config)
if self.source:
for svc in services:
if svc[name] in use_source:
config = {'source': self.source}
self.d.configure(svc[name], config)
def _configure_services(self, configs):
"""Configure all of the services."""
for service, config in configs.iteritems():
self.d.configure(service, config)
def _get_openstack_release(self):
"""Get openstack release.
Return an integer representing the enum value of the openstack
release.
"""
(self.precise_essex, self.precise_folsom, self.precise_grizzly,
self.precise_havana, self.precise_icehouse,
self.trusty_icehouse) = range(6)
releases = {
('precise', None): self.precise_essex,
('precise', 'cloud:precise-folsom'): self.precise_folsom,
('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
('precise', 'cloud:precise-havana'): self.precise_havana,
('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
('trusty', None): self.trusty_icehouse}
return releases[(self.series, self.openstack)]


@@ -0,0 +1,275 @@
import logging
import os
import time
import urllib
import glanceclient.v1.client as glance_client
import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client
from charmhelpers.contrib.amulet.utils import (
AmuletUtils
)
DEBUG = logging.DEBUG
ERROR = logging.ERROR
class OpenStackAmuletUtils(AmuletUtils):
"""OpenStack amulet utilities.
This class inherits from AmuletUtils and has additional support
that is specifically for use by OpenStack charms.
"""
def __init__(self, log_level=ERROR):
"""Initialize the deployment environment."""
super(OpenStackAmuletUtils, self).__init__(log_level)
def validate_endpoint_data(self, endpoints, admin_port, internal_port,
public_port, expected):
"""Validate endpoint data.
Validate actual endpoint data vs expected endpoint data. The ports
are used to find the matching endpoint.
"""
found = False
for ep in endpoints:
self.log.debug('endpoint: {}'.format(repr(ep)))
if (admin_port in ep.adminurl and
internal_port in ep.internalurl and
public_port in ep.publicurl):
found = True
actual = {'id': ep.id,
'region': ep.region,
'adminurl': ep.adminurl,
'internalurl': ep.internalurl,
'publicurl': ep.publicurl,
'service_id': ep.service_id}
ret = self._validate_dict_data(expected, actual)
if ret:
return 'unexpected endpoint data - {}'.format(ret)
if not found:
return 'endpoint not found'
def validate_svc_catalog_endpoint_data(self, expected, actual):
"""Validate service catalog endpoint data.
Validate a list of actual service catalog endpoints vs a list of
expected service catalog endpoints.
"""
self.log.debug('actual: {}'.format(repr(actual)))
for k, v in expected.iteritems():
if k in actual:
ret = self._validate_dict_data(expected[k][0], actual[k][0])
if ret:
return self.endpoint_error(k, ret)
else:
return "endpoint {} does not exist".format(k)
return ret
def validate_tenant_data(self, expected, actual):
"""Validate tenant data.
Validate a list of actual tenant data vs list of expected tenant
data.
"""
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
for act in actual:
a = {'enabled': act.enabled, 'description': act.description,
'name': act.name, 'id': act.id}
if e['name'] == a['name']:
found = True
ret = self._validate_dict_data(e, a)
if ret:
return "unexpected tenant data - {}".format(ret)
if not found:
return "tenant {} does not exist".format(e['name'])
return ret
def validate_role_data(self, expected, actual):
"""Validate role data.
Validate a list of actual role data vs a list of expected role
data.
"""
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
for act in actual:
a = {'name': act.name, 'id': act.id}
if e['name'] == a['name']:
found = True
ret = self._validate_dict_data(e, a)
if ret:
return "unexpected role data - {}".format(ret)
if not found:
return "role {} does not exist".format(e['name'])
return ret
def validate_user_data(self, expected, actual):
"""Validate user data.
Validate a list of actual user data vs a list of expected user
data.
"""
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
for act in actual:
a = {'enabled': act.enabled, 'name': act.name,
'email': act.email, 'tenantId': act.tenantId,
'id': act.id}
if e['name'] == a['name']:
found = True
ret = self._validate_dict_data(e, a)
if ret:
return "unexpected user data - {}".format(ret)
if not found:
return "user {} does not exist".format(e['name'])
return ret
def validate_flavor_data(self, expected, actual):
"""Validate flavor data.
Validate a list of actual flavors vs a list of expected flavors.
"""
self.log.debug('actual: {}'.format(repr(actual)))
act = [a.name for a in actual]
return self._validate_list_data(expected, act)
def tenant_exists(self, keystone, tenant):
"""Return True if tenant exists."""
return tenant in [t.name for t in keystone.tenants.list()]
def authenticate_keystone_admin(self, keystone_sentry, user, password,
tenant):
"""Authenticates admin user with the keystone admin endpoint."""
unit = keystone_sentry
service_ip = unit.relation('shared-db',
'mysql:shared-db')['private-address']
ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
return keystone_client.Client(username=user, password=password,
tenant_name=tenant, auth_url=ep)
def authenticate_keystone_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with the keystone public endpoint."""
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
return keystone_client.Client(username=user, password=password,
tenant_name=tenant, auth_url=ep)
def authenticate_glance_admin(self, keystone):
"""Authenticates admin user with glance."""
ep = keystone.service_catalog.url_for(service_type='image',
endpoint_type='adminURL')
return glance_client.Client(ep, token=keystone.auth_token)
def authenticate_nova_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with nova-api."""
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
return nova_client.Client(username=user, api_key=password,
project_id=tenant, auth_url=ep)
def create_cirros_image(self, glance, image_name):
"""Download the latest cirros image and upload it to glance."""
http_proxy = os.getenv('AMULET_HTTP_PROXY')
self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
if http_proxy:
proxies = {'http': http_proxy}
opener = urllib.FancyURLopener(proxies)
else:
opener = urllib.FancyURLopener()
f = opener.open("http://download.cirros-cloud.net/version/released")
version = f.read().strip()
cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version)
if not os.path.exists(cirros_img):
cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
version, cirros_img)
opener.retrieve(cirros_url, cirros_img)
f.close()
with open(cirros_img) as f:
image = glance.images.create(name=image_name, is_public=True,
disk_format='qcow2',
container_format='bare', data=f)
count = 1
status = image.status
while status != 'active' and count < 10:
time.sleep(3)
image = glance.images.get(image.id)
status = image.status
self.log.debug('image status: {}'.format(status))
count += 1
if status != 'active':
self.log.error('image creation timed out')
return None
return image
def delete_image(self, glance, image):
"""Delete the specified image."""
num_before = len(list(glance.images.list()))
glance.images.delete(image)
count = 1
num_after = len(list(glance.images.list()))
while num_after != (num_before - 1) and count < 10:
time.sleep(3)
num_after = len(list(glance.images.list()))
self.log.debug('number of images: {}'.format(num_after))
count += 1
if num_after != (num_before - 1):
self.log.error('image deletion timed out')
return False
return True
def create_instance(self, nova, image_name, instance_name, flavor):
"""Create the specified instance."""
image = nova.images.find(name=image_name)
flavor = nova.flavors.find(name=flavor)
instance = nova.servers.create(name=instance_name, image=image,
flavor=flavor)
count = 1
status = instance.status
while status != 'ACTIVE' and count < 60:
time.sleep(3)
instance = nova.servers.get(instance.id)
status = instance.status
self.log.debug('instance status: {}'.format(status))
count += 1
if status != 'ACTIVE':
self.log.error('instance creation timed out')
return None
return instance
def delete_instance(self, nova, instance):
"""Delete the specified instance."""
num_before = len(list(nova.servers.list()))
nova.servers.delete(instance)
count = 1
num_after = len(list(nova.servers.list()))
while num_after != (num_before - 1) and count < 10:
time.sleep(3)
num_after = len(list(nova.servers.list()))
self.log.debug('number of instances: {}'.format(num_after))
count += 1
if num_after != (num_before - 1):
self.log.error('instance deletion timed out')
return False
return True