migrating to proxy charm

Chris MacNaughton 2016-06-14 14:42:56 -04:00
parent fc7c60bf09
commit 1671d8b0e9
43 changed files with 80 additions and 4676 deletions

View File

@ -1,8 +1,4 @@
options:
  loglevel:
    default: 1
    type: int
    description: Mon and OSD debug level. Max is 20.
  fsid:
    type: string
    default:
@ -11,32 +7,22 @@ options:
      .
      This configuration element is mandatory and the service will fail on
      install if it is not provided.
  auth-supported:
    type: string
    default: cephx
    description: |
      Which authentication flavour to use.
      .
      Valid options are "cephx" and "none". If "none" is specified,
      keys will still be created and deployed so that cephx can be
      enabled later.
  monitor-secret:
    type: string
    default:
    description: |
      This value will become the mon. key. To generate a suitable value use:
      .
      ceph-authtool /dev/stdout --name=mon. --gen-key
      .
      This configuration element is mandatory and the service will fail on
      install if it is not provided.
  monitor-count:
    type: int
    default: 3
    description: |
      How many nodes to wait for before trying to create the monitor
      cluster. This number needs to be odd, and more than three is a waste
      except for very large clusters.
  monitor-hosts:
    type: string
    default:
    description: |
      Space separated list of existing monitor hosts, in the format
      {IP / Hostname}:{port} {IP / Hostname}:{port}
  admin-key:
    type: string
    default:
    description: |
      Admin cephx key for the existing Ceph cluster
  mon-key:
    type: string
    default:
    description: |
      Monitor cephx key
  source:
    type: string
    default:
@ -59,81 +45,3 @@ options:
    description: |
      Key ID to import to the apt keyring to support use with arbitrary source
      configuration from outside of Launchpad archives or PPAs.
  use-syslog:
    type: boolean
    default: False
    description: |
      If set to True, supporting services will log to syslog.
  ceph-public-network:
    type: string
    default:
    description: |
      The IP address and netmask of the public (front-side) network (e.g.,
      192.168.0.0/24)
      .
      If multiple networks are to be used, a space-delimited list of a.b.c.d/x
      can be provided.
  ceph-cluster-network:
    type: string
    default:
    description: |
      The IP address and netmask of the cluster (back-side) network (e.g.,
      192.168.0.0/24)
      .
      If multiple networks are to be used, a space-delimited list of a.b.c.d/x
      can be provided.
  prefer-ipv6:
    type: boolean
    default: False
    description: |
      If True enables IPv6 support. The charm will expect network interfaces
      to be configured with an IPv6 address. If set to False (default) IPv4
      is expected.
      NOTE: these charms do not currently support the IPv6 privacy extension.
      In order for this charm to function correctly, the privacy extension
      must be disabled and a non-temporary address must be
      configured/available on your network interface.
  sysctl:
    type: string
    default: '{ kernel.pid_max : 2097152, vm.max_map_count : 524288,
                kernel.threads-max: 2097152 }'
    description: |
      YAML-formatted associative array of sysctl key/value pairs to be set
      persistently. By default we set pid_max, max_map_count and
      threads-max to a high value to avoid problems with large numbers (>20)
      of OSDs recovering. Very large clusters should set those values even
      higher (e.g. the max for kernel.pid_max is 4194303).
  customize-failure-domain:
    type: boolean
    default: false
    description: |
      Setting this to true will tell Ceph to replicate across Juju's
      Availability Zones instead of specifically by host.
  nagios_context:
    type: string
    default: "juju"
    description: |
      Used by the nrpe-external-master subordinate charm.
      A string that will be prepended to the instance name to set the host
      name in nagios. So for instance the hostname would be something like:
      juju-myservice-0
      If you're running multiple environments with the same services in them
      this allows you to differentiate between them.
  nagios_servicegroups:
    default: ""
    type: string
    description: |
      A comma-separated list of nagios servicegroups.
      If left empty, the nagios_context will be used as the servicegroup.
  use-direct-io:
    default: True
    type: boolean
    description: Configure use of direct IO for OSD journals.
  harden:
    default:
    type: string
    description: |
      Apply system hardening. Supports a space-delimited list of modules
      to run. Supported modules currently include os, ssh, apache and mysql.
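
Taken together, the new monitor-hosts, fsid, admin-key and mon-key options carry everything the charm previously derived from bootstrapping its own monitors. Below is a standalone sketch of how these settings are expected to be consumed (sample values are invented placeholders; the charm itself reads them via charmhelpers' config()):

# Sketch: the proxy's mandatory-settings gate and monitor-hosts parsing
SAMPLE = {
    'monitor-hosts': '10.0.0.1:6789 10.0.0.2:6789 10.0.0.3:6789',
    'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc',
    'admin-key': 'AQAdmin-placeholder==',   # cephx key for client.admin
    'mon-key': 'AQMon-placeholder==',       # cephx key for mon.
}


def proxy_ready(cfg):
    # Mirrors ready() in the hooks: nothing is proxied until the external
    # cluster's identity and admin key have been configured.
    return bool(cfg.get('fsid') and cfg.get('admin-key'))


assert proxy_ready(SAMPLE)
# monitor-hosts is a plain space-separated list of {host}:{port} entries
assert SAMPLE['monitor-hosts'].split() == ['10.0.0.1:6789',
                                           '10.0.0.2:6789',
                                           '10.0.0.3:6789']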

View File

@ -63,205 +63,21 @@ from charmhelpers.contrib.storage.linux.ceph import (
    monitor_key_exists,
    monitor_key_get,
    get_mon_map)
from utils import (
    get_networks,
    get_public_addr,
    get_cluster_addr,
    assert_charm_supports_ipv6
)
from ceph_broker import (
    process_requests
)
from utils import (
    get_public_addr,
    get_unit_hostname,
)
from charmhelpers.contrib.charmsupport import nrpe
from charmhelpers.contrib.hardening.harden import harden
hooks = Hooks()
NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
SCRIPTS_DIR = '/usr/local/bin'
STATUS_FILE = '/var/lib/nagios/cat-ceph-status.txt'
STATUS_CRONFILE = '/etc/cron.d/cat-ceph-health'
# A dict of valid ceph upgrade paths. Mapping is old -> new
upgrade_paths = {
    'cloud:trusty-juno': 'cloud:trusty-kilo',
    'cloud:trusty-kilo': 'cloud:trusty-liberty',
    'cloud:trusty-liberty': 'cloud:trusty-mitaka',
}


def pretty_print_upgrade_paths():
    lines = []
    for key, value in upgrade_paths.iteritems():
        lines.append("{} -> {}".format(key, value))
    return lines
def check_for_upgrade():
    release_info = host.lsb_release()
    if not release_info['DISTRIB_CODENAME'] == 'trusty':
        log("Invalid upgrade path from {}. Only trusty is currently "
            "supported".format(release_info['DISTRIB_CODENAME']))
        return

    c = hookenv.config()
    old_version = c.previous('source')
    log('old_version: {}'.format(old_version))
    # Strip all whitespace
    new_version = hookenv.config('source')
    if new_version:
        # replace all whitespace
        new_version = new_version.replace(' ', '')
    log('new_version: {}'.format(new_version))

    if old_version in upgrade_paths:
        if new_version == upgrade_paths[old_version]:
            log("{} to {} is a valid upgrade path. Proceeding.".format(
                old_version, new_version))
            roll_monitor_cluster(new_version)
    else:
        # Log a helpful error message
        log("Invalid upgrade path from {} to {}. "
            "Valid paths are: {}".format(old_version,
                                         new_version,
                                         pretty_print_upgrade_paths()))
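
# Illustration (standalone sketch, not charm code): the mapping above only
# permits single-hop upgrades. The same check, restated outside the hook
# machinery:
upgrade_paths_example = {
    'cloud:trusty-juno': 'cloud:trusty-kilo',
    'cloud:trusty-kilo': 'cloud:trusty-liberty',
    'cloud:trusty-liberty': 'cloud:trusty-mitaka',
}


def is_valid_upgrade(old, new):
    # A path is valid only when `new` is exactly one hop on from `old`
    return upgrade_paths_example.get(old) == new


assert is_valid_upgrade('cloud:trusty-kilo', 'cloud:trusty-liberty')
assert not is_valid_upgrade('cloud:trusty-juno', 'cloud:trusty-mitaka')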
def lock_and_roll(my_name):
    start_timestamp = time.time()
    log('monitor_key_set {}_start {}'.format(my_name, start_timestamp))
    monitor_key_set('admin', "{}_start".format(my_name), start_timestamp)
    log("Rolling")
    # This should be quick
    upgrade_monitor()
    log("Done")

    stop_timestamp = time.time()
    # Set a key to inform others I am finished
    log('monitor_key_set {}_done {}'.format(my_name, stop_timestamp))
    monitor_key_set('admin', "{}_done".format(my_name), stop_timestamp)
def wait_on_previous_node(previous_node):
    log("Previous node is: {}".format(previous_node))

    previous_node_finished = monitor_key_exists(
        'admin',
        "{}_done".format(previous_node))

    while previous_node_finished is False:
        log("{} is not finished. Waiting".format(previous_node))
        # Has this node been trying to upgrade for longer than
        # 10 minutes?
        # If so then move on and consider that node dead.

        # NOTE: This assumes the cluster's clocks are somewhat accurate.
        # If the host's clock is really far off it may cause it to skip
        # the previous node even though it shouldn't.
        current_timestamp = time.time()
        previous_node_start_time = monitor_key_get(
            'admin',
            "{}_start".format(previous_node))
        if (current_timestamp - (10 * 60)) > previous_node_start_time:
            # Previous node is probably dead. Lets move on
            if previous_node_start_time is not None:
                log(
                    "Waited 10 mins on node {}. current time: {} > "
                    "previous node start time: {} Moving on".format(
                        previous_node,
                        (current_timestamp - (10 * 60)),
                        previous_node_start_time))
                return
        else:
            # I have to wait. Sleep a random amount of time and then
            # check if I can lock, upgrade and roll.
            wait_time = random.randrange(5, 30)
            log('waiting for {} seconds'.format(wait_time))
            time.sleep(wait_time)
            previous_node_finished = monitor_key_exists(
                'admin',
                "{}_done".format(previous_node))
# Edge cases:
# 1. Previous node dies on upgrade, can we retry?
def roll_monitor_cluster(new_version):
    """
    This is tricky to get right so here's what we're going to do.
    There are 2 possible cases: Either I'm first in line or not.
    If I'm not first in line I'll wait a random time between 5-30 seconds
    and test to see if the previous monitor is upgraded yet.
    """
    log('roll_monitor_cluster called with {}'.format(new_version))
    my_name = socket.gethostname()
    monitor_list = []
    mon_map = get_mon_map('admin')
    if mon_map['monmap']['mons']:
        for mon in mon_map['monmap']['mons']:
            monitor_list.append(mon['name'])
    else:
        status_set('blocked', 'Unable to get monitor cluster information')
        sys.exit(1)
    log('monitor_list: {}'.format(monitor_list))

    # A sorted list of monitor unit names
    mon_sorted_list = sorted(monitor_list)

    try:
        position = mon_sorted_list.index(my_name)
        log("upgrade position: {}".format(position))
        if position == 0:
            # I'm first! Roll
            # First set a key to inform others I'm about to roll
            lock_and_roll(my_name=my_name)
        else:
            # Check if the previous node has finished
            status_set('blocked',
                       'Waiting on {} to finish upgrading'.format(
                           mon_sorted_list[position - 1]))
            wait_on_previous_node(previous_node=mon_sorted_list[position - 1])
            lock_and_roll(my_name=my_name)
    except ValueError:
        log("Failed to find {} in list {}.".format(
            my_name, mon_sorted_list))
        status_set('blocked', 'failed to upgrade monitor')
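
# Illustration (standalone sketch, not charm code): the rolling order is just
# the lexicographic sort of the monitor names from the monmap; each unit
# finds its own slot and waits on the name before it. Monmap excerpt invented.
mon_map_example = {'monmap': {'mons': [{'name': 'juju-ceph-2'},
                                       {'name': 'juju-ceph-0'},
                                       {'name': 'juju-ceph-1'}]}}

mon_sorted = sorted(m['name'] for m in mon_map_example['monmap']['mons'])
position = mon_sorted.index('juju-ceph-1')
assert position == 1                           # waits on 'juju-ceph-0'
assert mon_sorted[position - 1] == 'juju-ceph-0'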
def upgrade_monitor():
    current_version = ceph.get_version()
    status_set("maintenance", "Upgrading monitor")
    log("Current ceph version is {}".format(current_version))
    new_version = config('release-version')
    log("Upgrading to: {}".format(new_version))

    try:
        add_source(config('source'), config('key'))
        apt_update(fatal=True)
    except subprocess.CalledProcessError as err:
        log("Adding the ceph source failed with message: {}".format(
            err.message))
        status_set("blocked", "Upgrade to {} failed".format(new_version))
        sys.exit(1)
    try:
        if ceph.systemd():
            for mon_id in ceph.get_local_mon_ids():
                service_stop('ceph-mon@{}'.format(mon_id))
        else:
            service_stop('ceph-mon-all')
        apt_install(packages=ceph.PACKAGES, fatal=True)
        if ceph.systemd():
            for mon_id in ceph.get_local_mon_ids():
                service_start('ceph-mon@{}'.format(mon_id))
        else:
            service_start('ceph-mon-all')
        status_set("active", "")
    except subprocess.CalledProcessError as err:
        log("Stopping ceph and upgrading packages failed "
            "with message: {}".format(err.message))
        status_set("blocked", "Upgrade to {} failed".format(new_version))
        sys.exit(1)
def install_upstart_scripts():
    # Only install upstart configurations for older versions
@ -281,35 +97,14 @@ def install():
def emit_cephconf():
    networks = get_networks('ceph-public-network')
    public_network = ', '.join(networks)

    networks = get_networks('ceph-cluster-network')
    cluster_network = ', '.join(networks)

    cephcontext = {
        'auth_supported': config('auth-supported'),
        'mon_hosts': ' '.join(get_mon_hosts()),
        'fsid': leader_get('fsid'),
        'old_auth': cmp_pkgrevno('ceph', "0.51") < 0,
        'osd_journal_size': config('osd-journal-size'),
        'mon_hosts': config('monitor-hosts'),
        'fsid': config('fsid'),
        'use_syslog': str(config('use-syslog')).lower(),
        'ceph_public_network': public_network,
        'ceph_cluster_network': cluster_network,
        'loglevel': config('loglevel'),
        'dio': str(config('use-direct-io')).lower(),
    }

    if config('prefer-ipv6'):
        dynamic_ipv6_address = get_ipv6_addr()[0]
        if not public_network:
            cephcontext['public_addr'] = dynamic_ipv6_address
        if not cluster_network:
            cephcontext['cluster_addr'] = dynamic_ipv6_address
    else:
        cephcontext['public_addr'] = get_public_addr()
        cephcontext['cluster_addr'] = get_cluster_addr()

    # Install ceph.conf as an alternative to support
    # co-existence with other charms that write this file
    charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name())
@ -318,152 +113,22 @@ def emit_cephconf():
    render('ceph.conf', charm_ceph_conf, cephcontext, perms=0o644)
    install_alternative('ceph.conf', '/etc/ceph/ceph.conf',
                        charm_ceph_conf, 100)

    keyring = 'ceph.client.admin.keyring'
    keyring_path = '/etc/ceph/' + keyring
    render(keyring, keyring_path, {'admin_key': config('admin-key')},
           perms=0o600)

    keyring = 'keyring'
    keyring_path = ('/var/lib/ceph/mon/ceph-' + get_unit_hostname() +
                    '/' + keyring)
    render('mon.keyring', keyring_path, {'mon_key': config('mon-key')},
           perms=0o600)

    notify_radosgws()
    notify_client()


JOURNAL_ZAPPED = '/var/lib/ceph/journal_zapped'
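
# Illustration (standalone sketch, not charm code): charmhelpers' render()
# is Jinja2-backed, so the keyring writes above amount to filling in the
# templates added later in this commit. Assumes python-jinja2 is installed;
# the key material below is a placeholder, not a real cephx key.
from jinja2 import Template

MON_KEYRING_TEMPLATE = ('[mon.]\n'
                        'key = {{mon_key}}\n'
                        'caps mon = "allow *"\n')

print(Template(MON_KEYRING_TEMPLATE).render(mon_key='AQDplaceholder=='))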
@hooks.hook('config-changed')
@harden()
def config_changed():
    if config('prefer-ipv6'):
        assert_charm_supports_ipv6()

    # Check if an upgrade was requested
    check_for_upgrade()
    log('Monitor hosts are ' + repr(get_mon_hosts()))

    sysctl_dict = config('sysctl')
    if sysctl_dict:
        create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-charm.conf')
    if relations_of_type('nrpe-external-master'):
        update_nrpe_config()

    if is_leader():
        if not leader_get('fsid') or not leader_get('monitor-secret'):
            if config('fsid'):
                fsid = config('fsid')
            else:
                fsid = "{}".format(uuid.uuid1())
            if config('monitor-secret'):
                mon_secret = config('monitor-secret')
            else:
                mon_secret = "{}".format(ceph.generate_monitor_secret())
            status_set('maintenance', 'Creating FSID and Monitor Secret')
            opts = {
                'fsid': fsid,
                'monitor-secret': mon_secret,
            }
            log("Settings for the cluster are: {}".format(opts))
            leader_set(opts)
    else:
        if leader_get('fsid') is None or leader_get('monitor-secret') is None:
            log('still waiting for leader to setup keys')
            status_set('waiting', 'Waiting for leader to setup keys')
            sys.exit(0)

    emit_cephconf()

    # Support use of single node ceph
    if not ceph.is_bootstrapped() and int(config('monitor-count')) == 1:
        status_set('maintenance', 'Bootstrapping single Ceph MON')
        ceph.bootstrap_monitor_cluster(config('monitor-secret'))
        ceph.wait_for_bootstrap()
def get_mon_hosts():
    hosts = []
    addr = get_public_addr()
    hosts.append('{}:6789'.format(format_ipv6_addr(addr) or addr))

    for relid in relation_ids('mon'):
        for unit in related_units(relid):
            addr = relation_get('ceph-public-address', unit, relid)
            if addr is not None:
                hosts.append('{}:6789'.format(
                    format_ipv6_addr(addr) or addr))

    hosts.sort()
    return hosts


def get_peer_units():
    """
    Returns a dictionary of unit names from the mon peer relation with
    a flag indicating whether the unit has presented its address
    """
    units = {}
    units[local_unit()] = True
    for relid in relation_ids('mon'):
        for unit in related_units(relid):
            addr = relation_get('ceph-public-address', unit, relid)
            units[unit] = addr is not None
    return units
@hooks.hook('mon-relation-joined')
def mon_relation_joined():
    public_addr = get_public_addr()
    for relid in relation_ids('mon'):
        relation_set(relation_id=relid,
                     relation_settings={'ceph-public-address': public_addr})
@hooks.hook('mon-relation-departed',
            'mon-relation-changed')
def mon_relation():
    if leader_get('monitor-secret') is None:
        log('still waiting for leader to setup keys')
        status_set('waiting', 'Waiting for leader to setup keys')
        return
    emit_cephconf()

    moncount = int(config('monitor-count'))
    if len(get_mon_hosts()) >= moncount:
        status_set('maintenance', 'Bootstrapping MON cluster')
        ceph.bootstrap_monitor_cluster(leader_get('monitor-secret'))
        ceph.wait_for_bootstrap()
        ceph.wait_for_quorum()
        # If we can and want to
        if is_leader() and config('customize-failure-domain'):
            # But only if the environment supports it
            if os.environ.get('JUJU_AVAILABILITY_ZONE'):
                cmds = [
                    "ceph osd getcrushmap -o /tmp/crush.map",
                    "crushtool -d /tmp/crush.map| "
                    "sed 's/step chooseleaf firstn 0 type host/step "
                    "chooseleaf firstn 0 type rack/' > "
                    "/tmp/crush.decompiled",
                    "crushtool -c /tmp/crush.decompiled -o /tmp/crush.map",
                    "crushtool -i /tmp/crush.map --test",
                    "ceph osd setcrushmap -i /tmp/crush.map"
                ]
                for cmd in cmds:
                    try:
                        subprocess.check_call(cmd, shell=True)
                    except subprocess.CalledProcessError as e:
                        log("Failed to modify crush map:", level='error')
                        log("Cmd: {}".format(cmd), level='error')
                        log("Error: {}".format(e.output), level='error')
                        break
            else:
                log(
                    "Your Juju environment doesn't "
                    "have support for Availability Zones"
                )
        notify_osds()
        notify_radosgws()
        notify_client()
    else:
        log('Not enough mons ({}), punting.'
            .format(len(get_mon_hosts())))
def notify_osds():
    for relid in relation_ids('osd'):
        osd_relation(relid)


def notify_radosgws():
    for relid in relation_ids('radosgw'):
@ -476,55 +141,6 @@ def notify_client():
        client_relation_joined(relid)


def upgrade_keys():
    """Ceph now requires 'mon allow rw' for pool creation."""
    if len(relation_ids('radosgw')) > 0:
        ceph.upgrade_key_caps('client.radosgw.gateway',
                              ceph._radosgw_caps)
    for relid in relation_ids('client'):
        units = related_units(relid)
        if len(units) > 0:
            service_name = units[0].split('/')[0]
            ceph.upgrade_key_caps('client.{}'.format(service_name),
                                  ceph._default_caps)
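
# Illustration (standalone sketch, not the helper's actual body): it is
# assumed here that ceph.upgrade_key_caps() reduces to one `ceph auth caps`
# call per key, roughly along these lines.
import subprocess


def upgrade_key_caps_sketch(key, caps):
    """e.g. upgrade_key_caps_sketch('client.glance',
                                    {'mon': ['allow rw'],
                                     'osd': ['allow rwx']})"""
    cmd = ['ceph', 'auth', 'caps', key]
    for subsystem, subcaps in caps.items():
        cmd.extend([subsystem, '; '.join(subcaps)])
    subprocess.check_call(cmd)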
@hooks.hook('osd-relation-joined')
def osd_relation(relid=None):
    if ceph.is_quorum():
        log('mon cluster in quorum - providing fsid & keys')
        public_addr = get_public_addr()
        data = {
            'fsid': leader_get('fsid'),
            'osd_bootstrap_key': ceph.get_osd_bootstrap_key(),
            'auth': config('auth-supported'),
            'ceph-public-address': public_addr,
            'osd_upgrade_key': ceph.get_named_key('osd-upgrade',
                                                  caps=ceph.osd_upgrade_caps),
        }
        relation_set(relation_id=relid,
                     relation_settings=data)
        # NOTE: radosgw key provision is gated on presence of OSD
        # units so ensure that any deferred hooks are processed
        notify_radosgws()
    else:
        log('mon cluster not in quorum - deferring fsid provision')
def related_osds(num_units=3):
    '''
    Determine whether there are OSD units currently related

    @param num_units: The minimum number of units required
    @return: boolean indicating whether the required number of
             units were detected.
    '''
    for r_id in relation_ids('osd'):
        if len(related_units(r_id)) >= num_units:
            return True
    return False
@hooks.hook('radosgw-relation-changed')
@hooks.hook('radosgw-relation-joined')
def radosgw_relation(relid=None, unit=None):
@ -535,14 +151,14 @@ def radosgw_relation(relid=None, unit=None):
    # NOTE: radosgw needs some usable OSD storage, so defer key
    # provision until OSD units are detected.
    if ceph.is_quorum() and related_osds():
    if ready():
        log('mon cluster in quorum and osds related '
            '- providing radosgw with keys')
        public_addr = get_public_addr()
        data = {
            'fsid': leader_get('fsid'),
            'fsid': config('fsid'),
            'radosgw_key': ceph.get_radosgw_key(),
            'auth': config('auth-supported'),
            'auth': 'cephx',
            'ceph-public-address': public_addr,
        }
@ -559,13 +175,12 @@ def radosgw_relation(relid=None, unit=None):
        relation_set(relation_id=relid, relation_settings=data)
    else:
        log('mon cluster not in quorum or no osds - deferring key provision')
        log('FSID or admin key not provided, please configure them')
@hooks.hook('client-relation-joined')
def client_relation_joined(relid=None):
    if ceph.is_quorum():
        log('mon cluster in quorum - providing client with keys')
    if ready():
        service_name = None
        if relid is None:
            units = [remote_unit()]
@ -578,18 +193,18 @@ def client_relation_joined(relid=None):
        if service_name is not None:
            public_addr = get_public_addr()
            data = {'key': ceph.get_named_key(service_name),
                    'auth': config('auth-supported'),
                    'auth': 'cephx',
                    'ceph-public-address': public_addr}
            relation_set(relation_id=relid,
                         relation_settings=data)
    else:
        log('mon cluster not in quorum - deferring key provision')
        log('FSID or admin key not provided, please configure them')
@hooks.hook('client-relation-changed')
def client_relation_changed():
    """Process broker requests from ceph client relations."""
    if ceph.is_quorum():
    if ready():
        settings = relation_get()
        if 'broker_req' in settings:
            if not ceph.is_leader():
@ -606,86 +221,44 @@ def client_relation_changed():
                }
                relation_set(relation_settings=data)
    else:
        log('mon cluster not in quorum', level=DEBUG)
        log('FSID or admin key not provided, please configure them')
@hooks.hook('upgrade-charm')
@harden()
def upgrade_charm():
    emit_cephconf()
    apt_install(packages=filter_installed_packages(ceph.PACKAGES), fatal=True)
    install_upstart_scripts()
    ceph.update_monfs()
    upgrade_keys()
    mon_relation_joined()


@hooks.hook('start')
def start():
    # In case we're being redeployed to the same machines, try
    # to make sure everything is running as soon as possible.
    if ceph.systemd():
        service_restart('ceph-mon')
    else:
        service_restart('ceph-mon-all')
@hooks.hook('nrpe-external-master-relation-joined')
@hooks.hook('nrpe-external-master-relation-changed')
def update_nrpe_config():
    # python-dbus is used by check_upstart_job
    apt_install('python-dbus')
    log('Refreshing nagios checks')
    if os.path.isdir(NAGIOS_PLUGINS):
        rsync(os.path.join(os.getenv('CHARM_DIR'), 'files', 'nagios',
                           'check_ceph_status.py'),
              os.path.join(NAGIOS_PLUGINS, 'check_ceph_status.py'))

    script = os.path.join(SCRIPTS_DIR, 'collect_ceph_status.sh')
    rsync(os.path.join(os.getenv('CHARM_DIR'), 'files',
                       'nagios', 'collect_ceph_status.sh'),
          script)
    cronjob = "{} root {}\n".format('*/5 * * * *', script)
    write_file(STATUS_CRONFILE, cronjob)

    # Find out if nrpe set nagios_hostname
    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    nrpe_setup.add_check(
        shortname="ceph",
        description='Check Ceph health {%s}' % current_unit,
        check_cmd='check_ceph_status.py -f {}'.format(STATUS_FILE)
    )
    nrpe_setup.write()
def ready():
    return config('fsid') and config('admin-key')
def assess_status():
    '''Assess status of current unit'''
    moncount = int(config('monitor-count'))
    units = get_peer_units()
    # not enough peers and mon_count > 1
    if len(units.keys()) < moncount:
        status_set('blocked', 'Insufficient peer units to bootstrap'
                              ' cluster (require {})'.format(moncount))
        return

    # mon_count > 1, peers, but no ceph-public-address
    ready = sum(1 for unit_ready in units.itervalues() if unit_ready)
    if ready < moncount:
        status_set('waiting', 'Peer units detected, waiting for addresses')
        return

    # active - bootstrapped + quorum status check
    if ceph.is_bootstrapped() and ceph.is_quorum():
        status_set('active', 'Unit is ready and clustered')
    if ready():
        status_set('active', 'Ready to proxy settings')
    else:
        # Unit should be running and clustered, but no quorum
        # TODO: should this be blocked or waiting?
        status_set('blocked', 'Unit not clustered (no quorum)')
        # If there's a pending lock for this unit,
        # can i get the lock?
        # reboot the ceph-mon process
        status_set('blocked', 'Ensure FSID and admin-key are set')
    # moncount = int(config('monitor-count'))
    # units = get_peer_units()
    # # not enough peers and mon_count > 1
    # if len(units.keys()) < moncount:
    #     status_set('blocked', 'Insufficient peer units to bootstrap'
    #                           ' cluster (require {})'.format(moncount))
    #     return
    # # mon_count > 1, peers, but no ceph-public-address
    # ready = sum(1 for unit_ready in units.itervalues() if unit_ready)
    # if ready < moncount:
    #     status_set('waiting', 'Peer units detected, waiting for addresses')
    #     return
    # # active - bootstrapped + quorum status check
    # if ceph.is_bootstrapped() and ceph.is_quorum():
    #     status_set('active', 'Unit is ready and clustered')
    # else:
    #     # Unit should be running and clustered, but no quorum
    #     # TODO: should this be blocked or waiting?
    #     status_set('blocked', 'Unit not clustered (no quorum)')
    #     # If there's a pending lock for this unit,
    #     # can i get the lock?
    #     # reboot the ceph-mon process
    # status_set('active', 'doing some shit maybe?')
@hooks.hook('update-status')

View File

@ -1 +0,0 @@
ceph_hooks.py

View File

@ -1 +0,0 @@
ceph_hooks.py

View File

@ -1 +0,0 @@
ceph_hooks.py

View File

@ -1 +0,0 @@
ceph_hooks.py

View File

@ -1 +0,0 @@
ceph_hooks.py

View File

@ -1 +0,0 @@
ceph_hooks.py

View File

@ -1 +0,0 @@
ceph_hooks.py

View File

@ -1 +0,0 @@
ceph_hooks.py

View File

@ -1 +0,0 @@
ceph_hooks.py

View File

@ -1,5 +1,5 @@
name: ceph-mon
summary: Highly scalable distributed storage
name: ceph-proxy
summary: Proxy to Juju external Ceph cluster
maintainer: OpenStack Charmers <openstack-charmers@lists.ubuntu.com>
description: |
  Ceph is a distributed storage and network file system designed to provide
@ -9,22 +9,8 @@ tags:
  - storage
  - file-servers
  - misc
peers:
  mon:
    interface: ceph
extra-bindings:
  public:
  cluster:
provides:
  nrpe-external-master:
    interface: nrpe-external-master
    scope: container
  client:
    interface: ceph-client
  osd:
    interface: ceph-osd
  radosgw:
    interface: ceph-radosgw
  nrpe-external-master:
    interface: nrpe-external-master
    scope: container
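
Because the proxy keeps the same provides interfaces (ceph-client, ceph-osd, ceph-radosgw, nrpe-external-master), consuming charms need no changes; a hypothetical client-side hook still reads the same relation keys. A sketch of the consumer's view (the function name and flow are invented; relation_get is charmhelpers' real API):

# Sketch: what a related client charm sees on its side of the ceph-client
# relation, whether the remote end is ceph-mon or ceph-proxy.
from charmhelpers.core.hookenv import log, relation_get


def ceph_relation_changed():
    key = relation_get('key')
    auth = relation_get('auth')                     # now always 'cephx'
    mon_host = relation_get('ceph-public-address')
    if not (key and mon_host):
        log('ceph-proxy not ready yet (fsid/admin-key unset?)')
        return
    # ...write out ceph.conf and a keyring, exactly as with ceph-mon...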

View File

@ -0,0 +1,2 @@
[client.admin]
key = {{admin_key}}

View File

@ -1,11 +1,9 @@
[global]
{% if old_auth %}
auth supported = {{ auth_supported }}
{% else %}
auth cluster required = {{ auth_supported }}
auth service required = {{ auth_supported }}
auth client required = {{ auth_supported }}
{% endif %}
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
keyring = /etc/ceph/$cluster.$name.keyring
mon host = {{ mon_hosts }}
fsid = {{ fsid }}
@ -17,23 +15,3 @@ mon cluster log to syslog = {{ use_syslog }}
debug mon = {{ loglevel }}/5
debug osd = {{ loglevel }}/5
{%- if ceph_public_network is string %}
public network = {{ ceph_public_network }}
{%- endif %}
{%- if ceph_cluster_network is string %}
cluster network = {{ ceph_cluster_network }}
{%- endif %}
{% if public_addr %}
public addr = {{ public_addr }}
{% endif %}
{% if cluster_addr %}
cluster addr = {{ cluster_addr }}
{%- endif %}
[mon]
keyring = /var/lib/ceph/mon/$cluster-$id/keyring
[mds]
keyring = /var/lib/ceph/mds/$cluster-$id/keyring

templates/mon.keyring Normal file
View File

@ -0,0 +1,3 @@
[mon.]
key = {{mon_key}}
caps mon = "allow *"

View File

@ -1,11 +0,0 @@
#!/usr/bin/python

"""Amulet tests on a basic ceph deployment on precise-icehouse."""

from basic_deployment import CephBasicDeployment

if __name__ == '__main__':
    deployment = CephBasicDeployment(series='precise',
                                     openstack='cloud:precise-icehouse',
                                     source='cloud:precise-updates/icehouse')
    deployment.run_tests()

View File

@ -1,9 +0,0 @@
#!/usr/bin/python

"""Amulet tests on a basic ceph deployment on trusty-icehouse."""

from basic_deployment import CephBasicDeployment

if __name__ == '__main__':
    deployment = CephBasicDeployment(series='trusty')
    deployment.run_tests()

View File

@ -1,11 +0,0 @@
#!/usr/bin/python

"""Amulet tests on a basic ceph deployment on trusty-juno."""

from basic_deployment import CephBasicDeployment

if __name__ == '__main__':
    deployment = CephBasicDeployment(series='trusty',
                                     openstack='cloud:trusty-juno',
                                     source='cloud:trusty-updates/juno')
    deployment.run_tests()

View File

@ -1,11 +0,0 @@
#!/usr/bin/python

"""Amulet tests on a basic ceph deployment on trusty-kilo."""

from basic_deployment import CephBasicDeployment

if __name__ == '__main__':
    deployment = CephBasicDeployment(series='trusty',
                                     openstack='cloud:trusty-kilo',
                                     source='cloud:trusty-updates/kilo')
    deployment.run_tests()

View File

@ -1,11 +0,0 @@
#!/usr/bin/python

"""Amulet tests on a basic ceph deployment on trusty-liberty."""

from basic_deployment import CephBasicDeployment

if __name__ == '__main__':
    deployment = CephBasicDeployment(series='trusty',
                                     openstack='cloud:trusty-liberty',
                                     source='cloud:trusty-updates/liberty')
    deployment.run_tests()

View File

@ -1,11 +0,0 @@
#!/usr/bin/python

"""Amulet tests on a basic ceph deployment on trusty-mitaka."""

from basic_deployment import CephBasicDeployment

if __name__ == '__main__':
    deployment = CephBasicDeployment(series='trusty',
                                     openstack='cloud:trusty-mitaka',
                                     source='cloud:trusty-updates/mitaka')
    deployment.run_tests()

View File

@ -1,9 +0,0 @@
#!/usr/bin/python

"""Amulet tests on a basic ceph deployment on wily-liberty."""

from basic_deployment import CephBasicDeployment

if __name__ == '__main__':
    deployment = CephBasicDeployment(series='wily')
    deployment.run_tests()

View File

@ -1,9 +0,0 @@
#!/usr/bin/python

"""Amulet tests on a basic ceph deployment on xenial-mitaka."""

from basic_deployment import CephBasicDeployment

if __name__ == '__main__':
    deployment = CephBasicDeployment(series='xenial')
    deployment.run_tests()

View File

@ -1,113 +0,0 @@
This directory provides Amulet tests to verify basic deployment functionality
from the perspective of this charm, its requirements and its features, as
exercised in a subset of the full OpenStack deployment test bundle topology.
Reference: lp:openstack-charm-testing for full test bundles.
A single topology and configuration is defined and deployed, once for each of
the defined Ubuntu:OpenStack release combos. The ongoing goal is for this
charm to always possess tests and combo definitions for all currently-supported
release combinations of U:OS.
test_* methods are called in lexical sort order, as with most runners. However,
each individual test method should be idempotent and expected to pass regardless
of run order or Ubuntu:OpenStack combo. When writing or modifying tests,
ensure that every individual test is not dependent on another test_ method.
Test naming convention, purely for code organization purposes:
1xx service and endpoint checks
2xx relation checks
3xx config checks
4xx functional checks
9xx restarts, config changes, actions and other final checks
In order to run tests, charm-tools and juju must be installed:
sudo add-apt-repository ppa:juju/stable
sudo apt-get update
sudo apt-get install charm-tools juju juju-deployer amulet
Alternatively, tests may be exercised with proposed or development versions
of juju and related tools:
# juju proposed version
sudo add-apt-repository ppa:juju/proposed
sudo apt-get update
sudo apt-get install charm-tools juju juju-deployer
# juju development version
sudo add-apt-repository ppa:juju/devel
sudo apt-get update
sudo apt-get install charm-tools juju juju-deployer
Some tests may need to download files. If a web proxy server is required in
the environment, the AMULET_HTTP_PROXY environment variable must be set and
passed into the juju test command. This is unrelated to juju's http proxy
settings or behavior.
The following examples demonstrate different ways that tests can be executed.
All examples are run from the charm's root directory.
* To run all +x tests in the tests directory:
bzr branch lp:charms/trusty/foo
cd foo
make functional_test
* To run the tests against a specific release combo as defined in tests/:
bzr branch lp:charms/trusty/foo
cd foo
juju test -v -p AMULET_HTTP_PROXY 015-basic-trusty-icehouse
* To run tests and keep the juju environment deployed after a failure:
bzr branch lp:charms/trusty/foo
cd foo
juju test --set-e -v -p AMULET_HTTP_PROXY 015-basic-trusty-icehouse
* To re-run a test module against an already deployed environment (one
that was deployed by a previous call to 'juju test --set-e'):
./tests/015-basic-trusty-icehouse
* Even with --set-e, `juju test` will tear down the deployment when all
tests pass. The following workflow may be more effective when
iterating on test writing.
bzr branch lp:charms/trusty/foo
cd foo
./tests/setup/00-setup
juju bootstrap
./tests/015-basic-trusty-icehouse
# make some changes, run tests again
./tests/015-basic-trusty-icehouse
# make some changes, run tests again
./tests/015-basic-trusty-icehouse
* There may be test definitions in the tests/ dir which are not set +x
executable. This is generally true for deprecated releases, or for
upcoming releases which are not yet validated and enabled. To enable
and run these tests:
bzr branch lp:charms/trusty/foo
cd foo
ls tests
chmod +x tests/017-basic-trusty-kilo
./tests/setup/00-setup
juju bootstrap
./tests/017-basic-trusty-kilo
Additional notes:
* Use DEBUG to turn on debug logging, use ERROR otherwise.
u = OpenStackAmuletUtils(ERROR)
u = OpenStackAmuletUtils(DEBUG)
* To interact with the deployed environment:
export OS_USERNAME=admin
export OS_PASSWORD=openstack
export OS_TENANT_NAME=admin
export OS_REGION_NAME=RegionOne
export OS_AUTH_URL=${OS_AUTH_PROTOCOL:-http}://`juju-deployer -e trusty -f keystone`:5000/v2.0
keystone user-list
glance image-list

View File

@ -1,683 +0,0 @@
#!/usr/bin/python

import amulet
import re
import time

from charmhelpers.contrib.openstack.amulet.deployment import (
    OpenStackAmuletDeployment
)
from charmhelpers.contrib.openstack.amulet.utils import (  # noqa
    OpenStackAmuletUtils,
    DEBUG,
    # ERROR
)

# Use DEBUG to turn on debug logging
u = OpenStackAmuletUtils(DEBUG)


class CephBasicDeployment(OpenStackAmuletDeployment):
    """Amulet tests on a basic ceph deployment."""

    def __init__(self, series=None, openstack=None, source=None,
                 stable=False):
        """Deploy the entire test environment."""
        super(CephBasicDeployment, self).__init__(series, openstack, source,
                                                  stable)
        self._add_services()
        self._add_relations()
        self._configure_services()
        self._deploy()

        u.log.info('Waiting on extended status checks...')
        exclude_services = ['mysql']

        # Wait for deployment ready msgs, except exclusions
        self._auto_wait_for_status(exclude_services=exclude_services)

        self._initialize_tests()
    def _add_services(self):
        """Add services.

        Add the services that we're testing, where ceph is local,
        and the rest of the services are from lp branches that are
        compatible with the local charm (e.g. stable or next).
        """
        this_service = {'name': 'ceph-mon', 'units': 3}
        other_services = [{'name': 'mysql'},
                          {'name': 'keystone'},
                          {'name': 'ceph-osd', 'units': 3},
                          {'name': 'rabbitmq-server'},
                          {'name': 'nova-compute'},
                          {'name': 'glance'},
                          {'name': 'cinder'}]
        super(CephBasicDeployment, self)._add_services(this_service,
                                                       other_services)
    def _add_relations(self):
        """Add all of the relations for the services."""
        relations = {
            'nova-compute:shared-db': 'mysql:shared-db',
            'nova-compute:amqp': 'rabbitmq-server:amqp',
            'nova-compute:image-service': 'glance:image-service',
            'nova-compute:ceph': 'ceph-mon:client',
            'keystone:shared-db': 'mysql:shared-db',
            'glance:shared-db': 'mysql:shared-db',
            'glance:identity-service': 'keystone:identity-service',
            'glance:amqp': 'rabbitmq-server:amqp',
            'glance:ceph': 'ceph-mon:client',
            'cinder:shared-db': 'mysql:shared-db',
            'cinder:identity-service': 'keystone:identity-service',
            'cinder:amqp': 'rabbitmq-server:amqp',
            'cinder:image-service': 'glance:image-service',
            'cinder:ceph': 'ceph-mon:client',
            'ceph-osd:mon': 'ceph-mon:osd'
        }
        super(CephBasicDeployment, self)._add_relations(relations)
    def _configure_services(self):
        """Configure all of the services."""
        keystone_config = {'admin-password': 'openstack',
                           'admin-token': 'ubuntutesting'}
        mysql_config = {'dataset-size': '50%'}
        cinder_config = {'block-device': 'None', 'glance-api-version': '2'}
        ceph_config = {
            'monitor-count': '3',
            'auth-supported': 'none',
            'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc',
            'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==',
        }
        # Include a non-existent device as osd-devices is a whitelist,
        # and this will catch cases where proposals attempt to change that.
        ceph_osd_config = {
            'osd-reformat': 'yes',
            'ephemeral-unmount': '/mnt',
            'osd-devices': '/dev/vdb /srv/ceph /dev/test-non-existent'
        }

        configs = {'keystone': keystone_config,
                   'mysql': mysql_config,
                   'cinder': cinder_config,
                   'ceph-mon': ceph_config,
                   'ceph-osd': ceph_osd_config}
        super(CephBasicDeployment, self)._configure_services(configs)
    def _initialize_tests(self):
        """Perform final initialization before tests get run."""
        # Access the sentries for inspecting service units
        self.mysql_sentry = self.d.sentry.unit['mysql/0']
        self.keystone_sentry = self.d.sentry.unit['keystone/0']
        self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0']
        self.nova_sentry = self.d.sentry.unit['nova-compute/0']
        self.glance_sentry = self.d.sentry.unit['glance/0']
        self.cinder_sentry = self.d.sentry.unit['cinder/0']
        self.ceph_osd_sentry = self.d.sentry.unit['ceph-osd/0']
        self.ceph0_sentry = self.d.sentry.unit['ceph-mon/0']
        self.ceph1_sentry = self.d.sentry.unit['ceph-mon/1']
        self.ceph2_sentry = self.d.sentry.unit['ceph-mon/2']
        u.log.debug('openstack release val: {}'.format(
            self._get_openstack_release()))
        u.log.debug('openstack release str: {}'.format(
            self._get_openstack_release_string()))

        # Authenticate admin with keystone
        self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
                                                      user='admin',
                                                      password='openstack',
                                                      tenant='admin')
        # Authenticate admin with cinder endpoint
        self.cinder = u.authenticate_cinder_admin(self.keystone_sentry,
                                                  username='admin',
                                                  password='openstack',
                                                  tenant='admin')
        # Authenticate admin with glance endpoint
        self.glance = u.authenticate_glance_admin(self.keystone)

        # Authenticate admin with nova endpoint
        self.nova = u.authenticate_nova_user(self.keystone,
                                             user='admin',
                                             password='openstack',
                                             tenant='admin')

        # Create a demo tenant/role/user
        self.demo_tenant = 'demoTenant'
        self.demo_role = 'demoRole'
        self.demo_user = 'demoUser'
        if not u.tenant_exists(self.keystone, self.demo_tenant):
            tenant = self.keystone.tenants.create(
                tenant_name=self.demo_tenant,
                description='demo tenant',
                enabled=True)
            self.keystone.roles.create(name=self.demo_role)
            self.keystone.users.create(name=self.demo_user,
                                       password='password',
                                       tenant_id=tenant.id,
                                       email='demo@demo.com')

        # Authenticate demo user with keystone
        self.keystone_demo = u.authenticate_keystone_user(self.keystone,
                                                          self.demo_user,
                                                          'password',
                                                          self.demo_tenant)
        # Authenticate demo user with nova-api
        self.nova_demo = u.authenticate_nova_user(self.keystone,
                                                  self.demo_user,
                                                  'password',
                                                  self.demo_tenant)
    def test_100_ceph_processes(self):
        """Verify that the expected service processes are running
        on each ceph unit."""
        # Process name and quantity of processes to expect on each unit
        ceph_processes = {
            'ceph-mon': 1
        }

        # Units with process names and PID quantities expected
        expected_processes = {
            self.ceph0_sentry: ceph_processes,
            self.ceph1_sentry: ceph_processes,
            self.ceph2_sentry: ceph_processes
        }

        actual_pids = u.get_unit_process_ids(expected_processes)
        ret = u.validate_unit_process_ids(expected_processes, actual_pids)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)
    def test_102_services(self):
        """Verify the expected services are running on the service units."""
        services = {
            self.mysql_sentry: ['mysql'],
            self.rabbitmq_sentry: ['rabbitmq-server'],
            self.nova_sentry: ['nova-compute'],
            self.keystone_sentry: ['keystone'],
            self.glance_sentry: ['glance-registry',
                                 'glance-api'],
            self.cinder_sentry: ['cinder-api',
                                 'cinder-scheduler',
                                 'cinder-volume'],
        }

        if self._get_openstack_release() < self.vivid_kilo:
            # For upstart systems only. Ceph services under systemd
            # are checked by process name instead.
            ceph_services = [
                'ceph-mon-all',
                'ceph-mon id=`hostname`'
            ]
            services[self.ceph0_sentry] = ceph_services
            services[self.ceph1_sentry] = ceph_services
            services[self.ceph2_sentry] = ceph_services

            ceph_osd_services = [
                'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)),
                'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1))
            ]

            services[self.ceph_osd_sentry] = ceph_osd_services

        if self._get_openstack_release() >= self.trusty_liberty:
            services[self.keystone_sentry] = ['apache2']

        ret = u.validate_services_by_name(services)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)
    def test_200_ceph_nova_client_relation(self):
        """Verify the ceph to nova ceph-client relation data."""
        u.log.debug('Checking ceph:nova-compute ceph-mon relation data...')
        unit = self.ceph0_sentry
        relation = ['client', 'nova-compute:ceph']
        expected = {
            'private-address': u.valid_ip,
            'auth': 'none',
            'key': u.not_null
        }

        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('ceph-mon to nova ceph-client', ret)
            amulet.raise_status(amulet.FAIL, msg=message)

    def test_201_nova_ceph_client_relation(self):
        """Verify the nova to ceph client relation data."""
        u.log.debug('Checking nova-compute:ceph ceph-client relation data...')
        unit = self.nova_sentry
        relation = ['ceph', 'ceph-mon:client']
        expected = {
            'private-address': u.valid_ip
        }

        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('nova to ceph ceph-client', ret)
            amulet.raise_status(amulet.FAIL, msg=message)

    def test_202_ceph_glance_client_relation(self):
        """Verify the ceph to glance ceph-client relation data."""
        u.log.debug('Checking ceph:glance client relation data...')
        unit = self.ceph1_sentry
        relation = ['client', 'glance:ceph']
        expected = {
            'private-address': u.valid_ip,
            'auth': 'none',
            'key': u.not_null
        }

        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('ceph to glance ceph-client', ret)
            amulet.raise_status(amulet.FAIL, msg=message)

    def test_203_glance_ceph_client_relation(self):
        """Verify the glance to ceph client relation data."""
        u.log.debug('Checking glance:ceph client relation data...')
        unit = self.glance_sentry
        relation = ['ceph', 'ceph-mon:client']
        expected = {
            'private-address': u.valid_ip
        }

        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('glance to ceph ceph-client', ret)
            amulet.raise_status(amulet.FAIL, msg=message)

    def test_204_ceph_cinder_client_relation(self):
        """Verify the ceph to cinder ceph-client relation data."""
        u.log.debug('Checking ceph:cinder ceph relation data...')
        unit = self.ceph2_sentry
        relation = ['client', 'cinder:ceph']
        expected = {
            'private-address': u.valid_ip,
            'auth': 'none',
            'key': u.not_null
        }

        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('ceph to cinder ceph-client', ret)
            amulet.raise_status(amulet.FAIL, msg=message)

    def test_205_cinder_ceph_client_relation(self):
        """Verify the cinder to ceph ceph-client relation data."""
        u.log.debug('Checking cinder:ceph ceph relation data...')
        unit = self.cinder_sentry
        relation = ['ceph', 'ceph-mon:client']
        expected = {
            'private-address': u.valid_ip
        }

        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('cinder to ceph ceph-client', ret)
            amulet.raise_status(amulet.FAIL, msg=message)
    def test_300_ceph_config(self):
        """Verify the data in the ceph config file."""
        u.log.debug('Checking ceph config file data...')
        unit = self.ceph0_sentry
        conf = '/etc/ceph/ceph.conf'
        expected = {
            'global': {
                'keyring': '/etc/ceph/$cluster.$name.keyring',
                'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc',
                'log to syslog': 'false',
                'err to syslog': 'false',
                'clog to syslog': 'false',
                'mon cluster log to syslog': 'false',
                'auth cluster required': 'none',
                'auth service required': 'none',
                'auth client required': 'none'
            },
            'mon': {
                'keyring': '/var/lib/ceph/mon/$cluster-$id/keyring'
            },
            'mds': {
                'keyring': '/var/lib/ceph/mds/$cluster-$id/keyring'
            },
        }

        for section, pairs in expected.iteritems():
            ret = u.validate_config_data(unit, conf, section, pairs)
            if ret:
                message = "ceph config error: {}".format(ret)
                amulet.raise_status(amulet.FAIL, msg=message)

    def test_302_cinder_rbd_config(self):
        """Verify the cinder config file data regarding ceph."""
        u.log.debug('Checking cinder (rbd) config file data...')
        unit = self.cinder_sentry
        conf = '/etc/cinder/cinder.conf'
        expected = {
            'DEFAULT': {
                'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver'
            }
        }
        for section, pairs in expected.iteritems():
            ret = u.validate_config_data(unit, conf, section, pairs)
            if ret:
                message = "cinder (rbd) config error: {}".format(ret)
                amulet.raise_status(amulet.FAIL, msg=message)

    def test_304_glance_rbd_config(self):
        """Verify the glance config file data regarding ceph."""
        u.log.debug('Checking glance (rbd) config file data...')
        unit = self.glance_sentry
        conf = '/etc/glance/glance-api.conf'
        config = {
            'default_store': 'rbd',
            'rbd_store_ceph_conf': '/etc/ceph/ceph.conf',
            'rbd_store_user': 'glance',
            'rbd_store_pool': 'glance',
            'rbd_store_chunk_size': '8'
        }

        if self._get_openstack_release() >= self.trusty_kilo:
            # Kilo or later
            config['stores'] = ('glance.store.filesystem.Store,'
                                'glance.store.http.Store,'
                                'glance.store.rbd.Store')
            section = 'glance_store'
        else:
            # Juno or earlier
            section = 'DEFAULT'

        expected = {section: config}
        for section, pairs in expected.iteritems():
            ret = u.validate_config_data(unit, conf, section, pairs)
            if ret:
                message = "glance (rbd) config error: {}".format(ret)
                amulet.raise_status(amulet.FAIL, msg=message)

    def test_306_nova_rbd_config(self):
        """Verify the nova config file data regarding ceph."""
        u.log.debug('Checking nova (rbd) config file data...')
        unit = self.nova_sentry
        conf = '/etc/nova/nova.conf'
        expected = {
            'libvirt': {
                'rbd_user': 'nova-compute',
                'rbd_secret_uuid': u.not_null
            }
        }
        for section, pairs in expected.iteritems():
            ret = u.validate_config_data(unit, conf, section, pairs)
            if ret:
                message = "nova (rbd) config error: {}".format(ret)
                amulet.raise_status(amulet.FAIL, msg=message)
    def test_400_ceph_check_osd_pools(self):
        """Check osd pools on all ceph units, expect them to be
        identical, and expect specific pools to be present."""
        u.log.debug('Checking pools on ceph units...')

        expected_pools = self.get_ceph_expected_pools()
        results = []
        sentries = [
            self.ceph0_sentry,
            self.ceph1_sentry,
            self.ceph2_sentry
        ]

        # Check for presence of expected pools on each unit
        u.log.debug('Expected pools: {}'.format(expected_pools))
        for sentry_unit in sentries:
            pools = u.get_ceph_pools(sentry_unit)
            results.append(pools)

            for expected_pool in expected_pools:
                if expected_pool not in pools:
                    msg = ('{} does not have pool: '
                           '{}'.format(sentry_unit.info['unit_name'],
                                       expected_pool))
                    amulet.raise_status(amulet.FAIL, msg=msg)
            u.log.debug('{} has (at least) the expected '
                        'pools.'.format(sentry_unit.info['unit_name']))

        # Check that all units returned the same pool name:id data
        ret = u.validate_list_of_identical_dicts(results)
        if ret:
            u.log.debug('Pool list results: {}'.format(results))
            msg = ('{}; Pool list results are not identical on all '
                   'ceph units.'.format(ret))
            amulet.raise_status(amulet.FAIL, msg=msg)
        else:
            u.log.debug('Pool list on all ceph units produced the '
                        'same results (OK).')
    def test_402_pause_resume_actions(self):
        """Verify that pause/resume works."""
        u.log.debug("Testing pause")
        cmd = "ceph -s"

        sentry_unit = self.ceph0_sentry
        action_id = u.run_action(sentry_unit, 'pause-health')
        assert u.wait_on_action(action_id), "Pause health action failed."

        output, code = sentry_unit.run(cmd)
        if 'nodown' not in output or 'noout' not in output:
            amulet.raise_status(amulet.FAIL, msg="Missing noout,nodown")

        u.log.debug("Testing resume")
        action_id = u.run_action(sentry_unit, 'resume-health')
        assert u.wait_on_action(action_id), "Resume health action failed."

        output, code = sentry_unit.run(cmd)
        if 'nodown' in output or 'noout' in output:
            amulet.raise_status(amulet.FAIL, msg="Still has noout,nodown")
    @staticmethod
    def find_pool(sentry_unit, pool_name):
        """
        This will do a ceph osd dump and search for the pool you specify.

        :param sentry_unit: The unit to run this command from.
        :param pool_name: str. The name of the Ceph pool to query
        :return: str or None. The matching pool line or None if not found
        """
        output, dump_code = sentry_unit.run("ceph osd dump")
        if dump_code != 0:
            amulet.raise_status(
                amulet.FAIL,
                msg="ceph osd dump failed with output: {}".format(
                    output))
        for line in output.split('\n'):
            match = re.search(r"pool\s+\d+\s+'(?P<pool_name>.*)'", line)
            if match:
                name = match.group('pool_name')
                if name == pool_name:
                    return line
        return None
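
    # Illustration (sketch, not part of the test): the pool-name regex keys
    # on the single-quoted name field of each `ceph osd dump` line. The
    # sample line below is invented but format-faithful.
    _sample = "pool 3 'cinder' replicated size 3 min_size 2 crush_ruleset 0"
    assert re.search(r"pool\s+\d+\s+'(?P<pool_name>.*)'",
                     _sample).group('pool_name') == 'cinder'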
    def test_403_cache_tier_actions(self):
        """Verify that cache tier add/remove works."""
        u.log.debug("Testing cache tiering")

        sentry_unit = self.ceph0_sentry
        # Create our backer pool
        output, code = sentry_unit.run("ceph osd pool create cold 128 128")
        if code != 0:
            amulet.raise_status(
                amulet.FAIL,
                msg="ceph osd pool create cold failed with output: {}".format(
                    output))

        # Create our cache pool
        output, code = sentry_unit.run("ceph osd pool create hot 128 128")
        if code != 0:
            amulet.raise_status(
                amulet.FAIL,
                msg="ceph osd pool create hot failed with output: {}".format(
                    output))

        action_id = u.run_action(sentry_unit,
                                 'create-cache-tier',
                                 params={
                                     'backer-pool': 'cold',
                                     'cache-pool': 'hot',
                                     'cache-mode': 'writeback'})
        assert u.wait_on_action(action_id), \
            "Create cache tier action failed."

        pool_line = self.find_pool(
            sentry_unit=sentry_unit,
            pool_name='hot')

        assert "cache_mode writeback" in pool_line, \
            "cache_mode writeback not found in cache pool"
        remove_action_id = u.run_action(sentry_unit,
                                        'remove-cache-tier',
                                        params={
                                            'backer-pool': 'cold',
                                            'cache-pool': 'hot'})
        assert u.wait_on_action(remove_action_id), \
            "Remove cache tier action failed"
        pool_line = self.find_pool(sentry_unit=sentry_unit, pool_name='hot')
        assert "cache_mode" not in pool_line, \
            "cache_mode is still enabled on cache pool"
    def test_410_ceph_cinder_vol_create(self):
        """Create and confirm a ceph-backed cinder volume, and inspect
        ceph cinder pool object count as the volume is created
        and deleted."""
        sentry_unit = self.ceph0_sentry
        obj_count_samples = []
        pool_size_samples = []
        pools = u.get_ceph_pools(self.ceph0_sentry)
        cinder_pool = pools['cinder']

        # Check ceph cinder pool object count, disk space usage and pool name
        u.log.debug('Checking ceph cinder pool original samples...')
        pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit,
                                                               cinder_pool)
        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        expected = 'cinder'
        if pool_name != expected:
            msg = ('Ceph pool {} unexpected name (actual, expected): '
                   '{}. {}'.format(cinder_pool, pool_name, expected))
            amulet.raise_status(amulet.FAIL, msg=msg)

        # Create ceph-backed cinder volume
        cinder_vol = u.create_cinder_volume(self.cinder)

        # Re-check ceph cinder pool object count and disk usage
        time.sleep(10)
        u.log.debug('Checking ceph cinder pool samples after volume '
                    'create...')
        pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit,
                                                               cinder_pool)
        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        # Delete ceph-backed cinder volume
        u.delete_resource(self.cinder.volumes, cinder_vol, msg="cinder volume")

        # Final check, ceph cinder pool object count and disk usage
        time.sleep(10)
        u.log.debug('Checking ceph cinder pool after volume delete...')
        pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit,
                                                               cinder_pool)
        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        # Validate ceph cinder pool object count samples over time
        ret = u.validate_ceph_pool_samples(obj_count_samples,
                                           "cinder pool object count")
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

        # Validate ceph cinder pool disk space usage samples over time
        ret = u.validate_ceph_pool_samples(pool_size_samples,
                                           "cinder pool disk usage")
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)
    def test_412_ceph_glance_image_create_delete(self):
        """Create and confirm a ceph-backed glance image, and inspect
        ceph glance pool object count as the image is created
        and deleted."""
        sentry_unit = self.ceph0_sentry
        obj_count_samples = []
        pool_size_samples = []
        pools = u.get_ceph_pools(self.ceph0_sentry)
        glance_pool = pools['glance']

        # Check ceph glance pool object count, disk space usage and pool name
        u.log.debug('Checking ceph glance pool original samples...')
        pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit,
                                                               glance_pool)
        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        expected = 'glance'
        if pool_name != expected:
            msg = ('Ceph glance pool {} unexpected name (actual, '
                   'expected): {}. {}'.format(glance_pool,
                                              pool_name, expected))
            amulet.raise_status(amulet.FAIL, msg=msg)

        # Create ceph-backed glance image
        glance_img = u.create_cirros_image(self.glance, "cirros-image-1")

        # Re-check ceph glance pool object count and disk usage
        time.sleep(10)
        u.log.debug('Checking ceph glance pool samples after image create...')
        pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit,
                                                               glance_pool)
        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        # Delete ceph-backed glance image
        u.delete_resource(self.glance.images,
                          glance_img, msg="glance image")

        # Final check, ceph glance pool object count and disk usage
        time.sleep(10)
        u.log.debug('Checking ceph glance pool samples after image delete...')
        pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit,
                                                               glance_pool)
        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        # Validate ceph glance pool object count samples over time
        ret = u.validate_ceph_pool_samples(obj_count_samples,
                                           "glance pool object count")
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

        # Validate ceph glance pool disk space usage samples over time
        ret = u.validate_ceph_pool_samples(pool_size_samples,
                                           "glance pool disk usage")
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)
    def test_499_ceph_cmds_exit_zero(self):
        """Check basic functionality of ceph cli commands against
        all ceph units."""
        sentry_units = [
            self.ceph0_sentry,
            self.ceph1_sentry,
            self.ceph2_sentry
        ]
        commands = [
            'sudo ceph health',
            'sudo ceph mds stat',
            'sudo ceph pg stat',
            'sudo ceph osd stat',
            'sudo ceph mon stat',
        ]
        ret = u.check_commands_on_units(commands, sentry_units)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

    # FYI: No restart check as ceph services do not restart
    # when charm config changes, unless monitor count increases.

View File

@ -1,38 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
# Bootstrap charm-helpers, installing its dependencies if necessary using
# only standard libraries.
import subprocess
import sys

try:
    import six  # flake8: noqa
except ImportError:
    if sys.version_info.major == 2:
        subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
    else:
        subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
    import six  # flake8: noqa

try:
    import yaml  # flake8: noqa
except ImportError:
    if sys.version_info.major == 2:
        subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
    else:
        subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
    import yaml  # flake8: noqa

View File

@ -1,15 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.

View File

@ -1,15 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.

View File

@ -1,95 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import amulet
import os
import six
class AmuletDeployment(object):
"""Amulet deployment.
This class provides generic Amulet deployment and test runner
methods.
"""
def __init__(self, series=None):
"""Initialize the deployment environment."""
self.series = None
if series:
self.series = series
self.d = amulet.Deployment(series=self.series)
else:
self.d = amulet.Deployment()
def _add_services(self, this_service, other_services):
"""Add services.
Add services to the deployment where this_service is the local charm
that we're testing and other_services are the other services that
are being used in the local amulet tests.
"""
if this_service['name'] != os.path.basename(os.getcwd()):
s = this_service['name']
msg = "The charm's root directory name needs to be {}".format(s)
amulet.raise_status(amulet.FAIL, msg=msg)
if 'units' not in this_service:
this_service['units'] = 1
self.d.add(this_service['name'], units=this_service['units'],
constraints=this_service.get('constraints'))
for svc in other_services:
if 'location' in svc:
branch_location = svc['location']
elif self.series:
branch_location = 'cs:{}/{}'.format(self.series, svc['name'])
else:
branch_location = None
if 'units' not in svc:
svc['units'] = 1
self.d.add(svc['name'], charm=branch_location, units=svc['units'],
constraints=svc.get('constraints'))
def _add_relations(self, relations):
"""Add all of the relations for the services."""
for k, v in six.iteritems(relations):
self.d.relate(k, v)
def _configure_services(self, configs):
"""Configure all of the services."""
for service, config in six.iteritems(configs):
self.d.configure(service, config)
def _deploy(self):
"""Deploy environment and wait for all hooks to finish executing."""
try:
self.d.setup(timeout=900)
self.d.sentry.wait(timeout=900)
except amulet.helpers.TimeoutError:
amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
except Exception:
raise
def run_tests(self):
"""Run all of the methods that are prefixed with 'test_'."""
for test in dir(self):
if test.startswith('test_'):
getattr(self, test)()
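
A minimal usage sketch of the class above, assuming it runs from a charm checkout named mycharm (service, relation, and config names are illustrative; real tests normally drive this through a subclass):

d = AmuletDeployment(series='trusty')
d._add_services({'name': 'mycharm', 'units': 1},
                [{'name': 'mysql', 'units': 1}])
d._add_relations({'mycharm:shared-db': 'mysql:shared-db'})
d._configure_services({'mycharm': {'debug': 'true'}})
d._deploy()
d.run_tests()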


@@ -1,829 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import io
import json
import logging
import os
import re
import socket
import subprocess
import sys
import time
import uuid
import amulet
import distro_info
import six
from six.moves import configparser
if six.PY3:
from urllib import parse as urlparse
else:
import urlparse
class AmuletUtils(object):
"""Amulet utilities.
This class provides common utility functions that are used by Amulet
tests.
"""
def __init__(self, log_level=logging.ERROR):
self.log = self.get_logger(level=log_level)
self.ubuntu_releases = self.get_ubuntu_releases()
def get_logger(self, name="amulet-logger", level=logging.DEBUG):
"""Get a logger object that will log to stdout."""
log = logging
logger = log.getLogger(name)
fmt = log.Formatter("%(asctime)s %(funcName)s "
"%(levelname)s: %(message)s")
handler = log.StreamHandler(stream=sys.stdout)
handler.setLevel(level)
handler.setFormatter(fmt)
logger.addHandler(handler)
logger.setLevel(level)
return logger
def valid_ip(self, ip):
if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
return True
else:
return False
def valid_url(self, url):
p = re.compile(
r'^(?:http|ftp)s?://'
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa
r'localhost|'
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
r'(?::\d+)?'
r'(?:/?|[/?]\S+)$',
re.IGNORECASE)
if p.match(url):
return True
else:
return False
def get_ubuntu_release_from_sentry(self, sentry_unit):
"""Get Ubuntu release codename from sentry unit.
:param sentry_unit: amulet sentry/service unit pointer
:returns: list of strings - release codename, failure message
"""
msg = None
cmd = 'lsb_release -cs'
release, code = sentry_unit.run(cmd)
if code == 0:
self.log.debug('{} lsb_release: {}'.format(
sentry_unit.info['unit_name'], release))
else:
msg = ('{} `{}` returned {} '
'{}'.format(sentry_unit.info['unit_name'],
cmd, release, code))
if release not in self.ubuntu_releases:
msg = ("Release ({}) not found in Ubuntu releases "
"({})".format(release, self.ubuntu_releases))
return release, msg
def validate_services(self, commands):
"""Validate that lists of commands succeed on service units. Can be
used to verify system services are running on the corresponding
service units.
:param commands: dict with sentry keys and arbitrary command list vals
:returns: None if successful, Failure string message otherwise
"""
self.log.debug('Checking status of system services...')
# /!\ DEPRECATION WARNING (beisner):
# New and existing tests should be rewritten to use
# validate_services_by_name() as it is aware of init systems.
self.log.warn('DEPRECATION WARNING: use '
'validate_services_by_name instead of validate_services '
'due to init system differences.')
for k, v in six.iteritems(commands):
for cmd in v:
output, code = k.run(cmd)
self.log.debug('{} `{}` returned '
'{}'.format(k.info['unit_name'],
cmd, code))
if code != 0:
return "command `{}` returned {}".format(cmd, str(code))
return None
def validate_services_by_name(self, sentry_services):
"""Validate system service status by service name, automatically
detecting init system based on Ubuntu release codename.
:param sentry_services: dict with sentry keys and svc list values
:returns: None if successful, Failure string message otherwise
"""
self.log.debug('Checking status of system services...')
# Point at which systemd became a thing
systemd_switch = self.ubuntu_releases.index('vivid')
for sentry_unit, services_list in six.iteritems(sentry_services):
# Get lsb_release codename from unit
release, ret = self.get_ubuntu_release_from_sentry(sentry_unit)
if ret:
return ret
for service_name in services_list:
if (self.ubuntu_releases.index(release) >= systemd_switch or
service_name in ['rabbitmq-server', 'apache2']):
# init is systemd (or regular sysv)
cmd = 'sudo service {} status'.format(service_name)
output, code = sentry_unit.run(cmd)
service_running = code == 0
elif self.ubuntu_releases.index(release) < systemd_switch:
# init is upstart
cmd = 'sudo status {}'.format(service_name)
output, code = sentry_unit.run(cmd)
service_running = code == 0 and "start/running" in output
self.log.debug('{} `{}` returned '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code))
if not service_running:
return u"command `{}` returned {} {}".format(
cmd, output, str(code))
return None
def _get_config(self, unit, filename):
"""Get a ConfigParser object for parsing a unit's config file."""
file_contents = unit.file_contents(filename)
# NOTE(beisner): by default, ConfigParser does not handle options
# with no value, such as the flags used in the mysql my.cnf file.
# https://bugs.python.org/issue7005
config = configparser.ConfigParser(allow_no_value=True)
config.readfp(io.StringIO(file_contents))
return config
def validate_config_data(self, sentry_unit, config_file, section,
expected):
"""Validate config file data.
Verify that the specified section of the config file contains
the expected option key:value pairs.
Compare expected dictionary data vs actual dictionary data.
The values in the 'expected' dictionary can be strings, bools, ints,
longs, or can be a function that evaluates a variable and returns a
bool.
"""
self.log.debug('Validating config file data ({} in {} on {})'
'...'.format(section, config_file,
sentry_unit.info['unit_name']))
config = self._get_config(sentry_unit, config_file)
if section != 'DEFAULT' and not config.has_section(section):
return "section [{}] does not exist".format(section)
for k in expected.keys():
if not config.has_option(section, k):
return "section [{}] is missing option {}".format(section, k)
actual = config.get(section, k)
v = expected[k]
if (isinstance(v, six.string_types) or
isinstance(v, bool) or
isinstance(v, six.integer_types)):
# handle explicit values
if actual != v:
return "section [{}] {}:{} != expected {}:{}".format(
section, k, actual, k, expected[k])
# handle function pointers, such as not_null or valid_ip
elif not v(actual):
return "section [{}] {}:{} != expected {}:{}".format(
section, k, actual, k, expected[k])
return None
def _validate_dict_data(self, expected, actual):
"""Validate dictionary data.
Compare expected dictionary data vs actual dictionary data.
The values in the 'expected' dictionary can be strings, bools, ints,
longs, or can be a function that evaluates a variable and returns a
bool.
"""
self.log.debug('actual: {}'.format(repr(actual)))
self.log.debug('expected: {}'.format(repr(expected)))
for k, v in six.iteritems(expected):
if k in actual:
if (isinstance(v, six.string_types) or
isinstance(v, bool) or
isinstance(v, six.integer_types)):
# handle explicit values
if v != actual[k]:
return "{}:{}".format(k, actual[k])
# handle function pointers, such as not_null or valid_ip
elif not v(actual[k]):
return "{}:{}".format(k, actual[k])
else:
return "key '{}' does not exist".format(k)
return None
def validate_relation_data(self, sentry_unit, relation, expected):
"""Validate actual relation data based on expected relation data."""
actual = sentry_unit.relation(relation[0], relation[1])
return self._validate_dict_data(expected, actual)
def _validate_list_data(self, expected, actual):
"""Compare expected list vs actual list data."""
for e in expected:
if e not in actual:
return "expected item {} not found in actual list".format(e)
return None
def not_null(self, string):
if string is not None:
return True
else:
return False
def _get_file_mtime(self, sentry_unit, filename):
"""Get last modification time of file."""
return sentry_unit.file_stat(filename)['mtime']
def _get_dir_mtime(self, sentry_unit, directory):
"""Get last modification time of directory."""
return sentry_unit.directory_stat(directory)['mtime']
def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None):
"""Get start time of a process based on the last modification time
of the /proc/pid directory.
:param sentry_unit: The sentry unit to check for the service on
:param service: service name to look for in process table
:param pgrep_full: [Deprecated] Use full command line search mode with pgrep
:returns: epoch time of service process start
"""
if pgrep_full is not None:
# /!\ DEPRECATION WARNING (beisner):
# No longer implemented, as pidof is now used instead of pgrep.
# https://bugs.launchpad.net/charm-helpers/+bug/1474030
self.log.warn('DEPRECATION WARNING: pgrep_full bool is no '
'longer implemented re: lp 1474030.')
pid_list = self.get_process_id_list(sentry_unit, service)
pid = pid_list[0]
proc_dir = '/proc/{}'.format(pid)
self.log.debug('Pid for {} on {}: {}'.format(
service, sentry_unit.info['unit_name'], pid))
return self._get_dir_mtime(sentry_unit, proc_dir)
def service_restarted(self, sentry_unit, service, filename,
pgrep_full=None, sleep_time=20):
"""Check if service was restarted.
Compare a service's start time vs a file's last modification time
(such as a config file for that service) to determine if the service
has been restarted.
"""
# /!\ DEPRECATION WARNING (beisner):
# This method is prone to races in that no before-time is known.
# Use validate_service_config_changed instead.
# NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
# used instead of pgrep. pgrep_full is still passed through to ensure
# deprecation WARNS. lp1474030
self.log.warn('DEPRECATION WARNING: use '
'validate_service_config_changed instead of '
'service_restarted due to known races.')
time.sleep(sleep_time)
if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
self._get_file_mtime(sentry_unit, filename)):
return True
else:
return False
def service_restarted_since(self, sentry_unit, mtime, service,
pgrep_full=None, sleep_time=20,
retry_count=30, retry_sleep_time=10):
"""Check if service was been started after a given time.
Args:
sentry_unit (sentry): The sentry unit to check for the service on
mtime (float): The epoch time to check against
service (string): service name to look for in process table
pgrep_full: [Deprecated] Use full command line search mode with pgrep
sleep_time (int): Initial sleep time (s) before looking for file
retry_sleep_time (int): Time (s) to sleep between retries
retry_count (int): If file is not found, how many times to retry
Returns:
bool: True if the service is found and its start time is newer
than mtime; False if the service start time is older than
mtime or the service was not found.
"""
# NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
# used instead of pgrep. pgrep_full is still passed through to ensure
# deprecation WARNS. lp1474030
unit_name = sentry_unit.info['unit_name']
self.log.debug('Checking that %s service restarted since %s on '
'%s' % (service, mtime, unit_name))
time.sleep(sleep_time)
proc_start_time = None
tries = 0
while tries <= retry_count and not proc_start_time:
try:
proc_start_time = self._get_proc_start_time(sentry_unit,
service,
pgrep_full)
self.log.debug('Attempt {} to get {} proc start time on {} '
'OK'.format(tries, service, unit_name))
except IOError as e:
# NOTE(beisner) - race avoidance, proc may not exist yet.
# https://bugs.launchpad.net/charm-helpers/+bug/1474030
self.log.debug('Attempt {} to get {} proc start time on {} '
'failed\n{}'.format(tries, service,
unit_name, e))
time.sleep(retry_sleep_time)
tries += 1
if not proc_start_time:
self.log.warn('No proc start time found, assuming service did '
'not start')
return False
if proc_start_time >= mtime:
self.log.debug('Proc start time is newer than provided mtime '
'(%s >= %s) on %s (OK)' % (proc_start_time,
mtime, unit_name))
return True
else:
self.log.warn('Proc start time (%s) is older than provided mtime '
'(%s) on %s, service did not '
'restart' % (proc_start_time, mtime, unit_name))
return False
def config_updated_since(self, sentry_unit, filename, mtime,
sleep_time=20, retry_count=30,
retry_sleep_time=10):
"""Check if file was modified after a given time.
Args:
sentry_unit (sentry): The sentry unit to check the file mtime on
filename (string): The file to check mtime of
mtime (float): The epoch time to check against
sleep_time (int): Initial sleep time (s) before looking for file
retry_sleep_time (int): Time (s) to sleep between retries
retry_count (int): If file is not found, how many times to retry
Returns:
bool: True if file was modified more recently than mtime, False if
file was modified before mtime, or if file not found.
"""
unit_name = sentry_unit.info['unit_name']
self.log.debug('Checking that %s updated since %s on '
'%s' % (filename, mtime, unit_name))
time.sleep(sleep_time)
file_mtime = None
tries = 0
while tries <= retry_count and not file_mtime:
try:
file_mtime = self._get_file_mtime(sentry_unit, filename)
self.log.debug('Attempt {} to get {} file mtime on {} '
'OK'.format(tries, filename, unit_name))
except IOError as e:
# NOTE(beisner) - race avoidance, file may not exist yet.
# https://bugs.launchpad.net/charm-helpers/+bug/1474030
self.log.debug('Attempt {} to get {} file mtime on {} '
'failed\n{}'.format(tries, filename,
unit_name, e))
time.sleep(retry_sleep_time)
tries += 1
if not file_mtime:
self.log.warn('Could not determine file mtime, assuming '
'file does not exist')
return False
if file_mtime >= mtime:
self.log.debug('File mtime is newer than provided mtime '
'(%s >= %s) on %s (OK)' % (file_mtime,
mtime, unit_name))
return True
else:
self.log.warn('File mtime is older than provided mtime '
'(%s < %s) on %s' % (file_mtime,
mtime, unit_name))
return False
def validate_service_config_changed(self, sentry_unit, mtime, service,
filename, pgrep_full=None,
sleep_time=20, retry_count=30,
retry_sleep_time=10):
"""Check service and file were updated after mtime
Args:
sentry_unit (sentry): The sentry unit to check for the service on
mtime (float): The epoch time to check against
service (string): service name to look for in process table
filename (string): The file to check mtime of
pgrep_full: [Deprecated] Use full command line search mode with pgrep
sleep_time (int): Initial sleep in seconds to pass to test helpers
retry_count (int): If service is not found, how many times to retry
retry_sleep_time (int): Time in seconds to wait between retries
Typical Usage:
u = OpenStackAmuletUtils(ERROR)
...
mtime = u.get_sentry_time(self.cinder_sentry)
self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'})
if not u.validate_service_config_changed(self.cinder_sentry,
mtime,
'cinder-api',
'/etc/cinder/cinder.conf'):
amulet.raise_status(amulet.FAIL, msg='update failed')
Returns:
bool: True if both service and file were updated/restarted after
mtime, False if service is older than mtime or if service was
not found or if filename was modified before mtime.
"""
# NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
# used instead of pgrep. pgrep_full is still passed through to ensure
# deprecation WARNS. lp1474030
service_restart = self.service_restarted_since(
sentry_unit, mtime,
service,
pgrep_full=pgrep_full,
sleep_time=sleep_time,
retry_count=retry_count,
retry_sleep_time=retry_sleep_time)
config_update = self.config_updated_since(
sentry_unit,
filename,
mtime,
sleep_time=sleep_time,
retry_count=retry_count,
retry_sleep_time=retry_sleep_time)
return service_restart and config_update
def get_sentry_time(self, sentry_unit):
"""Return current epoch time on a sentry"""
cmd = "date +'%s'"
return float(sentry_unit.run(cmd)[0])
def relation_error(self, name, data):
return 'unexpected relation data in {} - {}'.format(name, data)
def endpoint_error(self, name, data):
return 'unexpected endpoint data in {} - {}'.format(name, data)
def get_ubuntu_releases(self):
"""Return a list of all Ubuntu releases in order of release."""
_d = distro_info.UbuntuDistroInfo()
_release_list = _d.all
return _release_list
def file_to_url(self, file_rel_path):
"""Convert a relative file path to a file URL."""
_abs_path = os.path.abspath(file_rel_path)
return urlparse.urlparse(_abs_path, scheme='file').geturl()
def check_commands_on_units(self, commands, sentry_units):
"""Check that all commands in a list exit zero on all
sentry units in a list.
:param commands: list of bash commands
:param sentry_units: list of sentry unit pointers
:returns: None if successful; Failure message otherwise
"""
self.log.debug('Checking exit codes for {} commands on {} '
'sentry units...'.format(len(commands),
len(sentry_units)))
for sentry_unit in sentry_units:
for cmd in commands:
output, code = sentry_unit.run(cmd)
if code == 0:
self.log.debug('{} `{}` returned {} '
'(OK)'.format(sentry_unit.info['unit_name'],
cmd, code))
else:
return ('{} `{}` returned {} '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code, output))
return None
def get_process_id_list(self, sentry_unit, process_name,
expect_success=True):
"""Get a list of process ID(s) from a single sentry juju unit
for a single process name.
:param sentry_unit: Amulet sentry instance (juju unit)
:param process_name: Process name
:param expect_success: If False, expect the PID to be missing,
raise if it is present.
:returns: List of process IDs
"""
cmd = 'pidof -x {}'.format(process_name)
if not expect_success:
cmd += " || exit 0 && exit 1"
output, code = sentry_unit.run(cmd)
if code != 0:
msg = ('{} `{}` returned {} '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code, output))
amulet.raise_status(amulet.FAIL, msg=msg)
return str(output).split()
def get_unit_process_ids(self, unit_processes, expect_success=True):
"""Construct a dict containing unit sentries, process names, and
process IDs.
:param unit_processes: A dictionary of Amulet sentry instance
to list of process names.
:param expect_success: if False expect the processes to not be
running, raise if they are.
:returns: Dictionary of Amulet sentry instance to dictionary
of process names to PIDs.
"""
pid_dict = {}
for sentry_unit, process_list in six.iteritems(unit_processes):
pid_dict[sentry_unit] = {}
for process in process_list:
pids = self.get_process_id_list(
sentry_unit, process, expect_success=expect_success)
pid_dict[sentry_unit].update({process: pids})
return pid_dict
def validate_unit_process_ids(self, expected, actual):
"""Validate process id quantities for services on units."""
self.log.debug('Checking units for running processes...')
self.log.debug('Expected PIDs: {}'.format(expected))
self.log.debug('Actual PIDs: {}'.format(actual))
if len(actual) != len(expected):
return ('Unit count mismatch. expected, actual: {}, '
'{} '.format(len(expected), len(actual)))
for (e_sentry, e_proc_names) in six.iteritems(expected):
e_sentry_name = e_sentry.info['unit_name']
if e_sentry in actual.keys():
a_proc_names = actual[e_sentry]
else:
return ('Expected sentry ({}) not found in actual dict data. '
'{}'.format(e_sentry_name, e_sentry))
if len(e_proc_names.keys()) != len(a_proc_names.keys()):
return ('Process name count mismatch. expected, actual: {}, '
'{}'.format(len(e_proc_names), len(a_proc_names)))
for (e_proc_name, e_pids), (a_proc_name, a_pids) in \
zip(e_proc_names.items(), a_proc_names.items()):
if e_proc_name != a_proc_name:
return ('Process name mismatch. expected, actual: {}, '
'{}'.format(e_proc_name, a_proc_name))
a_pids_length = len(a_pids)
fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
'{}, {} ({})'.format(e_sentry_name, e_proc_name,
e_pids, a_pids_length,
a_pids))
# If expected is a list, ensure at least one PID quantity match
if isinstance(e_pids, list) and \
a_pids_length not in e_pids:
return fail_msg
# If expected is not bool and not list,
# ensure PID quantities match
elif not isinstance(e_pids, bool) and \
not isinstance(e_pids, list) and \
a_pids_length != e_pids:
return fail_msg
# If expected is bool True, ensure 1 or more PIDs exist
elif isinstance(e_pids, bool) and \
e_pids is True and a_pids_length < 1:
return fail_msg
# If expected is bool False, ensure 0 PIDs exist
elif isinstance(e_pids, bool) and \
e_pids is False and a_pids_length != 0:
return fail_msg
else:
self.log.debug('PID check OK: {} {} {}: '
'{}'.format(e_sentry_name, e_proc_name,
e_pids, a_pids))
return None
def validate_list_of_identical_dicts(self, list_of_dicts):
"""Check that all dicts within a list are identical."""
hashes = []
for _dict in list_of_dicts:
hashes.append(hash(frozenset(_dict.items())))
self.log.debug('Hashes: {}'.format(hashes))
if len(set(hashes)) == 1:
self.log.debug('Dicts within list are identical')
else:
return 'Dicts within list are not identical'
return None
def validate_sectionless_conf(self, file_contents, expected):
"""A crude conf parser. Useful to inspect configuration files which
do not have section headers (as configparser would require), such as
openstack-dashboard or rabbitmq confs."""
for line in file_contents.split('\n'):
if '=' in line:
args = line.split('=')
if len(args) <= 1:
continue
key = args[0].strip()
value = args[1].strip()
if key in expected.keys():
if expected[key] != value:
msg = ('Config mismatch. Expected, actual: {}, '
'{}'.format(expected[key], value))
amulet.raise_status(amulet.FAIL, msg=msg)
def get_unit_hostnames(self, units):
"""Return a dict of juju unit names to hostnames."""
host_names = {}
for unit in units:
host_names[unit.info['unit_name']] = \
str(unit.file_contents('/etc/hostname').strip())
self.log.debug('Unit host names: {}'.format(host_names))
return host_names
def run_cmd_unit(self, sentry_unit, cmd):
"""Run a command on a unit, return the output and exit code."""
output, code = sentry_unit.run(cmd)
if code == 0:
self.log.debug('{} `{}` command returned {} '
'(OK)'.format(sentry_unit.info['unit_name'],
cmd, code))
else:
msg = ('{} `{}` command returned {} '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code, output))
amulet.raise_status(amulet.FAIL, msg=msg)
return str(output), code
def file_exists_on_unit(self, sentry_unit, file_name):
"""Check if a file exists on a unit."""
try:
sentry_unit.file_stat(file_name)
return True
except IOError:
return False
except Exception as e:
msg = 'Error checking file {}: {}'.format(file_name, e)
amulet.raise_status(amulet.FAIL, msg=msg)
def file_contents_safe(self, sentry_unit, file_name,
max_wait=60, fatal=False):
"""Get file contents from a sentry unit. Wrap amulet file_contents
with retry logic to address races where a file checks as existing,
but no longer exists by the time file_contents is called.
Return None if file not found. Optionally raise if fatal is True."""
unit_name = sentry_unit.info['unit_name']
file_contents = False
tries = 0
while not file_contents and tries < (max_wait / 4):
try:
file_contents = sentry_unit.file_contents(file_name)
except IOError:
self.log.debug('Attempt {} to open file {} from {} '
'failed'.format(tries, file_name,
unit_name))
time.sleep(4)
tries += 1
if file_contents:
return file_contents
elif not fatal:
return None
elif fatal:
msg = 'Failed to get file contents from unit.'
amulet.raise_status(amulet.FAIL, msg)
def port_knock_tcp(self, host="localhost", port=22, timeout=15):
"""Open a TCP socket to check for a listening sevice on a host.
:param host: host name or IP address, default to localhost
:param port: TCP port number, default to 22
:param timeout: Connect timeout, default to 15 seconds
:returns: True if successful, False if connect failed
"""
# Resolve host name if possible
try:
connect_host = socket.gethostbyname(host)
host_human = "{} ({})".format(connect_host, host)
except socket.error as e:
self.log.warn('Unable to resolve address: '
'{} ({}) Trying anyway!'.format(host, e))
connect_host = host
host_human = connect_host
# Attempt socket connection
try:
knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
knock.settimeout(timeout)
knock.connect((connect_host, port))
knock.close()
self.log.debug('Socket connect OK for host '
'{} on port {}.'.format(host_human, port))
return True
except socket.error as e:
self.log.debug('Socket connect FAIL for'
' {} port {} ({})'.format(host_human, port, e))
return False
def port_knock_units(self, sentry_units, port=22,
timeout=15, expect_success=True):
"""Open a TCP socket to check for a listening sevice on each
listed juju unit.
:param sentry_units: list of sentry unit pointers
:param port: TCP port number, default to 22
:param timeout: Connect timeout, default to 15 seconds
:expect_success: True by default, set False to invert logic
:returns: None if successful, Failure message otherwise
"""
for unit in sentry_units:
host = unit.info['public-address']
connected = self.port_knock_tcp(host, port, timeout)
if not connected and expect_success:
return 'Socket connect failed.'
elif connected and not expect_success:
return 'Socket connected unexpectedly.'
def get_uuid_epoch_stamp(self):
"""Returns a stamp string based on uuid4 and epoch time. Useful in
generating test messages which need to be unique-ish."""
return '[{}-{}]'.format(uuid.uuid4(), time.time())
# amulet juju action helpers:
def run_action(self, unit_sentry, action,
_check_output=subprocess.check_output,
params=None):
"""Run the named action on a given unit sentry.
:param params: dict of parameters to pass to the action
:param _check_output: used for dependency injection
:returns: action_id
"""
unit_id = unit_sentry.info["unit_name"]
command = ["juju", "action", "do", "--format=json", unit_id, action]
if params is not None:
for key, value in six.iteritems(params):
command.append("{}={}".format(key, value))
self.log.info("Running command: %s\n" % " ".join(command))
output = _check_output(command, universal_newlines=True)
data = json.loads(output)
action_id = data[u'Action queued with id']
return action_id
def wait_on_action(self, action_id, _check_output=subprocess.check_output):
"""Wait for a given action, returning if it completed or not.
_check_output parameter is used for dependency injection.
"""
command = ["juju", "action", "fetch", "--format=json", "--wait=0",
action_id]
output = _check_output(command, universal_newlines=True)
data = json.loads(output)
return data.get(u"status") == "completed"
def status_get(self, unit):
"""Return the current service status of this unit."""
raw_status, return_code = unit.run(
"status-get --format=json --include-data")
if return_code != 0:
return ("unknown", "")
status = json.loads(raw_status)
return (status["status"], status["message"])


@@ -1,15 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.


@@ -1,15 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.


@@ -1,304 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import logging
import re
import sys
import six
from collections import OrderedDict
from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment
)
DEBUG = logging.DEBUG
ERROR = logging.ERROR
class OpenStackAmuletDeployment(AmuletDeployment):
"""OpenStack amulet deployment.
This class inherits from AmuletDeployment and has additional support
that is specifically for use by OpenStack charms.
"""
def __init__(self, series=None, openstack=None, source=None,
stable=True, log_level=DEBUG):
"""Initialize the deployment environment."""
super(OpenStackAmuletDeployment, self).__init__(series)
self.log = self.get_logger(level=log_level)
self.log.info('OpenStackAmuletDeployment: init')
self.openstack = openstack
self.source = source
self.stable = stable
# Note(coreycb): this needs to be changed when new next branches come
# out.
self.current_next = "trusty"
def get_logger(self, name="deployment-logger", level=logging.DEBUG):
"""Get a logger object that will log to stdout."""
log = logging
logger = log.getLogger(name)
fmt = log.Formatter("%(asctime)s %(funcName)s "
"%(levelname)s: %(message)s")
handler = log.StreamHandler(stream=sys.stdout)
handler.setLevel(level)
handler.setFormatter(fmt)
logger.addHandler(handler)
logger.setLevel(level)
return logger
def _determine_branch_locations(self, other_services):
"""Determine the branch locations for the other services.
Determine if the local branch being tested is derived from its
stable or next (dev) branch, and based on this, use the corresponding
stable or next branches for the other_services."""
self.log.info('OpenStackAmuletDeployment: determine branch locations')
# Charms outside the lp:~openstack-charmers namespace
base_charms = ['mysql', 'mongodb', 'nrpe']
# Force these charms to current series even when using an older series.
# ie. Use trusty/nrpe even when series is precise, as the P charm
# does not possess the necessary external master config and hooks.
force_series_current = ['nrpe']
if self.series in ['precise', 'trusty']:
base_series = self.series
else:
base_series = self.current_next
for svc in other_services:
if svc['name'] in force_series_current:
base_series = self.current_next
# If a location has been explicitly set, use it
if svc.get('location'):
continue
if self.stable:
temp = 'lp:charms/{}/{}'
svc['location'] = temp.format(base_series,
svc['name'])
else:
if svc['name'] in base_charms:
temp = 'lp:charms/{}/{}'
svc['location'] = temp.format(base_series,
svc['name'])
else:
temp = 'lp:~openstack-charmers/charms/{}/{}/next'
svc['location'] = temp.format(self.current_next,
svc['name'])
return other_services
def _add_services(self, this_service, other_services):
"""Add services to the deployment and set openstack-origin/source."""
self.log.info('OpenStackAmuletDeployment: adding services')
other_services = self._determine_branch_locations(other_services)
super(OpenStackAmuletDeployment, self)._add_services(this_service,
other_services)
services = other_services
services.append(this_service)
# Charms which should use the source config option
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
'ceph-osd', 'ceph-radosgw', 'ceph-mon']
# Charms which can not use openstack-origin, ie. many subordinates
no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
'cinder-backup', 'nexentaedge-data',
'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
'cinder-nexentaedge', 'nexentaedge-mgmt']
if self.openstack:
for svc in services:
if svc['name'] not in use_source + no_origin:
config = {'openstack-origin': self.openstack}
self.d.configure(svc['name'], config)
if self.source:
for svc in services:
if svc['name'] in use_source and svc['name'] not in no_origin:
config = {'source': self.source}
self.d.configure(svc['name'], config)
def _configure_services(self, configs):
"""Configure all of the services."""
self.log.info('OpenStackAmuletDeployment: configure services')
for service, config in six.iteritems(configs):
self.d.configure(service, config)
def _auto_wait_for_status(self, message=None, exclude_services=None,
include_only=None, timeout=1800):
"""Wait for all units to have a specific extended status, except
for any defined as excluded. Unless specified via message, any
status containing any case of 'ready' will be considered a match.
Examples of message usage:
Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
Wait for all units to reach this status (exact match):
message = re.compile('^Unit is ready and clustered$')
Wait for all units to reach any one of these (exact match):
message = re.compile('Unit is ready|OK|Ready')
Wait for at least one unit to reach this status (exact match):
message = {'ready'}
See Amulet's sentry.wait_for_messages() for message usage detail.
https://github.com/juju/amulet/blob/master/amulet/sentry.py
:param message: Expected status match
:param exclude_services: List of juju service names to ignore,
not to be used in conjunction with include_only.
:param include_only: List of juju service names to exclusively check,
not to be used in conjunction with exclude_services.
:param timeout: Maximum time in seconds to wait for status match
:returns: None. Raises if timeout is hit.
"""
self.log.info('Waiting for extended status on units...')
all_services = self.d.services.keys()
if exclude_services and include_only:
raise ValueError('exclude_services can not be used '
'with include_only')
if message:
if isinstance(message, re._pattern_type):
match = message.pattern
else:
match = message
self.log.debug('Custom extended status wait match: '
'{}'.format(match))
else:
self.log.debug('Default extended status wait match: contains '
'READY (case-insensitive)')
message = re.compile('.*ready.*', re.IGNORECASE)
if exclude_services:
self.log.debug('Excluding services from extended status match: '
'{}'.format(exclude_services))
else:
exclude_services = []
if include_only:
services = include_only
else:
services = list(set(all_services) - set(exclude_services))
self.log.debug('Waiting up to {}s for extended status on services: '
'{}'.format(timeout, services))
service_messages = {service: message for service in services}
self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
self.log.info('OK')
def _get_openstack_release(self):
"""Get openstack release.
Return an integer representing the enum value of the openstack
release.
"""
# Must be ordered by OpenStack release (not by Ubuntu release):
(self.precise_essex, self.precise_folsom, self.precise_grizzly,
self.precise_havana, self.precise_icehouse,
self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
self.wily_liberty, self.trusty_mitaka,
self.xenial_mitaka) = range(14)
releases = {
('precise', None): self.precise_essex,
('precise', 'cloud:precise-folsom'): self.precise_folsom,
('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
('precise', 'cloud:precise-havana'): self.precise_havana,
('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
('trusty', None): self.trusty_icehouse,
('trusty', 'cloud:trusty-juno'): self.trusty_juno,
('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
('utopic', None): self.utopic_juno,
('vivid', None): self.vivid_kilo,
('wily', None): self.wily_liberty,
('xenial', None): self.xenial_mitaka}
return releases[(self.series, self.openstack)]
def _get_openstack_release_string(self):
"""Get openstack release string.
Return a string representing the openstack release.
"""
releases = OrderedDict([
('precise', 'essex'),
('quantal', 'folsom'),
('raring', 'grizzly'),
('saucy', 'havana'),
('trusty', 'icehouse'),
('utopic', 'juno'),
('vivid', 'kilo'),
('wily', 'liberty'),
('xenial', 'mitaka'),
])
if self.openstack:
os_origin = self.openstack.split(':')[1]
return os_origin.split('%s-' % self.series)[1].split('/')[0]
else:
return releases[self.series]
def get_ceph_expected_pools(self, radosgw=False):
"""Return a list of expected ceph pools in a ceph + cinder + glance
test scenario, based on OpenStack release and whether ceph radosgw
is flagged as present or not."""
if self._get_openstack_release() >= self.trusty_kilo:
# Kilo or later
pools = [
'rbd',
'cinder',
'glance'
]
else:
# Juno or earlier
pools = [
'data',
'metadata',
'rbd',
'cinder',
'glance'
]
if radosgw:
pools.extend([
'.rgw.root',
'.rgw.control',
'.rgw',
'.rgw.gc',
'.users.uid'
])
return pools
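
A sketch of the usual consumption pattern, in which a charm's amulet test subclasses this deployment helper; charm names, unit counts, and relation endpoints are illustrative:

class CephMonBasicDeployment(OpenStackAmuletDeployment):
    """Illustrative amulet deployment of ceph-mon alongside ceph-osd."""

    def __init__(self, series, openstack=None, source=None, stable=False):
        super(CephMonBasicDeployment, self).__init__(series, openstack,
                                                     source, stable)
        self._add_services({'name': 'ceph-mon', 'units': 3},
                           [{'name': 'ceph-osd', 'units': 3}])
        self._add_relations({'ceph-osd:mon': 'ceph-mon:osd'})
        self._deploy()
        self._auto_wait_for_status()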

File diff suppressed because it is too large


@@ -1,17 +0,0 @@
#!/bin/bash
set -ex
sudo add-apt-repository --yes ppa:juju/stable
sudo apt-get update --yes
sudo apt-get install --yes amulet \
distro-info-data \
python-cinderclient \
python-distro-info \
python-glanceclient \
python-heatclient \
python-keystoneclient \
python-neutronclient \
python-novaclient \
python-pika \
python-swiftclient


@@ -1,22 +0,0 @@
bootstrap: true
reset: false
virtualenv: true
makefile:
- lint
- test
sources:
- ppa:juju/stable
packages:
- amulet
- distro-info-data
- python-ceilometerclient
- python-cinderclient
- python-distro-info
- python-glanceclient
- python-heatclient
- python-keystoneclient
- python-neutronclient
- python-novaclient
- python-pika
- python-swiftclient
- python-nose


@@ -1,2 +0,0 @@
import sys
sys.path.append('hooks')


@@ -1,137 +0,0 @@
import json
import unittest
import mock
import ceph_broker
class CephBrokerTestCase(unittest.TestCase):
def setUp(self):
super(CephBrokerTestCase, self).setUp()
@mock.patch('ceph_broker.log')
def test_process_requests_noop(self, mock_log):
req = json.dumps({'api-version': 1, 'ops': []})
rc = ceph_broker.process_requests(req)
self.assertEqual(json.loads(rc), {'exit-code': 0})
@mock.patch('ceph_broker.log')
def test_process_requests_missing_api_version(self, mock_log):
req = json.dumps({'ops': []})
rc = ceph_broker.process_requests(req)
self.assertEqual(json.loads(rc), {
'exit-code': 1,
'stderr': 'Missing or invalid api version (None)'})
@mock.patch('ceph_broker.log')
def test_process_requests_invalid_api_version(self, mock_log):
req = json.dumps({'api-version': 2, 'ops': []})
rc = ceph_broker.process_requests(req)
print "Return: %s" % rc
self.assertEqual(json.loads(rc),
{'exit-code': 1,
'stderr': 'Missing or invalid api version (2)'})
@mock.patch('ceph_broker.log')
def test_process_requests_invalid(self, mock_log):
reqs = json.dumps({'api-version': 1, 'ops': [{'op': 'invalid_op'}]})
rc = ceph_broker.process_requests(reqs)
self.assertEqual(json.loads(rc),
{'exit-code': 1,
'stderr': "Unknown operation 'invalid_op'"})
@mock.patch('ceph_broker.get_osds')
@mock.patch('ceph_broker.ReplicatedPool')
@mock.patch('ceph_broker.pool_exists')
@mock.patch('ceph_broker.log')
def test_process_requests_create_pool_w_pg_num(self, mock_log,
mock_pool_exists,
mock_replicated_pool,
mock_get_osds):
mock_get_osds.return_value = [0, 1, 2]
mock_pool_exists.return_value = False
reqs = json.dumps({'api-version': 1,
'ops': [{
'op': 'create-pool',
'name': 'foo',
'replicas': 3,
'pg_num': 100}]})
rc = ceph_broker.process_requests(reqs)
mock_pool_exists.assert_called_with(service='admin', name='foo')
mock_replicated_pool.assert_called_with(service='admin', name='foo',
replicas=3, pg_num=100)
self.assertEqual(json.loads(rc), {'exit-code': 0})
@mock.patch('ceph_broker.get_osds')
@mock.patch('ceph_broker.ReplicatedPool')
@mock.patch('ceph_broker.pool_exists')
@mock.patch('ceph_broker.log')
def test_process_requests_create_pool_w_pg_num_capped(self, mock_log,
mock_pool_exists,
mock_replicated_pool,
mock_get_osds):
mock_get_osds.return_value = [0, 1, 2]
mock_pool_exists.return_value = False
reqs = json.dumps({'api-version': 1,
'ops': [{
'op': 'create-pool',
'name': 'foo',
'replicas': 3,
'pg_num': 300}]})
rc = ceph_broker.process_requests(reqs)
mock_pool_exists.assert_called_with(service='admin',
name='foo')
mock_replicated_pool.assert_called_with(service='admin', name='foo',
replicas=3, pg_num=100)
self.assertEqual(json.loads(rc), {'exit-code': 0})
@mock.patch('ceph_broker.ReplicatedPool')
@mock.patch('ceph_broker.pool_exists')
@mock.patch('ceph_broker.log')
def test_process_requests_create_pool_exists(self, mock_log,
mock_pool_exists,
mock_replicated_pool):
mock_pool_exists.return_value = True
reqs = json.dumps({'api-version': 1,
'ops': [{'op': 'create-pool',
'name': 'foo',
'replicas': 3}]})
rc = ceph_broker.process_requests(reqs)
mock_pool_exists.assert_called_with(service='admin',
name='foo')
self.assertFalse(mock_replicated_pool.create.called)
self.assertEqual(json.loads(rc), {'exit-code': 0})
@mock.patch('ceph_broker.ReplicatedPool')
@mock.patch('ceph_broker.pool_exists')
@mock.patch('ceph_broker.log')
def test_process_requests_create_pool_rid(self, mock_log,
mock_pool_exists,
mock_replicated_pool):
mock_pool_exists.return_value = False
reqs = json.dumps({'api-version': 1,
'request-id': '1ef5aede',
'ops': [{
'op': 'create-pool',
'name': 'foo',
'replicas': 3}]})
rc = ceph_broker.process_requests(reqs)
mock_pool_exists.assert_called_with(service='admin', name='foo')
mock_replicated_pool.assert_called_with(service='admin',
name='foo',
pg_num=None,
replicas=3)
self.assertEqual(json.loads(rc)['exit-code'], 0)
self.assertEqual(json.loads(rc)['request-id'], '1ef5aede')
@mock.patch('ceph_broker.log')
def test_process_requests_invalid_api_rid(self, mock_log):
reqs = json.dumps({'api-version': 0, 'request-id': '1ef5aede',
'ops': [{'op': 'create-pool'}]})
rc = ceph_broker.process_requests(reqs)
self.assertEqual(json.loads(rc)['exit-code'], 1)
self.assertEqual(json.loads(rc)['stderr'],
"Missing or invalid api version (0)")
self.assertEqual(json.loads(rc)['request-id'], '1ef5aede')
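
These tests pin down the broker's wire format: a JSON document carrying an api-version, an optional request-id that is echoed back in the reply, and a list of ops. A client-side request, as the tests construct it:

request = json.dumps({
    'api-version': 1,
    'request-id': '1ef5aede',  # optional; echoed back in the reply
    'ops': [{'op': 'create-pool', 'name': 'foo', 'replicas': 3}],
})
reply = json.loads(ceph_broker.process_requests(request))
assert reply['exit-code'] == 0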


@@ -1,51 +0,0 @@
import test_utils
import charmhelpers.core.hookenv as hookenv
import utils as ceph_utils
TO_PATCH_SPACES = [
'network_get_primary_address',
'log',
'get_host_ip',
'config',
'get_network_addrs',
'cached',
]
class CephNetworkSpaceTestCase(test_utils.CharmTestCase):
def setUp(self):
super(CephNetworkSpaceTestCase, self).setUp(ceph_utils,
TO_PATCH_SPACES)
self.config.side_effect = self.test_config.get
def tearDown(self):
# Reset @cached cache
hookenv.cache = {}
def test_no_network_space_support(self):
self.get_host_ip.return_value = '192.168.2.1'
self.network_get_primary_address.side_effect = NotImplementedError
self.assertEqual(ceph_utils.get_cluster_addr(),
'192.168.2.1')
self.assertEqual(ceph_utils.get_public_addr(),
'192.168.2.1')
def test_public_network_space(self):
self.network_get_primary_address.return_value = '10.20.40.2'
self.assertEqual(ceph_utils.get_public_addr(),
'10.20.40.2')
self.network_get_primary_address.assert_called_with('public')
self.config.assert_called_with('ceph-public-network')
def test_cluster_network_space(self):
self.network_get_primary_address.return_value = '10.20.50.2'
self.assertEqual(ceph_utils.get_cluster_addr(),
'10.20.50.2')
self.network_get_primary_address.assert_called_with('cluster')
self.config.assert_called_with('ceph-cluster-network')
def test_config_options_in_use(self):
self.get_network_addrs.return_value = ['192.122.20.2']
self.test_config.set('ceph-cluster-network', '192.122.20.0/24')
self.assertEqual(ceph_utils.get_cluster_addr(),
'192.122.20.2')


@@ -1,214 +0,0 @@
__author__ = 'chris'
import json
import unittest
from mock import (
call,
patch,
)
from hooks import ceph_broker
class TestCephOps(unittest.TestCase):
@patch.object(ceph_broker, 'create_erasure_profile')
@patch.object(ceph_broker, 'log', lambda *args, **kwargs: None)
def test_create_erasure_profile(self, mock_create_erasure):
req = json.dumps({'api-version': 1,
'ops': [{
'op': 'create-erasure-profile',
'name': 'foo',
'erasure-type': 'jerasure',
'failure-domain': 'rack',
'k': 3,
'm': 2,
}]})
rc = ceph_broker.process_requests(req)
mock_create_erasure.assert_called_with(service='admin',
profile_name='foo',
coding_chunks=2,
data_chunks=3,
locality=None,
failure_domain='rack',
erasure_plugin_name='jerasure')
self.assertEqual(json.loads(rc), {'exit-code': 0})
@patch.object(ceph_broker, 'get_osds')
@patch.object(ceph_broker, 'pool_exists')
@patch.object(ceph_broker, 'ReplicatedPool')
@patch.object(ceph_broker, 'log', lambda *args, **kwargs: None)
def test_process_requests_create_replicated_pool(self,
mock_replicated_pool,
mock_pool_exists,
mock_get_osds):
mock_get_osds.return_value = 0
mock_pool_exists.return_value = False
reqs = json.dumps({'api-version': 1,
'ops': [{
'op': 'create-pool',
'pool-type': 'replicated',
'name': 'foo',
'replicas': 3
}]})
rc = ceph_broker.process_requests(reqs)
mock_pool_exists.assert_called_with(service='admin', name='foo')
calls = [call(pg_num=None, name=u'foo', service='admin', replicas=3)]
mock_replicated_pool.assert_has_calls(calls)
self.assertEqual(json.loads(rc), {'exit-code': 0})
@patch.object(ceph_broker, 'delete_pool')
@patch.object(ceph_broker, 'log', lambda *args, **kwargs: None)
def test_process_requests_delete_pool(self,
mock_delete_pool):
reqs = json.dumps({'api-version': 1,
'ops': [{
'op': 'delete-pool',
'name': 'foo',
}]})
mock_delete_pool.return_value = {'exit-code': 0}
rc = ceph_broker.process_requests(reqs)
mock_delete_pool.assert_called_with(service='admin', name='foo')
self.assertEqual(json.loads(rc), {'exit-code': 0})
@patch.object(ceph_broker, 'pool_exists')
@patch.object(ceph_broker.ErasurePool, 'create')
@patch.object(ceph_broker, 'erasure_profile_exists')
@patch.object(ceph_broker, 'log', lambda *args, **kwargs: None)
def test_process_requests_create_erasure_pool(self, mock_profile_exists,
mock_erasure_pool,
mock_pool_exists):
mock_pool_exists.return_value = False
reqs = json.dumps({'api-version': 1,
'ops': [{
'op': 'create-pool',
'pool-type': 'erasure',
'name': 'foo',
'erasure-profile': 'default'
}]})
rc = ceph_broker.process_requests(reqs)
mock_profile_exists.assert_called_with(service='admin', name='default')
mock_pool_exists.assert_called_with(service='admin', name='foo')
mock_erasure_pool.assert_called_with()
self.assertEqual(json.loads(rc), {'exit-code': 0})
@patch.object(ceph_broker, 'pool_exists')
@patch.object(ceph_broker.Pool, 'add_cache_tier')
@patch.object(ceph_broker, 'log', lambda *args, **kwargs: None)
def test_process_requests_create_cache_tier(self, mock_pool,
mock_pool_exists):
mock_pool_exists.return_value = True
reqs = json.dumps({'api-version': 1,
'ops': [{
'op': 'create-cache-tier',
'cold-pool': 'foo',
'hot-pool': 'foo-ssd',
'mode': 'writeback',
'erasure-profile': 'default'
}]})
rc = ceph_broker.process_requests(reqs)
mock_pool_exists.assert_any_call(service='admin', name='foo')
mock_pool_exists.assert_any_call(service='admin', name='foo-ssd')
mock_pool.assert_called_with(cache_pool='foo-ssd', mode='writeback')
self.assertEqual(json.loads(rc), {'exit-code': 0})
@patch.object(ceph_broker, 'pool_exists')
@patch.object(ceph_broker.Pool, 'remove_cache_tier')
@patch.object(ceph_broker, 'log', lambda *args, **kwargs: None)
def test_process_requests_remove_cache_tier(self, mock_pool,
mock_pool_exists):
mock_pool_exists.return_value = True
reqs = json.dumps({'api-version': 1,
'ops': [{
'op': 'remove-cache-tier',
'hot-pool': 'foo-ssd',
}]})
rc = ceph_broker.process_requests(reqs)
mock_pool_exists.assert_any_call(service='admin', name='foo-ssd')
mock_pool.assert_called_with(cache_pool='foo-ssd')
self.assertEqual(json.loads(rc), {'exit-code': 0})
@patch.object(ceph_broker, 'snapshot_pool')
@patch.object(ceph_broker, 'log', lambda *args, **kwargs: None)
def test_snapshot_pool(self, mock_snapshot_pool):
reqs = json.dumps({'api-version': 1,
'ops': [{
'op': 'snapshot-pool',
'name': 'foo',
'snapshot-name': 'foo-snap1',
}]})
mock_snapshot_pool.return_value = {'exit-code': 0}
rc = ceph_broker.process_requests(reqs)
mock_snapshot_pool.assert_called_with(service='admin',
pool_name='foo',
snapshot_name='foo-snap1')
self.assertEqual(json.loads(rc), {'exit-code': 0})
@patch.object(ceph_broker, 'rename_pool')
@patch.object(ceph_broker, 'log', lambda *args, **kwargs: None)
def test_rename_pool(self, mock_rename_pool):
reqs = json.dumps({'api-version': 1,
'ops': [{
'op': 'rename-pool',
'name': 'foo',
'new-name': 'foo2',
}]})
mock_rename_pool.return_value = {'exit-code': 0}
rc = ceph_broker.process_requests(reqs)
mock_rename_pool.assert_called_with(service='admin',
old_name='foo',
new_name='foo2')
self.assertEqual(json.loads(rc), {'exit-code': 0})
@patch.object(ceph_broker, 'remove_pool_snapshot')
@patch.object(ceph_broker, 'log', lambda *args, **kwargs: None)
def test_remove_pool_snapshot(self, mock_snapshot_pool):
reqs = json.dumps({'api-version': 1,
'ops': [{
'op': 'remove-pool-snapshot',
'name': 'foo',
'snapshot-name': 'foo-snap1',
}]})
mock_snapshot_pool.return_value = {'exit-code': 0}
rc = ceph_broker.process_requests(reqs)
mock_snapshot_pool.assert_called_with(service='admin',
pool_name='foo',
snapshot_name='foo-snap1')
self.assertEqual(json.loads(rc), {'exit-code': 0})
@patch.object(ceph_broker, 'pool_set')
@patch.object(ceph_broker, 'log', lambda *args, **kwargs: None)
def test_set_pool_value(self, mock_set_pool):
reqs = json.dumps({'api-version': 1,
'ops': [{
'op': 'set-pool-value',
'name': 'foo',
'key': 'size',
'value': 3,
}]})
mock_set_pool.return_value = {'exit-code': 0}
rc = ceph_broker.process_requests(reqs)
mock_set_pool.assert_called_with(service='admin',
pool_name='foo',
key='size',
value=3)
self.assertEqual(json.loads(rc), {'exit-code': 0})
@patch.object(ceph_broker, 'log', lambda *args, **kwargs: None)
def test_set_invalid_pool_value(self):
reqs = json.dumps({'api-version': 1,
'ops': [{
'op': 'set-pool-value',
'name': 'foo',
'key': 'size',
'value': 'abc',
}]})
rc = ceph_broker.process_requests(reqs)
self.assertEqual(json.loads(rc)['exit-code'], 1)
if __name__ == '__main__':
unittest.main()


@@ -1,103 +0,0 @@
import mock
import test_utils
import sys
# python-apt is not installed as part of test-requirements but is imported by
# some charmhelpers modules, so create a fake import.
mock_apt = mock.MagicMock()
sys.modules['apt'] = mock_apt
mock_apt.apt_pkg = mock.MagicMock()
with mock.patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec:
mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f:
lambda *args, **kwargs: f(*args, **kwargs))
import ceph_hooks as hooks
TO_PATCH = [
'status_set',
'config',
'ceph',
'relation_ids',
'relation_get',
'related_units',
'local_unit',
]
NO_PEERS = {
'ceph-mon1': True
}
ENOUGH_PEERS_INCOMPLETE = {
'ceph-mon1': True,
'ceph-mon2': False,
'ceph-mon3': False,
}
ENOUGH_PEERS_COMPLETE = {
'ceph-mon1': True,
'ceph-mon2': True,
'ceph-mon3': True,
}
class ServiceStatusTestCase(test_utils.CharmTestCase):
def setUp(self):
super(ServiceStatusTestCase, self).setUp(hooks, TO_PATCH)
self.config.side_effect = self.test_config.get
self.test_config.set('monitor-count', 3)
self.local_unit.return_value = 'ceph-mon1'
@mock.patch.object(hooks, 'get_peer_units')
def test_assess_status_no_peers(self, _peer_units):
_peer_units.return_value = NO_PEERS
hooks.assess_status()
self.status_set.assert_called_with('blocked', mock.ANY)
@mock.patch.object(hooks, 'get_peer_units')
def test_assess_status_peers_incomplete(self, _peer_units):
_peer_units.return_value = ENOUGH_PEERS_INCOMPLETE
hooks.assess_status()
self.status_set.assert_called_with('waiting', mock.ANY)
@mock.patch.object(hooks, 'get_peer_units')
def test_assess_status_peers_complete_active(self, _peer_units):
_peer_units.return_value = ENOUGH_PEERS_COMPLETE
self.ceph.is_bootstrapped.return_value = True
self.ceph.is_quorum.return_value = True
hooks.assess_status()
self.status_set.assert_called_with('active', mock.ANY)
@mock.patch.object(hooks, 'get_peer_units')
def test_assess_status_peers_complete_down(self, _peer_units):
_peer_units.return_value = ENOUGH_PEERS_COMPLETE
self.ceph.is_bootstrapped.return_value = False
self.ceph.is_quorum.return_value = False
hooks.assess_status()
self.status_set.assert_called_with('blocked', mock.ANY)
def test_get_peer_units_no_peers(self):
self.relation_ids.return_value = ['mon:1']
self.related_units.return_value = []
self.assertEqual({'ceph-mon1': True},
hooks.get_peer_units())
def test_get_peer_units_peers_incomplete(self):
self.relation_ids.return_value = ['mon:1']
self.related_units.return_value = ['ceph-mon2',
'ceph-mon3']
self.relation_get.return_value = None
self.assertEqual({'ceph-mon1': True,
'ceph-mon2': False,
'ceph-mon3': False},
hooks.get_peer_units())
def test_get_peer_units_peers_complete(self):
self.relation_ids.return_value = ['mon:1']
self.related_units.return_value = ['ceph-mon2',
'ceph-mon3']
self.relation_get.side_effect = ['ceph-mon2',
'ceph-mon3']
self.assertEqual({'ceph-mon1': True,
'ceph-mon2': True,
'ceph-mon3': True},
hooks.get_peer_units())


@@ -1,154 +0,0 @@
__author__ = 'chris'
import time
from mock import patch, call, MagicMock
import sys
sys.path.append('/home/chris/repos/ceph-mon/hooks')
import test_utils
# python-apt is not installed as part of test-requirements but is imported by
# some charmhelpers modules, so create a fake import.
mock_apt = MagicMock()
sys.modules['apt'] = mock_apt
mock_apt.apt_pkg = MagicMock()
with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec:
mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f:
lambda *args, **kwargs: f(*args, **kwargs))
import ceph_hooks
TO_PATCH = [
'hookenv',
'status_set',
'config',
'ceph',
'log',
'add_source',
'apt_update',
'apt_install',
'service_stop',
'service_start',
'host',
]
def config_side_effect(*args):
if args[0] == 'source':
return 'cloud:trusty-kilo'
elif args[0] == 'key':
return 'key'
elif args[0] == 'release-version':
return 'cloud:trusty-kilo'
previous_node_start_time = time.time() - (9 * 60)
def monitor_key_side_effect(*args):
if args[1] == \
'ip-192-168-1-2_done':
return False
elif args[1] == \
'ip-192-168-1-2_start':
# Return that the previous node started 9 minutes ago
return previous_node_start_time
class UpgradeRollingTestCase(test_utils.CharmTestCase):
def setUp(self):
super(UpgradeRollingTestCase, self).setUp(ceph_hooks, TO_PATCH)
@patch('ceph_hooks.roll_monitor_cluster')
def test_check_for_upgrade(self, roll_monitor_cluster):
self.host.lsb_release.return_value = {
'DISTRIB_CODENAME': 'trusty',
}
previous_mock = MagicMock().return_value
previous_mock.previous.return_value = "cloud:trusty-juno"
self.hookenv.config.side_effect = [previous_mock,
config_side_effect('source')]
ceph_hooks.check_for_upgrade()
roll_monitor_cluster.assert_called_with('cloud:trusty-kilo')
@patch('ceph_hooks.upgrade_monitor')
@patch('ceph_hooks.monitor_key_set')
def test_lock_and_roll(self, monitor_key_set, upgrade_monitor):
monitor_key_set.return_value = None
ceph_hooks.lock_and_roll(my_name='ip-192-168-1-2')
upgrade_monitor.assert_called_once_with()
def test_upgrade_monitor(self):
self.config.side_effect = config_side_effect
self.ceph.get_version.return_value = "0.80"
self.ceph.systemd.return_value = False
ceph_hooks.upgrade_monitor()
self.service_stop.assert_called_with('ceph-mon-all')
self.service_start.assert_called_with('ceph-mon-all')
self.status_set.assert_has_calls([
call('maintenance', 'Upgrading monitor'),
call('active', '')
])

    @patch('ceph_hooks.lock_and_roll')
@patch('ceph_hooks.wait_on_previous_node')
@patch('ceph_hooks.get_mon_map')
@patch('ceph_hooks.socket')
def test_roll_monitor_cluster_second(self,
socket,
get_mon_map,
wait_on_previous_node,
lock_and_roll):
wait_on_previous_node.return_value = None
socket.gethostname.return_value = "ip-192-168-1-3"
get_mon_map.return_value = {
'monmap': {
'mons': [
{
'name': 'ip-192-168-1-2',
},
{
'name': 'ip-192-168-1-3',
},
]
}
}
ceph_hooks.roll_monitor_cluster('0.94.1')
self.status_set.assert_called_with(
'blocked',
'Waiting on ip-192-168-1-2 to finish upgrading')
lock_and_roll.assert_called_with(my_name="ip-192-168-1-3")
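
    # Note: roll_monitor_cluster takes the upgrade order from the monmap, so
    # ip-192-168-1-3, listed second above, must wait on ip-192-168-1-2.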

    @patch.object(ceph_hooks, 'time')
    @patch('ceph_hooks.monitor_key_get')
    @patch('ceph_hooks.monitor_key_exists')
    def test_wait_on_previous_node(self, monitor_key_exists, monitor_key_get,
                                   mock_time):
        tval = [previous_node_start_time]

def fake_time():
tval[0] += 100
return tval[0]
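
        # Each call to the patched time.time() advances the fake clock by
        # 100 seconds; the final assertion below verifies that 700 seconds
        # (seven calls) elapsed before wait_on_previous_node returned.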
mock_time.time.side_effect = fake_time
monitor_key_get.side_effect = monitor_key_side_effect
monitor_key_exists.return_value = False
ceph_hooks.wait_on_previous_node("ip-192-168-1-2")
# Make sure we checked to see if the previous node started
monitor_key_get.assert_has_calls(
[call('admin', 'ip-192-168-1-2_start')]
)
# Make sure we checked to see if the previous node was finished
monitor_key_exists.assert_has_calls(
[call('admin', 'ip-192-168-1-2_done')]
)
        # Make sure we waited at least once before proceeding
        self.log.assert_has_calls([
            call('Previous node is: ip-192-168-1-2'),
            call('ip-192-168-1-2 is not finished. Waiting'),
        ])
self.assertEqual(tval[0], previous_node_start_time + 700)

View File

@ -1,121 +0,0 @@
import logging
import os
import unittest

from contextlib import contextmanager

import yaml

from mock import patch, MagicMock


def load_config():
    '''
    Walk backwards from __file__ looking for config.yaml, load and return
    the 'options' section.
    '''
    config = None
    f = __file__
    while config is None:
        d = os.path.dirname(f)
        if os.path.isfile(os.path.join(d, 'config.yaml')):
            config = os.path.join(d, 'config.yaml')
            break
        if d == f:
            # Hit the filesystem root without finding config.yaml; bail out
            # instead of looping forever.
            break
        f = d
    if not config:
        logging.error('Could not find config.yaml in any parent directory '
                      'of %s.' % f)
        raise Exception('Could not find config.yaml')
    with open(config) as config_file:
        return yaml.safe_load(config_file.read())['options']


def get_default_config():
    '''
    Load default charm config from config.yaml and return it as a dict.
    If no default is set in config.yaml, the value is None.
    '''
    default_config = {}
    config = load_config()
    for k, v in config.iteritems():
        if 'default' in v:
            default_config[k] = v['default']
        else:
            default_config[k] = None
    return default_config
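
# The result is a flat mapping of option name to default value, e.g. (a
# sketch) {'fsid': None, 'source': None, ...}.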


class CharmTestCase(unittest.TestCase):

    def setUp(self, obj, patches):
        super(CharmTestCase, self).setUp()
        self.patches = patches
        self.obj = obj
        self.test_config = TestConfig()
        self.test_relation = TestRelation()
        self.patch_all()

    def patch(self, method):
        _m = patch.object(self.obj, method)
        mock = _m.start()
        self.addCleanup(_m.stop)
        return mock

    def patch_all(self):
        for method in self.patches:
            setattr(self, method, self.patch(method))
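
# Typical usage (a sketch mirroring UpgradeRollingTestCase above): a subclass
# hands setUp() the module under test and the names to stub; each name then
# becomes a mock available as self.<name>:
#
#     class MyTestCase(CharmTestCase):
#         def setUp(self):
#             super(MyTestCase, self).setUp(ceph_hooks, TO_PATCH)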


class TestConfig(object):

    def __init__(self):
        self.config = get_default_config()

    def get(self, attr=None):
        if not attr:
            return self.get_all()
        try:
            return self.config[attr]
        except KeyError:
            return None

    def get_all(self):
        return self.config

    def set(self, attr, value):
        if attr not in self.config:
            # Only options declared in config.yaml may be set.
            raise KeyError(attr)
        self.config[attr] = value


class TestRelation(object):

    def __init__(self, relation_data=None):
        # Avoid a mutable default argument; each instance gets its own dict.
        self.relation_data = relation_data if relation_data is not None else {}

    def set(self, relation_data):
        self.relation_data = relation_data

    def get(self, attr=None, unit=None, rid=None):
        if attr is None:
            return self.relation_data
        elif attr in self.relation_data:
            return self.relation_data[attr]
        return None
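
# A sketch of intended use: a test seeds relation data, and the code under
# test reads it back through the patched relation helpers:
#
#     self.test_relation.set({'private-address': '192.168.1.2'})
#     self.test_relation.get('private-address')  # -> '192.168.1.2'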


@contextmanager
def patch_open():
    '''Patch open() to allow mocking both open() itself and the file that is
    yielded.

    Yields the mock for "open" and "file", respectively.'''
    mock_open = MagicMock(spec=open)
    mock_file = MagicMock(spec=file)

    @contextmanager
    def stub_open(*args, **kwargs):
        mock_open(*args, **kwargs)
        yield mock_file

    with patch('__builtin__.open', stub_open):
        yield mock_open, mock_file
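
# Example usage (a sketch; the path is illustrative only):
#
#     with patch_open() as (mock_open, mock_file):
#         mock_file.read.return_value = 'contents'
#         code_under_test()
#         mock_open.assert_called_once_with('/etc/ceph/ceph.conf')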