Redesign cluster buildup process

In order to fix bug #1756928 the whole cluster buildup process needed to
be redesigned. The assumptions about what is_bootstrapped and clustered
mean, and when to restart on configuration change, needed to be
re-evaluated.

The timing of restarts needed to be protected to avoid collisions.
Only bootstrapped hosts should go into the
wsrep_cluster_address=gcomm:// setting. Adding or removing units should
be handled gracefully. Starting with a single unit and expanding to a
cluster must work.

This change guarantees mysqld is restarted when the configuration file
changes, and it meets all of the above requirements. As a consequence of
the redesign, the workload status now more accurately reflects the state
of the unit.

Charm-helpers sync to bring in distributed_wait fix.

Closes-Bug: #1756308
Closes-Bug: #1756928
Change-Id: I0742e6889b32201806cec6a0b5835e11a8027567
David Ames 2018-03-19 11:25:56 -07:00
parent 9cac8b8521
commit 801c2e7829
39 changed files with 3939 additions and 313 deletions

.gitignore

@ -9,4 +9,5 @@ __pycache__
.testrepository
.tox
.stestr
.unit-state.db
func-results.json


@ -3,4 +3,5 @@ destination: tests/charmhelpers
include:
- contrib.amulet
- contrib.openstack.amulet
- core.hookenv
- core
- osplatform


@ -371,6 +371,7 @@ def distributed_wait(modulo=None, wait=None, operation_name='operation'):
''' Distribute operations by waiting based on modulo_distribution
If modulo and/or wait are not set, check config_get for those values.
If config values are not set, default to modulo=3 and wait=30.
:param modulo: int The modulo number creates the group distribution
:param wait: int The constant time wait value
@ -382,10 +383,17 @@ def distributed_wait(modulo=None, wait=None, operation_name='operation'):
:side effect: Calls time.sleep()
'''
if modulo is None:
modulo = config_get('modulo-nodes')
modulo = config_get('modulo-nodes') or 3
if wait is None:
wait = config_get('known-wait')
calculated_wait = modulo_distribution(modulo=modulo, wait=wait)
wait = config_get('known-wait') or 30
if juju_is_leader():
# The leader should never wait
calculated_wait = 0
else:
# non_zero_wait=True guarantees the non-leader who gets modulo 0
# will still wait
calculated_wait = modulo_distribution(modulo=modulo, wait=wait,
non_zero_wait=True)
msg = "Waiting {} seconds for {} ...".format(calculated_wait,
operation_name)
log(msg, DEBUG)
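
For illustration, a minimal sketch of how a charm might wrap this helper, assuming a Juju hook environment, this charm's modulo-nodes/known-wait config options, and the contrib.hahelpers.cluster import path; the wrapper name is illustrative:

from charmhelpers.contrib.hahelpers.cluster import distributed_wait
from charmhelpers.core.hookenv import config

def cluster_wait():
    # Spread restarts across peer units so they do not collide; the
    # leader returns immediately inside the helper.
    distributed_wait(modulo=config('modulo-nodes'),
                     wait=config('known-wait'),
                     operation_name='mysql restart')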


@ -182,7 +182,7 @@ SWIFT_CODENAMES = OrderedDict([
('pike',
['2.13.0', '2.15.0']),
('queens',
['2.16.0']),
['2.16.0', '2.17.0']),
])
# >= Liberty version->codename mapping


@ -151,3 +151,32 @@ def extend_logical_volume_by_device(lv_name, block_device):
'''
cmd = ['lvextend', lv_name, block_device]
check_call(cmd)
def create_logical_volume(lv_name, volume_group, size=None):
'''
Create a new logical volume in an existing volume group
:param lv_name: str: name of logical volume to be created.
:param volume_group: str: Name of volume group to use for the new volume.
:param size: str: Size of logical volume to create (100% if not supplied)
:raises subprocess.CalledProcessError: in the event that the lvcreate fails.
'''
if size:
check_call([
'lvcreate',
'--yes',
'-L',
'{}'.format(size),
'-n', lv_name, volume_group
])
# create the lv with all the space available; this branch is needed
# because the lvcreate invocation is different in that case
else:
check_call([
'lvcreate',
'--yes',
'-l',
'100%FREE',
'-n', lv_name, volume_group
])
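
Example usage (volume and group names hypothetical; requires root and an existing volume group):

create_logical_volume('mysql-data', 'vg0', size='10G')  # fixed-size volume
create_logical_volume('mysql-binlogs', 'vg0')           # take all remaining space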


@ -27,6 +27,7 @@ import glob
import os
import json
import yaml
import re
import subprocess
import sys
import errno
@ -67,7 +68,7 @@ def cached(func):
@wraps(func)
def wrapper(*args, **kwargs):
global cache
key = str((func, args, kwargs))
key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
try:
return cache[key]
except KeyError:
@ -1043,7 +1044,6 @@ def juju_version():
universal_newlines=True).strip()
@cached
def has_juju_version(minimum_version):
"""Return True if the Juju version is at least the provided version"""
return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
@ -1103,6 +1103,8 @@ def _run_atexit():
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def network_get_primary_address(binding):
'''
Deprecated since Juju 2.3; use network_get()
Retrieve the primary network address for a named binding
:param binding: string. The name of a relation of extra-binding
@ -1123,7 +1125,6 @@ def network_get_primary_address(binding):
return response
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def network_get(endpoint, relation_id=None):
"""
Retrieve the network details for a relation endpoint
@ -1131,24 +1132,20 @@ def network_get(endpoint, relation_id=None):
:param endpoint: string. The name of a relation endpoint
:param relation_id: int. The ID of the relation for the current context.
:return: dict. The loaded YAML output of the network-get query.
:raise: NotImplementedError if run on Juju < 2.1
:raise: NotImplementedError if request not supported by the Juju version.
"""
if not has_juju_version('2.2'):
raise NotImplementedError(juju_version()) # earlier versions require --primary-address
if relation_id and not has_juju_version('2.3'):
raise NotImplementedError # 2.3 added the -r option
cmd = ['network-get', endpoint, '--format', 'yaml']
if relation_id:
cmd.append('-r')
cmd.append(relation_id)
try:
response = subprocess.check_output(
cmd,
stderr=subprocess.STDOUT).decode('UTF-8').strip()
except CalledProcessError as e:
# Early versions of Juju 2.0.x required the --primary-address argument.
# We catch that condition here and raise NotImplementedError since
# the requested semantics are not available - the caller can then
# use the network_get_primary_address() method instead.
if '--primary-address is currently required' in e.output.decode('UTF-8'):
raise NotImplementedError
raise
response = subprocess.check_output(
cmd,
stderr=subprocess.STDOUT).decode('UTF-8').strip()
return yaml.safe_load(response)
@ -1204,9 +1201,23 @@ def iter_units_for_relation_name(relation_name):
def ingress_address(rid=None, unit=None):
"""
Retrieve the ingress-address from a relation when available. Otherwise,
return the private-address. This function is to be used on the consuming
side of the relation.
Retrieve the ingress-address from a relation when available.
Otherwise, return the private-address.
When used on the consuming side of the relation (unit is a remote
unit), the ingress-address is the IP address that this unit needs
to use to reach the provided service on the remote unit.
When used on the providing side of the relation (unit == local_unit()),
the ingress-address is the IP address that is advertised to remote
units on this relation. Remote units need to use this address to
reach the local provided service on this unit.
Note that charms may document some other method to use in
preference to the ingress_address(), such as an address provided
on a different relation attribute or a service discovery mechanism.
This allows charms to redirect inbound connections to their peers
or different applications such as load balancers.
Usage:
addresses = [ingress_address(rid=u.rid, unit=u.unit)
@ -1220,3 +1231,40 @@ def ingress_address(rid=None, unit=None):
settings = relation_get(rid=rid, unit=unit)
return (settings.get('ingress-address') or
settings.get('private-address'))
def egress_subnets(rid=None, unit=None):
"""
Retrieve the egress-subnets from a relation.
This function is to be used on the providing side of the
relation, and provides the ranges of addresses that client
connections may come from. The result is uninteresting on
the consuming side of a relation (unit == local_unit()).
Returns a stable list of subnets in CIDR format.
eg. ['192.168.1.0/24', '2001::F00F/128']
If egress-subnets is not available, falls back to using the published
ingress-address, or finally private-address.
:param rid: string relation id
:param unit: string unit name
:side effect: calls relation_get
:return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
"""
def _to_range(addr):
if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
addr += '/32'
elif ':' in addr and '/' not in addr: # IPv6
addr += '/128'
return addr
settings = relation_get(rid=rid, unit=unit)
if 'egress-subnets' in settings:
return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
if 'ingress-address' in settings:
return [_to_range(settings['ingress-address'])]
if 'private-address' in settings:
return [_to_range(settings['private-address'])]
return [] # Should never happen
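
For illustration, the CIDR normalisation performed by _to_range, extracted standalone (addresses hypothetical):

import re

def _to_range(addr):
    # Bare addresses gain a host-sized prefix; existing CIDRs pass through.
    if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
        addr += '/32'
    elif ':' in addr and '/' not in addr:  # IPv6
        addr += '/128'
    return addr

assert _to_range('10.0.0.10') == '10.0.0.10/32'
assert _to_range('2001::F00F') == '2001::F00F/128'
assert _to_range('192.168.1.0/24') == '192.168.1.0/24'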


@ -993,7 +993,7 @@ def updatedb(updatedb_text, new_path):
return output
def modulo_distribution(modulo=3, wait=30):
def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
""" Modulo distribution
This helper uses the unit number, a modulo value and a constant wait time
@ -1015,7 +1015,14 @@ def modulo_distribution(modulo=3, wait=30):
@param modulo: int The modulo number creates the group distribution
@param wait: int The constant time wait value
@param non_zero_wait: boolean Override unit % modulo == 0,
return modulo * wait. Used to avoid collisions with
leader nodes which are often given priority.
@return: int Calculated time to wait for unit operation
"""
unit_number = int(local_unit().split('/')[1])
return (unit_number % modulo) * wait
calculated_wait_time = (unit_number % modulo) * wait
if non_zero_wait and calculated_wait_time == 0:
return modulo * wait
else:
return calculated_wait_time
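
A worked example of the arithmetic, re-implemented standalone for illustration (unit numbers hypothetical):

def example_wait(unit_number, modulo=3, wait=30, non_zero_wait=False):
    # Same calculation as modulo_distribution, minus the Juju lookup.
    calculated = (unit_number % modulo) * wait
    if non_zero_wait and calculated == 0:
        return modulo * wait
    return calculated

# Units 0..3 wait 90, 30, 60 and 90 seconds respectively, so the
# modulo-0 group no longer restarts at the same instant as the leader.
assert [example_wait(n, non_zero_wait=True) for n in range(4)] == [90, 30, 60, 90]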


@ -313,17 +313,26 @@ class PortManagerCallback(ManagerCallback):
with open(port_file) as fp:
old_ports = fp.read().split(',')
for old_port in old_ports:
if bool(old_port):
old_port = int(old_port)
if old_port not in new_ports:
hookenv.close_port(old_port)
if bool(old_port) and not self.ports_contains(old_port, new_ports):
hookenv.close_port(old_port)
with open(port_file, 'w') as fp:
fp.write(','.join(str(port) for port in new_ports))
for port in new_ports:
# A port is either a number or 'ICMP'
protocol = 'TCP'
if str(port).upper() == 'ICMP':
protocol = 'ICMP'
if event_name == 'start':
hookenv.open_port(port)
hookenv.open_port(port, protocol)
elif event_name == 'stop':
hookenv.close_port(port)
hookenv.close_port(port, protocol)
def ports_contains(self, port, ports):
if not bool(port):
return False
if str(port).upper() != 'ICMP':
port = int(port)
return port in ports
def service_stop(service_name):
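
For illustration, the comparison ports_contains performs, extracted standalone (port values hypothetical):

def ports_contains(port, ports):
    # Entries read back from the ports file are strings; a port is
    # either a number or the literal 'ICMP', so normalise first.
    if not bool(port):
        return False
    if str(port).upper() != 'ICMP':
        port = int(port)
    return port in ports

assert ports_contains('8080', [8080, 'ICMP'])   # numeric string matches int
assert ports_contains('ICMP', [8080, 'ICMP'])   # protocol entry matches
assert not ports_contains('', [8080, 'ICMP'])   # empty entries are skipped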


@ -317,4 +317,4 @@ options:
For very busy clouds or in resource restricted environments this value can be changed.
WARNING Please read all documentation before changing the default value which may have
unintended consequences. It may be necessary to set this value higher during deploy time
(PTS15) and subsequently change it back to the default (PT3S) after deployment.
(PT15S) and subsequently change it back to the default (PT3S) after deployment.


@ -4,7 +4,6 @@ import sys
import json
import os
import socket
import time
from charmhelpers.core.hookenv import (
Hooks, UnregisteredHookError,
@ -12,7 +11,6 @@ from charmhelpers.core.hookenv import (
log,
relation_get,
relation_set,
relation_id,
relation_ids,
related_units,
unit_get,
@ -32,8 +30,8 @@ from charmhelpers.core.hookenv import (
)
from charmhelpers.core.host import (
service_restart,
service_start,
service_running,
service_stop,
file_hash,
lsb_release,
CompareHostReleases,
@ -91,6 +89,8 @@ from percona_utils import (
install_mysql_ocf,
notify_bootstrapped,
is_bootstrapped,
clustered_once,
INITIAL_CLUSTERED_KEY,
is_leader_bootstrapped,
get_wsrep_value,
assess_status,
@ -168,7 +168,7 @@ def install():
install_percona_xtradb_cluster()
def render_config(clustered=False, hosts=None):
def render_config(hosts=None):
if hosts is None:
hosts = []
@ -179,7 +179,6 @@ def render_config(clustered=False, hosts=None):
context = {
'cluster_name': 'juju_cluster',
'private_address': get_cluster_host_ip(),
'clustered': clustered,
'cluster_hosts': ",".join(hosts),
'sst_method': config('sst-method'),
'sst_password': sst_password(),
@ -223,7 +222,7 @@ def render_config(clustered=False, hosts=None):
render(os.path.basename(config_file), config_file, context, perms=0o444)
def render_config_restart_on_changed(clustered, hosts, bootstrap=False):
def render_config_restart_on_changed(hosts, bootstrap=False):
"""Render mysql config and restart mysql service if file changes as a
result.
@ -235,13 +234,9 @@ def render_config_restart_on_changed(clustered, hosts, bootstrap=False):
it is started so long as the new node to be added is guaranteed to have
been restarted so as to apply the new config.
"""
if not is_leader() and not is_bootstrapped():
log('Non-leader waiting on leader bootstrap, skipping render',
DEBUG)
return
config_file = resolve_cnf_file()
pre_hash = file_hash(config_file)
render_config(clustered, hosts)
render_config(hosts)
create_binlogs_directory()
update_db_rels = False
if file_hash(config_file) != pre_hash or bootstrap:
@ -251,37 +246,28 @@ def render_config_restart_on_changed(clustered, hosts, bootstrap=False):
# relation id exists yet.
notify_bootstrapped()
update_db_rels = True
elif not service_running('mysql@bootstrap'):
else:
# NOTE(jamespage):
# if mysql@bootstrap is running, then the native
# bootstrap systemd service was used to start this
# instance, and it was the initial seed unit
# so don't try start the mysql.service unit;
# this also deals with seed units after they have been
# rebooted and mysqld was started by mysql.service.
delay = 1
# stop the bootstrap version before restarting normal mysqld
if service_running('mysql@bootstrap'):
service_stop('mysql@bootstrap')
attempts = 0
max_retries = 5
# NOTE(dosaboy): avoid unnecessary restarts. Once mysql is started
# it needn't be restarted when new units join the cluster since the
# new units will join and apply their own config.
if not seeded():
action = service_restart
# If we are restarting avoid simultaneous restart collisions
cluster_wait()
else:
action = service_start
while not action('mysql'):
cluster_wait()
while not service_restart('mysql'):
if attempts == max_retries:
raise Exception("Failed to start mysql (max retries "
"reached)")
log("Failed to start mysql - retrying in %ss" % (delay),
log("Failed to start mysql - retrying per distributed wait",
WARNING)
time.sleep(delay)
delay += 2
attempts += 1
cluster_wait()
# If we get here we assume prior actions have succeeded to always
# ensure this unit is marked as seeded so that subsequent calls don't result
@ -330,6 +316,13 @@ def upgrade():
if not leader_get('root-password') and leader_get('mysql.passwd'):
leader_set(**{'root-password': leader_get('mysql.passwd')})
# On upgrade-charm we assume the cluster was complete at some point
kvstore = kv()
initial_clustered = kvstore.get(INITIAL_CLUSTERED_KEY, False)
if not initial_clustered:
kvstore.set(key=INITIAL_CLUSTERED_KEY, value=True)
kvstore.flush()
# broadcast the bootstrap-uuid
wsrep_ready = get_wsrep_value('wsrep_ready') or ""
if wsrep_ready.lower() in ['on', 'ready']:
@ -368,47 +361,44 @@ def config_changed():
assert_charm_supports_ipv6()
hosts = get_cluster_hosts()
clustered = len(hosts) > 1
bootstrapped = is_bootstrapped()
leader_bootstrapped = is_leader_bootstrapped()
leader_ip = leader_get('leader-ip')
# Handle Edge Cases
if not is_leader():
# Fix Bug #1738896
# Speed up cluster process
if not clustered and leader_bootstrapped:
clustered = True
bootstrapped = True
hosts = [leader_ip]
# Fix gcomm timeout to non-bootstrapped node
if hosts and leader_ip not in hosts:
hosts = [leader_ip] + hosts
# NOTE: only configure the cluster if we have sufficient peers. This only
# applies if min-cluster-size is provided and is used to avoid extraneous
# configuration changes and premature bootstrapping as the cluster is
# deployed.
if is_leader():
# If the cluster has not been fully bootstrapped once yet, use an empty
# hosts list to avoid restarting the leader node's mysqld during
# cluster buildup.
# After the cluster has bootstrapped at least one time, it is much
# less likely to have restart collisions. It is then safe to use the
# full hosts list and have the leader node's mysqld restart.
if not clustered_once():
hosts = []
log("Leader unit - bootstrap required=%s" % (not leader_bootstrapped),
DEBUG)
render_config_restart_on_changed(clustered, hosts,
render_config_restart_on_changed(hosts,
bootstrap=not leader_bootstrapped)
elif bootstrapped:
log("Cluster is bootstrapped - configuring mysql on this node",
elif leader_bootstrapped:
# Speed up cluster process by bootstrapping when the leader has
# bootstrapped
if leader_ip not in hosts:
# Fix Bug #1738896
hosts = [leader_ip] + hosts
log("Leader is bootstrapped - configuring mysql on this node",
DEBUG)
render_config_restart_on_changed(clustered, hosts)
# Rendering the mysqld.cnf and restarting is bootstrapping for a
# non-leader node.
render_config_restart_on_changed(hosts)
# Assert we are bootstrapped. This will throw an
# InconsistentUUIDError exception if UUIDs do not match.
update_bootstrap_uuid()
else:
log("Not configuring", DEBUG)
if bootstrapped:
try:
update_bootstrap_uuid()
except LeaderNoBootstrapUUIDError:
# until the bootstrap-uuid attribute is not replicated
# cluster_ready() will evaluate to False, so it is necessary to
# feed back this info to the user.
status_set('waiting', "Waiting for bootstrap-uuid set by leader")
# Until the bootstrap-uuid attribute is set by the leader,
# cluster_ready() will evaluate to False. So it is necessary to
# feed this information to the user.
status_set('waiting', "Waiting for bootstrap-uuid set by leader")
log('Non-leader waiting on leader bootstrap, skipping render',
DEBUG)
return
# Notify any changes to the access network
update_client_db_relations()
@ -427,7 +417,7 @@ def config_changed():
# the password needs to be updated only if the node was already
# bootstrapped
if bootstrapped:
if is_bootstrapped():
update_root_password()
@ -446,12 +436,6 @@ def cluster_joined():
level=INFO)
relation_set(relation_settings=relation_settings)
# Ensure all new peers are aware
cluster_state_uuid = leader_get('bootstrap-uuid')
if cluster_state_uuid:
notify_bootstrapped(cluster_rid=relation_id(),
cluster_uuid=cluster_state_uuid)
@hooks.hook('cluster-relation-departed')
@hooks.hook('cluster-relation-changed')


@ -39,6 +39,7 @@ from charmhelpers.core.hookenv import (
leader_get,
leader_set,
)
from charmhelpers.core.unitdata import kv
from charmhelpers.fetch import (
apt_install,
filter_installed_packages,
@ -76,6 +77,7 @@ deb-src http://repo.percona.com/apt {release} main"""
SEEDED_MARKER = "{data_dir}/seeded"
HOSTS_FILE = '/etc/hosts'
DEFAULT_MYSQL_PORT = 3306
INITIAL_CLUSTERED_KEY = 'initial-cluster-complete'
# NOTE(ajkavanagh) - this is 'required' for the pause/resume code for
# maintenance mode, but is currently not populated as the
@ -217,6 +219,20 @@ def is_sufficient_peers():
def get_cluster_hosts():
"""Get the bootstrapped cluster peers
Determine the cluster peers that have bootstrapped and return the list
of hosts. Secondarily, update the hosts file with IPv6 address name
resolution.
The returned host list is intended to be used in the
wsrep_cluster_address=gcomm:// setting. Therefore, the hosts must have
already been bootstrapped. If an un-bootstrapped host happens to be first
in the list, mysql will fail to start.
@side_effect update_hosts_file called for IPv6 hostname resolution
@returns list of hosts
"""
hosts_map = {}
local_cluster_address = get_cluster_host_ip()
@ -227,7 +243,7 @@ def get_cluster_hosts():
addr = get_ipv6_addr(exc_list=[config('vip')], fatal=True)[0]
hosts_map = {addr: socket.gethostname()}
hosts = [local_cluster_address]
hosts = []
for relid in relation_ids('cluster'):
for unit in related_units(relid):
rdata = relation_get(unit=unit, rid=relid)
@ -247,13 +263,26 @@ def get_cluster_hosts():
level=DEBUG)
hosts_map[cluster_address] = hostname
hosts.append(hostname)
host = hostname
else:
hosts.append(resolve_hostname_to_ip(cluster_address))
host = resolve_hostname_to_ip(cluster_address)
# Add only cluster peers who have set bootstrap-uuid
# An indication that they themselves are bootstrapped.
# Un-bootstrapped hosts in gcomm cause mysql to fail to start
# if one happens to be the first address in the list.
# Also fix strange bug when executed from actions where the local
# unit is returned in related_units. We do not want the local IP
# in the gcom hosts list.
if (rdata.get('bootstrap-uuid') and
host not in hosts and
host != local_cluster_address):
hosts.append(host)
if hosts_map:
update_hosts_file(hosts_map)
# Return a sorted list to avoid unnecessary restarts
hosts.sort()
return hosts
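
The sorted, bootstrapped-only list is what ends up in the gcomm URL; a sketch of the rendered setting (addresses hypothetical):

hosts = ['10.0.0.11', '10.0.0.12']  # only peers that have set bootstrap-uuid
setting = 'wsrep_cluster_address=gcomm://' + ','.join(hosts)
# -> wsrep_cluster_address=gcomm://10.0.0.11,10.0.0.12
# An empty list degenerates to the bare gcomm:// used to bootstrap a
# brand new cluster.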
@ -443,28 +472,83 @@ def is_leader_bootstrapped():
return True
def is_bootstrapped():
""" Check that this unit is bootstrapped
def clustered_once():
"""Determine if the cluster has ever bootstrapped completely
Check the unitdata kv store to determine whether the cluster has
bootstrapped at least once.
@returns boolean
"""
uuids = []
rids = relation_ids('cluster') or []
for rid in rids:
units = related_units(rid)
units.append(local_unit())
for unit in units:
id = relation_get('bootstrap-uuid', unit=unit, rid=rid)
if id:
uuids.append(id)
if uuids:
if len(set(uuids)) > 1:
log("Found inconsistent bootstrap uuids - %s" % (uuids), WARNING)
# Run is_bootstrapped once to guarantee kvstore is up to date
is_bootstrapped()
kvstore = kv()
return kvstore.get(INITIAL_CLUSTERED_KEY, False)
return True
return False
def is_bootstrapped():
"""Determine if each node in the cluster has been bootstrapped and the
cluster is complete with the expected number of peers.
Check that each node in the cluster, including this one, has set
bootstrap-uuid on the cluster relation.
Having min-cluster-size set will guarantee is_bootstrapped will not
return True until the expected number of peers are bootstrapped. If
min-cluster-size is not set, it will check peer relations to estimate the
expected cluster size. If min-cluster-size is not set and there are no
peers it must assume the cluster is bootstrapped in order to allow for
single unit deployments.
@returns boolean
"""
min_size = config('min-cluster-size')
if not min_size:
units = 1
for relation_id in relation_ids('cluster'):
units += len(related_units(relation_id))
min_size = units
if not is_sufficient_peers():
return False
elif min_size > 1:
uuids = []
for relation_id in relation_ids('cluster'):
units = related_units(relation_id) or []
units.append(local_unit())
for unit in units:
if not relation_get(attribute='bootstrap-uuid',
rid=relation_id,
unit=unit):
log("{} is not yet clustered".format(unit),
DEBUG)
return False
else:
bootstrap_uuid = relation_get(attribute='bootstrap-uuid',
rid=relation_id,
unit=unit)
if bootstrap_uuid:
uuids.append(bootstrap_uuid)
if len(uuids) < min_size:
log("Fewer than minimum cluster size: "
"{} percona units reporting clustered".format(min_size),
DEBUG)
return False
elif len(set(uuids)) > 1:
raise Exception("Found inconsistent bootstrap uuids: "
"{}".format((uuids)))
else:
log("All {} percona units reporting clustered".format(min_size),
DEBUG)
# Set INITIAL_CLUSTERED_KEY as the cluster has fully bootstrapped
kvstore = kv()
if not kvstore.get(INITIAL_CLUSTERED_KEY, False):
kvstore.set(key=INITIAL_CLUSTERED_KEY, value=True)
kvstore.flush()
return True
def bootstrap_pxc():
@ -606,12 +690,14 @@ def charm_check_func():
# and has the required peers
if not is_bootstrapped():
return ('waiting', 'Unit waiting for cluster bootstrap')
elif is_bootstrapped():
elif cluster_ready():
try:
_cluster_in_sync()
return ('active', 'Unit is ready and clustered')
except DesyncedException:
return ('blocked', 'Unit is not in sync')
else:
return ('waiting', 'Unit waiting on hacluster relation')
else:
return ('active', 'Unit is ready')
@ -763,15 +849,14 @@ def get_cluster_host_ip():
def cluster_ready():
"""Determine if each node in the cluster is ready and the cluster is
complete with the expected number of peers.
"""Determine if each node in the cluster is ready to respond to client
requests.
Once cluster_ready returns True it is safe to execute client relation
hooks. Having min-cluster-size set will guarantee cluster_ready will not
return True until the expected number of peers are clustered and ready.
hooks.
If min-cluster-size is not set it must assume the cluster is ready in order
to allow for single unit deployments.
If a VIP is set do not return ready until hacluster relationship is
complete.
@returns boolean
"""
@ -780,49 +865,7 @@ def cluster_ready():
DEBUG)
return False
min_size = config('min-cluster-size')
units = 1
for relation_id in relation_ids('cluster'):
units += len(related_units(relation_id))
if not min_size:
min_size = units
if not is_sufficient_peers():
return False
elif min_size > 1:
uuids = []
for relation_id in relation_ids('cluster'):
units = related_units(relation_id) or []
units.append(local_unit())
for unit in units:
if not relation_get(attribute='bootstrap-uuid',
rid=relation_id,
unit=unit):
log("{} is not yet clustered".format(unit),
DEBUG)
return False
else:
bootstrap_uuid = relation_get(attribute='bootstrap-uuid',
rid=relation_id,
unit=unit)
if bootstrap_uuid:
uuids.append(bootstrap_uuid)
if len(uuids) < min_size:
log("Fewer than minimum cluster size: "
"{} percona units reporting clustered".format(min_size),
DEBUG)
return False
elif len(set(uuids)) > 1:
raise Exception("Found inconsistent bootstrap uuids: "
"{}".format((uuids)))
else:
log("All {} percona units reporting clustered".format(min_size),
DEBUG)
return True
log("Must assume this is a single unit returning 'cluster' ready", DEBUG)
return True
return is_bootstrapped()
def client_node_is_ready():


@ -19,13 +19,12 @@ pid_file = /var/run/mysqld/mysqld.pid
# Path to Galera library
wsrep_provider=/usr/lib/libgalera_smm.so
{% if not clustered %}
# Empty gcomm address is being used when cluster is getting bootstrapped
wsrep_cluster_address=gcomm://
{% else %}
# Add address of other cluster nodes here
# Cluster connection URL contains the IPs of node#1, node#2 and node#3
# Empty when bootstrapping the cluster
wsrep_cluster_address=gcomm://{{ cluster_hosts }}
{% endif %}
# In order for Galera to work correctly binlog format should be ROW
binlog_format=ROW

View File

@ -131,13 +131,9 @@ innodb_autoinc_lock_mode = {{ innodb_autoinc_lock_mode }}
wsrep_provider={{ wsrep_provider }}
# Add address of other cluster nodes here
{% if not clustered and is_leader -%}
# Empty gcomm address is being used when cluster is getting bootstrapped
wsrep_cluster_address=gcomm://
{% else -%}
# Cluster connection URL contains the IPs of node#1, node#2 and node#3
# Empty when bootstrapping the cluster
wsrep_cluster_address=gcomm://{{ cluster_hosts }}
{% endif %}
#
# Node address


@ -12,7 +12,6 @@ from charmhelpers.contrib.openstack.amulet.deployment import (
)
from charmhelpers.contrib.amulet.utils import AmuletUtils
PXC_ROOT_PASSWD = 'ubuntu'
@ -107,6 +106,7 @@ class BasicDeployment(OpenStackAmuletDeployment):
self.test_pxc_running()
self.test_bootstrapped_and_clustered()
self.test_bootstrap_uuid_set_in_the_relation()
self.test_restart_on_config_change()
self.test_pause_resume()
if self.ha:
self.test_kill_master()
@ -192,6 +192,7 @@ class BasicDeployment(OpenStackAmuletDeployment):
action_id = self.utils.run_action(unit, "pause")
assert self.utils.wait_on_action(action_id), "Pause action failed."
self.d.sentry.wait()
# Note that is_mysqld_running will print an error message when
# mysqld is not running. This is by design but it looks odd
@ -206,6 +207,7 @@ class BasicDeployment(OpenStackAmuletDeployment):
assert self.utils.status_get(unit)[0] == "active"
assert self.is_mysqld_running(unit=unit), \
"mysqld not running after resume."
self._auto_wait_for_status()
def test_kill_master(self):
'''
@ -349,3 +351,51 @@ class BasicDeployment(OpenStackAmuletDeployment):
" to %s:%s" % (addr,
port))
return False
def resolve_cnf_file(self):
if self._get_openstack_release() < self.xenial_mitaka:
return '/etc/mysql/my.cnf'
else:
return '/etc/mysql/percona-xtradb-cluster.conf.d/mysqld.cnf'
def test_restart_on_config_change(self):
"""Verify that the specified services are restarted when the
config is changed."""
sentry = self.d.sentry['percona-cluster'][0]
juju_service = 'percona-cluster'
# Expected default and alternate values
set_default = {'peer-timeout': 'PT3S'}
set_alternate = {'peer-timeout': 'PT15S'}
# Config file affected by juju set config change
conf_file = self.resolve_cnf_file()
# Services which are expected to restart upon config change
services = {
'mysqld': conf_file,
}
# Make config change, check for service restarts
self.utils.log.debug('Making config change on {}...'
.format(juju_service))
mtime = self.utils.get_sentry_time(sentry)
self.d.configure(juju_service, set_alternate)
self._auto_wait_for_status()
sleep_time = 40
for s, conf_file in services.iteritems():
self.utils.log.debug("Checking that service restarted: {}"
.format(s))
if not self.utils.validate_service_config_changed(
sentry, mtime, s, conf_file, retry_count=5,
retry_sleep_time=sleep_time,
sleep_time=sleep_time):
self.d.configure(juju_service, set_default)
msg = "service {} didn't restart after config change".format(s)
amulet.raise_status(amulet.FAIL, msg=msg)
sleep_time = 0
self.d.configure(juju_service, set_default)
self._auto_wait_for_status()


@ -21,6 +21,9 @@ from collections import OrderedDict
from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment
)
from charmhelpers.contrib.openstack.amulet.utils import (
OPENSTACK_RELEASES_PAIRS
)
DEBUG = logging.DEBUG
ERROR = logging.ERROR
@ -271,11 +274,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
release.
"""
# Must be ordered by OpenStack release (not by Ubuntu release):
(self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty,
self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton,
self.yakkety_newton, self.xenial_ocata, self.zesty_ocata,
self.xenial_pike, self.artful_pike, self.xenial_queens,
self.bionic_queens,) = range(13)
for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS):
setattr(self, os_pair, i)
releases = {
('trusty', None): self.trusty_icehouse,


@ -50,6 +50,13 @@ ERROR = logging.ERROR
NOVA_CLIENT_VERSION = "2"
OPENSTACK_RELEASES_PAIRS = [
'trusty_icehouse', 'trusty_kilo', 'trusty_liberty',
'trusty_mitaka', 'xenial_mitaka', 'xenial_newton',
'yakkety_newton', 'xenial_ocata', 'zesty_ocata',
'xenial_pike', 'artful_pike', 'xenial_queens',
'bionic_queens']
class OpenStackAmuletUtils(AmuletUtils):
"""OpenStack amulet utilities.
@ -63,7 +70,34 @@ class OpenStackAmuletUtils(AmuletUtils):
super(OpenStackAmuletUtils, self).__init__(log_level)
def validate_endpoint_data(self, endpoints, admin_port, internal_port,
public_port, expected):
public_port, expected, openstack_release=None):
"""Validate endpoint data. Pick the correct validator based on
OpenStack release. Expected data should be in the v2 format:
{
'id': id,
'region': region,
'adminurl': adminurl,
'internalurl': internalurl,
'publicurl': publicurl,
'service_id': service_id}
"""
validation_function = self.validate_v2_endpoint_data
xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
if openstack_release and openstack_release >= xenial_queens:
validation_function = self.validate_v3_endpoint_data
expected = {
'id': expected['id'],
'region': expected['region'],
'region_id': 'RegionOne',
'url': self.valid_url,
'interface': self.not_null,
'service_id': expected['service_id']}
return validation_function(endpoints, admin_port, internal_port,
public_port, expected)
def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port,
public_port, expected):
"""Validate endpoint data.
Validate actual endpoint data vs expected endpoint data. The ports
@ -141,7 +175,86 @@ class OpenStackAmuletUtils(AmuletUtils):
if len(found) != expected_num_eps:
return 'Unexpected number of endpoints found'
def validate_svc_catalog_endpoint_data(self, expected, actual):
def convert_svc_catalog_endpoint_data_to_v3(self, ep_data):
"""Convert v2 endpoint data into v3.
{
'service_name1': [
{
'adminURL': adminURL,
'id': id,
'region': region,
'publicURL': publicURL,
'internalURL': internalURL
}],
'service_name2': [
{
'adminURL': adminURL,
'id': id,
'region': region,
'publicURL': publicURL,
'internalURL': internalURL
}],
}
"""
self.log.warn("Endpoint ID and Region ID validation is limited to not "
"null checks after v2 to v3 conversion")
for svc in ep_data.keys():
assert len(ep_data[svc]) == 1, "Unknown data format"
svc_ep_data = ep_data[svc][0]
ep_data[svc] = [
{
'url': svc_ep_data['adminURL'],
'interface': 'admin',
'region': svc_ep_data['region'],
'region_id': self.not_null,
'id': self.not_null},
{
'url': svc_ep_data['publicURL'],
'interface': 'public',
'region': svc_ep_data['region'],
'region_id': self.not_null,
'id': self.not_null},
{
'url': svc_ep_data['internalURL'],
'interface': 'internal',
'region': svc_ep_data['region'],
'region_id': self.not_null,
'id': self.not_null}]
return ep_data
def validate_svc_catalog_endpoint_data(self, expected, actual,
openstack_release=None):
"""Validate service catalog endpoint data. Pick the correct validator
for the OpenStack version. Expected data should be in the v2 format:
{
'service_name1': [
{
'adminURL': adminURL,
'id': id,
'region': region,
'publicURL': publicURL,
'internalURL': internalURL
}],
'service_name2': [
{
'adminURL': adminURL,
'id': id,
'region': region,
'publicURL': publicURL,
'internalURL': internalURL
}],
}
"""
validation_function = self.validate_v2_svc_catalog_endpoint_data
xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
if openstack_release and openstack_release >= xenial_queens:
validation_function = self.validate_v3_svc_catalog_endpoint_data
expected = self.convert_svc_catalog_endpoint_data_to_v3(expected)
return validation_function(expected, actual)
def validate_v2_svc_catalog_endpoint_data(self, expected, actual):
"""Validate service catalog endpoint data.
Validate a list of actual service catalog endpoints vs a list of
@ -350,16 +463,13 @@ class OpenStackAmuletUtils(AmuletUtils):
deployment._auto_wait_for_status()
self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)
def authenticate_cinder_admin(self, keystone_sentry, username,
password, tenant, api_version=2):
def authenticate_cinder_admin(self, keystone, api_version=2):
"""Authenticates admin user with cinder."""
# NOTE(beisner): cinder python client doesn't accept tokens.
keystone_ip = keystone_sentry.info['public-address']
ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8'))
self.log.debug('Authenticating cinder admin...')
_clients = {
1: cinder_client.Client,
2: cinder_clientv2.Client}
return _clients[api_version](username, password, tenant, ept)
return _clients[api_version](session=keystone.session)
def authenticate_keystone(self, keystone_ip, username, password,
api_version=False, admin_port=False,
@ -367,13 +477,36 @@ class OpenStackAmuletUtils(AmuletUtils):
project_domain_name=None, project_name=None):
"""Authenticate with Keystone"""
self.log.debug('Authenticating with keystone...')
port = 5000
if admin_port:
port = 35357
base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
port)
if not api_version or api_version == 2:
ep = base_ep + "/v2.0"
if not api_version:
api_version = 2
sess, auth = self.get_keystone_session(
keystone_ip=keystone_ip,
username=username,
password=password,
api_version=api_version,
admin_port=admin_port,
user_domain_name=user_domain_name,
domain_name=domain_name,
project_domain_name=project_domain_name,
project_name=project_name
)
if api_version == 2:
client = keystone_client.Client(session=sess)
else:
client = keystone_client_v3.Client(session=sess)
# This populates the client.service_catalog
client.auth_ref = auth.get_access(sess)
return client
def get_keystone_session(self, keystone_ip, username, password,
api_version=False, admin_port=False,
user_domain_name=None, domain_name=None,
project_domain_name=None, project_name=None):
"""Return a keystone session object"""
ep = self.get_keystone_endpoint(keystone_ip,
api_version=api_version,
admin_port=admin_port)
if api_version == 2:
auth = v2.Password(
username=username,
password=password,
@ -381,12 +514,7 @@ class OpenStackAmuletUtils(AmuletUtils):
auth_url=ep
)
sess = keystone_session.Session(auth=auth)
client = keystone_client.Client(session=sess)
# This populates the client.service_catalog
client.auth_ref = auth.get_access(sess)
return client
else:
ep = base_ep + "/v3"
auth = v3.Password(
user_domain_name=user_domain_name,
username=username,
@ -397,10 +525,57 @@ class OpenStackAmuletUtils(AmuletUtils):
auth_url=ep
)
sess = keystone_session.Session(auth=auth)
client = keystone_client_v3.Client(session=sess)
# This populates the client.service_catalog
client.auth_ref = auth.get_access(sess)
return client
return (sess, auth)
def get_keystone_endpoint(self, keystone_ip, api_version=None,
admin_port=False):
"""Return keystone endpoint"""
port = 5000
if admin_port:
port = 35357
base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
port)
if api_version == 2:
ep = base_ep + "/v2.0"
else:
ep = base_ep + "/v3"
return ep
def get_default_keystone_session(self, keystone_sentry,
openstack_release=None):
"""Return a keystone session object and client object assuming standard
default settings
Example call in amulet tests:
self.keystone_session, self.keystone = u.get_default_keystone_session(
self.keystone_sentry,
openstack_release=self._get_openstack_release())
The session can then be used to auth other clients:
neutronclient.Client(session=session)
aodh_client.Client(session=session)
etc.
"""
self.log.debug('Authenticating keystone admin...')
api_version = 2
client_class = keystone_client.Client
# 11 => xenial_queens
if openstack_release and openstack_release >= 11:
api_version = 3
client_class = keystone_client_v3.Client
keystone_ip = keystone_sentry.info['public-address']
session, auth = self.get_keystone_session(
keystone_ip,
api_version=api_version,
username='admin',
password='openstack',
project_name='admin',
user_domain_name='admin_domain',
project_domain_name='admin_domain')
client = client_class(session=session)
# This populates the client.service_catalog
client.auth_ref = auth.get_access(session)
return session, client
def authenticate_keystone_admin(self, keystone_sentry, user, password,
tenant=None, api_version=None,


@ -0,0 +1,55 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2014 Canonical Ltd.
#
# Authors:
# Edward Hope-Morley <opentastic@gmail.com>
#
import time
from charmhelpers.core.hookenv import (
log,
INFO,
)
def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
"""If the decorated function raises exception exc_type, allow num_retries
retry attempts before raise the exception.
"""
def _retry_on_exception_inner_1(f):
def _retry_on_exception_inner_2(*args, **kwargs):
retries = num_retries
multiplier = 1
while True:
try:
return f(*args, **kwargs)
except exc_type:
if not retries:
raise
delay = base_delay * multiplier
multiplier += 1
log("Retrying '%s' %d more times (delay=%s)" %
(f.__name__, retries, delay), level=INFO)
retries -= 1
if delay:
time.sleep(delay)
return _retry_on_exception_inner_2
return _retry_on_exception_inner_1
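
Example usage (function and path hypothetical); with base_delay=2 the sleeps grow to 2s, 4s and 6s across the three retries:

@retry_on_exception(num_retries=3, base_delay=2, exc_type=IOError)
def read_state_file(path):
    # An IOError here triggers up to three retries before propagating.
    with open(path) as f:
        return f.read()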


@ -0,0 +1,43 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
import os
import subprocess
def sed(filename, before, after, flags='g'):
"""
Search and replace the given pattern in filename.
:param filename: relative or absolute file path.
:param before: expression to be replaced (see 'man sed')
:param after: expression to replace with (see 'man sed')
:param flags: sed-compatible regex flags; for example, to make
the search and replace case insensitive, specify ``flags="i"``.
The ``g`` flag is always specified regardless, so you do not
need to remember to include it when overriding this parameter.
:returns: If the sed command exit code was zero then return,
otherwise raise CalledProcessError.
"""
expression = r's/{0}/{1}/{2}'.format(before,
after, flags)
return subprocess.check_call(["sed", "-i", "-r", "-e",
expression,
os.path.expanduser(filename)])
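
Example usage (file and pattern hypothetical):

# Replace every occurrence, case-insensitively, in the target file.
sed('/tmp/example.conf', 'debug *= *true', 'debug = false', flags='gi')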


@ -0,0 +1,132 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
class Fstab(io.FileIO):
"""This class extends file in order to implement a file reader/writer
for file `/etc/fstab`
"""
class Entry(object):
"""Entry class represents a non-comment line on the `/etc/fstab` file
"""
def __init__(self, device, mountpoint, filesystem,
options, d=0, p=0):
self.device = device
self.mountpoint = mountpoint
self.filesystem = filesystem
if not options:
options = "defaults"
self.options = options
self.d = int(d)
self.p = int(p)
def __eq__(self, o):
return str(self) == str(o)
def __str__(self):
return "{} {} {} {} {} {}".format(self.device,
self.mountpoint,
self.filesystem,
self.options,
self.d,
self.p)
DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
def __init__(self, path=None):
if path:
self._path = path
else:
self._path = self.DEFAULT_PATH
super(Fstab, self).__init__(self._path, 'rb+')
def _hydrate_entry(self, line):
# NOTE: use split with no arguments to split on any
# whitespace including tabs
return Fstab.Entry(*filter(
lambda x: x not in ('', None),
line.strip("\n").split()))
@property
def entries(self):
self.seek(0)
for line in self.readlines():
line = line.decode('us-ascii')
try:
if line.strip() and not line.strip().startswith("#"):
yield self._hydrate_entry(line)
except ValueError:
pass
def get_entry_by_attr(self, attr, value):
for entry in self.entries:
e_attr = getattr(entry, attr)
if e_attr == value:
return entry
return None
def add_entry(self, entry):
if self.get_entry_by_attr('device', entry.device):
return False
self.write((str(entry) + '\n').encode('us-ascii'))
self.truncate()
return entry
def remove_entry(self, entry):
self.seek(0)
lines = [l.decode('us-ascii') for l in self.readlines()]
found = False
for index, line in enumerate(lines):
if line.strip() and not line.strip().startswith("#"):
if self._hydrate_entry(line) == entry:
found = True
break
if not found:
return False
lines.remove(line)
self.seek(0)
self.write(''.join(lines).encode('us-ascii'))
self.truncate()
return True
@classmethod
def remove_by_mountpoint(cls, mountpoint, path=None):
fstab = cls(path=path)
entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
if entry:
return fstab.remove_entry(entry)
return False
@classmethod
def add(cls, device, mountpoint, filesystem, options=None, path=None):
return cls(path=path).add_entry(Fstab.Entry(device,
mountpoint, filesystem,
options=options))
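
Example usage (device, mount point and scratch path hypothetical; the file passed as path= must already exist because the class opens it 'rb+'):

open('/tmp/fstab.test', 'a').close()  # scratch copy instead of /etc/fstab
Fstab.add('tmpfs', '/mnt/scratch', 'tmpfs', options='size=512m',
          path='/tmp/fstab.test')
Fstab.remove_by_mountpoint('/mnt/scratch', path='/tmp/fstab.test')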


@ -27,6 +27,7 @@ import glob
import os
import json
import yaml
import re
import subprocess
import sys
import errno
@ -67,7 +68,7 @@ def cached(func):
@wraps(func)
def wrapper(*args, **kwargs):
global cache
key = str((func, args, kwargs))
key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
try:
return cache[key]
except KeyError:
@ -1043,7 +1044,6 @@ def juju_version():
universal_newlines=True).strip()
@cached
def has_juju_version(minimum_version):
"""Return True if the Juju version is at least the provided version"""
return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
@ -1103,6 +1103,8 @@ def _run_atexit():
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def network_get_primary_address(binding):
'''
Deprecated since Juju 2.3; use network_get()
Retrieve the primary network address for a named binding
:param binding: string. The name of a relation of extra-binding
@ -1123,7 +1125,6 @@ def network_get_primary_address(binding):
return response
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def network_get(endpoint, relation_id=None):
"""
Retrieve the network details for a relation endpoint
@ -1131,24 +1132,20 @@ def network_get(endpoint, relation_id=None):
:param endpoint: string. The name of a relation endpoint
:param relation_id: int. The ID of the relation for the current context.
:return: dict. The loaded YAML output of the network-get query.
:raise: NotImplementedError if run on Juju < 2.1
:raise: NotImplementedError if request not supported by the Juju version.
"""
if not has_juju_version('2.2'):
raise NotImplementedError(juju_version()) # earlier versions require --primary-address
if relation_id and not has_juju_version('2.3'):
raise NotImplementedError # 2.3 added the -r option
cmd = ['network-get', endpoint, '--format', 'yaml']
if relation_id:
cmd.append('-r')
cmd.append(relation_id)
try:
response = subprocess.check_output(
cmd,
stderr=subprocess.STDOUT).decode('UTF-8').strip()
except CalledProcessError as e:
# Early versions of Juju 2.0.x required the --primary-address argument.
# We catch that condition here and raise NotImplementedError since
# the requested semantics are not available - the caller can then
# use the network_get_primary_address() method instead.
if '--primary-address is currently required' in e.output.decode('UTF-8'):
raise NotImplementedError
raise
response = subprocess.check_output(
cmd,
stderr=subprocess.STDOUT).decode('UTF-8').strip()
return yaml.safe_load(response)
@ -1204,9 +1201,23 @@ def iter_units_for_relation_name(relation_name):
def ingress_address(rid=None, unit=None):
"""
Retrieve the ingress-address from a relation when available. Otherwise,
return the private-address. This function is to be used on the consuming
side of the relation.
Retrieve the ingress-address from a relation when available.
Otherwise, return the private-address.
When used on the consuming side of the relation (unit is a remote
unit), the ingress-address is the IP address that this unit needs
to use to reach the provided service on the remote unit.
When used on the providing side of the relation (unit == local_unit()),
the ingress-address is the IP address that is advertised to remote
units on this relation. Remote units need to use this address to
reach the local provided service on this unit.
Note that charms may document some other method to use in
preference to the ingress_address(), such as an address provided
on a different relation attribute or a service discovery mechanism.
This allows charms to redirect inbound connections to their peers
or different applications such as load balancers.
Usage:
addresses = [ingress_address(rid=u.rid, unit=u.unit)
@ -1220,3 +1231,40 @@ def ingress_address(rid=None, unit=None):
settings = relation_get(rid=rid, unit=unit)
return (settings.get('ingress-address') or
settings.get('private-address'))
def egress_subnets(rid=None, unit=None):
"""
Retrieve the egress-subnets from a relation.
This function is to be used on the providing side of the
relation, and provides the ranges of addresses that client
connections may come from. The result is uninteresting on
the consuming side of a relation (unit == local_unit()).
Returns a stable list of subnets in CIDR format.
eg. ['192.168.1.0/24', '2001::F00F/128']
If egress-subnets is not available, falls back to using the published
ingress-address, or finally private-address.
:param rid: string relation id
:param unit: string unit name
:side effect: calls relation_get
:return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
"""
def _to_range(addr):
if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
addr += '/32'
elif ':' in addr and '/' not in addr: # IPv6
addr += '/128'
return addr
settings = relation_get(rid=rid, unit=unit)
if 'egress-subnets' in settings:
return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
if 'ingress-address' in settings:
return [_to_range(settings['ingress-address'])]
if 'private-address' in settings:
return [_to_range(settings['private-address'])]
return [] # Should never happen

File diff suppressed because it is too large


@ -0,0 +1,72 @@
import subprocess
import yum
import os
from charmhelpers.core.strutils import BasicStringComparator
class CompareHostReleases(BasicStringComparator):
"""Provide comparisons of Host releases.
Use in the form of
if CompareHostReleases(release) > 'trusty':
# do something with mitaka
"""
def __init__(self, item):
raise NotImplementedError(
"CompareHostReleases() is not implemented for CentOS")
def service_available(service_name):
# """Determine whether a system service is available."""
if os.path.isdir('/run/systemd/system'):
cmd = ['systemctl', 'is-enabled', service_name]
else:
cmd = ['service', service_name, 'is-enabled']
return subprocess.call(cmd) == 0
def add_new_group(group_name, system_group=False, gid=None):
cmd = ['groupadd']
if gid:
cmd.extend(['--gid', str(gid)])
if system_group:
cmd.append('-r')
cmd.append(group_name)
subprocess.check_call(cmd)
def lsb_release():
"""Return /etc/os-release in a dict."""
d = {}
with open('/etc/os-release', 'r') as lsb:
for l in lsb:
s = l.split('=')
if len(s) != 2:
continue
d[s[0].strip()] = s[1].strip()
return d
def cmp_pkgrevno(package, revno, pkgcache=None):
"""Compare supplied revno with the revno of the installed package.
* 1 => Installed revno is greater than supplied arg
* 0 => Installed revno is the same as supplied arg
* -1 => Installed revno is less than supplied arg
This function imports YumBase function if the pkgcache argument
is None.
"""
if not pkgcache:
y = yum.YumBase()
packages = y.doPackageLists()
pkgcache = {i.Name: i.version for i in packages['installed']}
pkg = pkgcache[package]
if pkg > revno:
return 1
if pkg < revno:
return -1
return 0


@ -0,0 +1,90 @@
import subprocess
from charmhelpers.core.strutils import BasicStringComparator
UBUNTU_RELEASES = (
'lucid',
'maverick',
'natty',
'oneiric',
'precise',
'quantal',
'raring',
'saucy',
'trusty',
'utopic',
'vivid',
'wily',
'xenial',
'yakkety',
'zesty',
'artful',
'bionic',
)
class CompareHostReleases(BasicStringComparator):
"""Provide comparisons of Ubuntu releases.
Use in the form of
if CompareHostReleases(release) > 'trusty':
# do something with mitaka
"""
_list = UBUNTU_RELEASES
def service_available(service_name):
"""Determine whether a system service is available"""
try:
subprocess.check_output(
['service', service_name, 'status'],
stderr=subprocess.STDOUT).decode('UTF-8')
except subprocess.CalledProcessError as e:
return b'unrecognized service' not in e.output
else:
return True
def add_new_group(group_name, system_group=False, gid=None):
cmd = ['addgroup']
if gid:
cmd.extend(['--gid', str(gid)])
if system_group:
cmd.append('--system')
else:
cmd.extend([
'--group',
])
cmd.append(group_name)
subprocess.check_call(cmd)
def lsb_release():
"""Return /etc/lsb-release in a dict"""
d = {}
with open('/etc/lsb-release', 'r') as lsb:
for l in lsb:
k, v = l.split('=')
d[k.strip()] = v.strip()
return d
def cmp_pkgrevno(package, revno, pkgcache=None):
"""Compare supplied revno with the revno of the installed package.
* 1 => Installed revno is greater than supplied arg
* 0 => Installed revno is the same as supplied arg
* -1 => Installed revno is less than supplied arg
This function imports apt_cache function from charmhelpers.fetch if
the pkgcache argument is None. Be sure to add charmhelpers.fetch if
you call this function, or pass an apt_pkg.Cache() instance.
"""
import apt_pkg
if not pkgcache:
from charmhelpers.fetch import apt_cache
pkgcache = apt_cache()
pkg = pkgcache[package]
return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
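
Example usage (package name and version illustrative):

# Act only when the installed package is at least the given version.
if cmp_pkgrevno('percona-xtradb-cluster-server-5.6', '5.6.21') >= 0:
    pass  # the newer behaviour is safe to rely on here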


@ -0,0 +1,69 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
from charmhelpers.core import fstab
from charmhelpers.core import sysctl
from charmhelpers.core.host import (
add_group,
add_user_to_group,
fstab_mount,
mkdir,
)
from charmhelpers.core.strutils import bytes_from_string
from subprocess import check_output
def hugepage_support(user, group='hugetlb', nr_hugepages=256,
max_map_count=65536, mnt_point='/run/hugepages/kvm',
pagesize='2MB', mount=True, set_shmmax=False):
"""Enable hugepages on system.
Args:
user (str) -- Username to allow access to hugepages to
group (str) -- Group name to own hugepages
nr_hugepages (int) -- Number of pages to reserve
max_map_count (int) -- Number of Virtual Memory Areas a process can own
mnt_point (str) -- Directory to mount hugepages on
pagesize (str) -- Size of hugepages
mount (bool) -- Whether to Mount hugepages
"""
group_info = add_group(group)
gid = group_info.gr_gid
add_user_to_group(user, group)
if max_map_count < 2 * nr_hugepages:
max_map_count = 2 * nr_hugepages
sysctl_settings = {
'vm.nr_hugepages': nr_hugepages,
'vm.max_map_count': max_map_count,
'vm.hugetlb_shm_group': gid,
}
if set_shmmax:
shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
if shmmax_minsize > shmmax_current:
sysctl_settings['kernel.shmmax'] = shmmax_minsize
sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
lfstab = fstab.Fstab()
fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
if fstab_entry:
lfstab.remove_entry(fstab_entry)
entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
lfstab.add_entry(entry)
if mount:
fstab_mount(mnt_point)


@ -0,0 +1,72 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import subprocess
from charmhelpers.osplatform import get_platform
from charmhelpers.core.hookenv import (
log,
INFO
)
__platform__ = get_platform()
if __platform__ == "ubuntu":
from charmhelpers.core.kernel_factory.ubuntu import (
persistent_modprobe,
update_initramfs,
) # flake8: noqa -- ignore F401 for this import
elif __platform__ == "centos":
from charmhelpers.core.kernel_factory.centos import (
persistent_modprobe,
update_initramfs,
) # flake8: noqa -- ignore F401 for this import
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
def modprobe(module, persist=True):
"""Load a kernel module and configure for auto-load on reboot."""
cmd = ['modprobe', module]
log('Loading kernel module %s' % module, level=INFO)
subprocess.check_call(cmd)
if persist:
persistent_modprobe(module)
def rmmod(module, force=False):
"""Remove a module from the linux kernel"""
cmd = ['rmmod']
if force:
cmd.append('-f')
cmd.append(module)
log('Removing kernel module %s' % module, level=INFO)
return subprocess.check_call(cmd)
def lsmod():
"""Shows what kernel modules are currently loaded"""
return subprocess.check_output(['lsmod'],
universal_newlines=True)
def is_module_loaded(module):
"""Checks if a kernel module is already loaded"""
matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
return len(matches) > 0
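A minimal usage sketch, assuming this module's context (the module name below is illustrative):

# Load the bonding module once and persist it across reboots.
if not is_module_loaded('bonding'):
    modprobe('bonding', persist=True)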

View File

@ -0,0 +1,17 @@
import subprocess
import os
def persistent_modprobe(module):
"""Load a kernel module and configure for auto-load on reboot."""
if not os.path.exists('/etc/rc.modules'):
open('/etc/rc.modules', 'a')
os.chmod('/etc/rc.modules', 111)
with open('/etc/rc.modules', 'r+') as modules:
if module not in modules.read():
modules.write('modprobe %s\n' % module)
def update_initramfs(version='all'):
"""Updates an initramfs image."""
return subprocess.check_call(["dracut", "-f", version])

View File

@ -0,0 +1,13 @@
import subprocess
def persistent_modprobe(module):
"""Load a kernel module and configure for auto-load on reboot."""
with open('/etc/modules', 'r+') as modules:
if module not in modules.read():
modules.write(module + "\n")
def update_initramfs(version='all'):
"""Updates an initramfs image."""
return subprocess.check_call(["update-initramfs", "-k", version, "-u"])

View File

@ -0,0 +1,16 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base import * # NOQA
from .helpers import * # NOQA

View File

@ -0,0 +1,360 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
from inspect import getargspec
try:
    from collections.abc import Iterable  # Python >= 3.3
except ImportError:
    from collections import Iterable  # Python 2
from collections import OrderedDict
from charmhelpers.core import host
from charmhelpers.core import hookenv
__all__ = ['ServiceManager', 'ManagerCallback',
'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
'service_restart', 'service_stop']
class ServiceManager(object):
def __init__(self, services=None):
"""
Register a list of services, given their definitions.
Service definitions are dicts in the following formats (all keys except
'service' are optional)::
{
"service": <service name>,
"required_data": <list of required data contexts>,
"provided_data": <list of provided data contexts>,
"data_ready": <one or more callbacks>,
"data_lost": <one or more callbacks>,
"start": <one or more callbacks>,
"stop": <one or more callbacks>,
"ports": <list of ports to manage>,
}
The 'required_data' list should contain dicts of required data (or
dependency managers that act like dicts and know how to collect the data).
Only when all items in the 'required_data' list are populated are the list
of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
information.
The 'provided_data' list should contain relation data providers, most likely
a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
that will indicate a set of data to set on a given relation.
The 'data_ready' value should be either a single callback, or a list of
callbacks, to be called when all items in 'required_data' pass `is_ready()`.
Each callback will be called with the service name as the only parameter.
After all of the 'data_ready' callbacks are called, the 'start' callbacks
are fired.
The 'data_lost' value should be either a single callback, or a list of
callbacks, to be called when a 'required_data' item no longer passes
`is_ready()`. Each callback will be called with the service name as the
only parameter. After all of the 'data_lost' callbacks are called,
the 'stop' callbacks are fired.
The 'start' value should be either a single callback, or a list of
callbacks, to be called when starting the service, after the 'data_ready'
callbacks are complete. Each callback will be called with the service
name as the only parameter. This defaults to
`[host.service_start, services.open_ports]`.
The 'stop' value should be either a single callback, or a list of
callbacks, to be called when stopping the service. If the service is
being stopped because it no longer has all of its 'required_data', this
will be called after all of the 'data_lost' callbacks are complete.
Each callback will be called with the service name as the only parameter.
This defaults to `[services.close_ports, host.service_stop]`.
The 'ports' value should be a list of ports to manage. The default
'start' handler will open the ports after the service is started,
and the default 'stop' handler will close the ports prior to stopping
the service.
Examples:
The following registers an Upstart service called bingod that depends on
a mongodb relation and which runs a custom `db_migrate` function prior to
restarting the service, and a Runit service called spadesd::
manager = services.ServiceManager([
{
'service': 'bingod',
'ports': [80, 443],
'required_data': [MongoRelation(), config(), {'my': 'data'}],
'data_ready': [
services.template(source='bingod.conf'),
services.template(source='bingod.ini',
target='/etc/bingod.ini',
owner='bingo', perms=0400),
],
},
{
'service': 'spadesd',
'data_ready': services.template(source='spadesd_run.j2',
target='/etc/sv/spadesd/run',
perms=0555),
'start': runit_start,
'stop': runit_stop,
},
])
manager.manage()
"""
self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
self._ready = None
self.services = OrderedDict()
for service in services or []:
service_name = service['service']
self.services[service_name] = service
def manage(self):
"""
Handle the current hook by doing The Right Thing with the registered services.
"""
hookenv._run_atstart()
try:
hook_name = hookenv.hook_name()
if hook_name == 'stop':
self.stop_services()
else:
self.reconfigure_services()
self.provide_data()
except SystemExit as x:
if x.code is None or x.code == 0:
hookenv._run_atexit()
hookenv._run_atexit()
def provide_data(self):
"""
Set the relation data for each provider in the ``provided_data`` list.
A provider must have a `name` attribute, which indicates which relation
to set data on, and a `provide_data()` method, which returns a dict of
data to set.
The `provide_data()` method can optionally accept two parameters:
* ``remote_service`` The name of the remote service that the data will
be provided to. The `provide_data()` method will be called once
for each connected service (not unit). This allows the method to
tailor its data to the given service.
* ``service_ready`` Whether or not the service definition had all of
its requirements met, and thus the ``data_ready`` callbacks run.
Note that the ``provided_data`` methods are now called **after** the
``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks
a chance to generate any data necessary for the providing to the remote
services.
"""
for service_name, service in self.services.items():
service_ready = self.is_ready(service_name)
for provider in service.get('provided_data', []):
for relid in hookenv.relation_ids(provider.name):
units = hookenv.related_units(relid)
if not units:
continue
remote_service = units[0].split('/')[0]
argspec = getargspec(provider.provide_data)
if len(argspec.args) > 1:
data = provider.provide_data(remote_service, service_ready)
else:
data = provider.provide_data()
if data:
hookenv.relation_set(relid, data)
def reconfigure_services(self, *service_names):
"""
Update all files for one or more registered services, and,
if ready, optionally restart them.
If no service names are given, reconfigures all registered services.
"""
for service_name in service_names or self.services.keys():
if self.is_ready(service_name):
self.fire_event('data_ready', service_name)
self.fire_event('start', service_name, default=[
service_restart,
manage_ports])
self.save_ready(service_name)
else:
if self.was_ready(service_name):
self.fire_event('data_lost', service_name)
self.fire_event('stop', service_name, default=[
manage_ports,
service_stop])
self.save_lost(service_name)
def stop_services(self, *service_names):
"""
Stop one or more registered services, by name.
If no service names are given, stops all registered services.
"""
for service_name in service_names or self.services.keys():
self.fire_event('stop', service_name, default=[
manage_ports,
service_stop])
def get_service(self, service_name):
"""
Given the name of a registered service, return its service definition.
"""
service = self.services.get(service_name)
if not service:
raise KeyError('Service not registered: %s' % service_name)
return service
def fire_event(self, event_name, service_name, default=None):
"""
Fire a data_ready, data_lost, start, or stop event on a given service.
"""
service = self.get_service(service_name)
callbacks = service.get(event_name, default)
if not callbacks:
return
if not isinstance(callbacks, Iterable):
callbacks = [callbacks]
for callback in callbacks:
if isinstance(callback, ManagerCallback):
callback(self, service_name, event_name)
else:
callback(service_name)
def is_ready(self, service_name):
"""
Determine if a registered service is ready, by checking its 'required_data'.
A 'required_data' item can be any mapping type, and is considered ready
if `bool(item)` evaluates as True.
"""
service = self.get_service(service_name)
reqs = service.get('required_data', [])
return all(bool(req) for req in reqs)
def _load_ready_file(self):
if self._ready is not None:
return
if os.path.exists(self._ready_file):
with open(self._ready_file) as fp:
self._ready = set(json.load(fp))
else:
self._ready = set()
def _save_ready_file(self):
if self._ready is None:
return
with open(self._ready_file, 'w') as fp:
json.dump(list(self._ready), fp)
def save_ready(self, service_name):
"""
Save an indicator that the given service is now data_ready.
"""
self._load_ready_file()
self._ready.add(service_name)
self._save_ready_file()
def save_lost(self, service_name):
"""
Save an indicator that the given service is no longer data_ready.
"""
self._load_ready_file()
self._ready.discard(service_name)
self._save_ready_file()
def was_ready(self, service_name):
"""
Determine if the given service was previously data_ready.
"""
self._load_ready_file()
return service_name in self._ready
class ManagerCallback(object):
"""
Special case of a callback that takes the `ServiceManager` instance
in addition to the service name.
Subclasses should implement `__call__` which should accept three parameters:
* `manager` The `ServiceManager` instance
* `service_name` The name of the service it's being triggered for
* `event_name` The name of the event that this callback is handling
"""
def __call__(self, manager, service_name, event_name):
raise NotImplementedError()
class PortManagerCallback(ManagerCallback):
"""
Callback class that will open or close ports, for use as either
a start or stop action.
"""
def __call__(self, manager, service_name, event_name):
service = manager.get_service(service_name)
new_ports = service.get('ports', [])
port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
if os.path.exists(port_file):
with open(port_file) as fp:
old_ports = fp.read().split(',')
for old_port in old_ports:
if bool(old_port) and not self.ports_contains(old_port, new_ports):
hookenv.close_port(old_port)
with open(port_file, 'w') as fp:
fp.write(','.join(str(port) for port in new_ports))
for port in new_ports:
# A port is either a number or 'ICMP'
protocol = 'TCP'
if str(port).upper() == 'ICMP':
protocol = 'ICMP'
if event_name == 'start':
hookenv.open_port(port, protocol)
elif event_name == 'stop':
hookenv.close_port(port, protocol)
def ports_contains(self, port, ports):
if not bool(port):
return False
if str(port).upper() != 'ICMP':
port = int(port)
return port in ports
def service_stop(service_name):
"""
Wrapper around host.service_stop to prevent spurious "unknown service"
messages in the logs.
"""
if host.service_running(service_name):
host.service_stop(service_name)
def service_restart(service_name):
"""
Wrapper around host.service_restart to prevent spurious "unknown service"
messages in the logs.
"""
if host.service_available(service_name):
if host.service_running(service_name):
host.service_restart(service_name)
else:
host.service_start(service_name)
# Convenience aliases
open_ports = close_ports = manage_ports = PortManagerCallback()

View File

@ -0,0 +1,290 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import yaml
from charmhelpers.core import hookenv
from charmhelpers.core import host
from charmhelpers.core import templating
from charmhelpers.core.services.base import ManagerCallback
__all__ = ['RelationContext', 'TemplateCallback',
'render_template', 'template']
class RelationContext(dict):
"""
Base class for a context generator that gets relation data from juju.
Subclasses must provide the attributes `name`, which is the name of the
interface of interest, `interface`, which is the type of the interface of
interest, and `required_keys`, which is the set of keys required for the
relation to be considered complete. The data for all interfaces matching
the `name` attribute that are complete will used to populate the dictionary
values (see `get_data`, below).
The generated context will be namespaced under the relation :attr:`name`,
to prevent potential naming conflicts.
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
:param list additional_required_keys: Extend the list of :attr:`required_keys`
"""
name = None
interface = None
def __init__(self, name=None, additional_required_keys=None):
if not hasattr(self, 'required_keys'):
self.required_keys = []
if name is not None:
self.name = name
if additional_required_keys:
self.required_keys.extend(additional_required_keys)
self.get_data()
def __bool__(self):
"""
Returns True if all of the required_keys are available.
"""
return self.is_ready()
__nonzero__ = __bool__
def __repr__(self):
return super(RelationContext, self).__repr__()
def is_ready(self):
"""
Returns True if all of the `required_keys` are available from any units.
"""
ready = len(self.get(self.name, [])) > 0
if not ready:
hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
return ready
def _is_ready(self, unit_data):
"""
Helper method that tests a set of relation data and returns True if
all of the `required_keys` are present.
"""
return set(unit_data.keys()).issuperset(set(self.required_keys))
def get_data(self):
"""
Retrieve the relation data for each unit involved in a relation and,
if complete, store it in a list under `self[self.name]`. This
is automatically called when the RelationContext is instantiated.
The units are sorted lexicographically first by the service ID, then by
the unit ID. Thus, if an interface has two other services, 'db:1'
and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
set of data, the relation data for the units will be stored in the
order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
If you only care about a single unit on the relation, you can just
access it as `{{ interface[0]['key'] }}`. However, if you can at all
support multiple units on a relation, you should iterate over the list,
like::
{% for unit in interface -%}
{{ unit['key'] }}{% if not loop.last %},{% endif %}
{%- endfor %}
Note that since all sets of relation data from all related services and
units are in a single list, if you need to know which service or unit a
set of data came from, you'll need to extend this class to preserve
that information.
"""
if not hookenv.relation_ids(self.name):
return
ns = self.setdefault(self.name, [])
for rid in sorted(hookenv.relation_ids(self.name)):
for unit in sorted(hookenv.related_units(rid)):
reldata = hookenv.relation_get(rid=rid, unit=unit)
if self._is_ready(reldata):
ns.append(reldata)
def provide_data(self):
"""
Return data to be relation_set for this interface.
"""
return {}
class MysqlRelation(RelationContext):
"""
Relation context for the `mysql` interface.
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
:param list additional_required_keys: Extend the list of :attr:`required_keys`
"""
name = 'db'
interface = 'mysql'
def __init__(self, *args, **kwargs):
self.required_keys = ['host', 'user', 'password', 'database']
RelationContext.__init__(self, *args, **kwargs)
class HttpRelation(RelationContext):
"""
Relation context for the `http` interface.
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
:param list additional_required_keys: Extend the list of :attr:`required_keys`
"""
name = 'website'
interface = 'http'
def __init__(self, *args, **kwargs):
self.required_keys = ['host', 'port']
RelationContext.__init__(self, *args, **kwargs)
def provide_data(self):
return {
'host': hookenv.unit_get('private-address'),
'port': 80,
}
class RequiredConfig(dict):
"""
Data context that loads config options with one or more mandatory options.
Once the required options have been changed from their default values, all
config options will be available, namespaced under `config` to prevent
potential naming conflicts (for example, between a config option and a
relation property).
:param list *args: List of options that must be changed from their default values.
"""
def __init__(self, *args):
self.required_options = args
self['config'] = hookenv.config()
with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
self.config = yaml.safe_load(fp).get('options', {})
def __bool__(self):
for option in self.required_options:
if option not in self['config']:
return False
current_value = self['config'][option]
default_value = self.config[option].get('default')
if current_value == default_value:
return False
if current_value in (None, '') and default_value in (None, ''):
return False
return True
def __nonzero__(self):
return self.__bool__()
class StoredContext(dict):
"""
A data context that always returns the data that it was first created with.
This is useful to do a one-time generation of things like passwords, that
will thereafter use the same value that was originally generated, instead
of generating a new value each time it is run.
"""
def __init__(self, file_name, config_data):
"""
If the file exists, populate `self` with the data from the file.
Otherwise, populate with the given data and persist it to the file.
"""
if os.path.exists(file_name):
self.update(self.read_context(file_name))
else:
self.store_context(file_name, config_data)
self.update(config_data)
def store_context(self, file_name, config_data):
if not os.path.isabs(file_name):
file_name = os.path.join(hookenv.charm_dir(), file_name)
with open(file_name, 'w') as file_stream:
os.fchmod(file_stream.fileno(), 0o600)
yaml.dump(config_data, file_stream)
def read_context(self, file_name):
if not os.path.isabs(file_name):
file_name = os.path.join(hookenv.charm_dir(), file_name)
with open(file_name, 'r') as file_stream:
data = yaml.safe_load(file_stream)
if not data:
raise OSError("%s is empty" % file_name)
return data
class TemplateCallback(ManagerCallback):
"""
Callback class that will render a Jinja2 template, for use as a ready
action.
:param str source: The template source file, relative to
`$CHARM_DIR/templates`
:param str target: The target to write the rendered template to (or None)
:param str owner: The owner of the rendered file
:param str group: The group of the rendered file
:param int perms: The permissions of the rendered file
:param partial on_change_action: functools partial to be executed when
rendered file changes
:param jinja2.BaseLoader template_loader: A jinja2 template loader
:return str: The rendered template
"""
def __init__(self, source, target,
owner='root', group='root', perms=0o444,
on_change_action=None, template_loader=None):
self.source = source
self.target = target
self.owner = owner
self.group = group
self.perms = perms
self.on_change_action = on_change_action
self.template_loader = template_loader
def __call__(self, manager, service_name, event_name):
pre_checksum = ''
if self.on_change_action and os.path.isfile(self.target):
pre_checksum = host.file_hash(self.target)
service = manager.get_service(service_name)
context = {'ctx': {}}
for ctx in service.get('required_data', []):
context.update(ctx)
context['ctx'].update(ctx)
result = templating.render(self.source, self.target, context,
self.owner, self.group, self.perms,
template_loader=self.template_loader)
if self.on_change_action:
if pre_checksum == host.file_hash(self.target):
hookenv.log(
'No change detected: {}'.format(self.target),
hookenv.DEBUG)
else:
self.on_change_action()
return result
# Convenience aliases for templates
render_template = template = TemplateCallback

View File

@ -0,0 +1,129 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import re
def bool_from_string(value):
"""Interpret string value as boolean.
Returns True if the value translates to True, False if it translates to
False, and raises ValueError for anything else.
"""
if isinstance(value, six.string_types):
value = six.text_type(value)
else:
msg = "Unable to interpret non-string value '%s' as boolean" % (value)
raise ValueError(msg)
value = value.strip().lower()
if value in ['y', 'yes', 'true', 't', 'on']:
return True
elif value in ['n', 'no', 'false', 'f', 'off']:
return False
msg = "Unable to interpret string value '%s' as boolean" % (value)
raise ValueError(msg)
def bytes_from_string(value):
"""Interpret human readable string value as bytes.
Returns int
"""
BYTE_POWER = {
'K': 1,
'KB': 1,
'M': 2,
'MB': 2,
'G': 3,
'GB': 3,
'T': 4,
'TB': 4,
'P': 5,
'PB': 5,
}
if isinstance(value, six.string_types):
value = six.text_type(value)
else:
msg = "Unable to interpret non-string value '%s' as bytes" % (value)
raise ValueError(msg)
matches = re.match("([0-9]+)([a-zA-Z]+)", value)
if matches:
size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
else:
# Assume that value passed in is bytes
try:
size = int(value)
except ValueError:
msg = "Unable to interpret string value '%s' as bytes" % (value)
raise ValueError(msg)
return size
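A few hedged examples of the conversion (plain integers pass through unchanged; size suffixes use powers of 1024):

assert bytes_from_string('512') == 512
assert bytes_from_string('2MB') == 2 * 1024 ** 2
assert bytes_from_string('1G') == 1024 ** 3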
class BasicStringComparator(object):
"""Provides a class that will compare strings from an iterator type object.
Used to provide > and < comparisons on strings that may not necessarily be
alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the
z-wrap.
"""
_list = None
def __init__(self, item):
if self._list is None:
raise Exception("Must define the _list in the class definition!")
try:
self.index = self._list.index(item)
except Exception:
raise KeyError("Item '{}' is not in list '{}'"
.format(item, self._list))
def __eq__(self, other):
assert isinstance(other, str) or isinstance(other, self.__class__)
return self.index == self._list.index(other)
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
assert isinstance(other, str) or isinstance(other, self.__class__)
return self.index < self._list.index(other)
def __ge__(self, other):
return not self.__lt__(other)
def __gt__(self, other):
assert isinstance(other, str) or isinstance(other, self.__class__)
return self.index > self._list.index(other)
def __le__(self, other):
return not self.__gt__(other)
def __str__(self):
"""Always give back the item at the index so it can be used in
comparisons like:
s_mitaka = CompareOpenStack('mitaka')
s_newton = CompareOpenStack('newton')
assert s_newton > s_mitaka
@returns: <string>
"""
return self._list[self.index]
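An illustrative subclass (the names are hypothetical) shows the intended ordering semantics:

class CompareFruit(BasicStringComparator):
    _list = ['apple', 'banana', 'cherry']

# Comparisons follow list position, not alphabetical order.
assert CompareFruit('banana') > 'apple'
assert CompareFruit('banana') < CompareFruit('cherry')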

View File

@ -0,0 +1,54 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
from subprocess import check_call
from charmhelpers.core.hookenv import (
log,
DEBUG,
ERROR,
)
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
def create(sysctl_dict, sysctl_file):
"""Creates a sysctl.conf file from a YAML associative array
:param sysctl_dict: a YAML-formatted string of sysctl options, e.g. "{ 'kernel.max_pid': 1337 }"
:type sysctl_dict: str
:param sysctl_file: path to the sysctl file to be saved
:type sysctl_file: str or unicode
:returns: None
"""
try:
sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
except yaml.YAMLError:
log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
level=ERROR)
return
with open(sysctl_file, "w") as fd:
for key, value in sysctl_dict_parsed.items():
fd.write("{}={}\n".format(key, value))
log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
level=DEBUG)
check_call(["sysctl", "-p", sysctl_file])
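A hedged usage sketch (the file name and settings are illustrative; the final sysctl -p call requires root):

# Persist two settings to a drop-in file and apply them immediately.
create("{net.ipv4.ip_forward: 1, vm.swappiness: 10}",
       '/etc/sysctl.d/90-example.conf')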

View File

@ -0,0 +1,93 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from charmhelpers.core import host
from charmhelpers.core import hookenv
def render(source, target, context, owner='root', group='root',
perms=0o444, templates_dir=None, encoding='UTF-8',
template_loader=None, config_template=None):
"""
Render a template.
The `source` path, if not absolute, is relative to the `templates_dir`.
The `target` path should be absolute. It can also be `None`, in which
case no file will be written.
The context should be a dict containing the values to be replaced in the
template.
config_template may be provided to render from a provided template instead
of loading from a file.
The `owner`, `group`, and `perms` options will be passed to `write_file`.
If omitted, `templates_dir` defaults to the `templates` folder in the charm.
The rendered template will be written to the file as well as being returned
as a string.
Note: Using this requires python-jinja2 or python3-jinja2; if it is not
installed, calling this will attempt to use charmhelpers.fetch.apt_install
to install it.
"""
try:
from jinja2 import FileSystemLoader, Environment, exceptions
except ImportError:
try:
from charmhelpers.fetch import apt_install
except ImportError:
hookenv.log('Could not import jinja2, and could not import '
'charmhelpers.fetch to install it',
level=hookenv.ERROR)
raise
if sys.version_info.major == 2:
apt_install('python-jinja2', fatal=True)
else:
apt_install('python3-jinja2', fatal=True)
from jinja2 import FileSystemLoader, Environment, exceptions
if template_loader:
template_env = Environment(loader=template_loader)
else:
if templates_dir is None:
templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
template_env = Environment(loader=FileSystemLoader(templates_dir))
# load from a string if provided explicitly
if config_template is not None:
template = template_env.from_string(config_template)
else:
try:
template = template_env.get_template(source)
except exceptions.TemplateNotFound as e:
hookenv.log('Could not load template %s from %s.' %
(source, templates_dir),
level=hookenv.ERROR)
raise e
content = template.render(context)
if target is not None:
target_dir = os.path.dirname(target)
if not os.path.exists(target_dir):
# This is a terrible default directory permission, as the file
# or its siblings will often contain secrets.
host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
host.write_file(target, content.encode(encoding), owner, group, perms)
return content
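For instance, a charm could render a config file from its templates directory like this (template name, target path, and context keys are hypothetical):

# Renders $CHARM_DIR/templates/myapp.conf.j2 to disk and returns the content.
render(source='myapp.conf.j2',
       target='/etc/myapp/myapp.conf',
       context={'listen_port': 8080},
       perms=0o640)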

View File

@ -0,0 +1,520 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Kapil Thangavelu <kapil.foss@gmail.com>
#
"""
Intro
-----
A simple way to store state in units. This provides a key value
storage with support for versioned, transactional operation,
and can calculate deltas from previous values to simplify unit logic
when processing changes.
Hook Integration
----------------
There are several extant frameworks for hook execution, including
- charmhelpers.core.hookenv.Hooks
- charmhelpers.core.services.ServiceManager
The storage classes are framework agnostic; one simple integration is
via the HookData context manager. It will record the current hook
execution environment (including relation data, config data, etc.),
setup a transaction and allow easy access to the changes from
previously seen values. One consequence of the integration is the
reservation of particular keys ('rels', 'unit', 'env', 'config',
'charm_revisions') for their respective values.
Here's a fully worked integration example using hookenv.Hooks::
from charmhelper.core import hookenv, unitdata
hook_data = unitdata.HookData()
db = unitdata.kv()
hooks = hookenv.Hooks()
@hooks.hook
def config_changed():
# Print all changes to configuration from previously seen
# values.
for changed, (prev, cur) in hook_data.conf.items():
print('config changed', changed,
'previous value', prev,
'current value', cur)
# Get some unit specific bookkeeping
if not db.get('pkg_key'):
key = urllib.urlopen('https://example.com/pkg_key').read()
db.set('pkg_key', key)
# Directly access all charm config as a mapping.
conf = db.getrange('config', True)
# Directly access all relation data as a mapping
rels = db.getrange('rels', True)
if __name__ == '__main__':
with hook_data():
hook.execute()
A more basic integration is via the hook_scope context manager, which simply
manages transaction scope (and records the hook name and timestamp)::
>>> from unitdata import kv
>>> db = kv()
>>> with db.hook_scope('install'):
... # do work, in transactional scope.
... db.set('x', 1)
>>> db.get('x')
1
Usage
-----
Values are automatically json de/serialized to preserve basic typing
and complex data struct capabilities (dicts, lists, ints, booleans, etc).
Individual values can be manipulated via get/set::
>>> kv.set('y', True)
>>> kv.get('y')
True
# We can set complex values (dicts, lists) as a single key.
>>> kv.set('config', {'a': 1, 'b': True})
# Also supports returning dictionaries as a record which
# provides attribute access.
>>> config = kv.get('config', record=True)
>>> config.b
True
Groups of keys can be manipulated with update/getrange::
>>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
>>> kv.getrange('gui.', strip=True)
{'z': 1, 'y': 2}
When updating values, it's very helpful to understand which values
have actually changed and how they have changed. The storage
provides a delta method for this::
>>> data = {'debug': True, 'option': 2}
>>> delta = kv.delta(data, 'config.')
>>> delta.debug.previous
None
>>> delta.debug.current
True
>>> delta
{'debug': (None, True), 'option': (None, 2)}
Note the delta method does not persist the actual change, it needs to
be explicitly saved via 'update' method::
>>> kv.update(data, 'config.')
Values modified in the context of a hook scope retain historical values
associated with the hook name::
>>> with db.hook_scope('config-changed'):
... db.set('x', 42)
>>> db.gethistory('x')
[(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
(2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
"""
import collections
import contextlib
import datetime
import itertools
import json
import os
import pprint
import sqlite3
import sys
__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
class Storage(object):
"""Simple key value database for local unit state within charms.
Modifications are not persisted unless :meth:`flush` is called.
To support dicts, lists, integer, floats, and booleans values
are automatically json encoded/decoded.
"""
def __init__(self, path=None):
self.db_path = path
if path is None:
if 'UNIT_STATE_DB' in os.environ:
self.db_path = os.environ['UNIT_STATE_DB']
else:
self.db_path = os.path.join(
os.environ.get('CHARM_DIR', ''), '.unit-state.db')
with open(self.db_path, 'a') as f:
os.fchmod(f.fileno(), 0o600)
self.conn = sqlite3.connect('%s' % self.db_path)
self.cursor = self.conn.cursor()
self.revision = None
self._closed = False
self._init()
def close(self):
if self._closed:
return
self.flush(False)
self.cursor.close()
self.conn.close()
self._closed = True
def get(self, key, default=None, record=False):
self.cursor.execute('select data from kv where key=?', [key])
result = self.cursor.fetchone()
if not result:
return default
if record:
return Record(json.loads(result[0]))
return json.loads(result[0])
def getrange(self, key_prefix, strip=False):
"""
Get a range of keys starting with a common prefix as a mapping of
keys to values.
:param str key_prefix: Common prefix among all keys
:param bool strip: Optionally strip the common prefix from the key
names in the returned dict
:return dict: A (possibly empty) dict of key-value mappings
"""
self.cursor.execute("select key, data from kv where key like ?",
['%s%%' % key_prefix])
result = self.cursor.fetchall()
if not result:
return {}
if not strip:
key_prefix = ''
return dict([
(k[len(key_prefix):], json.loads(v)) for k, v in result])
def update(self, mapping, prefix=""):
"""
Set the values of multiple keys at once.
:param dict mapping: Mapping of keys to values
:param str prefix: Optional prefix to apply to all keys in `mapping`
before setting
"""
for k, v in mapping.items():
self.set("%s%s" % (prefix, k), v)
def unset(self, key):
"""
Remove a key from the database entirely.
"""
self.cursor.execute('delete from kv where key=?', [key])
if self.revision and self.cursor.rowcount:
self.cursor.execute(
'insert into kv_revisions values (?, ?, ?)',
[key, self.revision, json.dumps('DELETED')])
def unsetrange(self, keys=None, prefix=""):
"""
Remove a range of keys starting with a common prefix, from the database
entirely.
:param list keys: List of keys to remove.
:param str prefix: Optional prefix to apply to all keys in ``keys``
before removing.
"""
if keys is not None:
keys = ['%s%s' % (prefix, key) for key in keys]
self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
if self.revision and self.cursor.rowcount:
self.cursor.execute(
'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
else:
self.cursor.execute('delete from kv where key like ?',
['%s%%' % prefix])
if self.revision and self.cursor.rowcount:
self.cursor.execute(
'insert into kv_revisions values (?, ?, ?)',
['%s%%' % prefix, self.revision, json.dumps('DELETED')])
def set(self, key, value):
"""
Set a value in the database.
:param str key: Key to set the value for
:param value: Any JSON-serializable value to be set
"""
serialized = json.dumps(value)
self.cursor.execute('select data from kv where key=?', [key])
exists = self.cursor.fetchone()
# Skip mutations to the same value
if exists:
if exists[0] == serialized:
return value
if not exists:
self.cursor.execute(
'insert into kv (key, data) values (?, ?)',
(key, serialized))
else:
self.cursor.execute('''
update kv
set data = ?
where key = ?''', [serialized, key])
# Save
if not self.revision:
return value
self.cursor.execute(
'select 1 from kv_revisions where key=? and revision=?',
[key, self.revision])
exists = self.cursor.fetchone()
if not exists:
self.cursor.execute(
'''insert into kv_revisions (
revision, key, data) values (?, ?, ?)''',
(self.revision, key, serialized))
else:
self.cursor.execute(
'''
update kv_revisions
set data = ?
where key = ?
and revision = ?''',
[serialized, key, self.revision])
return value
def delta(self, mapping, prefix):
"""
Return a delta containing values that have changed.
"""
previous = self.getrange(prefix, strip=True)
if not previous:
pk = set()
else:
pk = set(previous.keys())
ck = set(mapping.keys())
delta = DeltaSet()
# added
for k in ck.difference(pk):
delta[k] = Delta(None, mapping[k])
# removed
for k in pk.difference(ck):
delta[k] = Delta(previous[k], None)
# changed
for k in pk.intersection(ck):
c = mapping[k]
p = previous[k]
if c != p:
delta[k] = Delta(p, c)
return delta
@contextlib.contextmanager
def hook_scope(self, name=""):
"""Scope all future interactions to the current hook execution
revision."""
assert not self.revision
self.cursor.execute(
'insert into hooks (hook, date) values (?, ?)',
(name or sys.argv[0],
datetime.datetime.utcnow().isoformat()))
self.revision = self.cursor.lastrowid
try:
yield self.revision
self.revision = None
except Exception:
self.flush(False)
self.revision = None
raise
else:
self.flush()
def flush(self, save=True):
if save:
self.conn.commit()
elif self._closed:
return
else:
self.conn.rollback()
def _init(self):
self.cursor.execute('''
create table if not exists kv (
key text,
data text,
primary key (key)
)''')
self.cursor.execute('''
create table if not exists kv_revisions (
key text,
revision integer,
data text,
primary key (key, revision)
)''')
self.cursor.execute('''
create table if not exists hooks (
version integer primary key autoincrement,
hook text,
date text
)''')
self.conn.commit()
def gethistory(self, key, deserialize=False):
self.cursor.execute(
'''
select kv.revision, kv.key, kv.data, h.hook, h.date
from kv_revisions kv,
hooks h
where kv.key=?
and kv.revision = h.version
''', [key])
if deserialize is False:
return self.cursor.fetchall()
return map(_parse_history, self.cursor.fetchall())
def debug(self, fh=sys.stderr):
self.cursor.execute('select * from kv')
pprint.pprint(self.cursor.fetchall(), stream=fh)
self.cursor.execute('select * from kv_revisions')
pprint.pprint(self.cursor.fetchall(), stream=fh)
def _parse_history(d):
return (d[0], d[1], json.loads(d[2]), d[3],
datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"))
class HookData(object):
"""Simple integration for existing hook exec frameworks.
Records all unit information, and stores deltas for processing
by the hook.
Sample::
from charmhelper.core import hookenv, unitdata
changes = unitdata.HookData()
db = unitdata.kv()
hooks = hookenv.Hooks()
@hooks.hook
def config_changed():
# View all changes to configuration
for changed, (prev, cur) in changes.conf.items():
print('config changed', changed,
'previous value', prev,
'current value', cur)
# Get some unit specific bookkeeping
if not db.get('pkg_key'):
key = urllib.urlopen('https://example.com/pkg_key').read()
db.set('pkg_key', key)
if __name__ == '__main__':
with changes():
hook.execute()
"""
def __init__(self):
self.kv = kv()
self.conf = None
self.rels = None
@contextlib.contextmanager
def __call__(self):
from charmhelpers.core import hookenv
hook_name = hookenv.hook_name()
with self.kv.hook_scope(hook_name):
self._record_charm_version(hookenv.charm_dir())
delta_config, delta_relation = self._record_hook(hookenv)
yield self.kv, delta_config, delta_relation
def _record_charm_version(self, charm_dir):
# Record revisions.. charm revisions are meaningless
# to charm authors as they don't control the revision,
# so logic dependent on revision is not particularly
# useful; however, it is useful for debugging analysis.
charm_rev = open(
os.path.join(charm_dir, 'revision')).read().strip()
charm_rev = charm_rev or '0'
revs = self.kv.get('charm_revisions', [])
if charm_rev not in revs:
revs.append(charm_rev.strip() or '0')
self.kv.set('charm_revisions', revs)
def _record_hook(self, hookenv):
data = hookenv.execution_environment()
self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
self.kv.set('env', dict(data['env']))
self.kv.set('unit', data['unit'])
self.kv.set('relid', data.get('relid'))
return conf_delta, rels_delta
class Record(dict):
__slots__ = ()
def __getattr__(self, k):
if k in self:
return self[k]
raise AttributeError(k)
class DeltaSet(Record):
__slots__ = ()
Delta = collections.namedtuple('Delta', ['previous', 'current'])
_KV = None
def kv():
global _KV
if _KV is None:
_KV = Storage()
return _KV
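A short sketch of the typical flow (the key names are hypothetical): compute the delta first, then explicitly persist and flush:

db = kv()
delta = db.delta({'debug': True}, 'config.')
if 'debug' in delta and delta.debug.previous is None:
    pass  # option seen for the first time
db.update({'debug': True}, 'config.')  # delta() does not persist; update() does
db.flush()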

View File

@ -0,0 +1,25 @@
import platform
def get_platform():
"""Return the current OS platform.
For example: if current os platform is Ubuntu then a string "ubuntu"
will be returned (which is the name of the module).
This string is used to decide which platform module should be imported.
"""
# linux_distribution is deprecated and was removed in Python 3.8
# Warnings *not* disabled, as we certainly need to fix this.
tuple_platform = platform.linux_distribution()
current_platform = tuple_platform[0]
if "Ubuntu" in current_platform:
return "ubuntu"
elif "CentOS" in current_platform:
return "centos"
elif "debian" in current_platform:
# Stock Python does not detect Ubuntu and instead returns debian.
# Or at least it does in some build environments like Travis CI
return "ubuntu"
else:
raise RuntimeError("This module is not supported on {}."
.format(current_platform))
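Since platform.linux_distribution() is gone on Python 3.8+, a hedged sketch of an equivalent check using the third-party distro package (an assumption; not part of this sync) could look like:

import distro  # third-party package, assumed installed

def get_platform_distro():
    """Sketch: the same ubuntu/centos mapping via distro.id()."""
    current = distro.id()  # e.g. 'ubuntu', 'centos', 'debian'
    if current in ('ubuntu', 'debian'):
        # Some environments report Ubuntu as debian; treat both as ubuntu.
        return 'ubuntu'
    if current == 'centos':
        return 'centos'
    raise RuntimeError("This module is not supported on {}.".format(current))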

View File

@ -292,6 +292,7 @@ class TestConfigChanged(CharmTestCase):
'get_cluster_hosts',
'is_leader_bootstrapped',
'is_bootstrapped',
'clustered_once',
'is_leader',
'render_config_restart_on_changed',
'update_client_db_relations',
@ -315,6 +316,7 @@ class TestConfigChanged(CharmTestCase):
self.is_leader.return_value = False
self.is_leader_bootstrapped.return_value = False
self.is_bootstrapped.return_value = False
self.clustered_once.return_value = False
self.relation_ids.return_value = []
self.is_relation_made.return_value = False
self.leader_get.return_value = '10.10.10.10'
@ -322,6 +324,7 @@ class TestConfigChanged(CharmTestCase):
def test_config_changed_open_port(self):
'''Ensure open_port is called with MySQL default port'''
self.is_leader_bootstrapped.return_value = True
hooks.config_changed()
self.open_port.assert_called_with(3306)
@ -329,93 +332,123 @@ class TestConfigChanged(CharmTestCase):
'''Ensure configuration is only rendered when ready for the leader'''
self.is_leader.return_value = True
# Render without hosts
# Render without peers, leader not bootstrapped
self.get_cluster_hosts.return_value = []
hooks.config_changed()
self.install_percona_xtradb_cluster.assert_called_once_with()
self.install_percona_xtradb_cluster.assert_called_once()
self.render_config_restart_on_changed.assert_called_once_with(
False, [], bootstrap=True)
# Render with hosts, not bootstrapped
# NOTE: It is unclear this scenario could occur.
# We may need to preclude it from occurring.
self.is_leader_bootstrapped.return_value = False
self.get_cluster_hosts.return_value = ['10.10.10.20', '10.10.10.30']
self.is_bootstrapped.return_value = True
[], bootstrap=True)
# Render without peers, leader bootstrapped
self.is_leader_bootstrapped.return_value = True
self.get_cluster_hosts.return_value = []
self.render_config_restart_on_changed.reset_mock()
hooks.config_changed()
self.render_config_restart_on_changed.assert_called_once_with(
True, ['10.10.10.20', '10.10.10.30'], bootstrap=True)
[], bootstrap=False)
# Render with hosts, bootstrapped
# Render without hosts, leader bootstrapped, never clustered
self.is_leader_bootstrapped.return_value = True
self.get_cluster_hosts.return_value = ['10.10.10.20', '10.10.10.30']
self.is_bootstrapped.return_value = True
self.render_config_restart_on_changed.reset_mock()
hooks.config_changed()
self.render_config_restart_on_changed.assert_called_once_with(
True, ['10.10.10.20', '10.10.10.30'], bootstrap=False)
[], bootstrap=False)
# Clustered at least once
self.clustered_once.return_value = True
# Render with hosts, leader bootstrapped
self.is_leader_bootstrapped.return_value = True
self.get_cluster_hosts.return_value = ['10.10.10.20', '10.10.10.30']
self.render_config_restart_on_changed.reset_mock()
hooks.config_changed()
self.render_config_restart_on_changed.assert_called_once_with(
['10.10.10.20', '10.10.10.30'], bootstrap=False)
# In none of the prior scenarios should update_root_password have been
# called.
self.update_root_password.assert_not_called()
# Render with hosts, leader and cluster bootstrapped
self.is_leader_bootstrapped.return_value = True
self.is_bootstrapped.return_value = True
self.get_cluster_hosts.return_value = ['10.10.10.20', '10.10.10.30']
self.render_config_restart_on_changed.reset_mock()
hooks.config_changed()
self.render_config_restart_on_changed.assert_called_once_with(
['10.10.10.20', '10.10.10.30'], bootstrap=False)
self.update_root_password.assert_called_once()
def test_config_changed_render_non_leader(self):
'''Ensure configuration is only rendered when ready for
non-leaders'''
# Avoid rendering for non-leader.
# Bug #1738896
# Leader not bootstrapped
# Do not render
self.get_cluster_hosts.return_value = ['10.10.10.20', '10.10.10.30',
'10.10.10.10']
self.is_leader_bootstrapped.return_value = False
hooks.config_changed()
self.install_percona_xtradb_cluster.assert_called_once_with()
self.render_config_restart_on_changed.assert_not_called()
self.update_bootstrap_uuid.assert_not_called()
# Bug #1738896
# Avoid clustered = False and rendering for non-leader.
# This should no longer be possible in the code. Thus the test.
# Instead use the leader.
# Leader is bootstrapped, no peers
# Use the leader node and render.
self.is_leader_bootstrapped.return_value = True
self.get_cluster_hosts.return_value = []
self.render_config_restart_on_changed.reset_mock()
hooks.config_changed()
self.render_config_restart_on_changed.assert_called_once_with(
True, ['10.10.10.10'])
['10.10.10.10'])
# Missing leader, leader bootstrapped
# Bug #1738896
# Avoid clustered = False, bootstrapped = True
# and rendering for non-leader.
# This should no longer be possible in the code. Thus the test.
# Instead use the leader.
self.is_leader_bootstrapped.return_value = True
self.is_bootstrapped.return_value = True
# Leader bootstrapped
# Add the leader node and render.
self.render_config_restart_on_changed.reset_mock()
hooks.config_changed()
self.render_config_restart_on_changed.assert_called_once_with(
True, ['10.10.10.10'])
# Just one host, bootstrapped
self.render_config_restart_on_changed.reset_mock()
self.get_cluster_hosts.return_value = ['10.10.10.20']
hooks.config_changed()
self.render_config_restart_on_changed.assert_called_once_with(
True, ['10.10.10.10'])
# Missing leader, bootstrapped
self.render_config_restart_on_changed.reset_mock()
self.is_bootstrapped.return_value = True
self.update_bootstrap_uuid.reset_mock()
self.get_cluster_hosts.return_value = ['10.10.10.20', '10.10.10.30']
hooks.config_changed()
self.render_config_restart_on_changed.assert_called_once_with(
True, ['10.10.10.10', '10.10.10.20', '10.10.10.30'])
['10.10.10.10', '10.10.10.20', '10.10.10.30'])
self.update_bootstrap_uuid.assert_called_once()
# Leader present, bootstrapped
# Leader present, leader bootstrapped
self.render_config_restart_on_changed.reset_mock()
self.is_bootstrapped.return_value = True
self.update_bootstrap_uuid.reset_mock()
self.get_cluster_hosts.return_value = ['10.10.10.20', '10.10.10.30',
'10.10.10.10']
hooks.config_changed()
self.render_config_restart_on_changed.assert_called_once_with(
True, ['10.10.10.20', '10.10.10.30', '10.10.10.10'])
['10.10.10.20', '10.10.10.30', '10.10.10.10'])
self.update_bootstrap_uuid.assert_called_once()
# In none of the prior scenarios should update_root_password have been
# called. is_bootstrapped was defaulted to False
self.update_root_password.assert_not_called()
# Leader present, leader bootstrapped, cluster bootstrapped
self.is_bootstrapped.return_value = True
self.render_config_restart_on_changed.reset_mock()
self.update_bootstrap_uuid.reset_mock()
self.get_cluster_hosts.return_value = ['10.10.10.20', '10.10.10.30',
'10.10.10.10']
hooks.config_changed()
self.render_config_restart_on_changed.assert_called_once_with(
['10.10.10.20', '10.10.10.30', '10.10.10.10'])
self.update_bootstrap_uuid.assert_called_once()
self.update_root_password.assert_called_once()
class TestInstallPerconaXtraDB(CharmTestCase):

View File

@ -1,5 +1,4 @@
import os
import unittest
import sys
import tempfile
@ -13,9 +12,18 @@ from test_utils import CharmTestCase
os.environ['JUJU_UNIT_NAME'] = 'percona-cluster/2'
class UtilsTests(unittest.TestCase):
class UtilsTests(CharmTestCase):
TO_PATCH = [
'config',
'log',
'relation_ids',
'related_units',
'relation_get',
'relation_set',
]
def setUp(self):
super(UtilsTests, self).setUp()
super(UtilsTests, self).setUp(percona_utils, self.TO_PATCH)
@mock.patch("percona_utils.log")
def test_update_empty_hosts_file(self, mock_log):
@ -92,7 +100,8 @@ class UtilsTests(unittest.TestCase):
mock_get_cluster_host_ip.return_value = '10.2.0.1'
def _mock_rel_get(*args, **kwargs):
return {'private-address': '10.2.0.2'}
return {'private-address': '10.2.0.2',
'bootstrap-uuid': 'UUID'}
mock_rel_get.side_effect = _mock_rel_get
mock_config.side_effect = lambda k: False
@ -101,7 +110,79 @@ class UtilsTests(unittest.TestCase):
self.assertFalse(mock_update_hosts_file.called)
mock_rel_get.assert_called_with(rid=1, unit=2)
self.assertEqual(hosts, ['10.2.0.1', '10.2.0.2'])
self.assertEqual(hosts, ['10.2.0.2'])
@mock.patch("percona_utils.get_cluster_host_ip")
@mock.patch("percona_utils.update_hosts_file")
def test_get_cluster_hosts_sorted(self, mock_update_hosts_file,
mock_get_cluster_host_ip):
self.relation_ids.return_value = [1]
self.related_units.return_value = [5, 4, 3]
mock_get_cluster_host_ip.return_value = '10.2.0.1'
def _mock_rel_get(*args, **kwargs):
unit_id = kwargs.get('unit')
# Generate list in reverse sort order
return {'private-address': '10.2.0.{}'.format(unit_id - 1),
'bootstrap-uuid': 'UUID'}
self.relation_get.side_effect = _mock_rel_get
self.config.side_effect = lambda k: False
hosts = percona_utils.get_cluster_hosts()
self.assertFalse(mock_update_hosts_file.called)
# Verify the IPs are sorted
self.assertEqual(hosts, ['10.2.0.2', '10.2.0.3', '10.2.0.4'])
@mock.patch("percona_utils.get_cluster_host_ip")
@mock.patch("percona_utils.update_hosts_file")
def test_get_cluster_hosts_none_bootstrapped(self, mock_update_hosts_file,
mock_get_cluster_host_ip):
self.relation_ids.return_value = [1]
self.related_units.return_value = [4, 3, 2]
mock_get_cluster_host_ip.return_value = '10.2.0.1'
def _mock_rel_get(*args, **kwargs):
unit_id = kwargs.get('unit')
# None set bootstrap-uuid
return {'private-address': '10.2.0.{}'.format(unit_id)}
self.relation_get.side_effect = _mock_rel_get
self.config.side_effect = lambda k: False
hosts = percona_utils.get_cluster_hosts()
self.assertFalse(mock_update_hosts_file.called)
# Verify empty list
self.assertEqual(hosts, [])
@mock.patch("percona_utils.get_cluster_host_ip")
@mock.patch("percona_utils.update_hosts_file")
def test_get_cluster_hosts_one_not_bootstrapped(self,
mock_update_hosts_file,
mock_get_cluster_host_ip):
self.relation_ids.return_value = [1]
self.related_units.return_value = [4, 3, 2]
mock_get_cluster_host_ip.return_value = '10.2.0.1'
def _mock_rel_get(*args, **kwargs):
unit_id = kwargs.get('unit')
if unit_id == 3:
# unit/3 does not set bootstrap-uuid
return {'private-address': '10.2.0.{}'.format(unit_id)}
else:
return {'private-address': '10.2.0.{}'.format(unit_id),
'bootstrap-uuid': 'UUID'}
self.relation_get.side_effect = _mock_rel_get
self.config.side_effect = lambda k: False
hosts = percona_utils.get_cluster_hosts()
self.assertFalse(mock_update_hosts_file.called)
# Verify unit/3 not in the list
self.assertEqual(hosts, ['10.2.0.2', '10.2.0.4'])
@mock.patch.object(percona_utils, 'socket')
@mock.patch("percona_utils.get_cluster_host_ip")
@ -130,7 +211,8 @@ class UtilsTests(unittest.TestCase):
id = kwargs.get('unit')
hostname = "host{}".format(host_suffix[id - 1])
return {'private-address': '10.0.0.{}'.format(id + 1),
'hostname': hostname}
'hostname': hostname,
'bootstrap-uuid': 'UUID'}
config = {'prefer-ipv6': True}
mock_rel_get.side_effect = _mock_rel_get
@ -143,7 +225,7 @@ class UtilsTests(unittest.TestCase):
'10.0.0.3': 'hostC'})
mock_rel_get.assert_has_calls([mock.call(rid=88, unit=1),
mock.call(rid=88, unit=2)])
self.assertEqual(hosts, ['hostA', 'hostB', 'hostC'])
self.assertEqual(hosts, ['hostB', 'hostC'])
@mock.patch.object(percona_utils, 'get_address_in_network')
@mock.patch.object(percona_utils, 'log')
@ -167,7 +249,8 @@ class UtilsTests(unittest.TestCase):
hostname = "host{}".format(host_suffix[unit - 1])
return {'private-address': '10.0.0.{}'.format(unit + 1),
'cluster-address': '10.100.0.{}'.format(unit + 1),
'hostname': hostname}
'hostname': hostname,
'bootstrap-uuid': 'UUID'}
config = {'cluster-network': '10.100.0.0/24'}
mock_rel_get.side_effect = _mock_rel_get
@ -176,7 +259,7 @@ class UtilsTests(unittest.TestCase):
hosts = percona_utils.get_cluster_hosts()
mock_rel_get.assert_has_calls([mock.call(rid=88, unit=1),
mock.call(rid=88, unit=2)])
self.assertEqual(hosts, ['10.100.0.1', '10.100.0.2', '10.100.0.3'])
self.assertEqual(hosts, ['10.100.0.2', '10.100.0.3'])
@mock.patch.object(percona_utils, 'is_leader')
@mock.patch.object(percona_utils, 'related_units')
@ -276,25 +359,25 @@ class UtilsTests(unittest.TestCase):
percona_utils.get_wsrep_provider_options()
TO_PATCH = [
'is_sufficient_peers',
'is_bootstrapped',
'config',
'cluster_in_sync',
'is_leader',
'related_units',
'relation_ids',
'relation_get',
'leader_get',
'is_unit_paused_set',
'is_clustered',
'distributed_wait',
]
class UtilsTestsStatus(CharmTestCase):
TO_PATCH = [
'is_sufficient_peers',
'is_bootstrapped',
'config',
'cluster_in_sync',
'is_leader',
'related_units',
'relation_ids',
'relation_get',
'leader_get',
'is_unit_paused_set',
'is_clustered',
'distributed_wait',
]
class UtilsTestsCTC(CharmTestCase):
def setUp(self):
CharmTestCase.setUp(self, percona_utils, TO_PATCH)
super(UtilsTestsStatus, self).setUp(percona_utils, self.TO_PATCH)
def test_single_unit(self):
self.config.return_value = None
@ -341,6 +424,27 @@ class UtilsTestsCTC(CharmTestCase):
stat, _ = percona_utils.charm_check_func()
assert stat == 'active'
class UtilsTestsCTC(CharmTestCase):
TO_PATCH = [
'is_sufficient_peers',
'config',
'cluster_in_sync',
'is_leader',
'related_units',
'relation_ids',
'relation_get',
'leader_get',
'is_unit_paused_set',
'is_clustered',
'distributed_wait',
'clustered_once',
'kv'
]
def setUp(self):
super(UtilsTestsCTC, self).setUp(percona_utils, self.TO_PATCH)
@mock.patch.object(percona_utils, 'pxc_installed')
@mock.patch.object(percona_utils, 'determine_packages')
@mock.patch.object(percona_utils, 'application_version_set')
@@ -400,11 +504,14 @@ class UtilsTestsCTC(CharmTestCase):
f.assert_called_once_with('assessor', services='s1', ports=None)
@mock.patch.object(percona_utils, 'is_sufficient_peers')
def test_cluster_ready(self, mock_is_sufficient_peers):
def test_is_bootstrapped(self, mock_is_sufficient_peers):
kvstore = mock.MagicMock()
kvstore.get.return_value = False
self.kv.return_value = kvstore
# Insufficient number of peers
mock_is_sufficient_peers.return_value = False
self.assertFalse(percona_utils.cluster_ready())
self.assertFalse(percona_utils.is_bootstrapped())
# Not all cluster units ready
mock_is_sufficient_peers.return_value = True
@@ -413,7 +520,10 @@ class UtilsTestsCTC(CharmTestCase):
self.relation_get.return_value = False
_config = {'min-cluster-size': 3}
self.config.side_effect = lambda key: _config.get(key)
self.assertFalse(percona_utils.cluster_ready())
self.assertFalse(percona_utils.is_bootstrapped())
# kvstore.set must not be called while the cluster is incomplete
kvstore.set.assert_not_called()
# All cluster units ready
mock_is_sufficient_peers.return_value = True
@@ -422,7 +532,13 @@ class UtilsTestsCTC(CharmTestCase):
self.relation_get.return_value = 'UUID'
_config = {'min-cluster-size': 3}
self.config.side_effect = lambda key: _config.get(key)
self.assertTrue(percona_utils.cluster_ready())
self.assertTrue(percona_utils.is_bootstrapped())
kvstore.set.assert_called_once_with(key='initial-cluster-complete',
value=True)
# Now set the key recording that the cluster completed at least once
kvstore.get.return_value = True
kvstore.set.reset_mock()
# Not all cluster units ready, no min-cluster-size
mock_is_sufficient_peers.return_value = True
@@ -431,7 +547,8 @@ class UtilsTestsCTC(CharmTestCase):
self.relation_get.return_value = False
_config = {'min-cluster-size': None}
self.config.side_effect = lambda key: _config.get(key)
self.assertFalse(percona_utils.cluster_ready())
self.assertFalse(percona_utils.is_bootstrapped())
kvstore.set.assert_not_called()
# All cluster units ready, no min-cluster-size
mock_is_sufficient_peers.return_value = True
@@ -440,7 +557,7 @@ class UtilsTestsCTC(CharmTestCase):
self.relation_get.return_value = 'UUID'
_config = {'min-cluster-size': None}
self.config.side_effect = lambda key: _config.get(key)
self.assertTrue(percona_utils.cluster_ready())
self.assertTrue(percona_utils.is_bootstrapped())
# Assume a single unit, no min-cluster-size
mock_is_sufficient_peers.return_value = True
@@ -449,13 +566,12 @@ class UtilsTestsCTC(CharmTestCase):
self.relation_get.return_value = None
_config = {'min-cluster-size': None}
self.config.side_effect = lambda key: _config.get(key)
self.assertTrue(percona_utils.cluster_ready())
self.assertTrue(percona_utils.is_bootstrapped())
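# NOTE: a hedged sketch of the is_bootstrapped() logic the assertions
# above exercise, reconstructed from the mocks (the kv key name is the
# one asserted on; everything else is an assumption, not the charm's
# verbatim code).
def is_bootstrapped():
    if not is_sufficient_peers():
        return False
    # Expected cluster size: min-cluster-size if set, otherwise the
    # current peer count (this unit plus its related units).
    expected = config('min-cluster-size')
    if not expected:
        expected = 1 + sum(len(related_units(rid))
                           for rid in relation_ids('cluster'))
    if expected > 1:
        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                if not relation_get(attribute='bootstrap-uuid',
                                    rid=rid, unit=unit):
                    # A peer has not bootstrapped yet; do not latch.
                    return False
    # Latch completion exactly once so that later peer churn cannot
    # flip the unit back to a never-clustered state.
    kvstore = kv()
    if not kvstore.get('initial-cluster-complete'):
        kvstore.set(key='initial-cluster-complete', value=True)
    return True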
@mock.patch.object(percona_utils, 'is_bootstrapped')
def test_cluster_ready(self, mock_is_bootstrapped):
# When a VIP is configured, check is_clustered
mock_is_sufficient_peers.return_value = True
self.relation_ids.return_value = ['cluster:0']
self.related_units.return_value = ['test/0', 'test/1']
self.relation_get.return_value = 'UUID'
mock_is_bootstrapped.return_value = True
_config = {'vip': '10.10.10.10', 'min-cluster-size': 3}
self.config.side_effect = lambda key: _config.get(key)
# HACluster not ready
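# NOTE: a hedged sketch of the cluster_ready() gate this test drives
# (assumed shape, not the charm's verbatim code): bootstrap is a
# prerequisite, and a configured VIP additionally requires hacluster
# to report the unit as clustered.
def cluster_ready():
    if not is_bootstrapped():
        return False
    if config('vip'):
        # With a VIP, clients reach the cluster through hacluster, so
        # the unit is only ready once it is clustered.
        return is_clustered()
    return True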
@@ -545,8 +661,8 @@ class TestResolveHostnameToIP(CharmTestCase):
TO_PATCH = []
def setUp(self):
CharmTestCase.setUp(self, percona_utils,
self.TO_PATCH)
super(TestResolveHostnameToIP, self).setUp(percona_utils,
self.TO_PATCH)
@mock.patch.object(percona_utils, 'is_ipv6')
@mock.patch.object(percona_utils, 'is_ip')
@@ -625,7 +741,8 @@ class TestUpdateBootstrapUUID(CharmTestCase):
]
def setUp(self):
CharmTestCase.setUp(self, percona_utils, self.TO_PATCH)
super(TestUpdateBootstrapUUID, self).setUp(percona_utils,
self.TO_PATCH)
self.log.side_effect = self.juju_log
def juju_log(self, msg, level=None):