Add support for unit status

This commit is contained in:
James Page 2015-10-08 16:44:50 -07:00
commit f93f52dc5b
22 changed files with 1021 additions and 98 deletions

View File

@ -5,6 +5,7 @@ include:
- cli
- fetch
- contrib.openstack|inc=*
- contrib.openstack.utils
- contrib.storage
- contrib.hahelpers
- contrib.network.ip

View File

@ -23,7 +23,7 @@ import socket
from functools import partial
from charmhelpers.core.hookenv import unit_get
from charmhelpers.fetch import apt_install
from charmhelpers.fetch import apt_install, apt_update
from charmhelpers.core.hookenv import (
log,
WARNING,
@ -32,13 +32,15 @@ from charmhelpers.core.hookenv import (
try:
import netifaces
except ImportError:
apt_install('python-netifaces')
apt_update(fatal=True)
apt_install('python-netifaces', fatal=True)
import netifaces
try:
import netaddr
except ImportError:
apt_install('python-netaddr')
apt_update(fatal=True)
apt_install('python-netaddr', fatal=True)
import netaddr

View File

@ -44,20 +44,31 @@ class OpenStackAmuletDeployment(AmuletDeployment):
Determine if the local branch being tested is derived from its
stable or next (dev) branch, and based on this, use the corresponding
stable or next branches for the other_services."""
# Charms outside the lp:~openstack-charmers namespace
base_charms = ['mysql', 'mongodb', 'nrpe']
# Force these charms to current series even when using an older series.
# i.e. Use trusty/nrpe even when series is precise, as the P charm
# does not possess the necessary external master config and hooks.
force_series_current = ['nrpe']
if self.series in ['precise', 'trusty']:
base_series = self.series
else:
base_series = self.current_next
if self.stable:
for svc in other_services:
for svc in other_services:
if svc['name'] in force_series_current:
base_series = self.current_next
# If a location has been explicitly set, use it
if svc.get('location'):
continue
if self.stable:
temp = 'lp:charms/{}/{}'
svc['location'] = temp.format(base_series,
svc['name'])
else:
for svc in other_services:
else:
if svc['name'] in base_charms:
temp = 'lp:charms/{}/{}'
svc['location'] = temp.format(base_series,
@ -66,6 +77,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
temp = 'lp:~openstack-charmers/charms/{}/{}/next'
svc['location'] = temp.format(self.current_next,
svc['name'])
return other_services
def _add_services(self, this_service, other_services):
@ -77,21 +89,23 @@ class OpenStackAmuletDeployment(AmuletDeployment):
services = other_services
services.append(this_service)
# Charms which should use the source config option
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
'ceph-osd', 'ceph-radosgw']
# Most OpenStack subordinate charms do not expose an origin option
# as that is controlled by the principle.
ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe']
# Charms which cannot use openstack-origin, i.e. many subordinates
no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe']
if self.openstack:
for svc in services:
if svc['name'] not in use_source + ignore:
if svc['name'] not in use_source + no_origin:
config = {'openstack-origin': self.openstack}
self.d.configure(svc['name'], config)
if self.source:
for svc in services:
if svc['name'] in use_source and svc['name'] not in ignore:
if svc['name'] in use_source and svc['name'] not in no_origin:
config = {'source': self.source}
self.d.configure(svc['name'], config)

View File

@ -27,6 +27,7 @@ import glanceclient.v1.client as glance_client
import heatclient.v1.client as heat_client
import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client
import pika
import swiftclient
from charmhelpers.contrib.amulet.utils import (
@ -602,3 +603,361 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.debug('Ceph {} samples (OK): '
'{}'.format(sample_type, samples))
return None
# rabbitmq/amqp specific helpers:
def add_rmq_test_user(self, sentry_units,
username="testuser1", password="changeme"):
"""Add a test user via the first rmq juju unit, check connection as
the new user against all sentry units.
:param sentry_units: list of sentry unit pointers
:param username: amqp user name, default to testuser1
:param password: amqp user password
:returns: None if successful. Raise on error.
"""
self.log.debug('Adding rmq user ({})...'.format(username))
# Check that user does not already exist
cmd_user_list = 'rabbitmqctl list_users'
output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
if username in output:
self.log.warning('User ({}) already exists, returning '
'gracefully.'.format(username))
return
perms = '".*" ".*" ".*"'
cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
'rabbitmqctl set_permissions {} {}'.format(username, perms)]
# Add user via first unit
for cmd in cmds:
output, _ = self.run_cmd_unit(sentry_units[0], cmd)
# Check connection against the other sentry_units
self.log.debug('Checking user connect against units...')
for sentry_unit in sentry_units:
connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
username=username,
password=password)
connection.close()
def delete_rmq_test_user(self, sentry_units, username="testuser1"):
"""Delete a rabbitmq user via the first rmq juju unit.
:param sentry_units: list of sentry unit pointers
:param username: amqp user name, default to testuser1
:returns: None if successful or no such user.
"""
self.log.debug('Deleting rmq user ({})...'.format(username))
# Check that the user exists
cmd_user_list = 'rabbitmqctl list_users'
output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
if username not in output:
self.log.warning('User ({}) does not exist, returning '
'gracefully.'.format(username))
return
# Delete the user
cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
def get_rmq_cluster_status(self, sentry_unit):
"""Execute rabbitmq cluster status command on a unit and return
the full output.
:param sentry_unit: sentry unit
:returns: String containing console output of cluster status command
"""
cmd = 'rabbitmqctl cluster_status'
output, _ = self.run_cmd_unit(sentry_unit, cmd)
self.log.debug('{} cluster_status:\n{}'.format(
sentry_unit.info['unit_name'], output))
return str(output)
def get_rmq_cluster_running_nodes(self, sentry_unit):
"""Parse rabbitmqctl cluster_status output string, return list of
running rabbitmq cluster nodes.
:param sentry_unit: sentry unit
:returns: List containing node names of running nodes
"""
# NOTE(beisner): rabbitmqctl cluster_status output is not
# json-parsable, do string chop foo, then json.loads that.
str_stat = self.get_rmq_cluster_status(sentry_unit)
if 'running_nodes' in str_stat:
pos_start = str_stat.find("{running_nodes,") + 15
pos_end = str_stat.find("]},", pos_start) + 1
str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
run_nodes = json.loads(str_run_nodes)
return run_nodes
else:
return []
def validate_rmq_cluster_running_nodes(self, sentry_units):
"""Check that all rmq unit hostnames are represented in the
cluster_status output of all units.
:param sentry_units: list of sentry unit pointers (all rmq units)
:returns: None if successful, otherwise return error message
"""
host_names = self.get_unit_hostnames(sentry_units)
errors = []
# Query every unit for cluster_status running nodes
for query_unit in sentry_units:
query_unit_name = query_unit.info['unit_name']
running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
# Confirm that every unit is represented in the queried unit's
# cluster_status running nodes output.
for validate_unit in sentry_units:
val_host_name = host_names[validate_unit.info['unit_name']]
val_node_name = 'rabbit@{}'.format(val_host_name)
if val_node_name not in running_nodes:
errors.append('Cluster member check failed on {}: {} not '
'in {}\n'.format(query_unit_name,
val_node_name,
running_nodes))
if errors:
return ''.join(errors)
def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
"""Check a single juju rmq unit for ssl and port in the config file."""
host = sentry_unit.info['public-address']
unit_name = sentry_unit.info['unit_name']
conf_file = '/etc/rabbitmq/rabbitmq.config'
conf_contents = str(self.file_contents_safe(sentry_unit,
conf_file, max_wait=16))
# Checks
conf_ssl = 'ssl' in conf_contents
conf_port = str(port) in conf_contents
# Port explicitly checked in config
if port and conf_port and conf_ssl:
self.log.debug('SSL is enabled @{}:{} '
'({})'.format(host, port, unit_name))
return True
elif port and not conf_port and conf_ssl:
self.log.debug('SSL is enabled @{} but not on port {} '
'({})'.format(host, port, unit_name))
return False
# Port not checked (useful when checking that ssl is disabled)
elif not port and conf_ssl:
self.log.debug('SSL is enabled @{}:{} '
'({})'.format(host, port, unit_name))
return True
elif not conf_ssl:
self.log.debug('SSL not enabled @{}:{} '
'({})'.format(host, port, unit_name))
return False
else:
msg = ('Unknown condition when checking SSL status @{}:{} '
'({})'.format(host, port, unit_name))
amulet.raise_status(amulet.FAIL, msg)
def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
"""Check that ssl is enabled on rmq juju sentry units.
:param sentry_units: list of all rmq sentry units
:param port: optional ssl port override to validate
:returns: None if successful, otherwise return error message
"""
for sentry_unit in sentry_units:
if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
return ('Unexpected condition: ssl is disabled on unit '
'({})'.format(sentry_unit.info['unit_name']))
return None
def validate_rmq_ssl_disabled_units(self, sentry_units):
"""Check that ssl is enabled on listed rmq juju sentry units.
:param sentry_units: list of all rmq sentry units
:returns: True if successful. Raise on error.
"""
for sentry_unit in sentry_units:
if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
return ('Unexpected condition: ssl is enabled on unit '
'({})'.format(sentry_unit.info['unit_name']))
return None
def configure_rmq_ssl_on(self, sentry_units, deployment,
port=None, max_wait=60):
"""Turn ssl charm config option on, with optional non-default
ssl port specification. Confirm that it is enabled on every
unit.
:param sentry_units: list of sentry units
:param deployment: amulet deployment object pointer
:param port: amqp port, use defaults if None
:param max_wait: maximum time to wait in seconds to confirm
:returns: None if successful. Raise on error.
"""
self.log.debug('Setting ssl charm config option: on')
# Enable RMQ SSL
config = {'ssl': 'on'}
if port:
config['ssl_port'] = port
deployment.configure('rabbitmq-server', config)
# Confirm
tries = 0
ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
while ret and tries < (max_wait / 4):
time.sleep(4)
self.log.debug('Attempt {}: {}'.format(tries, ret))
ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
tries += 1
if ret:
amulet.raise_status(amulet.FAIL, ret)
def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
"""Turn ssl charm config option off, confirm that it is disabled
on every unit.
:param sentry_units: list of sentry units
:param deployment: amulet deployment object pointer
:param max_wait: maximum time to wait in seconds to confirm
:returns: None if successful. Raise on error.
"""
self.log.debug('Setting ssl charm config option: off')
# Disable RMQ SSL
config = {'ssl': 'off'}
deployment.configure('rabbitmq-server', config)
# Confirm
tries = 0
ret = self.validate_rmq_ssl_disabled_units(sentry_units)
while ret and tries < (max_wait / 4):
time.sleep(4)
self.log.debug('Attempt {}: {}'.format(tries, ret))
ret = self.validate_rmq_ssl_disabled_units(sentry_units)
tries += 1
if ret:
amulet.raise_status(amulet.FAIL, ret)
def connect_amqp_by_unit(self, sentry_unit, ssl=False,
port=None, fatal=True,
username="testuser1", password="changeme"):
"""Establish and return a pika amqp connection to the rabbitmq service
running on a rmq juju unit.
:param sentry_unit: sentry unit pointer
:param ssl: boolean, default to False
:param port: amqp port, use defaults if None
:param fatal: boolean, default to True (raises on connect error)
:param username: amqp user name, default to testuser1
:param password: amqp user password
:returns: pika amqp connection pointer or None if failed and non-fatal
"""
host = sentry_unit.info['public-address']
unit_name = sentry_unit.info['unit_name']
# Default port logic if port is not specified
if ssl and not port:
port = 5671
elif not ssl and not port:
port = 5672
self.log.debug('Connecting to amqp on {}:{} ({}) as '
'{}...'.format(host, port, unit_name, username))
try:
credentials = pika.PlainCredentials(username, password)
parameters = pika.ConnectionParameters(host=host, port=port,
credentials=credentials,
ssl=ssl,
connection_attempts=3,
retry_delay=5,
socket_timeout=1)
connection = pika.BlockingConnection(parameters)
assert connection.server_properties['product'] == 'RabbitMQ'
self.log.debug('Connect OK')
return connection
except Exception as e:
msg = ('amqp connection failed to {}:{} as '
'{} ({})'.format(host, port, username, str(e)))
if fatal:
amulet.raise_status(amulet.FAIL, msg)
else:
self.log.warn(msg)
return None
def publish_amqp_message_by_unit(self, sentry_unit, message,
queue="test", ssl=False,
username="testuser1",
password="changeme",
port=None):
"""Publish an amqp message to a rmq juju unit.
:param sentry_unit: sentry unit pointer
:param message: amqp message string
:param queue: message queue, default to test
:param username: amqp user name, default to testuser1
:param password: amqp user password
:param ssl: boolean, default to False
:param port: amqp port, use defaults if None
:returns: None. Raises exception if publish failed.
"""
self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
message))
connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
port=port,
username=username,
password=password)
# NOTE(beisner): extra debug here re: pika hang potential:
# https://github.com/pika/pika/issues/297
# https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
self.log.debug('Defining channel...')
channel = connection.channel()
self.log.debug('Declaring queue...')
channel.queue_declare(queue=queue, auto_delete=False, durable=True)
self.log.debug('Publishing message...')
channel.basic_publish(exchange='', routing_key=queue, body=message)
self.log.debug('Closing channel...')
channel.close()
self.log.debug('Closing connection...')
connection.close()
def get_amqp_message_by_unit(self, sentry_unit, queue="test",
username="testuser1",
password="changeme",
ssl=False, port=None):
"""Get an amqp message from a rmq juju unit.
:param sentry_unit: sentry unit pointer
:param queue: message queue, default to test
:param username: amqp user name, default to testuser1
:param password: amqp user password
:param ssl: boolean, default to False
:param port: amqp port, use defaults if None
:returns: amqp message body as string. Raise if get fails.
"""
connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
port=port,
username=username,
password=password)
channel = connection.channel()
method_frame, _, body = channel.basic_get(queue)
if method_frame:
self.log.debug('Retrieved message from {} queue:\n{}'.format(queue,
body))
channel.basic_ack(method_frame.delivery_tag)
channel.close()
connection.close()
return body
else:
msg = 'No message retrieved.'
amulet.raise_status(amulet.FAIL, msg)
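Taken together, these helpers let an amulet test drive a full round trip through rabbitmq. A rough usage sketch follows; get_rmq_sentry_units() is a hypothetical stand-in for however the test obtains its rabbitmq-server sentry units, and the queue name, credentials and message text are arbitrary examples rather than anything defined by this commit.

import logging

from charmhelpers.contrib.openstack.amulet.utils import OpenStackAmuletUtils

u = OpenStackAmuletUtils(logging.DEBUG)

# Hypothetical: a list of rabbitmq-server sentry units from the deployment.
rmq_units = get_rmq_sentry_units()

# Create a test user via the first unit and verify it can connect everywhere.
u.add_rmq_test_user(rmq_units)

# Publish on one unit, consume on another, then clean up.
u.publish_amqp_message_by_unit(rmq_units[0], 'test message', queue='test')
body = u.get_amqp_message_by_unit(rmq_units[1], queue='test')
assert body == 'test message'
u.delete_rmq_test_user(rmq_units)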

View File

@ -14,6 +14,7 @@
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import glob
import json
import os
import re
@ -194,10 +195,50 @@ def config_flags_parser(config_flags):
class OSContextGenerator(object):
"""Base class for all context generators."""
interfaces = []
related = False
complete = False
missing_data = []
def __call__(self):
raise NotImplementedError
def context_complete(self, ctxt):
"""Check for missing data for the required context data.
Set self.missing_data if it exists and return False.
Set self.complete if no missing data and return True.
"""
# Fresh start
self.complete = False
self.missing_data = []
for k, v in six.iteritems(ctxt):
if v is None or v == '':
if k not in self.missing_data:
self.missing_data.append(k)
if self.missing_data:
self.complete = False
log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
else:
self.complete = True
return self.complete
def get_related(self):
"""Check if any of the context interfaces have relation ids.
Set self.related and return True if one of the interfaces
has relation ids.
"""
# Fresh start
self.related = False
try:
for interface in self.interfaces:
if relation_ids(interface):
self.related = True
return self.related
except AttributeError as e:
log("{} {}"
"".format(self, e), 'INFO')
return self.related
class SharedDBContext(OSContextGenerator):
interfaces = ['shared-db']
@ -213,6 +254,7 @@ class SharedDBContext(OSContextGenerator):
self.database = database
self.user = user
self.ssl_dir = ssl_dir
self.rel_name = self.interfaces[0]
def __call__(self):
self.database = self.database or config('database')
@ -246,6 +288,7 @@ class SharedDBContext(OSContextGenerator):
password_setting = self.relation_prefix + '_password'
for rid in relation_ids(self.interfaces[0]):
self.related = True
for unit in related_units(rid):
rdata = relation_get(rid=rid, unit=unit)
host = rdata.get('db_host')
@ -257,7 +300,7 @@ class SharedDBContext(OSContextGenerator):
'database_password': rdata.get(password_setting),
'database_type': 'mysql'
}
if context_complete(ctxt):
if self.context_complete(ctxt):
db_ssl(rdata, ctxt, self.ssl_dir)
return ctxt
return {}
@ -278,6 +321,7 @@ class PostgresqlDBContext(OSContextGenerator):
ctxt = {}
for rid in relation_ids(self.interfaces[0]):
self.related = True
for unit in related_units(rid):
rel_host = relation_get('host', rid=rid, unit=unit)
rel_user = relation_get('user', rid=rid, unit=unit)
@ -287,7 +331,7 @@ class PostgresqlDBContext(OSContextGenerator):
'database_user': rel_user,
'database_password': rel_passwd,
'database_type': 'postgresql'}
if context_complete(ctxt):
if self.context_complete(ctxt):
return ctxt
return {}
@ -348,6 +392,7 @@ class IdentityServiceContext(OSContextGenerator):
ctxt['signing_dir'] = cachedir
for rid in relation_ids(self.rel_name):
self.related = True
for unit in related_units(rid):
rdata = relation_get(rid=rid, unit=unit)
serv_host = rdata.get('service_host')
@ -366,7 +411,7 @@ class IdentityServiceContext(OSContextGenerator):
'service_protocol': svc_protocol,
'auth_protocol': auth_protocol})
if context_complete(ctxt):
if self.context_complete(ctxt):
# NOTE(jamespage) this is required for >= icehouse
# so a missing value just indicates keystone needs
# upgrading
@ -405,6 +450,7 @@ class AMQPContext(OSContextGenerator):
ctxt = {}
for rid in relation_ids(self.rel_name):
ha_vip_only = False
self.related = True
for unit in related_units(rid):
if relation_get('clustered', rid=rid, unit=unit):
ctxt['clustered'] = True
@ -437,7 +483,7 @@ class AMQPContext(OSContextGenerator):
ha_vip_only = relation_get('ha-vip-only',
rid=rid, unit=unit) is not None
if context_complete(ctxt):
if self.context_complete(ctxt):
if 'rabbit_ssl_ca' in ctxt:
if not self.ssl_dir:
log("Charm not setup for ssl support but ssl ca "
@ -469,7 +515,7 @@ class AMQPContext(OSContextGenerator):
ctxt['oslo_messaging_flags'] = config_flags_parser(
oslo_messaging_flags)
if not context_complete(ctxt):
if not self.complete:
return {}
return ctxt
@ -507,7 +553,7 @@ class CephContext(OSContextGenerator):
if not os.path.isdir('/etc/ceph'):
os.mkdir('/etc/ceph')
if not context_complete(ctxt):
if not self.context_complete(ctxt):
return {}
ensure_packages(['ceph-common'])
@ -906,6 +952,19 @@ class NeutronContext(OSContextGenerator):
'config': config}
return ovs_ctxt
def midonet_ctxt(self):
driver = neutron_plugin_attribute(self.plugin, 'driver',
self.network_manager)
midonet_config = neutron_plugin_attribute(self.plugin, 'config',
self.network_manager)
mido_ctxt = {'core_plugin': driver,
'neutron_plugin': 'midonet',
'neutron_security_groups': self.neutron_security_groups,
'local_ip': unit_private_ip(),
'config': midonet_config}
return mido_ctxt
def __call__(self):
if self.network_manager not in ['quantum', 'neutron']:
return {}
@ -927,6 +986,8 @@ class NeutronContext(OSContextGenerator):
ctxt.update(self.nuage_ctxt())
elif self.plugin == 'plumgrid':
ctxt.update(self.pg_ctxt())
elif self.plugin == 'midonet':
ctxt.update(self.midonet_ctxt())
alchemy_flags = config('neutron-alchemy-flags')
if alchemy_flags:
@ -1059,7 +1120,7 @@ class SubordinateConfigContext(OSContextGenerator):
ctxt = {
... other context ...
'subordinate_config': {
'subordinate_configuration': {
'DEFAULT': {
'key1': 'value1',
},
@ -1100,22 +1161,23 @@ class SubordinateConfigContext(OSContextGenerator):
try:
sub_config = json.loads(sub_config)
except:
log('Could not parse JSON from subordinate_config '
'setting from %s' % rid, level=ERROR)
log('Could not parse JSON from '
'subordinate_configuration setting from %s'
% rid, level=ERROR)
continue
for service in self.services:
if service not in sub_config:
log('Found subordinate_config on %s but it contained'
'nothing for %s service' % (rid, service),
level=INFO)
log('Found subordinate_configuration on %s but it '
'contained nothing for %s service'
% (rid, service), level=INFO)
continue
sub_config = sub_config[service]
if self.config_file not in sub_config:
log('Found subordinate_config on %s but it contained'
'nothing for %s' % (rid, self.config_file),
level=INFO)
log('Found subordinate_configuration on %s but it '
'contained nothing for %s'
% (rid, self.config_file), level=INFO)
continue
sub_config = sub_config[self.config_file]
@ -1318,7 +1380,7 @@ class DataPortContext(NeutronPortContext):
normalized.update({port: port for port in resolved
if port in ports})
if resolved:
return {bridge: normalized[port] for port, bridge in
return {normalized[port]: bridge for port, bridge in
six.iteritems(portmap) if port in normalized.keys()}
return None
@ -1329,12 +1391,22 @@ class PhyNICMTUContext(DataPortContext):
def __call__(self):
ctxt = {}
mappings = super(PhyNICMTUContext, self).__call__()
if mappings and mappings.values():
ports = mappings.values()
if mappings and mappings.keys():
ports = sorted(mappings.keys())
napi_settings = NeutronAPIContext()()
mtu = napi_settings.get('network_device_mtu')
all_ports = set()
# If any of ports is a vlan device, its underlying device must have
# mtu applied first.
for port in ports:
for lport in glob.glob("/sys/class/net/%s/lower_*" % port):
lport = os.path.basename(lport)
all_ports.add(lport.split('_')[1])
all_ports = list(all_ports)
all_ports.extend(ports)
if mtu:
ctxt["devs"] = '\\n'.join(ports)
ctxt["devs"] = '\\n'.join(all_ports)
ctxt['mtu'] = mtu
return ctxt
@ -1366,6 +1438,6 @@ class NetworkServiceContext(OSContextGenerator):
'auth_protocol':
rdata.get('auth_protocol') or 'http',
}
if context_complete(ctxt):
if self.context_complete(ctxt):
return ctxt
return {}
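The new context_complete()/get_related() plumbing on the base class is what the templating and workload-status code later in this commit builds on. As a minimal sketch, a charm-specific generator might use it as shown below; the 'example-backend' interface and relation keys are illustrative only, not part of charm-helpers.

from charmhelpers.contrib.openstack.context import OSContextGenerator
from charmhelpers.core.hookenv import related_units, relation_get, relation_ids


class ExampleBackendContext(OSContextGenerator):
    # Illustrative interface name; a real charm would use its own relation.
    interfaces = ['example-backend']

    def __call__(self):
        for rid in relation_ids(self.interfaces[0]):
            self.related = True
            for unit in related_units(rid):
                ctxt = {
                    'backend_host': relation_get('hostname', rid=rid,
                                                 unit=unit),
                    'backend_port': relation_get('port', rid=rid, unit=unit),
                }
                # Empty values end up in self.missing_data, which the
                # workload-status helpers later report to the operator.
                if self.context_complete(ctxt):
                    return ctxt
        return {}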

View File

@ -209,6 +209,20 @@ def neutron_plugins():
'server_packages': ['neutron-server',
'neutron-plugin-plumgrid'],
'server_services': ['neutron-server']
},
'midonet': {
'config': '/etc/neutron/plugins/midonet/midonet.ini',
'driver': 'midonet.neutron.plugin.MidonetPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': [],
'packages': [[headers_package()] + determine_dkms_package()],
'server_packages': ['neutron-server',
'python-neutron-plugin-midonet'],
'server_services': ['neutron-server']
}
}
if release >= 'icehouse':
@ -310,10 +324,10 @@ def parse_bridge_mappings(mappings):
def parse_data_port_mappings(mappings, default_bridge='br-data'):
"""Parse data port mappings.
Mappings must be a space-delimited list of port:bridge mappings.
Mappings must be a space-delimited list of bridge:port.
Returns dict of the form {port:bridge} where port may be an mac address or
interface name.
Returns dict of the form {port:bridge} where ports may be mac addresses or
interface names.
"""
# NOTE(dosaboy): we use rvalue for key to allow multiple values to be
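Assuming the documented bridge:port form, a call might look like the sketch below; the exact handling of MAC addresses and duplicate ports lives in code outside this hunk, so treat the expected output as indicative only.

from charmhelpers.contrib.openstack.neutron import parse_data_port_mappings

# Illustrative config value in the new bridge:port form.
mappings = parse_data_port_mappings('br-ex:eth1 br-data:eth2')
# Per the docstring, ports key the result: {'eth1': 'br-ex', 'eth2': 'br-data'}
print(mappings)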

View File

@ -13,3 +13,9 @@ log to syslog = {{ use_syslog }}
err to syslog = {{ use_syslog }}
clog to syslog = {{ use_syslog }}
[client]
{% if rbd_client_cache_settings -%}
{% for key, value in rbd_client_cache_settings.iteritems() -%}
{{ key }} = {{ value }}
{% endfor -%}
{%- endif %}

View File

@ -18,7 +18,7 @@ import os
import six
from charmhelpers.fetch import apt_install
from charmhelpers.fetch import apt_install, apt_update
from charmhelpers.core.hookenv import (
log,
ERROR,
@ -29,6 +29,7 @@ from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
try:
from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
except ImportError:
apt_update(fatal=True)
apt_install('python-jinja2', fatal=True)
from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
@ -112,7 +113,7 @@ class OSConfigTemplate(object):
def complete_contexts(self):
'''
Return a list of interfaces that have atisfied contexts.
Return a list of interfaces that have satisfied contexts.
'''
if self._complete_contexts:
return self._complete_contexts
@ -293,3 +294,30 @@ class OSConfigRenderer(object):
[interfaces.extend(i.complete_contexts())
for i in six.itervalues(self.templates)]
return interfaces
def get_incomplete_context_data(self, interfaces):
'''
Return dictionary of relation status of interfaces and any missing
required context data. Example:
{'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
'zeromq-configuration': {'related': False}}
'''
incomplete_context_data = {}
for i in six.itervalues(self.templates):
for context in i.contexts:
for interface in interfaces:
related = False
if interface in context.interfaces:
related = context.get_related()
missing_data = context.missing_data
if missing_data:
incomplete_context_data[interface] = {'missing_data': missing_data}
if related:
if incomplete_context_data.get(interface):
incomplete_context_data[interface].update({'related': True})
else:
incomplete_context_data[interface] = {'related': True}
else:
incomplete_context_data[interface] = {'related': False}
return incomplete_context_data

View File

@ -25,6 +25,7 @@ import sys
import re
import six
import traceback
import yaml
from charmhelpers.contrib.network import ip
@ -34,12 +35,16 @@ from charmhelpers.core import (
)
from charmhelpers.core.hookenv import (
action_fail,
action_set,
config,
log as juju_log,
charm_dir,
INFO,
relation_ids,
relation_set
relation_set,
status_set,
hook_name
)
from charmhelpers.contrib.storage.linux.lvm import (
@ -49,7 +54,8 @@ from charmhelpers.contrib.storage.linux.lvm import (
)
from charmhelpers.contrib.network.ip import (
get_ipv6_addr
get_ipv6_addr,
is_ipv6,
)
from charmhelpers.contrib.python.packages import (
@ -114,6 +120,7 @@ SWIFT_CODENAMES = OrderedDict([
('2.2.1', 'kilo'),
('2.2.2', 'kilo'),
('2.3.0', 'liberty'),
('2.4.0', 'liberty'),
])
# >= Liberty version->codename mapping
@ -142,6 +149,9 @@ PACKAGE_CODENAMES = {
'glance-common': OrderedDict([
('11.0.0', 'liberty'),
]),
'openstack-dashboard': OrderedDict([
('8.0.0', 'liberty'),
]),
}
DEFAULT_LOOPBACK_SIZE = '5G'
@ -510,6 +520,12 @@ def sync_db_with_multi_ipv6_addresses(database, database_user,
relation_prefix=None):
hosts = get_ipv6_addr(dynamic_only=False)
if config('vip'):
vips = config('vip').split()
for vip in vips:
if vip and is_ipv6(vip):
hosts.append(vip)
kwargs = {'database': database,
'username': database_user,
'hostname': json.dumps(hosts)}
@ -745,3 +761,217 @@ def git_yaml_value(projects_yaml, key):
return projects[key]
return None
def os_workload_status(configs, required_interfaces, charm_func=None):
"""
Decorator to set workload status based on complete contexts
"""
def wrap(f):
@wraps(f)
def wrapped_f(*args, **kwargs):
# Run the original function first
f(*args, **kwargs)
# Set workload status now that contexts have been
# acted on
set_os_workload_status(configs, required_interfaces, charm_func)
return wrapped_f
return wrap
def set_os_workload_status(configs, required_interfaces, charm_func=None):
"""
Set workload status based on complete contexts.
Call status-set to report missing or incomplete contexts,
and juju-log details of any missing required data.
charm_func is a charm-specific function to run that checks
for charm-specific requirements such as a VIP setting.
"""
incomplete_rel_data = incomplete_relation_data(configs, required_interfaces)
state = 'active'
missing_relations = []
incomplete_relations = []
message = None
charm_state = None
charm_message = None
for generic_interface in incomplete_rel_data.keys():
related_interface = None
missing_data = {}
# Related or not?
for interface in incomplete_rel_data[generic_interface]:
if incomplete_rel_data[generic_interface][interface].get('related'):
related_interface = interface
missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data')
# No relation ID for the generic_interface
if not related_interface:
juju_log("{} relation is missing and must be related for "
"functionality. ".format(generic_interface), 'WARN')
state = 'blocked'
if generic_interface not in missing_relations:
missing_relations.append(generic_interface)
else:
# Relation ID exists but no related unit
if not missing_data:
# Edge case relation ID exists but departing
if ('departed' in hook_name() or 'broken' in hook_name()) \
and related_interface in hook_name():
state = 'blocked'
if generic_interface not in missing_relations:
missing_relations.append(generic_interface)
juju_log("{} relation's interface, {}, "
"relationship is departed or broken "
"and is required for functionality."
"".format(generic_interface, related_interface), "WARN")
# Normal case relation ID exists but no related unit
# (joining)
else:
juju_log("{} relations's interface, {}, is related but has "
"no units in the relation."
"".format(generic_interface, related_interface), "INFO")
# Related unit exists and data missing on the relation
else:
juju_log("{} relation's interface, {}, is related awaiting "
"the following data from the relationship: {}. "
"".format(generic_interface, related_interface,
", ".join(missing_data)), "INFO")
if state != 'blocked':
state = 'waiting'
if generic_interface not in incomplete_relations \
and generic_interface not in missing_relations:
incomplete_relations.append(generic_interface)
if missing_relations:
message = "Missing relations: {}".format(", ".join(missing_relations))
if incomplete_relations:
message += "; incomplete relations: {}" \
"".format(", ".join(incomplete_relations))
state = 'blocked'
elif incomplete_relations:
message = "Incomplete relations: {}" \
"".format(", ".join(incomplete_relations))
state = 'waiting'
# Run charm specific checks
if charm_func:
charm_state, charm_message = charm_func(configs)
if charm_state != 'active' and charm_state != 'unknown':
state = workload_state_compare(state, charm_state)
if message:
message = "{} {}".format(message, charm_message)
else:
message = charm_message
# Set to active if all requirements have been met
if state == 'active':
message = "Unit is ready"
juju_log(message, "INFO")
status_set(state, message)
def workload_state_compare(current_workload_state, workload_state):
""" Return highest priority of two states"""
hierarchy = {'unknown': -1,
'active': 0,
'maintenance': 1,
'waiting': 2,
'blocked': 3,
}
if hierarchy.get(workload_state) is None:
workload_state = 'unknown'
if hierarchy.get(current_workload_state) is None:
current_workload_state = 'unknown'
# Set workload_state based on hierarchy of statuses
if hierarchy.get(current_workload_state) > hierarchy.get(workload_state):
return current_workload_state
else:
return workload_state
def incomplete_relation_data(configs, required_interfaces):
"""
Check complete contexts against required_interfaces
Return dictionary of incomplete relation data.
configs is an OSConfigRenderer object with configs registered
required_interfaces is a dictionary of required general interfaces
with dictionary values of possible specific interfaces.
Example:
required_interfaces = {'database': ['shared-db', 'pgsql-db']}
The interface is said to be satisfied if any one of the interfaces in the
list has a complete context.
Return dictionary of incomplete or missing required contexts with relation
status of interfaces and any missing data points. Example:
{'message':
{'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
'zeromq-configuration': {'related': False}},
'identity':
{'identity-service': {'related': False}},
'database':
{'pgsql-db': {'related': False},
'shared-db': {'related': True}}}
"""
complete_ctxts = configs.complete_contexts()
incomplete_relations = []
for svc_type in required_interfaces.keys():
# Avoid duplicates
found_ctxt = False
for interface in required_interfaces[svc_type]:
if interface in complete_ctxts:
found_ctxt = True
if not found_ctxt:
incomplete_relations.append(svc_type)
incomplete_context_data = {}
for i in incomplete_relations:
incomplete_context_data[i] = configs.get_incomplete_context_data(required_interfaces[i])
return incomplete_context_data
def do_action_openstack_upgrade(package, upgrade_callback, configs):
"""Perform action-managed OpenStack upgrade.
Upgrades packages to the configured openstack-origin version and sets
the corresponding action status as a result.
If the charm was installed from source we cannot upgrade it.
For backwards compatibility a config flag (action-managed-upgrade) must
be set for this code to run, otherwise a full service level upgrade will
fire on config-changed.
@param package: package name for determining if upgrade available
@param upgrade_callback: function callback to charm's upgrade function
@param configs: templating object derived from OSConfigRenderer class
@return: True if upgrade successful; False if upgrade failed or skipped
"""
ret = False
if git_install_requested():
action_set({'outcome': 'installed from source, skipped upgrade.'})
else:
if openstack_upgrade_available(package):
if config('action-managed-upgrade'):
juju_log('Upgrading OpenStack release')
try:
upgrade_callback(configs=configs)
action_set({'outcome': 'success, upgrade completed.'})
ret = True
except:
action_set({'outcome': 'upgrade failed, see traceback.'})
action_set({'traceback': traceback.format_exc()})
action_fail('do_openstack_upgrade resulted in an '
'unexpected error')
else:
action_set({'outcome': 'action-managed-upgrade config is '
'False, skipped upgrade.'})
else:
action_set({'outcome': 'no upgrade available.'})
return ret
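The cinder-ceph hook changes further down call set_os_workload_status() directly at the end of hook execution; the decorator form is the alternative, sketched here against the cinder-ceph names (CONFIGS via register_configs() and REQUIRED_INTERFACES) purely for illustration, not as part of this commit.

from cinder_utils import register_configs, REQUIRED_INTERFACES
from charmhelpers.contrib.openstack.utils import os_workload_status

CONFIGS = register_configs()


@os_workload_status(CONFIGS, REQUIRED_INTERFACES)
def config_changed():
    # The wrapped hook runs first; the decorator then calls
    # set_os_workload_status(CONFIGS, REQUIRED_INTERFACES) so the unit's
    # workload status reflects any missing or incomplete relations.
    CONFIGS.write_all()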

View File

@ -59,6 +59,8 @@ from charmhelpers.fetch import (
apt_install,
)
from charmhelpers.core.kernel import modprobe
KEYRING = '/etc/ceph/ceph.client.{}.keyring'
KEYFILE = '/etc/ceph/ceph.client.{}.key'
@ -291,17 +293,6 @@ def place_data_on_block_device(blk_device, data_src_dst):
os.chown(data_src_dst, uid, gid)
# TODO: re-use
def modprobe(module):
"""Load a kernel module and configure for auto-load on reboot."""
log('Loading kernel module', level=INFO)
cmd = ['modprobe', module]
check_call(cmd)
with open('/etc/modules', 'r+') as modules:
if module not in modules.read():
modules.write(module)
def copy_files(src, dst, symlinks=False, ignore=None):
"""Copy files from src to dst."""
for item in os.listdir(src):

View File

@ -623,6 +623,38 @@ def unit_private_ip():
return unit_get('private-address')
@cached
def storage_get(attribute="", storage_id=""):
"""Get storage attributes"""
_args = ['storage-get', '--format=json']
if storage_id:
_args.extend(('-s', storage_id))
if attribute:
_args.append(attribute)
try:
return json.loads(subprocess.check_output(_args).decode('UTF-8'))
except ValueError:
return None
@cached
def storage_list(storage_name=""):
"""List the storage IDs for the unit"""
_args = ['storage-list', '--format=json']
if storage_name:
_args.append(storage_name)
try:
return json.loads(subprocess.check_output(_args).decode('UTF-8'))
except ValueError:
return None
except OSError as e:
import errno
if e.errno == errno.ENOENT:
# storage-list does not exist
return []
raise
class UnregisteredHookError(Exception):
"""Raised when an undefined hook is called"""
pass
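A short sketch of consuming the new storage helpers from a charm hook; the 'data' storage name is an example that would have to match a storage entry in the charm's metadata.yaml, and 'location' is the attribute Juju exposes for filesystem and block storage.

from charmhelpers.core.hookenv import log, storage_get, storage_list

# 'data' is an illustrative storage name from metadata.yaml.
for sid in storage_list('data'):
    location = storage_get('location', sid)
    if location:
        log('storage {} attached at {}'.format(sid, location))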

View File

@ -63,32 +63,48 @@ def service_reload(service_name, restart_on_failure=False):
return service_result
def service_pause(service_name, init_dir=None):
def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
"""Pause a system service.
Stop it, and prevent it from starting again at boot."""
if init_dir is None:
init_dir = "/etc/init"
stopped = service_stop(service_name)
# XXX: Support systemd too
override_path = os.path.join(
init_dir, '{}.override'.format(service_name))
with open(override_path, 'w') as fh:
fh.write("manual\n")
upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
sysv_file = os.path.join(initd_dir, service_name)
if os.path.exists(upstart_file):
override_path = os.path.join(
init_dir, '{}.override'.format(service_name))
with open(override_path, 'w') as fh:
fh.write("manual\n")
elif os.path.exists(sysv_file):
subprocess.check_call(["update-rc.d", service_name, "disable"])
else:
# XXX: Support SystemD too
raise ValueError(
"Unable to detect {0} as either Upstart {1} or SysV {2}".format(
service_name, upstart_file, sysv_file))
return stopped
def service_resume(service_name, init_dir=None):
def service_resume(service_name, init_dir="/etc/init",
initd_dir="/etc/init.d"):
"""Resume a system service.
Re-enable starting again at boot. Start the service.
# XXX: Support systemd too
if init_dir is None:
init_dir = "/etc/init"
override_path = os.path.join(
init_dir, '{}.override'.format(service_name))
if os.path.exists(override_path):
os.unlink(override_path)
upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
sysv_file = os.path.join(initd_dir, service_name)
if os.path.exists(upstart_file):
override_path = os.path.join(
init_dir, '{}.override'.format(service_name))
if os.path.exists(override_path):
os.unlink(override_path)
elif os.path.exists(sysv_file):
subprocess.check_call(["update-rc.d", service_name, "enable"])
else:
# XXX: Support SystemD too
raise ValueError(
"Unable to detect {0} as either Upstart {1} or SysV {2}".format(
service_name, upstart_file, sysv_file))
started = service_start(service_name)
return started
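For example, a charm might wrap a maintenance window with the reworked helpers; 'apache2' is just a stand-in service name. Whether the Upstart override or the update-rc.d path is taken depends on which init artefacts exist on the unit.

from charmhelpers.core.host import service_pause, service_resume

SERVICE = 'apache2'  # illustrative service name

if service_pause(SERVICE):
    # ... do maintenance while the service is stopped and prevented from
    # starting at boot ...
    service_resume(SERVICE)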

View File

@ -25,11 +25,13 @@ from charmhelpers.core.host import (
fstab_mount,
mkdir,
)
from charmhelpers.core.strutils import bytes_from_string
from subprocess import check_output
def hugepage_support(user, group='hugetlb', nr_hugepages=256,
max_map_count=65536, mnt_point='/run/hugepages/kvm',
pagesize='2MB', mount=True):
pagesize='2MB', mount=True, set_shmmax=False):
"""Enable hugepages on system.
Args:
@ -49,6 +51,11 @@ def hugepage_support(user, group='hugetlb', nr_hugepages=256,
'vm.max_map_count': max_map_count,
'vm.hugetlb_shm_group': gid,
}
if set_shmmax:
shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
if shmmax_minsize > shmmax_current:
sysctl_settings['kernel.shmmax'] = shmmax_minsize
sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
lfstab = fstab.Fstab()

View File

@ -0,0 +1,68 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
from charmhelpers.core.hookenv import (
log,
INFO
)
from subprocess import check_call, check_output
import re
def modprobe(module, persist=True):
"""Load a kernel module and configure for auto-load on reboot."""
cmd = ['modprobe', module]
log('Loading kernel module %s' % module, level=INFO)
check_call(cmd)
if persist:
with open('/etc/modules', 'r+') as modules:
if module not in modules.read():
modules.write(module)
def rmmod(module, force=False):
"""Remove a module from the linux kernel"""
cmd = ['rmmod']
if force:
cmd.append('-f')
cmd.append(module)
log('Removing kernel module %s' % module, level=INFO)
return check_call(cmd)
def lsmod():
"""Shows what kernel modules are currently loaded"""
return check_output(['lsmod'],
universal_newlines=True)
def is_module_loaded(module):
"""Checks if a kernel module is already loaded"""
matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
return len(matches) > 0
def update_initramfs(version='all'):
"""Updates an initramfs image"""
return check_call(["update-initramfs", "-k", version, "-u"])
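A brief usage sketch of the new kernel helpers; 'bonding' is an arbitrary example module.

from charmhelpers.core.kernel import is_module_loaded, modprobe, rmmod

if not is_module_loaded('bonding'):
    # persist=True also records the module in /etc/modules for boot time.
    modprobe('bonding', persist=True)

# ... later, unload it again (pass force=True to force removal).
rmmod('bonding')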

View File

@ -18,6 +18,7 @@
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import six
import re
def bool_from_string(value):
@ -40,3 +41,32 @@ def bool_from_string(value):
msg = "Unable to interpret string value '%s' as boolean" % (value)
raise ValueError(msg)
def bytes_from_string(value):
"""Interpret human readable string value as bytes.
Returns int
"""
BYTE_POWER = {
'K': 1,
'KB': 1,
'M': 2,
'MB': 2,
'G': 3,
'GB': 3,
'T': 4,
'TB': 4,
'P': 5,
'PB': 5,
}
if isinstance(value, six.string_types):
value = six.text_type(value)
else:
msg = "Unable to interpret non-string value '%s' as boolean" % (value)
raise ValueError(msg)
matches = re.match("([0-9]+)([a-zA-Z]+)", value)
if not matches:
msg = "Unable to interpret string value '%s' as bytes" % (value)
raise ValueError(msg)
return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
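Suffixes are interpreted as powers of 1024, which is what the hugepage shmmax sizing above relies on; a few worked values as a sketch:

from charmhelpers.core.strutils import bytes_from_string

assert bytes_from_string('512K') == 512 * 1024
assert bytes_from_string('2MB') == 2 * 1024 ** 2
assert bytes_from_string('3G') == 3 * 1024 ** 3

# hugepage_support(set_shmmax=True) with the defaults pagesize='2MB' and
# nr_hugepages=256 therefore asks for at least
# 2 * 1024 ** 2 * 256 = 536870912 bytes (512MB) of kernel.shmmax.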

View File

@ -8,7 +8,8 @@ from cinder_utils import (
register_configs,
restart_map,
set_ceph_env_variables,
PACKAGES
PACKAGES,
REQUIRED_INTERFACES,
)
from cinder_contexts import CephSubordinateContext
@ -19,6 +20,7 @@ from charmhelpers.core.hookenv import (
service_name,
relation_set,
relation_ids,
status_set,
log,
)
from charmhelpers.fetch import apt_install, apt_update
@ -34,6 +36,8 @@ from charmhelpers.contrib.storage.linux.ceph import (
delete_keyring,
)
from charmhelpers.payload.execd import execd_preinstall
from charmhelpers.contrib.openstack.utils import set_os_workload_status
hooks = Hooks()
@ -42,7 +46,9 @@ CONFIGS = register_configs()
@hooks.hook('install')
def install():
status_set('maintenance', 'Executing pre-install')
execd_preinstall()
status_set('maintenance', 'Installing apt packages')
apt_update(fatal=True)
apt_install(PACKAGES, fatal=True)
@ -136,3 +142,4 @@ if __name__ == '__main__':
hooks.execute(sys.argv)
except UnregisteredHookError as e:
log('Unknown hook {} - skipping.'.format(e))
set_os_workload_status(CONFIGS, REQUIRED_INTERFACES)

View File

@ -23,6 +23,10 @@ PACKAGES = [
'ceph-common',
]
REQUIRED_INTERFACES = {
'ceph': ['ceph'],
}
CHARM_CEPH_CONF = '/var/lib/charm/{}/ceph.conf'
CEPH_CONF = '/etc/ceph/ceph.conf'

View File

@ -51,7 +51,8 @@ class AmuletDeployment(object):
if 'units' not in this_service:
this_service['units'] = 1
self.d.add(this_service['name'], units=this_service['units'])
self.d.add(this_service['name'], units=this_service['units'],
constraints=this_service.get('constraints'))
for svc in other_services:
if 'location' in svc:
@ -64,7 +65,8 @@ class AmuletDeployment(object):
if 'units' not in svc:
svc['units'] = 1
self.d.add(svc['name'], charm=branch_location, units=svc['units'])
self.d.add(svc['name'], charm=branch_location, units=svc['units'],
constraints=svc.get('constraints'))
def _add_relations(self, relations):
"""Add all of the relations for the services."""

View File

@ -326,7 +326,7 @@ class AmuletUtils(object):
def service_restarted_since(self, sentry_unit, mtime, service,
pgrep_full=None, sleep_time=20,
retry_count=2, retry_sleep_time=30):
retry_count=30, retry_sleep_time=10):
"""Check if service was been started after a given time.
Args:
@ -334,8 +334,9 @@ class AmuletUtils(object):
mtime (float): The epoch time to check against
service (string): service name to look for in process table
pgrep_full: [Deprecated] Use full command line search mode with pgrep
sleep_time (int): Seconds to sleep before looking for process
retry_count (int): If service is not found, how many times to retry
sleep_time (int): Initial sleep time (s) before looking for the process
retry_sleep_time (int): Time (s) to sleep between retries
retry_count (int): If the process is not found, how many times to retry
Returns:
bool: True if service found and its start time is newer than mtime,
@ -359,11 +360,12 @@ class AmuletUtils(object):
pgrep_full)
self.log.debug('Attempt {} to get {} proc start time on {} '
'OK'.format(tries, service, unit_name))
except IOError:
except IOError as e:
# NOTE(beisner) - race avoidance, proc may not exist yet.
# https://bugs.launchpad.net/charm-helpers/+bug/1474030
self.log.debug('Attempt {} to get {} proc start time on {} '
'failed'.format(tries, service, unit_name))
'failed\n{}'.format(tries, service,
unit_name, e))
time.sleep(retry_sleep_time)
tries += 1
@ -383,35 +385,62 @@ class AmuletUtils(object):
return False
def config_updated_since(self, sentry_unit, filename, mtime,
sleep_time=20):
sleep_time=20, retry_count=30,
retry_sleep_time=10):
"""Check if file was modified after a given time.
Args:
sentry_unit (sentry): The sentry unit to check the file mtime on
filename (string): The file to check mtime of
mtime (float): The epoch time to check against
sleep_time (int): Seconds to sleep before looking for process
sleep_time (int): Initial sleep time (s) before looking for file
retry_sleep_time (int): Time (s) to sleep between retries
retry_count (int): If file is not found, how many times to retry
Returns:
bool: True if file was modified more recently than mtime, False if
file was modified before mtime,
file was modified before mtime, or if file not found.
"""
self.log.debug('Checking %s updated since %s' % (filename, mtime))
unit_name = sentry_unit.info['unit_name']
self.log.debug('Checking that %s updated since %s on '
'%s' % (filename, mtime, unit_name))
time.sleep(sleep_time)
file_mtime = self._get_file_mtime(sentry_unit, filename)
file_mtime = None
tries = 0
while tries <= retry_count and not file_mtime:
try:
file_mtime = self._get_file_mtime(sentry_unit, filename)
self.log.debug('Attempt {} to get {} file mtime on {} '
'OK'.format(tries, filename, unit_name))
except IOError as e:
# NOTE(beisner) - race avoidance, file may not exist yet.
# https://bugs.launchpad.net/charm-helpers/+bug/1474030
self.log.debug('Attempt {} to get {} file mtime on {} '
'failed\n{}'.format(tries, filename,
unit_name, e))
time.sleep(retry_sleep_time)
tries += 1
if not file_mtime:
self.log.warn('Could not determine file mtime, assuming '
'file does not exist')
return False
if file_mtime >= mtime:
self.log.debug('File mtime is newer than provided mtime '
'(%s >= %s)' % (file_mtime, mtime))
'(%s >= %s) on %s (OK)' % (file_mtime,
mtime, unit_name))
return True
else:
self.log.warn('File mtime %s is older than provided mtime %s'
% (file_mtime, mtime))
self.log.warn('File mtime is older than provided mtime '
'(%s < %s) on %s' % (file_mtime,
mtime, unit_name))
return False
def validate_service_config_changed(self, sentry_unit, mtime, service,
filename, pgrep_full=None,
sleep_time=20, retry_count=2,
retry_sleep_time=30):
sleep_time=20, retry_count=30,
retry_sleep_time=10):
"""Check service and file were updated after mtime
Args:
@ -456,7 +485,9 @@ class AmuletUtils(object):
sentry_unit,
filename,
mtime,
sleep_time=0)
sleep_time=sleep_time,
retry_count=retry_count,
retry_sleep_time=retry_sleep_time)
return service_restart and config_update
@ -776,3 +807,12 @@ class AmuletUtils(object):
output = _check_output(command, universal_newlines=True)
data = json.loads(output)
return data.get(u"status") == "completed"
def status_get(self, unit):
"""Return the current service status of this unit."""
raw_status, return_code = unit.run(
"status-get --format=json --include-data")
if return_code != 0:
return ("unknown", "")
status = json.loads(raw_status)
return (status["status"], status["message"])
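A sketch of how a test could assert on the new workload status; u and sentry_unit are assumed to come from the test's AmuletUtils instance and deployment respectively.

import amulet

# `u` is an AmuletUtils (or subclass) instance and `sentry_unit` a unit
# sentry from the deployment; both are assumed to exist in the test.
status, message = u.status_get(sentry_unit)
if status != 'active':
    amulet.raise_status(amulet.FAIL,
                        'Unexpected status {}: {}'.format(status, message))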

View File

@ -58,19 +58,17 @@ class OpenStackAmuletDeployment(AmuletDeployment):
else:
base_series = self.current_next
if self.stable:
for svc in other_services:
if svc['name'] in force_series_current:
base_series = self.current_next
for svc in other_services:
if svc['name'] in force_series_current:
base_series = self.current_next
# If a location has been explicitly set, use it
if svc.get('location'):
continue
if self.stable:
temp = 'lp:charms/{}/{}'
svc['location'] = temp.format(base_series,
svc['name'])
else:
for svc in other_services:
if svc['name'] in force_series_current:
base_series = self.current_next
else:
if svc['name'] in base_charms:
temp = 'lp:charms/{}/{}'
svc['location'] = temp.format(base_series,
@ -79,6 +77,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
temp = 'lp:~openstack-charmers/charms/{}/{}/next'
svc['location'] = temp.format(self.current_next,
svc['name'])
return other_services
def _add_services(self, this_service, other_services):

View File

@ -752,7 +752,7 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.debug('SSL is enabled @{}:{} '
'({})'.format(host, port, unit_name))
return True
elif not port and not conf_ssl:
elif not conf_ssl:
self.log.debug('SSL not enabled @{}:{} '
'({})'.format(host, port, unit_name))
return False

View File

@ -34,7 +34,8 @@ TO_PATCH = [
# charmhelpers.contrib.hahelpers.cluster_utils
'execd_preinstall',
'CephSubordinateContext',
'delete_keyring'
'delete_keyring',
'status_set'
]