Merge "Sync charm-helpers"

commit 54e35242a8
Zuul, 2019-01-09 08:47:13 +00:00, committed by Gerrit Code Review
9 changed files with 223 additions and 78 deletions

View File

@@ -305,7 +305,7 @@ class NRPE(object):
         # update-status hooks are configured to firing every 5 minutes by
         # default. When nagios-nrpe-server is restarted, the nagios server
-        # reports checks failing causing unneccessary alerts. Let's not restart
+        # reports checks failing causing unnecessary alerts. Let's not restart
         # on update-status hooks.
         if not hook_name() == 'update-status':
             service('restart', 'nagios-nrpe-server')
@@ -416,15 +416,20 @@ def copy_nrpe_checks(nrpe_files_dir=None):
     """
     NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
-    default_nrpe_files_dir = os.path.join(
-        os.getenv('CHARM_DIR'),
-        'hooks',
-        'charmhelpers',
-        'contrib',
-        'openstack',
-        'files')
-    if not nrpe_files_dir:
-        nrpe_files_dir = default_nrpe_files_dir
+    if nrpe_files_dir is None:
+        # determine if "charmhelpers" is in CHARMDIR or CHARMDIR/hooks
+        for segment in ['.', 'hooks']:
+            nrpe_files_dir = os.path.abspath(os.path.join(
+                os.getenv('CHARM_DIR'),
+                segment,
+                'charmhelpers',
+                'contrib',
+                'openstack',
+                'files'))
+            if os.path.isdir(nrpe_files_dir):
+                break
+        else:
+            raise RuntimeError("Couldn't find charmhelpers directory")
     if not os.path.exists(NAGIOS_PLUGINS):
         os.makedirs(NAGIOS_PLUGINS)
     for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
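
The replacement above relies on Python's for/else: the else branch runs only if the loop finishes without hitting break, which is what turns "no candidate directory matched" into a RuntimeError. A minimal standalone sketch of the same probe, with the helper name invented for illustration:

    import os

    def locate_charmhelpers_files(charm_dir):
        # Probe charm_dir and charm_dir/hooks, mirroring copy_nrpe_checks().
        for segment in ['.', 'hooks']:
            candidate = os.path.abspath(os.path.join(
                charm_dir, segment, 'charmhelpers', 'contrib',
                'openstack', 'files'))
            if os.path.isdir(candidate):
                break  # found a match; the else clause below is skipped
        else:
            # reached only when no segment produced an existing directory
            raise RuntimeError("Couldn't find charmhelpers directory")
        return candidate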

View File

@@ -168,7 +168,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
             'nrpe', 'openvswitch-odl', 'neutron-api-odl',
             'odl-controller', 'cinder-backup', 'nexentaedge-data',
             'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
-            'cinder-nexentaedge', 'nexentaedge-mgmt']))
+            'cinder-nexentaedge', 'nexentaedge-mgmt',
+            'ceilometer-agent']))

         if self.openstack:
             for svc in services:
@@ -292,7 +293,9 @@ class OpenStackAmuletDeployment(AmuletDeployment):
             ('artful', None): self.artful_pike,
             ('bionic', None): self.bionic_queens,
             ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky,
+            ('bionic', 'cloud:bionic-stein'): self.bionic_stein,
             ('cosmic', None): self.cosmic_rocky,
+            ('disco', None): self.disco_stein,
         }
         return releases[(self.series, self.openstack)]

View File

@@ -57,7 +57,8 @@ OPENSTACK_RELEASES_PAIRS = [
     'trusty_mitaka', 'xenial_mitaka', 'xenial_newton',
     'yakkety_newton', 'xenial_ocata', 'zesty_ocata',
     'xenial_pike', 'artful_pike', 'xenial_queens',
-    'bionic_queens', 'bionic_rocky', 'cosmic_rocky']
+    'bionic_queens', 'bionic_rocky', 'cosmic_rocky',
+    'bionic_stein', 'disco_stein']


 class OpenStackAmuletUtils(AmuletUtils):

View File

@@ -195,7 +195,7 @@ def install_certs(ssl_dir, certs, chain=None):
         if chain:
             # Append chain file so that clients that trust the root CA will
             # trust certs signed by an intermediate in the chain
-            cert_data = cert_data + chain
+            cert_data = cert_data + os.linesep + chain
         write_file(
             path=os.path.join(ssl_dir, cert_filename),
             content=cert_data, perms=0o640)
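
The os.linesep insertion above guards against a server certificate that does not end in a newline: without a separator, its END marker and the chain's BEGIN marker would land on one line and the bundle would no longer parse as PEM. An illustrative sketch with truncated, made-up PEM bodies:

    import os

    cert_data = ('-----BEGIN CERTIFICATE-----\n'
                 'MIIB...server...\n'
                 '-----END CERTIFICATE-----')   # note: no trailing newline
    chain = ('-----BEGIN CERTIFICATE-----\n'
             'MIIB...intermediate...\n'
             '-----END CERTIFICATE-----')

    broken = cert_data + chain                # END and BEGIN markers share a line
    bundle = cert_data + os.linesep + chain   # each block starts on its own line
    print(bundle)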

View File

@@ -98,7 +98,6 @@ from charmhelpers.contrib.network.ip import (
 from charmhelpers.contrib.openstack.utils import (
     config_flags_parser,
     enable_memcache,
-    snap_install_requested,
     CompareOpenStackReleases,
     os_release,
 )
@@ -252,13 +251,8 @@ class SharedDBContext(OSContextGenerator):
                     'database': self.database,
                     'database_user': self.user,
                     'database_password': rdata.get(password_setting),
-                    'database_type': 'mysql'
+                    'database_type': 'mysql+pymysql'
                 }
-                # Note(coreycb): We can drop mysql+pymysql if we want when the
-                # following review lands, though it seems mysql+pymysql would
-                # be preferred. https://review.openstack.org/#/c/462190/
-                if snap_install_requested():
-                    ctxt['database_type'] = 'mysql+pymysql'
                 if self.context_complete(ctxt):
                     db_ssl(rdata, ctxt, self.ssl_dir)
                     return ctxt
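
With the snap check dropped, the context above always reports 'mysql+pymysql'. A hedged sketch of how that database_type typically feeds an SQLAlchemy/oslo.db connection URL in a rendered charm template; the values here are invented for illustration:

    ctxt = {
        'database_host': '10.0.0.10',
        'database': 'nova',
        'database_user': 'nova',
        'database_password': 'secret',
        'database_type': 'mysql+pymysql',
    }

    # 'mysql+pymysql' selects the pure-Python PyMySQL driver, which also works
    # on Python 3 payloads where the legacy MySQL-Python driver is unavailable.
    connection = ('{database_type}://{database_user}:{database_password}'
                  '@{database_host}/{database}'.format(**ctxt))
    print(connection)  # mysql+pymysql://nova:secret@10.0.0.10/nova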

View File

@@ -23,6 +23,7 @@
 Helpers for high availability.
 """

+import hashlib
 import json
 import re

@@ -35,7 +36,6 @@ from charmhelpers.core.hookenv import (
     config,
     status_set,
     DEBUG,
-    WARNING,
 )

 from charmhelpers.core.host import (
@@ -63,6 +63,9 @@ JSON_ENCODE_OPTIONS = dict(
     separators=(',', ':'),
 )

+VIP_GROUP_NAME = 'grp_{service}_vips'
+DNSHA_GROUP_NAME = 'grp_{service}_hostnames'
+

 class DNSHAException(Exception):
     """Raised when an error occurs setting up DNS HA
@@ -124,13 +127,29 @@ def expect_ha():
     return len(ha_related_units) > 0 or config('vip') or config('dns-ha')


-def generate_ha_relation_data(service):
+def generate_ha_relation_data(service, extra_settings=None):
     """ Generate relation data for ha relation

     Based on configuration options and unit interfaces, generate a json
     encoded dict of relation data items for the hacluster relation,
     providing configuration for DNS HA or VIP's + haproxy clone sets.
+
+    Example of supplying additional settings::
+
+        COLO_CONSOLEAUTH = 'inf: res_nova_consoleauth grp_nova_vips'
+        AGENT_CONSOLEAUTH = 'ocf:openstack:nova-consoleauth'
+        AGENT_CA_PARAMS = 'op monitor interval="5s"'
+
+        ha_console_settings = {
+            'colocations': {'vip_consoleauth': COLO_CONSOLEAUTH},
+            'init_services': {'res_nova_consoleauth': 'nova-consoleauth'},
+            'resources': {'res_nova_consoleauth': AGENT_CONSOLEAUTH},
+            'resource_params': {'res_nova_consoleauth': AGENT_CA_PARAMS})
+        generate_ha_relation_data('nova', extra_settings=ha_console_settings)
+
+    @param service: Name of the service being configured
+    @param extra_settings: Dict of additional resource data
     @returns dict: json encoded data for use with relation_set
     """
     _haproxy_res = 'res_{}_haproxy'.format(service)
@@ -149,6 +168,13 @@ def generate_ha_relation_data(service):
         },
     }

+    if extra_settings:
+        for k, v in extra_settings.items():
+            if _relation_data.get(k):
+                _relation_data[k].update(v)
+            else:
+                _relation_data[k] = v
+
     if config('dns-ha'):
         update_hacluster_dns_ha(service, _relation_data)
     else:
@@ -216,7 +242,7 @@ def update_hacluster_dns_ha(service, relation_data,
             'Informing the ha relation'.format(' '.join(hostname_group)),
             DEBUG)
         relation_data['groups'] = {
-            'grp_{}_hostnames'.format(service): ' '.join(hostname_group)
+            DNSHA_GROUP_NAME.format(service=service): ' '.join(hostname_group)
         }
     else:
         msg = 'DNS HA: Hostname group has no members.'
@@ -224,6 +250,27 @@ def update_hacluster_dns_ha(service, relation_data,
         raise DNSHAException(msg)


+def get_vip_settings(vip):
+    """Calculate which nic is on the correct network for the given vip.
+
+    If nic or netmask discovery fail then fallback to using charm supplied
+    config. If fallback is used this is indicated via the fallback variable.
+
+    @param vip: VIP to lookup nic and cidr for.
+    @returns (str, str, bool): eg (iface, netmask, fallback)
+    """
+    iface = get_iface_for_address(vip)
+    netmask = get_netmask_for_address(vip)
+    fallback = False
+    if iface is None:
+        iface = config('vip_iface')
+        fallback = True
+    if netmask is None:
+        netmask = config('vip_cidr')
+        fallback = True
+    return iface, netmask, fallback
+
+
 def update_hacluster_vip(service, relation_data):
     """ Configure VIP resources based on provided configuration
@@ -232,40 +279,70 @@ def update_hacluster_vip(service, relation_data):
     """
     cluster_config = get_hacluster_config()
     vip_group = []
+    vips_to_delete = []
     for vip in cluster_config['vip'].split():
         if is_ipv6(vip):
-            res_neutron_vip = 'ocf:heartbeat:IPv6addr'
+            res_vip = 'ocf:heartbeat:IPv6addr'
             vip_params = 'ipv6addr'
         else:
-            res_neutron_vip = 'ocf:heartbeat:IPaddr2'
+            res_vip = 'ocf:heartbeat:IPaddr2'
             vip_params = 'ip'

-        iface = (get_iface_for_address(vip) or
-                 config('vip_iface'))
-        netmask = (get_netmask_for_address(vip) or
-                   config('vip_cidr'))
+        iface, netmask, fallback = get_vip_settings(vip)

+        vip_monitoring = 'op monitor depth="0" timeout="20s" interval="10s"'
         if iface is not None:
+            # NOTE(jamespage): Delete old VIP resources
+            # Old style naming encoding iface in name
+            # does not work well in environments where
+            # interface/subnet wiring is not consistent
             vip_key = 'res_{}_{}_vip'.format(service, iface)
-            if vip_key in vip_group:
-                if vip not in relation_data['resource_params'][vip_key]:
-                    vip_key = '{}_{}'.format(vip_key, vip_params)
-                else:
-                    log("Resource '%s' (vip='%s') already exists in "
-                        "vip group - skipping" % (vip_key, vip), WARNING)
-                    continue
+            if vip_key in vips_to_delete:
+                vip_key = '{}_{}'.format(vip_key, vip_params)
+            vips_to_delete.append(vip_key)
+
+            vip_key = 'res_{}_{}_vip'.format(
+                service,
+                hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7])
+
+            relation_data['resources'][vip_key] = res_vip
+            # NOTE(jamespage):
+            # Use option provided vip params if these where used
+            # instead of auto-detected values
+            if fallback:
+                relation_data['resource_params'][vip_key] = (
+                    'params {ip}="{vip}" cidr_netmask="{netmask}" '
+                    'nic="{iface}" {vip_monitoring}'.format(
+                        ip=vip_params,
+                        vip=vip,
+                        iface=iface,
+                        netmask=netmask,
+                        vip_monitoring=vip_monitoring))
+            else:
+                # NOTE(jamespage):
+                # let heartbeat figure out which interface and
+                # netmask to configure, which works nicely
+                # when network interface naming is not
+                # consistent across units.
+                relation_data['resource_params'][vip_key] = (
+                    'params {ip}="{vip}" {vip_monitoring}'.format(
+                        ip=vip_params,
+                        vip=vip,
+                        vip_monitoring=vip_monitoring))

-            relation_data['resources'][vip_key] = res_neutron_vip
-            relation_data['resource_params'][vip_key] = (
-                'params {ip}="{vip}" cidr_netmask="{netmask}" '
-                'nic="{iface}"'.format(ip=vip_params,
-                                       vip=vip,
-                                       iface=iface,
-                                       netmask=netmask)
-            )
             vip_group.append(vip_key)

+    if vips_to_delete:
+        try:
+            relation_data['delete_resources'].extend(vips_to_delete)
+        except KeyError:
+            relation_data['delete_resources'] = vips_to_delete
+
     if len(vip_group) >= 1:
-        relation_data['groups'] = {
-            'grp_{}_vips'.format(service): ' '.join(vip_group)
-        }
+        key = VIP_GROUP_NAME.format(service=service)
+        try:
+            relation_data['groups'][key] = ' '.join(vip_group)
+        except KeyError:
+            relation_data['groups'] = {
+                key: ' '.join(vip_group)
+            }
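
The resource-naming change above keys each VIP resource on a short hash of the VIP itself instead of the local interface name, so every unit derives the same resource name even when NIC names differ; the old interface-based names are queued in delete_resources for removal. A minimal standalone sketch of the new naming (service name chosen for illustration):

    import hashlib

    def vip_resource_key(service, vip):
        # Same scheme as update_hacluster_vip(): a 7-character sha1 prefix of
        # the VIP is stable across units regardless of eth0/ens3-style naming.
        return 'res_{}_{}_vip'.format(
            service, hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7])

    print(vip_resource_key('keystone', '10.5.0.100'))
    print(vip_resource_key('keystone', '10.5.0.100'))  # identical on every call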

View File

@@ -73,6 +73,8 @@ from charmhelpers.core.host import (
     service_running,
     service_pause,
     service_resume,
+    service_stop,
+    service_start,
     restart_on_change_helper,
 )
 from charmhelpers.fetch import (
@@ -116,6 +118,7 @@ OPENSTACK_RELEASES = (
     'pike',
     'queens',
     'rocky',
+    'stein',
 )

 UBUNTU_OPENSTACK_RELEASE = OrderedDict([
@@ -134,6 +137,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
     ('artful', 'pike'),
     ('bionic', 'queens'),
     ('cosmic', 'rocky'),
+    ('disco', 'stein'),
 ])
@@ -153,6 +157,7 @@ OPENSTACK_CODENAMES = OrderedDict([
     ('2017.2', 'pike'),
     ('2018.1', 'queens'),
     ('2018.2', 'rocky'),
+    ('2019.1', 'stein'),
 ])

 # The ugly duckling - must list releases oldest to newest
@@ -187,6 +192,8 @@ SWIFT_CODENAMES = OrderedDict([
         ['2.16.0', '2.17.0']),
     ('rocky',
         ['2.18.0', '2.19.0']),
+    ('stein',
+        ['2.19.0']),
 ])

 # >= Liberty version->codename mapping
@@ -199,6 +206,7 @@ PACKAGE_CODENAMES = {
         ('16', 'pike'),
         ('17', 'queens'),
         ('18', 'rocky'),
+        ('19', 'stein'),
     ]),
     'neutron-common': OrderedDict([
         ('7', 'liberty'),
@@ -208,6 +216,7 @@ PACKAGE_CODENAMES = {
         ('11', 'pike'),
         ('12', 'queens'),
         ('13', 'rocky'),
+        ('14', 'stein'),
     ]),
     'cinder-common': OrderedDict([
         ('7', 'liberty'),
@@ -217,6 +226,7 @@ PACKAGE_CODENAMES = {
         ('11', 'pike'),
         ('12', 'queens'),
         ('13', 'rocky'),
+        ('14', 'stein'),
     ]),
     'keystone': OrderedDict([
         ('8', 'liberty'),
@@ -226,6 +236,7 @@ PACKAGE_CODENAMES = {
         ('12', 'pike'),
         ('13', 'queens'),
         ('14', 'rocky'),
+        ('15', 'stein'),
     ]),
     'horizon-common': OrderedDict([
         ('8', 'liberty'),
@@ -235,6 +246,7 @@ PACKAGE_CODENAMES = {
         ('12', 'pike'),
         ('13', 'queens'),
         ('14', 'rocky'),
+        ('15', 'stein'),
     ]),
     'ceilometer-common': OrderedDict([
         ('5', 'liberty'),
@@ -244,6 +256,7 @@ PACKAGE_CODENAMES = {
         ('9', 'pike'),
         ('10', 'queens'),
         ('11', 'rocky'),
+        ('12', 'stein'),
     ]),
     'heat-common': OrderedDict([
         ('5', 'liberty'),
@@ -253,6 +266,7 @@ PACKAGE_CODENAMES = {
         ('9', 'pike'),
         ('10', 'queens'),
         ('11', 'rocky'),
+        ('12', 'stein'),
     ]),
     'glance-common': OrderedDict([
         ('11', 'liberty'),
@@ -262,6 +276,7 @@ PACKAGE_CODENAMES = {
         ('15', 'pike'),
         ('16', 'queens'),
         ('17', 'rocky'),
+        ('18', 'stein'),
     ]),
     'openstack-dashboard': OrderedDict([
         ('8', 'liberty'),
@@ -271,6 +286,7 @@ PACKAGE_CODENAMES = {
         ('12', 'pike'),
         ('13', 'queens'),
         ('14', 'rocky'),
+        ('15', 'stein'),
     ]),
 }
@@ -299,7 +315,7 @@ def get_os_codename_install_source(src):
     rel = ''
     if src is None:
         return rel
-    if src in ['distro', 'distro-proposed']:
+    if src in ['distro', 'distro-proposed', 'proposed']:
         try:
             rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
         except KeyError:
@@ -1303,6 +1319,65 @@ def is_unit_paused_set():
         return False


+def manage_payload_services(action, services=None, charm_func=None):
+    """Run an action against all services.
+
+    An optional charm_func() can be called. It should raise an Exception to
+    indicate that the function failed. If it was succesfull it should return
+    None or an optional message.
+
+    The signature for charm_func is:
+    charm_func() -> message: str
+
+    charm_func() is executed after any services are stopped, if supplied.
+
+    The services object can either be:
+      - None : no services were passed (an empty dict is returned)
+      - a list of strings
+      - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
+      - An array of [{'service': service_name, ...}, ...]
+
+    :param action: Action to run: pause, resume, start or stop.
+    :type action: str
+    :param services: See above
+    :type services: See above
+    :param charm_func: function to run for custom charm pausing.
+    :type charm_func: f()
+    :returns: Status boolean and list of messages
+    :rtype: (bool, [])
+    :raises: RuntimeError
+    """
+    actions = {
+        'pause': service_pause,
+        'resume': service_resume,
+        'start': service_start,
+        'stop': service_stop}
+    action = action.lower()
+    if action not in actions.keys():
+        raise RuntimeError(
+            "action: {} must be one of: {}".format(action,
+                                                   ', '.join(actions.keys())))
+    services = _extract_services_list_helper(services)
+    messages = []
+    success = True
+    if services:
+        for service in services.keys():
+            rc = actions[action](service)
+            if not rc:
+                success = False
+                messages.append("{} didn't {} cleanly.".format(service,
+                                                               action))
+    if charm_func:
+        try:
+            message = charm_func()
+            if message:
+                messages.append(message)
+        except Exception as e:
+            success = False
+            messages.append(str(e))
+    return success, messages
+
+
 def pause_unit(assess_status_func, services=None, ports=None,
                charm_func=None):
     """Pause a unit by stopping the services and setting 'unit-paused'
@@ -1333,20 +1408,10 @@ def pause_unit(assess_status_func, services=None, ports=None,
     @returns None
     @raises Exception(message) on an error for action_fail().
     """
-    services = _extract_services_list_helper(services)
-    messages = []
-    if services:
-        for service in services.keys():
-            stopped = service_pause(service)
-            if not stopped:
-                messages.append("{} didn't stop cleanly.".format(service))
-    if charm_func:
-        try:
-            message = charm_func()
-            if message:
-                messages.append(message)
-        except Exception as e:
-            message.append(str(e))
+    _, messages = manage_payload_services(
+        'pause',
+        services=services,
+        charm_func=charm_func)
     set_unit_paused()
     if assess_status_func:
         message = assess_status_func()
@@ -1385,20 +1450,10 @@ def resume_unit(assess_status_func, services=None, ports=None,
     @returns None
     @raises Exception(message) on an error for action_fail().
     """
-    services = _extract_services_list_helper(services)
-    messages = []
-    if services:
-        for service in services.keys():
-            started = service_resume(service)
-            if not started:
-                messages.append("{} didn't start cleanly.".format(service))
-    if charm_func:
-        try:
-            message = charm_func()
-            if message:
-                messages.append(message)
-        except Exception as e:
-            message.append(str(e))
+    _, messages = manage_payload_services(
+        'resume',
+        services=services,
+        charm_func=charm_func)
     clear_unit_paused()
     if assess_status_func:
         message = assess_status_func()
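
Both pause_unit() and resume_unit() now delegate to the new manage_payload_services() helper, which collects a (success, messages) pair instead of failing on the first service. A hedged usage sketch; the service names are made up and the call assumes it runs inside a charm hook environment:

    from charmhelpers.contrib.openstack.utils import manage_payload_services

    success, messages = manage_payload_services(
        'stop',
        services=['apache2', 'haproxy'],
        charm_func=lambda: 'custom charm stop steps ran')
    if not success:
        # aggregate per-service failures rather than raising on the first one
        print('; '.join(messages))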

View File

@@ -36,8 +36,10 @@ def loopback_devices():
     '''
     loopbacks = {}
     cmd = ['losetup', '-a']
-    devs = [d.strip().split(' ') for d in
-            check_output(cmd).splitlines() if d != '']
+    output = check_output(cmd)
+    if six.PY3:
+        output = output.decode('utf-8')
+    devs = [d.strip().split(' ') for d in output.splitlines() if d != '']
     for dev, _, f in devs:
         loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0]
     return loopbacks
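
The hunk above is the usual Python 3 bytes-versus-str fix: check_output() returns bytes on Python 3, while the later splitting and regex matching expect str. A small illustration using echo in place of losetup -a:

    import subprocess
    import six

    output = subprocess.check_output(
        ['echo', '/dev/loop0: [0051]:1234 (/srv/image.img)'])
    if six.PY3:
        output = output.decode('utf-8')   # bytes -> str before splitlines()
    devs = [d.strip().split(' ') for d in output.splitlines() if d != '']
    print(devs)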

View File

@@ -166,6 +166,14 @@ CLOUD_ARCHIVE_POCKETS = {
     'rocky/proposed': 'bionic-proposed/rocky',
     'bionic-rocky/proposed': 'bionic-proposed/rocky',
     'bionic-proposed/rocky': 'bionic-proposed/rocky',
+    # Stein
+    'stein': 'bionic-updates/stein',
+    'bionic-stein': 'bionic-updates/stein',
+    'bionic-stein/updates': 'bionic-updates/stein',
+    'bionic-updates/stein': 'bionic-updates/stein',
+    'stein/proposed': 'bionic-proposed/stein',
+    'bionic-stein/proposed': 'bionic-proposed/stein',
+    'bionic-proposed/stein': 'bionic-proposed/stein',
 }
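
For context, a hedged sketch of how a pocket key such as 'bionic-stein' could be turned into an Ubuntu Cloud Archive APT line; the real helper in this module differs, so treat the function below as an illustration only:

    POCKETS = {
        'stein': 'bionic-updates/stein',
        'bionic-stein': 'bionic-updates/stein',
        'stein/proposed': 'bionic-proposed/stein',
    }

    def cloud_archive_line(source):
        # 'cloud:bionic-stein' -> 'bionic-stein' -> 'bionic-updates/stein'
        pocket = POCKETS[source.replace('cloud:', '')]
        return ('deb http://ubuntu-cloud.archive.canonical.com/ubuntu '
                '{} main'.format(pocket))

    print(cloud_archive_line('cloud:bionic-stein'))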