Use charmhelpers generate_ha_relation_data for ha relation

Use the generate_ha_relation_data helper from charmhelpers to
generate the data to send down the relation to the hacluster
charm.

This results in a few changes in behaviour:

1) The charm will no longer specify a nic name to bind the VIP. This
   is because Pacemaker VIP resources can automatically detect and
   configure the correct iface and netmask parameters based on the
   local configuration of the unit.
2) The original iface-named VIP resource will be stopped and deleted
   before the new short-hash-named VIP resource is created (see the
   sketch below).
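
For illustration, a minimal sketch of the resource naming change in
point 2; the key formats mirror the code in the diff below, while the
service name, VIP and interface values are hypothetical:

    import hashlib

    service = 'nova'      # hypothetical service name
    vip = '10.10.10.10'   # hypothetical VIP from charm config
    iface = 'eth0'        # hypothetical interface detected on one unit

    # Old style: the interface name is encoded in the resource key,
    # which breaks when interface naming differs across units.
    old_key = 'res_{}_{}_vip'.format(service, iface)
    # -> 'res_nova_eth0_vip'

    # New style: a short sha1 hash of the VIP itself, identical on
    # every unit regardless of local interface naming.
    new_key = 'res_{}_{}_vip'.format(
        service, hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7])
    # -> 'res_nova_<7-char-hash>_vip'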

Change-Id: I7018e94e75c7c873c6c610b06d3e7cc9fedcc507
Author: Liam Young, 2018-11-28 08:31:13 +00:00
Commit: 6d1d15149e (parent: c88155d17a)
7 changed files with 220 additions and 265 deletions

@@ -416,15 +416,20 @@ def copy_nrpe_checks(nrpe_files_dir=None):
"""
NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
default_nrpe_files_dir = os.path.join(
os.getenv('CHARM_DIR'),
'hooks',
'charmhelpers',
'contrib',
'openstack',
'files')
if not nrpe_files_dir:
nrpe_files_dir = default_nrpe_files_dir
if nrpe_files_dir is None:
# determine if "charmhelpers" is in CHARMDIR or CHARMDIR/hooks
for segment in ['.', 'hooks']:
nrpe_files_dir = os.path.abspath(os.path.join(
os.getenv('CHARM_DIR'),
segment,
'charmhelpers',
'contrib',
'openstack',
'files'))
if os.path.isdir(nrpe_files_dir):
break
else:
raise RuntimeError("Couldn't find charmhelpers directory")
if not os.path.exists(NAGIOS_PLUGINS):
os.makedirs(NAGIOS_PLUGINS)
for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):

@@ -168,7 +168,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
'nrpe', 'openvswitch-odl', 'neutron-api-odl',
'odl-controller', 'cinder-backup', 'nexentaedge-data',
'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
'cinder-nexentaedge', 'nexentaedge-mgmt']))
'cinder-nexentaedge', 'nexentaedge-mgmt',
'ceilometer-agent']))
if self.openstack:
for svc in services:

@@ -23,6 +23,7 @@
Helpers for high availability.
"""
import hashlib
import json
import re
@@ -35,7 +36,6 @@ from charmhelpers.core.hookenv import (
config,
status_set,
DEBUG,
WARNING,
)
from charmhelpers.core.host import (
@@ -124,13 +124,29 @@ def expect_ha():
return len(ha_related_units) > 0 or config('vip') or config('dns-ha')
def generate_ha_relation_data(service):
def generate_ha_relation_data(service, extra_settings=None):
""" Generate relation data for ha relation
Based on configuration options and unit interfaces, generate a json
encoded dict of relation data items for the hacluster relation,
providing configuration for DNS HA or VIPs + haproxy clone sets.
Example of supplying additional settings::
COLO_CONSOLEAUTH = 'inf: res_nova_consoleauth grp_nova_vips'
AGENT_CONSOLEAUTH = 'ocf:openstack:nova-consoleauth'
AGENT_CA_PARAMS = 'op monitor interval="5s"'
ha_console_settings = {
'colocations': {'vip_consoleauth': COLO_CONSOLEAUTH},
'init_services': {'res_nova_consoleauth': 'nova-consoleauth'},
'resources': {'res_nova_consoleauth': AGENT_CONSOLEAUTH},
'resource_params': {'res_nova_consoleauth': AGENT_CA_PARAMS})
generate_ha_relation_data('nova', extra_settings=ha_console_settings)
@param service: Name of the service being configured
@param extra_settings: Dict of additional resource data
@returns dict: json encoded data for use with relation_set
"""
_haproxy_res = 'res_{}_haproxy'.format(service)
@@ -149,6 +165,13 @@ def generate_ha_relation_data(service):
},
}
if extra_settings:
for k, v in extra_settings.items():
if _relation_data.get(k):
_relation_data[k].update(v)
else:
_relation_data[k] = v
if config('dns-ha'):
update_hacluster_dns_ha(service, _relation_data)
else:
@@ -232,39 +255,67 @@ def update_hacluster_vip(service, relation_data):
"""
cluster_config = get_hacluster_config()
vip_group = []
vips_to_delete = []
for vip in cluster_config['vip'].split():
if is_ipv6(vip):
res_neutron_vip = 'ocf:heartbeat:IPv6addr'
res_vip = 'ocf:heartbeat:IPv6addr'
vip_params = 'ipv6addr'
else:
res_neutron_vip = 'ocf:heartbeat:IPaddr2'
res_vip = 'ocf:heartbeat:IPaddr2'
vip_params = 'ip'
iface = (get_iface_for_address(vip) or
config('vip_iface'))
netmask = (get_netmask_for_address(vip) or
config('vip_cidr'))
iface = get_iface_for_address(vip)
netmask = get_netmask_for_address(vip)
fallback_params = False
if iface is None:
iface = config('vip_iface')
fallback_params = True
if netmask is None:
netmask = config('vip_cidr')
fallback_params = True
if iface is not None:
# NOTE(jamespage): Delete old VIP resources
# Old style naming encoding iface in name
# does not work well in environments where
# interface/subnet wiring is not consistent
vip_key = 'res_{}_{}_vip'.format(service, iface)
if vip_key in vip_group:
if vip not in relation_data['resource_params'][vip_key]:
vip_key = '{}_{}'.format(vip_key, vip_params)
else:
log("Resource '%s' (vip='%s') already exists in "
"vip group - skipping" % (vip_key, vip), WARNING)
continue
if vip_key in vips_to_delete:
vip_key = '{}_{}'.format(vip_key, vip_params)
vips_to_delete.append(vip_key)
vip_key = 'res_{}_{}_vip'.format(
service,
hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7])
relation_data['resources'][vip_key] = res_vip
# NOTE(jamespage):
# Use option-provided vip params if these were used
# instead of auto-detected values
if fallback_params:
relation_data['resource_params'][vip_key] = (
'params {ip}="{vip}" cidr_netmask="{netmask}" '
'nic="{iface}"'.format(ip=vip_params,
vip=vip,
iface=iface,
netmask=netmask)
)
else:
# NOTE(jamespage):
# let heartbeat figure out which interface and
# netmask to configure, which works nicely
# when network interface naming is not
# consistent across units.
relation_data['resource_params'][vip_key] = (
'params {ip}="{vip}"'.format(ip=vip_params,
vip=vip))
relation_data['resources'][vip_key] = res_neutron_vip
relation_data['resource_params'][vip_key] = (
'params {ip}="{vip}" cidr_netmask="{netmask}" '
'nic="{iface}"'.format(ip=vip_params,
vip=vip,
iface=iface,
netmask=netmask)
)
vip_group.append(vip_key)
if vips_to_delete:
relation_data['delete_resources'] = vips_to_delete
if len(vip_group) >= 1:
relation_data['groups'] = {
'grp_{}_vips'.format(service): ' '.join(vip_group)
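
As a usage sketch (hypothetical charm code, mirroring the
nova-cloud-controller hook change below): a consuming charm's
ha-relation-joined hook reduces to generating the data and passing it
to relation_set; entries in extra_settings are merged into the
generated data, with existing keys update()d and new keys added
verbatim:

    from charmhelpers.contrib.openstack.ha.utils import (
        generate_ha_relation_data,
    )
    from charmhelpers.core.hookenv import relation_set

    def ha_joined(relation_id=None):
        # Hypothetical extra resource merged into the generated data.
        extra = {
            'resource_params': {
                'res_nova_consoleauth': 'op monitor interval="5s"'},
        }
        settings = generate_ha_relation_data('nova', extra_settings=extra)
        relation_set(relation_id=relation_id, **settings)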

@@ -73,6 +73,8 @@ from charmhelpers.core.host import (
service_running,
service_pause,
service_resume,
service_stop,
service_start,
restart_on_change_helper,
)
from charmhelpers.fetch import (
@@ -299,7 +301,7 @@ def get_os_codename_install_source(src):
rel = ''
if src is None:
return rel
if src in ['distro', 'distro-proposed']:
if src in ['distro', 'distro-proposed', 'proposed']:
try:
rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
except KeyError:
@@ -1303,6 +1305,65 @@ def is_unit_paused_set():
return False
def manage_payload_services(action, services=None, charm_func=None):
"""Run an action against all services.
An optional charm_func() can be called. It should raise an Exception to
indicate that the function failed. If it was successful, it should return
None or an optional message.
The signature for charm_func is:
charm_func() -> message: str
charm_func() is executed after any services are stopped, if supplied.
The services object can either be:
- None : no services were passed (an empty dict is returned)
- a list of strings
- a dictionary (optionally an OrderedDict) {service_name: {'service': ..}}
- an array of [{'service': service_name, ...}, ...]
:param action: Action to run: pause, resume, start or stop.
:type action: str
:param services: See above
:type services: See above
:param charm_func: function to run for custom charm pausing.
:type charm_func: f()
:returns: Status boolean and list of messages
:rtype: (bool, [])
:raises: RuntimeError
"""
actions = {
'pause': service_pause,
'resume': service_resume,
'start': service_start,
'stop': service_stop}
action = action.lower()
if action not in actions.keys():
raise RuntimeError(
"action: {} must be one of: {}".format(action,
', '.join(actions.keys())))
services = _extract_services_list_helper(services)
messages = []
success = True
if services:
for service in services.keys():
rc = actions[action](service)
if not rc:
success = False
messages.append("{} didn't {} cleanly.".format(service,
action))
if charm_func:
try:
message = charm_func()
if message:
messages.append(message)
except Exception as e:
success = False
messages.append(str(e))
return success, messages
def pause_unit(assess_status_func, services=None, ports=None,
charm_func=None):
"""Pause a unit by stopping the services and setting 'unit-paused'
@@ -1333,20 +1394,10 @@ def pause_unit(assess_status_func, services=None, ports=None,
@returns None
@raises Exception(message) on an error for action_fail().
"""
services = _extract_services_list_helper(services)
messages = []
if services:
for service in services.keys():
stopped = service_pause(service)
if not stopped:
messages.append("{} didn't stop cleanly.".format(service))
if charm_func:
try:
message = charm_func()
if message:
messages.append(message)
except Exception as e:
message.append(str(e))
_, messages = manage_payload_services(
'pause',
services=services,
charm_func=charm_func)
set_unit_paused()
if assess_status_func:
message = assess_status_func()
@@ -1385,20 +1436,10 @@ def resume_unit(assess_status_func, services=None, ports=None,
@returns None
@raises Exception(message) on an error for action_fail().
"""
services = _extract_services_list_helper(services)
messages = []
if services:
for service in services.keys():
started = service_resume(service)
if not started:
messages.append("{} didn't start cleanly.".format(service))
if charm_func:
try:
message = charm_func()
if message:
messages.append(message)
except Exception as e:
message.append(str(e))
_, messages = manage_payload_services(
'resume',
services=services,
charm_func=charm_func)
clear_unit_paused()
if assess_status_func:
message = assess_status_func()
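
A brief usage sketch of the new manage_payload_services helper
(service names hypothetical): it runs the requested action against
each service and reports overall success plus any failure messages,
which pause_unit/resume_unit above now rely on:

    success, messages = manage_payload_services(
        'stop',
        services=['haproxy', 'apache2'],  # hypothetical payload services
        charm_func=None)
    if not success:
        raise Exception(', '.join(messages))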

@@ -36,8 +36,10 @@ def loopback_devices():
'''
loopbacks = {}
cmd = ['losetup', '-a']
devs = [d.strip().split(' ') for d in
check_output(cmd).splitlines() if d != '']
output = check_output(cmd)
if six.PY3:
output = output.decode('utf-8')
devs = [d.strip().split(' ') for d in output.splitlines() if d != '']
for dev, _, f in devs:
loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0]
return loopbacks
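
The decode guard above follows the usual six pattern for code that
must run on both interpreters: check_output() returns bytes on
Python 3 but str on Python 2. A standalone illustration:

    import six
    from subprocess import check_output

    output = check_output(['losetup', '-a'])
    if six.PY3:
        # bytes -> str so splitlines()/split() yield text, not bytes
        output = output.decode('utf-8')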

@@ -291,6 +291,8 @@ def config_changed():
ncc_utils.write_vendordata(hookenv.config('vendor-data'))
if hookenv.is_leader() and not ncc_utils.get_shared_metadatasecret():
ncc_utils.set_shared_metadatasecret()
for rid in hookenv.relation_ids('ha'):
ha_joined(rid)
@hooks.hook('amqp-relation-joined')
@@ -778,82 +780,21 @@ def cluster_changed():
@hooks.hook('ha-relation-joined')
def ha_joined(relation_id=None):
cluster_config = ch_cluster.get_hacluster_config()
resources = {
'res_nova_haproxy': 'lsb:haproxy',
}
resource_params = {
'res_nova_haproxy': 'op monitor interval="5s"',
}
init_services = {
'res_nova_haproxy': 'haproxy'
}
clones = {
'cl_nova_haproxy': 'res_nova_haproxy'
}
colocations = {}
if hookenv.config('dns-ha'):
ch_ha_utils.update_dns_ha_resource_params(
relation_id=relation_id,
resources=resources,
resource_params=resource_params)
else:
vip_group = []
for vip in cluster_config['vip'].split():
if ch_network_ip.is_ipv6(vip):
res_nova_vip = 'ocf:heartbeat:IPv6addr'
vip_params = 'ipv6addr'
else:
res_nova_vip = 'ocf:heartbeat:IPaddr2'
vip_params = 'ip'
iface = (ch_network_ip.get_iface_for_address(vip) or
hookenv.config('vip_iface'))
netmask = (ch_network_ip.get_netmask_for_address(vip) or
hookenv.config('vip_cidr'))
if iface is not None:
vip_key = 'res_nova_{}_vip'.format(iface)
if vip_key in vip_group:
if vip not in resource_params[vip_key]:
vip_key = '{}_{}'.format(vip_key, vip_params)
else:
hookenv.log(
"Resource '%s' (vip='%s') already exists in "
"vip group - skipping" % (vip_key, vip),
hookenv.WARNING)
continue
resources[vip_key] = res_nova_vip
resource_params[vip_key] = (
'params {ip}="{vip}" cidr_netmask="{netmask}"'
' nic="{iface}"'.format(ip=vip_params,
vip=vip,
iface=iface,
netmask=netmask)
)
vip_group.append(vip_key)
if len(vip_group) >= 1:
hookenv.relation_set(
groups={'grp_nova_vips': ' '.join(vip_group)})
ha_console_settings = {}
if not hookenv.config('dns-ha'):
if (hookenv.config('single-nova-consoleauth') and
common.console_attributes('protocol')):
colocations['vip_consoleauth'] = COLO_CONSOLEAUTH
init_services['res_nova_consoleauth'] = 'nova-consoleauth'
resources['res_nova_consoleauth'] = AGENT_CONSOLEAUTH
resource_params['res_nova_consoleauth'] = AGENT_CA_PARAMS
ha_console_settings = {
'colocations': {'vip_consoleauth': COLO_CONSOLEAUTH},
'init_services': {'res_nova_consoleauth': 'nova-consoleauth'},
'resources': {'res_nova_consoleauth': AGENT_CONSOLEAUTH},
'resource_params': {'res_nova_consoleauth': AGENT_CA_PARAMS}}
hookenv.relation_set(relation_id=relation_id,
init_services=init_services,
corosync_bindiface=cluster_config['ha-bindiface'],
corosync_mcastport=cluster_config['ha-mcastport'],
resources=resources,
resource_params=resource_params,
clones=clones,
colocations=colocations)
settings = ch_ha_utils.generate_ha_relation_data(
'nova',
extra_settings=ha_console_settings)
hookenv.relation_set(relation_id=relation_id, **settings)
@hooks.hook('ha-relation-changed')

@@ -30,6 +30,7 @@ TO_PATCH = [
'charmhelpers.contrib.network.ip.get_iface_for_address',
'charmhelpers.contrib.network.ip.get_netmask_for_address',
'charmhelpers.contrib.network.ip.get_relation_ip',
'charmhelpers.contrib.openstack.ha.utils.generate_ha_relation_data',
'charmhelpers.contrib.openstack.ha.utils.update_dns_ha_resource_params',
'charmhelpers.contrib.openstack.neutron.network_manager',
'charmhelpers.contrib.openstack.utils.configure_installation_source',
@@ -950,139 +951,52 @@ class NovaCCHooksTests(CharmTestCase):
}
self.assertEqual(_con_sets, console_settings)
def test_ha_relation_joined_no_bound_ip(self):
self.get_hacluster_config.return_value = {
'ha-bindiface': 'em0',
'ha-mcastport': '8080',
'vip': '10.10.10.10',
}
self.test_config.set('vip_iface', 'eth120')
self.test_config.set('vip_cidr', '21')
self.get_iface_for_address.return_value = None
self.get_netmask_for_address.return_value = None
hooks.ha_joined()
args = {
'relation_id': None,
'corosync_bindiface': 'em0',
'corosync_mcastport': '8080',
'init_services': {'res_nova_haproxy': 'haproxy'},
'resources': {'res_nova_eth120_vip': 'ocf:heartbeat:IPaddr2',
'res_nova_haproxy': 'lsb:haproxy'},
'resource_params': {
'res_nova_eth120_vip': 'params ip="10.10.10.10"'
' cidr_netmask="21" nic="eth120"',
'res_nova_haproxy': 'op monitor interval="5s"'},
'colocations': {},
'clones': {'cl_nova_haproxy': 'res_nova_haproxy'}
}
self.relation_set.assert_has_calls([
call(groups={'grp_nova_vips': 'res_nova_eth120_vip'}),
call(**args),
])
def test_ha_joined_dns_ha(self):
def _fake_update(resources, resource_params, relation_id=None):
resources.update({'res_nova_public_hostname': 'ocf:maas:dns'})
resource_params.update({'res_nova_public_hostname':
'params fqdn="nova.maas" '
'ip_address="10.0.0.1"'})
self.test_config.set('dns-ha', True)
self.get_hacluster_config.return_value = {
'vip': None,
'ha-bindiface': 'em0',
'ha-mcastport': '8080',
'os-admin-hostname': None,
'os-internal-hostname': None,
'os-public-hostname': 'nova.maas',
}
args = {
'relation_id': None,
'corosync_bindiface': 'em0',
'corosync_mcastport': '8080',
'init_services': {'res_nova_haproxy': 'haproxy'},
'resources': {'res_nova_public_hostname': 'ocf:maas:dns',
'res_nova_haproxy': 'lsb:haproxy'},
'resource_params': {
'res_nova_public_hostname': 'params fqdn="nova.maas" '
'ip_address="10.0.0.1"',
'res_nova_haproxy': 'op monitor interval="5s"'},
'clones': {'cl_nova_haproxy': 'res_nova_haproxy'},
'colocations': {},
}
self.update_dns_ha_resource_params.side_effect = _fake_update
hooks.ha_joined()
self.assertTrue(self.update_dns_ha_resource_params.called)
self.relation_set.assert_called_with(**args)
def test_ha_relation_multi_consoleauth(self):
self.get_hacluster_config.return_value = {
'ha-bindiface': 'em0',
'ha-mcastport': '8080',
'vip': '10.10.10.10',
}
self.test_config.set('vip_iface', 'eth120')
self.test_config.set('vip_cidr', '21')
def test_ha_relation_joined(self):
self.test_config.set('single-nova-consoleauth', False)
self.get_iface_for_address.return_value = None
self.get_netmask_for_address.return_value = None
self.test_config.set('dns-ha', False)
self.generate_ha_relation_data.return_value = {'ha': 'settings'}
hooks.ha_joined()
args = {
'relation_id': None,
'corosync_bindiface': 'em0',
'corosync_mcastport': '8080',
'init_services': {'res_nova_haproxy': 'haproxy'},
'resources': {'res_nova_eth120_vip': 'ocf:heartbeat:IPaddr2',
'res_nova_haproxy': 'lsb:haproxy'},
'resource_params': {
'res_nova_eth120_vip': 'params ip="10.10.10.10"'
' cidr_netmask="21" nic="eth120"',
'res_nova_haproxy': 'op monitor interval="5s"'},
'colocations': {},
'clones': {'cl_nova_haproxy': 'res_nova_haproxy'}
}
self.relation_set.assert_has_calls([
call(groups={'grp_nova_vips': 'res_nova_eth120_vip'}),
call(**args),
])
self.generate_ha_relation_data.assert_called_once_with(
'nova',
extra_settings={})
self.relation_set.assert_called_once_with(
ha='settings',
relation_id=None)
def test_ha_relation_single_consoleauth(self):
self.get_hacluster_config.return_value = {
'ha-bindiface': 'em0',
'ha-mcastport': '8080',
'vip': '10.10.10.10',
}
self.test_config.set('vip_iface', 'eth120')
self.test_config.set('vip_cidr', '21')
def test_ha_relation_joined_consoleauth(self):
self.test_config.set('single-nova-consoleauth', True)
self.test_config.set('console-access-protocol', 'novnc')
self.get_iface_for_address.return_value = None
self.get_netmask_for_address.return_value = None
self.test_config.set('dns-ha', False)
self.generate_ha_relation_data.return_value = {'ha': 'settings'}
hooks.ha_joined()
args = {
'relation_id': None,
'corosync_bindiface': 'em0',
'corosync_mcastport': '8080',
'init_services': {'res_nova_haproxy': 'haproxy',
'res_nova_consoleauth': 'nova-consoleauth'},
'resources': {'res_nova_eth120_vip': 'ocf:heartbeat:IPaddr2',
'res_nova_haproxy': 'lsb:haproxy',
'res_nova_consoleauth':
'ocf:openstack:nova-consoleauth'},
'resource_params': {
'res_nova_eth120_vip': 'params ip="10.10.10.10"'
' cidr_netmask="21" nic="eth120"',
'res_nova_haproxy': 'op monitor interval="5s"',
'res_nova_consoleauth': 'op monitor interval="5s"'},
'colocations': {
'vip_consoleauth': 'inf: res_nova_consoleauth grp_nova_vips'
},
'clones': {'cl_nova_haproxy': 'res_nova_haproxy'}
}
self.relation_set.assert_has_calls([
call(groups={'grp_nova_vips': 'res_nova_eth120_vip'}),
call(**args),
])
self.generate_ha_relation_data.assert_called_once_with(
'nova',
extra_settings={
'colocations': {
'vip_consoleauth': ('inf: res_nova_consoleauth '
'grp_nova_vips')},
'init_services': {
'res_nova_consoleauth': 'nova-consoleauth'},
'resources': {
'res_nova_consoleauth': 'ocf:openstack:nova-consoleauth'},
'resource_params': {
'res_nova_consoleauth': 'op monitor interval="5s"'}})
self.relation_set.assert_called_once_with(
ha='settings',
relation_id=None)
def test_ha_relation_joined_dnsha(self):
self.test_config.set('single-nova-consoleauth', True)
self.test_config.set('console-access-protocol', 'novnc')
self.test_config.set('dns-ha', True)
self.generate_ha_relation_data.return_value = {'ha': 'settings'}
hooks.ha_joined()
self.generate_ha_relation_data.assert_called_once_with(
'nova',
extra_settings={})
self.relation_set.assert_called_once_with(
ha='settings',
relation_id=None)
@patch.object(utils, 'set_shared_metadatasecret')
@patch.object(utils, 'get_shared_metadatasecret')