Use charmhelpers generate_ha_relation_data for ha relation

Use the generate_ha_relation_data helper from charmhelpers to
generate the data to send down the relation to the hacluster
charm.

This results in a few changes in behaviour:

1) The charm will no longer specify a NIC name when binding the VIP.
   Pacemaker VIP resources are able to automatically detect and
   configure the correct iface and netmask parameters based on the
   local configuration of the unit.
2) The original iface-named VIP resource will be stopped and deleted
   prior to the creation of the new short-hash-named VIP resource.
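
Roughly, the hacluster resource definition for a VIP changes as
follows (VIP, netmask and interface are example values, not taken
from this change):

    Before: res_ceilometer_eth0_vip
            params ip="10.0.5.100" cidr_netmask="255.255.255.0" nic="eth0"
    After:  res_ceilometer_<sha1(vip)[:7]>_vip
            params ip="10.0.5.100"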

Change-Id: I6c10762e4f3367684f19852d03d5e5b84cf37600
Liam Young 2018-12-04 09:14:42 +00:00
parent 8cde93e515
commit 83d47b4b0e
7 changed files with 190 additions and 220 deletions


@@ -416,15 +416,20 @@ def copy_nrpe_checks(nrpe_files_dir=None):
     """
     NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
-    default_nrpe_files_dir = os.path.join(
-        os.getenv('CHARM_DIR'),
-        'hooks',
-        'charmhelpers',
-        'contrib',
-        'openstack',
-        'files')
-    if not nrpe_files_dir:
-        nrpe_files_dir = default_nrpe_files_dir
+    if nrpe_files_dir is None:
+        # determine if "charmhelpers" is in CHARMDIR or CHARMDIR/hooks
+        for segment in ['.', 'hooks']:
+            nrpe_files_dir = os.path.abspath(os.path.join(
+                os.getenv('CHARM_DIR'),
+                segment,
+                'charmhelpers',
+                'contrib',
+                'openstack',
+                'files'))
+            if os.path.isdir(nrpe_files_dir):
+                break
+        else:
+            raise RuntimeError("Couldn't find charmhelpers directory")
     if not os.path.exists(NAGIOS_PLUGINS):
         os.makedirs(NAGIOS_PLUGINS)
     for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
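
For illustration, with a hypothetical CHARM_DIR of
/var/lib/juju/agents/unit-ceilometer-0/charm, the loop above tries the
following candidate directories in order and keeps the first that
exists on disk:

    /var/lib/juju/agents/unit-ceilometer-0/charm/charmhelpers/contrib/openstack/files
    /var/lib/juju/agents/unit-ceilometer-0/charm/hooks/charmhelpers/contrib/openstack/files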


@@ -168,7 +168,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
                        'nrpe', 'openvswitch-odl', 'neutron-api-odl',
                        'odl-controller', 'cinder-backup', 'nexentaedge-data',
                        'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
-                       'cinder-nexentaedge', 'nexentaedge-mgmt']))
+                       'cinder-nexentaedge', 'nexentaedge-mgmt',
+                       'ceilometer-agent']))
 
         if self.openstack:
             for svc in services:


@@ -23,6 +23,7 @@
 Helpers for high availability.
 """
 
+import hashlib
 import json
 import re
@@ -35,7 +36,6 @@ from charmhelpers.core.hookenv import (
     config,
     status_set,
     DEBUG,
-    WARNING,
 )
 
 from charmhelpers.core.host import (
@@ -124,13 +124,29 @@ def expect_ha():
     return len(ha_related_units) > 0 or config('vip') or config('dns-ha')
 
 
-def generate_ha_relation_data(service):
+def generate_ha_relation_data(service, extra_settings=None):
     """ Generate relation data for ha relation
 
     Based on configuration options and unit interfaces, generate a json
     encoded dict of relation data items for the hacluster relation,
     providing configuration for DNS HA or VIPs + haproxy clone sets.
 
+    Example of supplying additional settings::
+
+        COLO_CONSOLEAUTH = 'inf: res_nova_consoleauth grp_nova_vips'
+        AGENT_CONSOLEAUTH = 'ocf:openstack:nova-consoleauth'
+        AGENT_CA_PARAMS = 'op monitor interval="5s"'
+
+        ha_console_settings = {
+            'colocations': {'vip_consoleauth': COLO_CONSOLEAUTH},
+            'init_services': {'res_nova_consoleauth': 'nova-consoleauth'},
+            'resources': {'res_nova_consoleauth': AGENT_CONSOLEAUTH},
+            'resource_params': {'res_nova_consoleauth': AGENT_CA_PARAMS}}
+        generate_ha_relation_data('nova', extra_settings=ha_console_settings)
+
     @param service: Name of the service being configured
+    @param extra_settings: Dict of additional resource data
     @returns dict: json encoded data for use with relation_set
     """
     _haproxy_res = 'res_{}_haproxy'.format(service)
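
To make "json encoded data" concrete, a hedged sketch of the shape of
the returned dict (assuming the helper json-encodes each non-empty
settings bucket under a json_-prefixed relation key; values shown are
illustrative):

    settings = generate_ha_relation_data('ceilometer')
    # settings resembles:
    # {'json_resources': '{"res_ceilometer_haproxy": "lsb:haproxy"}',
    #  'json_init_services': '{"res_ceilometer_haproxy": "haproxy"}',
    #  'json_clones': '{"cl_ceilometer_haproxy": "res_ceilometer_haproxy"}'}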
@@ -149,6 +165,13 @@ def generate_ha_relation_data(service):
         },
     }
 
+    if extra_settings:
+        for k, v in extra_settings.items():
+            if _relation_data.get(k):
+                _relation_data[k].update(v)
+            else:
+                _relation_data[k] = v
+
     if config('dns-ha'):
         update_hacluster_dns_ha(service, _relation_data)
     else:
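
A minimal, self-contained sketch of the merge semantics above:
existing keys are dict-updated, new keys are assigned wholesale (the
resource names here are hypothetical):

    _relation_data = {'resources': {'res_foo_haproxy': 'lsb:haproxy'}}
    extra_settings = {
        'resources': {'res_foo_agent': 'lsb:foo-agent'},
        'delete_resources': ['res_foo_polling'],
    }
    for k, v in extra_settings.items():
        if _relation_data.get(k):
            _relation_data[k].update(v)
        else:
            _relation_data[k] = v
    # 'resources' now holds both entries; 'delete_resources' was
    # added as-is.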
@@ -232,40 +255,75 @@ def update_hacluster_vip(service, relation_data):
     """
     cluster_config = get_hacluster_config()
     vip_group = []
+    vips_to_delete = []
     for vip in cluster_config['vip'].split():
         if is_ipv6(vip):
-            res_neutron_vip = 'ocf:heartbeat:IPv6addr'
+            res_vip = 'ocf:heartbeat:IPv6addr'
             vip_params = 'ipv6addr'
         else:
-            res_neutron_vip = 'ocf:heartbeat:IPaddr2'
+            res_vip = 'ocf:heartbeat:IPaddr2'
             vip_params = 'ip'
 
-        iface = (get_iface_for_address(vip) or
-                 config('vip_iface'))
-        netmask = (get_netmask_for_address(vip) or
-                   config('vip_cidr'))
+        iface = get_iface_for_address(vip)
+        netmask = get_netmask_for_address(vip)
+
+        fallback_params = False
+        if iface is None:
+            iface = config('vip_iface')
+            fallback_params = True
+        if netmask is None:
+            netmask = config('vip_cidr')
+            fallback_params = True
 
         if iface is not None:
+            # NOTE(jamespage): Delete old VIP resources
+            # Old style naming encoding iface in name
+            # does not work well in environments where
+            # interface/subnet wiring is not consistent
             vip_key = 'res_{}_{}_vip'.format(service, iface)
-            if vip_key in vip_group:
-                if vip not in relation_data['resource_params'][vip_key]:
-                    vip_key = '{}_{}'.format(vip_key, vip_params)
-                else:
-                    log("Resource '%s' (vip='%s') already exists in "
-                        "vip group - skipping" % (vip_key, vip), WARNING)
-                    continue
+            if vip_key in vips_to_delete:
+                vip_key = '{}_{}'.format(vip_key, vip_params)
+            vips_to_delete.append(vip_key)
+
+            vip_key = 'res_{}_{}_vip'.format(
+                service,
+                hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7])
 
-            relation_data['resources'][vip_key] = res_neutron_vip
-            relation_data['resource_params'][vip_key] = (
-                'params {ip}="{vip}" cidr_netmask="{netmask}" '
-                'nic="{iface}"'.format(ip=vip_params,
-                                       vip=vip,
-                                       iface=iface,
-                                       netmask=netmask)
-            )
+            relation_data['resources'][vip_key] = res_vip
+            # NOTE(jamespage):
+            # Use option provided vip params if these were used
+            # instead of auto-detected values
+            if fallback_params:
+                relation_data['resource_params'][vip_key] = (
+                    'params {ip}="{vip}" cidr_netmask="{netmask}" '
+                    'nic="{iface}"'.format(ip=vip_params,
+                                           vip=vip,
+                                           iface=iface,
+                                           netmask=netmask)
+                )
+            else:
+                # NOTE(jamespage):
+                # let heartbeat figure out which interface and
+                # netmask to configure, which works nicely
+                # when network interface naming is not
+                # consistent across units.
+                relation_data['resource_params'][vip_key] = (
+                    'params {ip}="{vip}"'.format(ip=vip_params,
+                                                 vip=vip))
 
             vip_group.append(vip_key)
 
+    if vips_to_delete:
+        try:
+            relation_data['delete_resources'].extend(vips_to_delete)
+        except KeyError:
+            relation_data['delete_resources'] = vips_to_delete
+
     if len(vip_group) >= 1:
-        relation_data['groups'] = {
-            'grp_{}_vips'.format(service): ' '.join(vip_group)
-        }
+        key = 'grp_{}_vips'.format(service)
+        try:
+            relation_data['groups'][key] = ' '.join(vip_group)
+        except KeyError:
+            relation_data['groups'] = {
+                key: ' '.join(vip_group)
+            }
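
A worked sketch of what one pass of the loop contributes when the
interface is auto-detected (service and VIP are hypothetical; the
short hash is computed, never hard-coded):

    import hashlib

    service, vip = 'ceilometer', '10.0.5.100'
    short = hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7]
    vips_to_delete = ['res_ceilometer_eth0_vip']  # old iface-based name
    vip_key = 'res_{}_{}_vip'.format(service, short)
    # fallback_params is False, so Pacemaker picks nic/netmask itself:
    resource_params = {vip_key: 'params ip="10.0.5.100"'}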


@@ -73,6 +73,8 @@ from charmhelpers.core.host import (
     service_running,
     service_pause,
     service_resume,
+    service_stop,
+    service_start,
     restart_on_change_helper,
 )
 from charmhelpers.fetch import (
@@ -299,7 +301,7 @@ def get_os_codename_install_source(src):
     rel = ''
     if src is None:
         return rel
-    if src in ['distro', 'distro-proposed']:
+    if src in ['distro', 'distro-proposed', 'proposed']:
         try:
             rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
         except KeyError:
@@ -1303,6 +1305,65 @@ def is_unit_paused_set():
         return False
 
 
+def manage_payload_services(action, services=None, charm_func=None):
+    """Run an action against all services.
+
+    An optional charm_func() can be called. It should raise an Exception to
+    indicate that the function failed. If it was successful it should return
+    None or an optional message.
+
+    The signature for charm_func is:
+        charm_func() -> message: str
+
+    charm_func() is executed after any services are stopped, if supplied.
+
+    The services object can either be:
+      - None : no services were passed (an empty dict is returned)
+      - a list of strings
+      - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
+      - An array of [{'service': service_name, ...}, ...]
+
+    :param action: Action to run: pause, resume, start or stop.
+    :type action: str
+    :param services: See above
+    :type services: See above
+    :param charm_func: function to run for custom charm pausing.
+    :type charm_func: f()
+    :returns: Status boolean and list of messages
+    :rtype: (bool, [])
+    :raises: RuntimeError
+    """
+    actions = {
+        'pause': service_pause,
+        'resume': service_resume,
+        'start': service_start,
+        'stop': service_stop}
+    action = action.lower()
+    if action not in actions.keys():
+        raise RuntimeError(
+            "action: {} must be one of: {}".format(action,
+                                                   ', '.join(actions.keys())))
+    services = _extract_services_list_helper(services)
+    messages = []
+    success = True
+    if services:
+        for service in services.keys():
+            rc = actions[action](service)
+            if not rc:
+                success = False
+                messages.append("{} didn't {} cleanly.".format(service,
+                                                               action))
+    if charm_func:
+        try:
+            message = charm_func()
+            if message:
+                messages.append(message)
+        except Exception as e:
+            success = False
+            messages.append(str(e))
+    return success, messages
+
+
 def pause_unit(assess_status_func, services=None, ports=None,
                charm_func=None):
     """Pause a unit by stopping the services and setting 'unit-paused'
@@ -1333,20 +1394,10 @@ def pause_unit(assess_status_func, services=None, ports=None,
     @returns None
     @raises Exception(message) on an error for action_fail().
     """
-    services = _extract_services_list_helper(services)
-    messages = []
-    if services:
-        for service in services.keys():
-            stopped = service_pause(service)
-            if not stopped:
-                messages.append("{} didn't stop cleanly.".format(service))
-    if charm_func:
-        try:
-            message = charm_func()
-            if message:
-                messages.append(message)
-        except Exception as e:
-            message.append(str(e))
+    _, messages = manage_payload_services(
+        'pause',
+        services=services,
+        charm_func=charm_func)
     set_unit_paused()
     if assess_status_func:
         message = assess_status_func()
@@ -1385,20 +1436,10 @@ def resume_unit(assess_status_func, services=None, ports=None,
     @returns None
     @raises Exception(message) on an error for action_fail().
     """
-    services = _extract_services_list_helper(services)
-    messages = []
-    if services:
-        for service in services.keys():
-            started = service_resume(service)
-            if not started:
-                messages.append("{} didn't start cleanly.".format(service))
-    if charm_func:
-        try:
-            message = charm_func()
-            if message:
-                messages.append(message)
-        except Exception as e:
-            message.append(str(e))
+    _, messages = manage_payload_services(
+        'resume',
+        services=services,
+        charm_func=charm_func)
     clear_unit_paused()
     if assess_status_func:
         message = assess_status_func()
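
Both wrappers now delegate to manage_payload_services; a hypothetical
charm pause action built on them would look roughly like:

    def pause(args):
        # Stops the services and sets the 'unit-paused' kv flag.
        pause_unit(assess_status_func=None,
                   services=['haproxy', 'ceilometer-agent-central'])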


@@ -36,8 +36,10 @@ def loopback_devices():
     '''
     loopbacks = {}
     cmd = ['losetup', '-a']
-    devs = [d.strip().split(' ') for d in
-            check_output(cmd).splitlines() if d != '']
+    output = check_output(cmd)
+    if six.PY3:
+        output = output.decode('utf-8')
+    devs = [d.strip().split(' ') for d in output.splitlines() if d != '']
     for dev, _, f in devs:
         loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0]
     return loopbacks
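
For reference, a sample 'losetup -a' line and how the comprehension
above parses it (device and backing file invented):

    line = '/dev/loop0: [0051]:772337 (/srv/images/test.img)'
    dev, _, f = line.strip().split(' ')
    dev.replace(':', '')                    # '/dev/loop0'
    re.search(r'\((\S+)\)', f).groups()[0]  # '/srv/images/test.img'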


@@ -56,7 +56,7 @@ from charmhelpers.contrib.openstack.utils import (
     series_upgrade_complete,
 )
 from charmhelpers.contrib.openstack.ha.utils import (
-    update_dns_ha_resource_params,
+    generate_ha_relation_data,
 )
 from ceilometer_utils import (
     ApacheSSLContext,
@@ -89,13 +89,9 @@ from charmhelpers.contrib.openstack.ip import (
 )
 from charmhelpers.contrib.charmsupport import nrpe
 from charmhelpers.contrib.network.ip import (
-    get_iface_for_address,
-    get_netmask_for_address,
     get_relation_ip,
-    is_ipv6,
 )
 from charmhelpers.contrib.hahelpers.cluster import (
     get_hacluster_config,
-    is_clustered,
     is_elected_leader
 )
@@ -312,75 +308,17 @@ def cluster_changed():
 
 @hooks.hook('ha-relation-joined')
 def ha_joined(relation_id=None):
-    cluster_config = get_hacluster_config()
-    delete_resources = []
-    delete_resources.append('res_ceilometer_polling')
-
-    resources = {
-        'res_ceilometer_haproxy': 'lsb:haproxy',
-        'res_ceilometer_agent_central': 'lsb:ceilometer-agent-central',
-    }
-
-    resource_params = {
-        'res_ceilometer_haproxy': 'op monitor interval="5s"',
-        'res_ceilometer_agent_central': 'op monitor interval="30s"'
-    }
-
-    if config('dns-ha'):
-        update_dns_ha_resource_params(relation_id=relation_id,
-                                      resources=resources,
-                                      resource_params=resource_params)
-    else:
-        vip_group = []
-        for vip in cluster_config['vip'].split():
-            if is_ipv6(vip):
-                res_ceilometer_vip = 'ocf:heartbeat:IPv6addr'
-                vip_params = 'ipv6addr'
-            else:
-                res_ceilometer_vip = 'ocf:heartbeat:IPaddr2'
-                vip_params = 'ip'
-
-            iface = get_iface_for_address(vip)
-            if iface is not None:
-                vip_key = 'res_ceilometer_{}_vip'.format(iface)
-                if vip_key in vip_group:
-                    if vip not in resource_params[vip_key]:
-                        vip_key = '{}_{}'.format(vip_key, vip_params)
-                    else:
-                        log("Resource '%s' (vip='%s') already exists in "
-                            "vip group - skipping" % (vip_key, vip), WARNING)
-                        continue
-
-                resources[vip_key] = res_ceilometer_vip
-                resource_params[vip_key] = (
-                    'params {ip}="{vip}" cidr_netmask="{netmask}"'
-                    ' nic="{iface}"'
-                    ''.format(ip=vip_params,
-                              vip=vip,
-                              iface=iface,
-                              netmask=get_netmask_for_address(vip))
-                )
-                vip_group.append(vip_key)
-
-        if len(vip_group) >= 1:
-            relation_set(relation_id=relation_id,
-                         groups={'grp_ceilometer_vips':
-                                 ' '.join(vip_group)})
-
-    init_services = {
-        'res_ceilometer_haproxy': 'haproxy'
-    }
-    clones = {
-        'cl_ceilometer_haproxy': 'res_ceilometer_haproxy'
-    }
-    relation_set(relation_id=relation_id,
-                 init_services=init_services,
-                 corosync_bindiface=cluster_config['ha-bindiface'],
-                 corosync_mcastport=cluster_config['ha-mcastport'],
-                 resources=resources,
-                 resource_params=resource_params,
-                 delete_resources=delete_resources,
-                 clones=clones)
+    ceil_ha_settings = {
+        'resources': {
+            'res_ceilometer_agent_central': 'lsb:ceilometer-agent-central'},
+        'resource_params': {
+            'res_ceilometer_agent_central': 'op monitor interval="30s"'},
+        'delete_resources': ['res_ceilometer_polling'],
+    }
+    settings = generate_ha_relation_data(
+        'ceilometer',
+        extra_settings=ceil_ha_settings)
+    relation_set(relation_id=relation_id, **settings)
 
 
 @hooks.hook('ha-relation-changed')


@@ -63,7 +63,7 @@ TO_PATCH = [
     'peer_store',
     'configure_https',
     'status_set',
-    'update_dns_ha_resource_params',
+    'generate_ha_relation_data',
     'reload_systemd',
     'run_in_apache',
     'mkdir',
@@ -381,86 +381,11 @@ class CeilometerHooksTest(CharmTestCase):
         hooks.hooks.execute(['hooks/cluster-relation-changed'])
         self.assertEqual(mock_set_secret.call_count, 0)
 
-    @patch('charmhelpers.core.hookenv.config')
-    @patch.object(hooks, 'get_hacluster_config')
-    @patch.object(hooks, 'get_iface_for_address')
-    @patch.object(hooks, 'get_netmask_for_address')
-    def test_ha_joined(self, mock_netmask, mock_iface, mock_cluster_config,
-                       mock_config):
-        mock_cluster_config.return_value = {'vip': '10.0.5.100',
-                                            'ha-bindiface': 'bnd0',
-                                            'ha-mcastport': 5802}
-        mock_iface.return_value = 'eth0'
-        mock_netmask.return_value = '255.255.255.10'
+    def test_ha_relation_joined(self):
+        self.generate_ha_relation_data.return_value = {'rel_data': 'data'}
         hooks.hooks.execute(['hooks/ha-relation-joined'])
-        self.assertEqual(self.relation_set.call_count, 2)
-        exp_resources = {
-            'res_ceilometer_haproxy': 'lsb:haproxy',
-            'res_ceilometer_agent_central': 'lsb:ceilometer-agent-central',
-            'res_ceilometer_eth0_vip': 'ocf:heartbeat:IPaddr2'
-        }
-        exp_resource_params = {
-            'res_ceilometer_haproxy': 'op monitor interval="5s"',
-            'res_ceilometer_agent_central': 'op monitor interval="30s"',
-            'res_ceilometer_eth0_vip': ('params ip="10.0.5.100" '
-                                        'cidr_netmask="255.255.255.10" '
-                                        'nic="eth0"')
-        }
-        exp_clones = {'cl_ceilometer_haproxy': 'res_ceilometer_haproxy'}
-        call1 = call(relation_id=None,
-                     groups={'grp_ceilometer_vips': 'res_ceilometer_eth0_vip'})
-        call2 = call(relation_id=None,
-                     init_services={'res_ceilometer_haproxy': 'haproxy'},
-                     corosync_bindiface='bnd0',
-                     corosync_mcastport=5802,
-                     resources=exp_resources,
-                     resource_params=exp_resource_params,
-                     delete_resources=['res_ceilometer_polling'],
-                     clones=exp_clones)
-        self.relation_set.assert_has_calls([call1, call2], any_order=False)
-
-    @patch.object(hooks, 'get_hacluster_config')
-    def test_ha_joined_dns_ha(self, mock_cluster_config):
-        def _fake_update(resources, resource_params, relation_id=None):
-            resources.update({'res_ceilometer_public_hostname':
-                              'ocf:maas:dns'})
-            resource_params.update({'res_ceilometer_public_hostname':
-                                    'params fqdn="ceilometer.maas" '
-                                    'ip_address="10.0.0.1"'})
-
-        self.test_config.set('dns-ha', True)
-        mock_cluster_config.return_value = {
-            'vip': None,
-            'ha-bindiface': 'em0',
-            'ha-mcastport': '8080',
-            'os-admin-hostname': None,
-            'os-internal-hostname': None,
-            'os-public-hostname': 'ceilometer.maas',
-        }
-        args = {
-            'relation_id': None,
-            'corosync_bindiface': 'em0',
-            'corosync_mcastport': '8080',
-            'init_services': {'res_ceilometer_haproxy': 'haproxy'},
-            'resources': {'res_ceilometer_public_hostname': 'ocf:maas:dns',
-                          'res_ceilometer_haproxy': 'lsb:haproxy',
-                          'res_ceilometer_agent_central':
-                          'lsb:ceilometer-agent-central'},
-            'resource_params': {
-                'res_ceilometer_public_hostname':
-                    'params fqdn="ceilometer.maas" '
-                    'ip_address="10.0.0.1"',
-                'res_ceilometer_haproxy': 'op monitor interval="5s"',
-                'res_ceilometer_agent_central': 'op monitor interval="30s"'},
-            'delete_resources': ['res_ceilometer_polling'],
-            'clones': {'cl_ceilometer_haproxy': 'res_ceilometer_haproxy'}
-        }
-        self.update_dns_ha_resource_params.side_effect = _fake_update
-        hooks.ha_joined()
-        self.assertTrue(self.update_dns_ha_resource_params.called)
-        self.relation_set.assert_called_with(**args)
+        self.relation_set.assert_called_once_with(
+            relation_id=None, rel_data='data')
 
     @patch('charmhelpers.core.hookenv.config')
     @patch.object(hooks, 'keystone_joined')