Use OpenStack HA charmhelper for ha configuration

Use the new OpenStack HA charmhelper to generate required data
to pass to the hacluster charm when running in clustered
deployments.

This also makes the switch to iface-less configuration of VIP
resources, resolving issues in deployments where LXD containers
don't have consistent interface ordering across the application.

Change-Id: Ie0ca7fb0221cb6c3f886161e1b446d4fae5775a9
This commit is contained in:
James Page 2019-01-25 10:45:51 +00:00
parent 2858f1b02d
commit 93b263831c
2 changed files with 16 additions and 135 deletions

View File

@@ -30,7 +30,6 @@ from charmhelpers.core.hookenv import (
relation_set,
log,
DEBUG,
WARNING,
Hooks, UnregisteredHookError,
status_set,
)
@@ -52,9 +51,6 @@ from charmhelpers.core.host import (
)
from charmhelpers.contrib.network.ip import (
get_relation_ip,
get_iface_for_address,
get_netmask_for_address,
is_ipv6,
)
from charmhelpers.contrib.openstack.context import ADDRESS_TYPES
from charmhelpers.contrib.openstack.ip import (
@@ -71,15 +67,11 @@ from charmhelpers.contrib.openstack.utils import (
series_upgrade_prepare,
series_upgrade_complete,
)
from charmhelpers.contrib.hahelpers.cluster import (
get_hacluster_config,
)
from charmhelpers.contrib.openstack.ha.utils import (
update_dns_ha_resource_params,
generate_ha_relation_data,
)
from utils import (
enable_pocket,
CEPHRG_HA_RES,
register_configs,
setup_ipv6,
services,
@@ -174,6 +166,11 @@ def config_changed():
for unit in related_units(r_id):
mon_relation(r_id, unit)
# Re-trigger hacluster relations to switch to ifaceless
# vip configuration
for r_id in relation_ids('ha'):
ha_relation_joined(r_id)
CONFIGS.write_all()
configure_https()
@@ -291,69 +288,8 @@ def cluster_changed():
@hooks.hook('ha-relation-joined')
def ha_relation_joined(relation_id=None):
cluster_config = get_hacluster_config()
# Obtain resources
resources = {
'res_cephrg_haproxy': 'lsb:haproxy'
}
resource_params = {
'res_cephrg_haproxy': 'op monitor interval="5s"'
}
if config('dns-ha'):
update_dns_ha_resource_params(relation_id=relation_id,
resources=resources,
resource_params=resource_params)
else:
vip_group = []
for vip in cluster_config['vip'].split():
if is_ipv6(vip):
res_rgw_vip = 'ocf:heartbeat:IPv6addr'
vip_params = 'ipv6addr'
else:
res_rgw_vip = 'ocf:heartbeat:IPaddr2'
vip_params = 'ip'
iface = get_iface_for_address(vip)
netmask = get_netmask_for_address(vip)
if iface is not None:
vip_key = 'res_cephrg_{}_vip'.format(iface)
if vip_key in vip_group:
if vip not in resource_params[vip_key]:
vip_key = '{}_{}'.format(vip_key, vip_params)
else:
log("Resource '%s' (vip='%s') already exists in "
"vip group - skipping" % (vip_key, vip), WARNING)
continue
resources[vip_key] = res_rgw_vip
resource_params[vip_key] = (
'params {ip}="{vip}" cidr_netmask="{netmask}"'
' nic="{iface}"'.format(ip=vip_params,
vip=vip,
iface=iface,
netmask=netmask)
)
vip_group.append(vip_key)
if len(vip_group) >= 1:
relation_set(groups={CEPHRG_HA_RES: ' '.join(vip_group)})
init_services = {
'res_cephrg_haproxy': 'haproxy'
}
clones = {
'cl_cephrg_haproxy': 'res_cephrg_haproxy'
}
relation_set(relation_id=relation_id,
init_services=init_services,
corosync_bindiface=cluster_config['ha-bindiface'],
corosync_mcastport=cluster_config['ha-mcastport'],
resources=resources,
resource_params=resource_params,
clones=clones)
settings = generate_ha_relation_data('cephrg')
relation_set(relation_id=relation_id, **settings)
@hooks.hook('ha-relation-changed')

View File

@@ -38,8 +38,6 @@ TO_PATCH = [
'cmp_pkgrevno',
'execd_preinstall',
'enable_pocket',
'get_iface_for_address',
'get_netmask_for_address',
'log',
'open_port',
'os',
@@ -50,8 +48,7 @@ TO_PATCH = [
'status_set',
'subprocess',
'sys',
'get_hacluster_config',
'update_dns_ha_resource_params',
'generate_ha_relation_data',
'get_relation_ip',
'disable_unused_apache_sites',
'service_reload',
@@ -261,67 +258,15 @@ class CephRadosGWTests(CharmTestCase):
self.CONFIGS.write_all.assert_called_with()
_id_joined.assert_called_with(relid='rid')
def test_ha_relation_joined_vip(self):
self.test_config.set('ha-bindiface', 'eth8')
self.test_config.set('ha-mcastport', '5000')
self.test_config.set('vip', '10.0.0.10')
self.get_hacluster_config.return_value = {
'vip': '10.0.0.10',
'ha-bindiface': 'eth8',
'ha-mcastport': '5000',
def test_ha_relation_joined(self):
self.generate_ha_relation_data.return_value = {
'test': 'data'
}
self.get_iface_for_address.return_value = 'eth7'
self.get_netmask_for_address.return_value = '255.255.0.0'
ceph_hooks.ha_relation_joined()
eth_params = ('params ip="10.0.0.10" cidr_netmask="255.255.0.0" '
'nic="eth7"')
resources = {'res_cephrg_haproxy': 'lsb:haproxy',
'res_cephrg_eth7_vip': 'ocf:heartbeat:IPaddr2'}
resource_params = {'res_cephrg_haproxy': 'op monitor interval="5s"',
'res_cephrg_eth7_vip': eth_params}
ceph_hooks.ha_relation_joined(relation_id='ha:1')
self.relation_set.assert_called_with(
relation_id=None,
init_services={'res_cephrg_haproxy': 'haproxy'},
corosync_bindiface='eth8',
corosync_mcastport='5000',
resource_params=resource_params,
resources=resources,
clones={'cl_cephrg_haproxy': 'res_cephrg_haproxy'})
def test_ha_joined_dns_ha(self):
def _fake_update(resources, resource_params, relation_id=None):
resources.update({'res_cephrg_public_hostname': 'ocf:maas:dns'})
resource_params.update({'res_cephrg_public_hostname':
'params fqdn="keystone.maas" '
'ip_address="10.0.0.1"'})
self.test_config.set('dns-ha', True)
self.get_hacluster_config.return_value = {
'vip': None,
'ha-bindiface': 'em0',
'ha-mcastport': '8080',
'os-admin-hostname': None,
'os-internal-hostname': None,
'os-public-hostname': 'keystone.maas',
}
args = {
'relation_id': None,
'corosync_bindiface': 'em0',
'corosync_mcastport': '8080',
'init_services': {'res_cephrg_haproxy': 'haproxy'},
'resources': {'res_cephrg_public_hostname': 'ocf:maas:dns',
'res_cephrg_haproxy': 'lsb:haproxy'},
'resource_params': {
'res_cephrg_public_hostname': 'params fqdn="keystone.maas" '
'ip_address="10.0.0.1"',
'res_cephrg_haproxy': 'op monitor interval="5s"'},
'clones': {'cl_cephrg_haproxy': 'res_cephrg_haproxy'}
}
self.update_dns_ha_resource_params.side_effect = _fake_update
ceph_hooks.ha_relation_joined()
self.assertTrue(self.update_dns_ha_resource_params.called)
self.relation_set.assert_called_with(**args)
relation_id='ha:1',
test='data'
)
def test_ha_relation_changed(self):
_id_joined = self.patch('identity_joined')