Implement DNS high availability. Pass the correct information to
hacluster to register a DNS entry with MAAS 2.0 or greater rather
than using a virtual IP.

Charm-helpers sync to bring in DNS HA helpers

Change-Id: I2d9e2b92b0ebf33dc2f62327df6edbfd1e81dcca
Author: David Ames
Date:   2016-07-08 07:45:24 -07:00
Parent: dd9f4e7b71
Commit: 7f3cfab538
8 changed files with 397 additions and 52 deletions

File: README.md

@@ -49,17 +49,37 @@ deployment:
 A minimum cluster size of three units is recommended.
 
-In order to access the cluster, use the hacluster charm to provide a single IP
-address:
-
-    juju set percona-cluster vip=10.0.3.200
-    juju deploy hacluster
-    juju add-relation hacluster percona-cluster
+HA/Clustering
+-------------
 
-Clients can then access using the vip provided. This vip will be passed to
-related services:
+There are two mutually exclusive high availability options: using virtual
+IP(s) or DNS. In both cases, a relationship to hacluster is required which
+provides the corosync back end HA functionality.
 
-    juju add-relation keystone percona-cluster
+To use virtual IP(s) the clustered nodes must be on the same subnet such that
+the VIP is a valid IP on the subnet for one of the node's interfaces and each
+node has an interface in said subnet. The VIP becomes a highly-available API
+endpoint.
+
+At a minimum, the config option 'vip' must be set in order to use virtual IP
+HA. If multiple networks are being used, a VIP should be provided for each
+network, separated by spaces. Optionally, vip_iface or vip_cidr may be
+specified.
+
+To use DNS high availability there are several prerequisites. However, DNS HA
+does not require the clustered nodes to be on the same subnet.
+Currently the DNS HA feature is only available for MAAS 2.0 or greater
+environments. MAAS 2.0 requires Juju 2.0 or greater. The clustered nodes must
+have static or "reserved" IP addresses registered in MAAS. The DNS hostname(s)
+must be pre-registered in MAAS before use with DNS HA.
+
+At a minimum, the config option 'dns-ha' must be set to true and
+'os-access-hostname' must be set in order to use DNS HA.
+
+The charm will throw an exception in the following circumstances:
+If neither 'vip' nor 'dns-ha' is set and the charm is related to hacluster
+If both 'vip' and 'dns-ha' are set, as they are mutually exclusive
+If 'dns-ha' is set and 'os-access-hostname' is not set
 
 Network Space support
 ---------------------
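
To make the two modes concrete, hypothetical configurations might look as
follows (the addresses and hostname are illustrative only; under Juju 2.0
'juju config percona-cluster ...' replaces 'juju set percona-cluster ...'):

    # Virtual IP HA: one VIP per network, space separated
    juju set percona-cluster vip='10.0.3.200 192.168.20.200'
    juju deploy hacluster
    juju add-relation hacluster percona-cluster

    # DNS HA: MAAS 2.0 / Juju 2.0, hostname pre-registered in MAAS
    juju set percona-cluster dns-ha=true os-access-hostname=db.example.maas
    juju deploy hacluster
    juju add-relation hacluster percona-cluster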

File: charm-helpers.yaml (charm-helpers sync config)

@@ -6,6 +6,8 @@ include:
     - fetch
     - contrib.hahelpers.cluster
     - contrib.openstack.utils
+    - contrib.openstack.ip
+    - contrib.openstack.ha.utils
     - contrib.openstack.exceptions
     - contrib.storage.linux
     - contrib.python.packages

File: hooks/charmhelpers/contrib/openstack/ha/utils.py (new file)

@@ -0,0 +1,111 @@
# Copyright 2014-2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2016 Canonical Ltd.
#
# Authors:
# Openstack Charmers <
#
"""
Helpers for high availability.
"""
import re
from charmhelpers.core.hookenv import (
log,
relation_set,
charm_name,
config,
status_set,
DEBUG,
)
from charmhelpers.contrib.openstack.ip import (
resolve_address,
)
class DNSHAException(Exception):
"""Raised when an error occurs setting up DNS HA
"""
pass
def update_dns_ha_resource_params(resources, resource_params,
relation_id=None,
crm_ocf='ocf:maas:dns'):
""" Check for os-*-hostname settings and update resource dictionaries for
the HA relation.
@param resources: Pointer to dictionary of resources.
Usually instantiated in ha_joined().
@param resource_params: Pointer to dictionary of resource parameters.
Usually instantiated in ha_joined()
@param relation_id: Relation ID of the ha relation
@param crm_ocf: Corosync Open Cluster Framework resource agent to use for
DNS HA
"""
settings = ['os-admin-hostname', 'os-internal-hostname',
'os-public-hostname']
# Check which DNS settings are set and update dictionaries
hostname_group = []
for setting in settings:
hostname = config(setting)
if hostname is None:
log('DNS HA: Hostname setting {} is None. Ignoring.'
''.format(setting),
DEBUG)
continue
m = re.search('os-(.+?)-hostname', setting)
if m:
networkspace = m.group(1)
else:
msg = ('Unexpected DNS hostname setting: {}. '
'Cannot determine network space name'
''.format(setting))
status_set('blocked', msg)
raise DNSHAException(msg)
hostname_key = 'res_{}_{}_hostname'.format(charm_name(), networkspace)
if hostname_key in hostname_group:
log('DNS HA: Resource {}: {} already exists in '
'hostname group - skipping'.format(hostname_key, hostname),
DEBUG)
continue
hostname_group.append(hostname_key)
resources[hostname_key] = crm_ocf
resource_params[hostname_key] = (
'params fqdn="{}" ip_address="{}" '
''.format(hostname, resolve_address(endpoint_type=networkspace,
override=False)))
if len(hostname_group) >= 1:
log('DNS HA: Hostname group is set with {} as members. '
'Informing the ha relation'.format(' '.join(hostname_group)),
DEBUG)
relation_set(relation_id=relation_id, groups={
'grp_{}_hostnames'.format(charm_name()): ' '.join(hostname_group)})
else:
msg = 'DNS HA: Hostname group has no members.'
status_set('blocked', msg)
raise DNSHAException(msg)
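
For illustration, a sketch of what the helper produces; the charm name,
hostname and address below are hypothetical:

    # Assume charm_name() == 'keystone', config('os-public-hostname') ==
    # 'keystone.example.maas' and resolve_address() == '10.0.0.10'.
    resources = {}
    resource_params = {}
    update_dns_ha_resource_params(resources, resource_params)
    # resources       -> {'res_keystone_public_hostname': 'ocf:maas:dns'}
    # resource_params -> {'res_keystone_public_hostname':
    #                     'params fqdn="keystone.example.maas" '
    #                     'ip_address="10.0.0.10" '}
    # and the ha relation is updated with
    # groups={'grp_keystone_hostnames': 'res_keystone_public_hostname'}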

File: hooks/charmhelpers/contrib/openstack/ip.py (new file)

@@ -0,0 +1,182 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
from charmhelpers.core.hookenv import (
config,
unit_get,
service_name,
network_get_primary_address,
)
from charmhelpers.contrib.network.ip import (
get_address_in_network,
is_address_in_network,
is_ipv6,
get_ipv6_addr,
resolve_network_cidr,
)
from charmhelpers.contrib.hahelpers.cluster import is_clustered
PUBLIC = 'public'
INTERNAL = 'int'
ADMIN = 'admin'
ADDRESS_MAP = {
PUBLIC: {
'binding': 'public',
'config': 'os-public-network',
'fallback': 'public-address',
'override': 'os-public-hostname',
},
INTERNAL: {
'binding': 'internal',
'config': 'os-internal-network',
'fallback': 'private-address',
'override': 'os-internal-hostname',
},
ADMIN: {
'binding': 'admin',
'config': 'os-admin-network',
'fallback': 'private-address',
'override': 'os-admin-hostname',
}
}
def canonical_url(configs, endpoint_type=PUBLIC):
"""Returns the correct HTTP URL to this host given the state of HTTPS
configuration, hacluster and charm configuration.
:param configs: OSTemplateRenderer config templating object to inspect
for a complete https context.
:param endpoint_type: str endpoint type to resolve.
:returns: str base URL for services on the current service unit.
"""
scheme = _get_scheme(configs)
address = resolve_address(endpoint_type)
if is_ipv6(address):
address = "[{}]".format(address)
return '%s://%s' % (scheme, address)
def _get_scheme(configs):
"""Returns the scheme to use for the url (either http or https)
depending upon whether https is in the configs value.
:param configs: OSTemplateRenderer config templating object to inspect
for a complete https context.
:returns: either 'http' or 'https' depending on whether https is
configured within the configs context.
"""
scheme = 'http'
if configs and 'https' in configs.complete_contexts():
scheme = 'https'
return scheme
def _get_address_override(endpoint_type=PUBLIC):
"""Returns any address overrides that the user has defined based on the
endpoint type.
Note: this function allows for the service name to be inserted into the
address if the user specifies {service_name}.somehost.org.
:param endpoint_type: the type of endpoint to retrieve the override
value for.
:returns: any endpoint address or hostname that the user has overridden
or None if an override is not present.
"""
override_key = ADDRESS_MAP[endpoint_type]['override']
addr_override = config(override_key)
if not addr_override:
return None
else:
return addr_override.format(service_name=service_name())
def resolve_address(endpoint_type=PUBLIC, override=True):
"""Return unit address depending on net config.
If unit is clustered with vip(s) and has net splits defined, return vip on
correct network. If clustered with no nets defined, return primary vip.
If not clustered, return unit address ensuring address is on configured net
split if one is configured, or a Juju 2.0 extra-binding has been used.
:param endpoint_type: Network endpoint type
:param override: Accept hostname overrides or not
"""
resolved_address = None
if override:
resolved_address = _get_address_override(endpoint_type)
if resolved_address:
return resolved_address
vips = config('vip')
if vips:
vips = vips.split()
net_type = ADDRESS_MAP[endpoint_type]['config']
net_addr = config(net_type)
net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
binding = ADDRESS_MAP[endpoint_type]['binding']
clustered = is_clustered()
if clustered and vips:
if net_addr:
for vip in vips:
if is_address_in_network(net_addr, vip):
resolved_address = vip
break
else:
# NOTE: endeavour to check vips against network space
# bindings
try:
bound_cidr = resolve_network_cidr(
network_get_primary_address(binding)
)
for vip in vips:
if is_address_in_network(bound_cidr, vip):
resolved_address = vip
break
except NotImplementedError:
# No net-splits configured and no support for extra
# bindings/network spaces, so expect a single vip
resolved_address = vips[0]
else:
if config('prefer-ipv6'):
fallback_addr = get_ipv6_addr(exc_list=vips)[0]
else:
fallback_addr = unit_get(net_fallback)
if net_addr:
resolved_address = get_address_in_network(net_addr, fallback_addr)
else:
# NOTE: only try to use extra bindings if legacy network
# configuration is not in use
try:
resolved_address = network_get_primary_address(binding)
except NotImplementedError:
resolved_address = fallback_addr
if resolved_address is None:
raise ValueError("Unable to resolve a suitable IP address based on "
"charm state and configuration. (net_type=%s, "
"clustered=%s)" % (net_type, clustered))
return resolved_address
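
To summarise the branches above, the precedence for resolve_address(PUBLIC)
works out as follows (option names taken from ADDRESS_MAP; all addresses
hypothetical):

    # 1. config('os-public-hostname'), when set and override=True
    # 2. clustered with os-public-network set: the first vip inside that
    #    network
    # 3. clustered without a net split: the first vip inside the CIDR of
    #    the 'public' extra-binding, or vips[0] if bindings are unsupported
    # 4. unclustered with os-public-network set: the unit address inside
    #    that network
    # 5. unclustered otherwise: network_get_primary_address('public'),
    #    falling back to unit_get('public-address') (or the unit's IPv6
    #    address when prefer-ipv6 is set)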

File: config.yaml

@@ -79,6 +79,12 @@ options:
       Minimum number of units expected to exist before charm will attempt to
       bootstrap percona cluster. If no value is provided this setting is
       ignored.
+  dns-ha:
+    type: boolean
+    default: False
+    description: |
+      Use DNS HA with MAAS 2.0. Note: if this is set, do not set the vip
+      settings below.
   vip:
     type: string
     default:
@@ -114,6 +120,11 @@
       The IP address and netmask of the 'access' network (e.g., 192.168.0.0/24)
       .
       This network will be used for access to database services.
+  os-access-hostname:
+    type: string
+    default:
+    description: |
+      The hostname or address of the access endpoint for percona-cluster.
   prefer-ipv6:
     type: boolean
     default: False
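
Equivalently, a hypothetical deploy-time config file using the new options
(the filename and hostname are illustrative; the hostname must already be
registered in MAAS):

    # percona.yaml
    percona-cluster:
      dns-ha: True
      os-access-hostname: db.example.maas

    juju deploy --config percona.yaml percona-cluster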

File: hooks/percona_hooks.py

@@ -25,6 +25,7 @@ from charmhelpers.core.hookenv import (
     WARNING,
     is_leader,
     network_get_primary_address,
+    charm_name,
 )
 from charmhelpers.core.host import (
     service_restart,
@@ -52,6 +53,7 @@ from charmhelpers.contrib.hahelpers.cluster import (
     oldest_peer,
     DC_RESOURCE_NAME,
     peer_units,
+    get_hacluster_config,
 )
 from charmhelpers.payload.execd import execd_preinstall
 from charmhelpers.contrib.network.ip import (
@@ -68,6 +70,9 @@ from charmhelpers.contrib.hardening.mysql.checks import run_mysql_checks
 from charmhelpers.contrib.openstack.utils import (
     is_unit_paused_set,
 )
+from charmhelpers.contrib.openstack.ha.utils import (
+    update_dns_ha_resource_params,
+)
 
 from percona_utils import (
     determine_packages,
@@ -586,46 +591,55 @@ def shared_db_changed(relation_id=None, unit=None):
 @hooks.hook('ha-relation-joined')
-def ha_relation_joined():
-    vip = config('vip')
-    vip_iface = get_iface_for_address(vip) or config('vip_iface')
-    vip_cidr = get_netmask_for_address(vip) or config('vip_cidr')
-    corosync_bindiface = config('ha-bindiface')
-    corosync_mcastport = config('ha-mcastport')
-
-    if None in [vip, vip_cidr, vip_iface]:
-        log('Insufficient VIP information to configure cluster')
-        sys.exit(1)
-
-    if config('prefer-ipv6'):
-        res_mysql_vip = 'ocf:heartbeat:IPv6addr'
-        vip_params = 'params ipv6addr="%s" cidr_netmask="%s" nic="%s"' % \
-                     (vip, vip_cidr, vip_iface)
-    else:
-        res_mysql_vip = 'ocf:heartbeat:IPaddr2'
-        vip_params = 'params ip="%s" cidr_netmask="%s" nic="%s"' % \
-                     (vip, vip_cidr, vip_iface)
-
-    resources = {'res_mysql_vip': res_mysql_vip,
-                 'res_mysql_monitor': 'ocf:percona:mysql_monitor'}
+def ha_relation_joined(relation_id=None):
+    cluster_config = get_hacluster_config()
     sstpsswd = config('sst-password')
-    resource_params = {'res_mysql_vip': vip_params,
-                       'res_mysql_monitor':
+    resources = {'res_mysql_monitor': 'ocf:percona:mysql_monitor'}
+    resource_params = {'res_mysql_monitor':
                        RES_MONITOR_PARAMS % {'sstpass': sstpsswd}}
-    groups = {'grp_percona_cluster': 'res_mysql_vip'}
+
+    if config('dns-ha'):
+        update_dns_ha_resource_params(relation_id=relation_id,
+                                      resources=resources,
+                                      resource_params=resource_params)
+        group_name = 'grp_{}_hostnames'.format(charm_name())
+        groups = {group_name: 'res_{}_access_hostname'.format(charm_name())}
+    else:
+        vip_iface = (get_iface_for_address(cluster_config['vip']) or
+                     config('vip_iface'))
+        vip_cidr = (get_netmask_for_address(cluster_config['vip']) or
+                    config('vip_cidr'))
+
+        if config('prefer-ipv6'):
+            res_mysql_vip = 'ocf:heartbeat:IPv6addr'
+            vip_params = 'params ipv6addr="%s" cidr_netmask="%s" nic="%s"' % \
+                         (cluster_config['vip'], vip_cidr, vip_iface)
+        else:
+            res_mysql_vip = 'ocf:heartbeat:IPaddr2'
+            vip_params = 'params ip="%s" cidr_netmask="%s" nic="%s"' % \
+                         (cluster_config['vip'], vip_cidr, vip_iface)
+
+        resources['res_mysql_vip'] = res_mysql_vip
+        resource_params['res_mysql_vip'] = vip_params
+        group_name = 'grp_percona_cluster'
+        groups = {group_name: 'res_mysql_vip'}
 
     clones = {'cl_mysql_monitor': 'res_mysql_monitor meta interleave=true'}
 
-    colocations = {'vip_mysqld': 'inf: grp_percona_cluster cl_mysql_monitor'}
+    colocations = {'colo_percona_cluster': 'inf: {} cl_mysql_monitor'
+                   ''.format(group_name)}
 
     locations = {'loc_percona_cluster':
-                 'grp_percona_cluster rule inf: writable eq 1'}
+                 '{} rule inf: writable eq 1'
+                 ''.format(group_name)}
 
     for rel_id in relation_ids('ha'):
         relation_set(relation_id=rel_id,
-                     corosync_bindiface=corosync_bindiface,
-                     corosync_mcastport=corosync_mcastport,
+                     corosync_bindiface=cluster_config['ha-bindiface'],
+                     corosync_mcastport=cluster_config['ha-mcastport'],
                      resources=resources,
                      resource_params=resource_params,
                      groups=groups,
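
For reference, in the VIP branch the data published to hacluster ends up
looking roughly like this (a sketch, with values borrowed from the unit
tests below and RES_MONITOR_PARAMS left symbolic):

    relation_set(relation_id='ha:1',
                 corosync_bindiface='eth0',
                 corosync_mcastport=5490,
                 resources={'res_mysql_monitor': 'ocf:percona:mysql_monitor',
                            'res_mysql_vip': 'ocf:heartbeat:IPaddr2'},
                 resource_params={'res_mysql_monitor':
                                  RES_MONITOR_PARAMS % {'sstpass': 'ubuntu'},
                                  'res_mysql_vip': 'params ip="10.0.3.3" '
                                  'cidr_netmask="16" nic="eth0"'},
                 groups={'grp_percona_cluster': 'res_mysql_vip'},
                 clones={'cl_mysql_monitor':
                         'res_mysql_monitor meta interleave=true'},
                 colocations={'colo_percona_cluster':
                              'inf: grp_percona_cluster cl_mysql_monitor'},
                 locations={'loc_percona_cluster':
                            'grp_percona_cluster rule inf: writable eq 1'})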

File: unit_tests/test_percona_hooks.py

@@ -28,7 +28,9 @@ TO_PATCH = ['log', 'config',
             'unit_get',
             'get_host_ip',
             'is_clustered',
-            'get_ipv6_addr']
+            'get_ipv6_addr',
+            'get_hacluster_config',
+            'update_dns_ha_resource_params']
class TestHARelation(CharmTestCase):
@@ -36,18 +38,6 @@
         CharmTestCase.setUp(self, hooks, TO_PATCH)
         self.network_get_primary_address.side_effect = NotImplementedError
 
-    @mock.patch('sys.exit')
-    def test_relation_not_configured(self, exit_):
-        self.config.return_value = None
-
-        class MyError(Exception):
-            pass
-
-        def f(x):
-            raise MyError(x)
-        exit_.side_effect = f
-        self.assertRaises(MyError, hooks.ha_relation_joined)
-
     def test_resources(self):
         self.relation_ids.return_value = ['ha:1']
         password = 'ubuntu'
@@ -59,6 +49,11 @@
         self.get_iface_for_address.return_value = None
         self.test_config.set('vip', '10.0.3.3')
         self.test_config.set('sst-password', password)
+        self.get_hacluster_config.return_value = {
+            'vip': '10.0.3.3',
+            'ha-bindiface': 'eth0',
+            'ha-mcastport': 5490,
+        }
 
         def f(k):
             return self.test_config.get(k)
@@ -77,7 +72,7 @@
         clones = {'cl_mysql_monitor': 'res_mysql_monitor meta interleave=true'}
 
-        colocations = {'vip_mysqld': 'inf: grp_percona_cluster cl_mysql_monitor'}  # noqa
+        colocations = {'colo_percona_cluster': 'inf: grp_percona_cluster cl_mysql_monitor'}  # noqa
 
         locations = {'loc_percona_cluster':
                      'grp_percona_cluster rule inf: writable eq 1'}
@@ -101,6 +96,11 @@
         self.test_config.set('vip', '10.0.3.3')
         self.test_config.set('vip_cidr', '16')
         self.test_config.set('vip_iface', 'eth0')
+        self.get_hacluster_config.return_value = {
+            'vip': '10.0.3.3',
+            'ha-bindiface': 'eth0',
+            'ha-mcastport': 5490,
+        }
 
         def f(k):
             return self.test_config.get(k)
@@ -130,6 +130,11 @@
         self.test_config.set('vip', '10.0.3.3')
         self.test_config.set('vip_cidr', '16')
         self.test_config.set('vip_iface', 'eth1')
+        self.get_hacluster_config.return_value = {
+            'vip': '10.0.3.3',
+            'ha-bindiface': 'eth1',
+            'ha-mcastport': 5490,
+        }
 
         def f(k):
             return self.test_config.get(k)