Implement DNS high availability. Pass the correct information to
hacluster to register a DNS entry with MAAS 2.0 or greater rather
than using a virtual IP.

Charm-helpers sync to bring in DNS HA helpers

Change-Id: I62bb49fbaebdd3c787f96f4b6ad107f8e3e368a7
David Ames 2016-05-13 14:12:55 -07:00 committed by James Page
parent df578e943b
commit b032915cb1
11 changed files with 300 additions and 68 deletions

.gitignore

@@ -8,3 +8,4 @@ tags
joined-string
.unit-state.db
trusty/**
xenial/


@@ -61,8 +61,35 @@ credentials exist (as configured via charm configuration)
HA/Clustering
-------------
VIP is only required if you plan on multi-unit clustering (requires relating
with hacluster charm). The VIP becomes a highly-available API endpoint.
There are two mutually exclusive high availability options: using virtual
IP(s) or DNS. In both cases a relationship to the hacluster charm is
required, which provides the corosync back end HA functionality.
To use virtual IP(s), the clustered nodes must be on the same subnet: the
VIP must be a valid IP on that subnet, and each node must have an interface
on it. The VIP becomes a highly-available API endpoint.
At a minimum, the config option 'vip' must be set in order to use virtual IP
HA. If multiple networks are being used, a VIP should be provided for each
network, separated by spaces. Optionally, vip_iface or vip_cidr may be
specified.
To use DNS high availability there are several prerequisites, although DNS
HA does not require the clustered nodes to be on the same subnet.
Currently the DNS HA feature is only available for MAAS 2.0 or greater
environments, and MAAS 2.0 requires Juju 2.0 or greater. The clustered nodes
must have static or "reserved" IP addresses registered in MAAS, and the DNS
hostname(s) must be pre-registered in MAAS before use with DNS HA.
At a minimum, the config option 'dns-ha' must be set to true and at least one
of 'os-admin-hostname', 'os-internal-hostname' or 'os-public-hostname' must
be set in order to use DNS HA. One or more of the above hostnames may be set.
The charm will raise an exception in the following circumstances:
If neither 'vip' nor 'dns-ha' is set and the charm is related to hacluster
If both 'vip' and 'dns-ha' are set, as they are mutually exclusive
If 'dns-ha' is set and none of the os-{admin,internal,public}-hostname(s) are
set
SSL/HTTPS
---------


@@ -280,14 +280,14 @@ def get_hacluster_config(exclude_keys=None):
    for initiating a relation to hacluster:
        ha-bindiface, ha-mcastport, vip, os-internal-hostname,
        os-admin-hostname, os-public-hostname
        os-admin-hostname, os-public-hostname, os-access-hostname
    param: exclude_keys: list of setting key(s) to be excluded.
    returns: dict: A dict containing settings keyed by setting name.
    raises: HAIncompleteConfig if settings are missing or incorrect.
    '''
    settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'os-internal-hostname',
                'os-admin-hostname', 'os-public-hostname']
                'os-admin-hostname', 'os-public-hostname', 'os-access-hostname']
    conf = {}
    for setting in settings:
        if exclude_keys and setting in exclude_keys:
@@ -324,7 +324,7 @@ def valid_hacluster_config():
    # If dns-ha then one of os-*-hostname must be set
    if dns:
        dns_settings = ['os-internal-hostname', 'os-admin-hostname',
                        'os-public-hostname']
                        'os-public-hostname', 'os-access-hostname']
        # At this point it is unknown if one or all of the possible
        # network spaces are in HA. Validate at least one is set which is
        # the minimum required.
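
The hunk above shows only the tail of the validation; as a rough standalone
sketch of the vip/dns-ha rules described in the README (a plain dict stands
in for charm config here, so this is not the exact charm-helpers
implementation):

# Standalone sketch of the mutually exclusive vip/dns-ha validation rules.
# 'conf' is a plain dict standing in for the charm config.

class HAIncompleteConfig(Exception):
    pass


def validate_ha_config(conf):
    vip = conf.get('vip')
    dns = conf.get('dns-ha')
    if vip and dns:
        # 'vip' and 'dns-ha' are mutually exclusive
        raise HAIncompleteConfig("Both 'vip' and 'dns-ha' are set")
    if not (vip or dns):
        # related to hacluster but neither HA mode is configured
        raise HAIncompleteConfig("Neither 'vip' nor 'dns-ha' is set")
    if dns:
        dns_settings = ['os-internal-hostname', 'os-admin-hostname',
                        'os-public-hostname', 'os-access-hostname']
        # Which network spaces are in HA is unknown at this point, so one
        # hostname is the minimum required.
        if not any(conf.get(s) for s in dns_settings):
            raise HAIncompleteConfig('dns-ha set but no os-*-hostname set')
    return True


validate_ha_config({'dns-ha': True, 'os-public-hostname': 'keystone.maas'})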


@@ -0,0 +1,130 @@
# Copyright 2014-2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2016 Canonical Ltd.
#
# Authors:
#   Openstack Charmers <
#

"""
Helpers for high availability.
"""

import re

from charmhelpers.core.hookenv import (
    log,
    relation_set,
    charm_name,
    config,
    status_set,
    DEBUG,
)

from charmhelpers.core.host import (
    lsb_release
)

from charmhelpers.contrib.openstack.ip import (
    resolve_address,
)


class DNSHAException(Exception):
    """Raised when an error occurs setting up DNS HA
    """

    pass


def update_dns_ha_resource_params(resources, resource_params,
                                  relation_id=None,
                                  crm_ocf='ocf:maas:dns'):
    """ Check for os-*-hostname settings and update resource dictionaries for
    the HA relation.

    @param resources: Pointer to dictionary of resources.
                      Usually instantiated in ha_joined().
    @param resource_params: Pointer to dictionary of resource parameters.
                            Usually instantiated in ha_joined()
    @param relation_id: Relation ID of the ha relation
    @param crm_ocf: Corosync Open Cluster Framework resource agent to use for
                    DNS HA
    """

    # Validate the charm environment for DNS HA
    assert_charm_supports_dns_ha()

    settings = ['os-admin-hostname', 'os-internal-hostname',
                'os-public-hostname', 'os-access-hostname']

    # Check which DNS settings are set and update dictionaries
    hostname_group = []
    for setting in settings:
        hostname = config(setting)
        if hostname is None:
            log('DNS HA: Hostname setting {} is None. Ignoring.'
                ''.format(setting),
                DEBUG)
            continue
        m = re.search('os-(.+?)-hostname', setting)
        if m:
            networkspace = m.group(1)
        else:
            msg = ('Unexpected DNS hostname setting: {}. '
                   'Cannot determine network space name'
                   ''.format(setting))
            status_set('blocked', msg)
            raise DNSHAException(msg)

        hostname_key = 'res_{}_{}_hostname'.format(charm_name(), networkspace)
        if hostname_key in hostname_group:
            log('DNS HA: Resource {}: {} already exists in '
                'hostname group - skipping'.format(hostname_key, hostname),
                DEBUG)
            continue

        hostname_group.append(hostname_key)
        resources[hostname_key] = crm_ocf
        resource_params[hostname_key] = (
            'params fqdn="{}" ip_address="{}" '
            ''.format(hostname, resolve_address(endpoint_type=networkspace,
                                                override=False)))

    if len(hostname_group) >= 1:
        log('DNS HA: Hostname group is set with {} as members. '
            'Informing the ha relation'.format(' '.join(hostname_group)),
            DEBUG)
        relation_set(relation_id=relation_id, groups={
            'grp_{}_hostnames'.format(charm_name()): ' '.join(hostname_group)})
    else:
        msg = 'DNS HA: Hostname group has no members.'
        status_set('blocked', msg)
        raise DNSHAException(msg)


def assert_charm_supports_dns_ha():
    """Validate prerequisites for DNS HA
    The MAAS client is only available on Xenial or greater
    """
    if lsb_release().get('DISTRIB_RELEASE') < '16.04':
        msg = ('DNS HA is only supported on 16.04 and greater '
               'versions of Ubuntu.')
        status_set('blocked', msg)
        raise DNSHAException(msg)
    return True
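
For context, a charm's ha_joined() hook calls this helper roughly as follows;
this snippet only runs inside a hook execution context, and the hostname, IP
address and charm name in the comments are illustrative values taken from the
keystone unit test further below, not anything the helper guarantees:

# Sketch of a call site inside a charm's ha_joined() hook.
from charmhelpers.contrib.openstack.ha.utils import (
    update_dns_ha_resource_params,
)

resources = {'res_ks_haproxy': 'lsb:haproxy'}
resource_params = {'res_ks_haproxy': 'op monitor interval="5s"'}

# With dns-ha=true and os-public-hostname='keystone.maas', this adds e.g.
#   resources['res_keystone_public_hostname'] = 'ocf:maas:dns'
#   resource_params['res_keystone_public_hostname'] =
#       'params fqdn="keystone.maas" ip_address="10.0.0.1" '
# and itself calls relation_set() with a 'grp_keystone_hostnames' group
# listing the hostname resources.
update_dns_ha_resource_params(resources=resources,
                              resource_params=resource_params,
                              relation_id=None)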


@@ -725,15 +725,14 @@ def git_install_requested():
requirements_dir = None


def git_default_repos(projects_yaml):
def git_default_repos(projects):
    """
    Returns default repos if a default openstack-origin-git value is specified.
    """
    service = service_name()
    core_project = service

    for default, branch in GIT_DEFAULT_BRANCHES.iteritems():
        if projects_yaml == default:
        if projects == default:

            # add the requirements repo first
            repo = {
@@ -743,41 +742,34 @@ def git_default_repos(projects_yaml):
            }
            repos = [repo]

            # neutron-* and nova-* charms require some additional repos
            if service in ['neutron-api', 'neutron-gateway',
                           'neutron-openvswitch']:
                core_project = 'neutron'
                for project in ['neutron-fwaas', 'neutron-lbaas',
                                'neutron-vpnaas']:
            # neutron and nova charms require some additional repos
            if service == 'neutron':
                for svc in ['neutron-fwaas', 'neutron-lbaas', 'neutron-vpnaas']:
                    repo = {
                        'name': project,
                        'repository': GIT_DEFAULT_REPOS[project],
                        'name': svc,
                        'repository': GIT_DEFAULT_REPOS[svc],
                        'branch': branch,
                    }
                    repos.append(repo)

            elif service in ['nova-cloud-controller', 'nova-compute']:
                core_project = 'nova'
            elif service == 'nova':
                repo = {
                    'name': 'neutron',
                    'repository': GIT_DEFAULT_REPOS['neutron'],
                    'branch': branch,
                }
                repos.append(repo)

            elif service == 'openstack-dashboard':
                core_project = 'horizon'

            # finally add the current service's core project repo
            # finally add the current service's repo
            repo = {
                'name': core_project,
                'repository': GIT_DEFAULT_REPOS[core_project],
                'name': service,
                'repository': GIT_DEFAULT_REPOS[service],
                'branch': branch,
            }
            repos.append(repo)

            return yaml.dump(dict(repositories=repos))

    return projects_yaml
    return projects


def _git_yaml_load(projects_yaml):
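
To make the simplification concrete, here is a rough standalone sketch of the
expansion the new code performs for a 'neutron' service. The repository URLs
and branch name are stand-ins; the real function uses service_name(),
GIT_DEFAULT_BRANCHES and GIT_DEFAULT_REPOS from charm-helpers:

# Standalone sketch of the simplified default-repo expansion (stand-in data).
import yaml

GIT_DEFAULT_REPOS = {
    'requirements': 'git://git.openstack.org/openstack/requirements',
    'neutron': 'git://git.openstack.org/openstack/neutron',
    'neutron-fwaas': 'git://git.openstack.org/openstack/neutron-fwaas',
    'neutron-lbaas': 'git://git.openstack.org/openstack/neutron-lbaas',
    'neutron-vpnaas': 'git://git.openstack.org/openstack/neutron-vpnaas',
}


def default_repos_for(service, branch='stable/mitaka'):
    # the requirements repo always comes first
    repos = [{'name': 'requirements',
              'repository': GIT_DEFAULT_REPOS['requirements'],
              'branch': branch}]
    if service == 'neutron':
        for svc in ['neutron-fwaas', 'neutron-lbaas', 'neutron-vpnaas']:
            repos.append({'name': svc,
                          'repository': GIT_DEFAULT_REPOS[svc],
                          'branch': branch})
    # the service's own repo is always appended last
    repos.append({'name': service,
                  'repository': GIT_DEFAULT_REPOS[service],
                  'branch': branch})
    return yaml.dump(dict(repositories=repos))


print(default_repos_for('neutron'))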


@@ -398,16 +398,13 @@ def install_remote(source, *args, **kwargs):
    # We ONLY check for True here because can_handle may return a string
    # explaining why it can't handle a given source.
    handlers = [h for h in plugins() if h.can_handle(source) is True]
    installed_to = None
    for handler in handlers:
        try:
            installed_to = handler.install(source, *args, **kwargs)
            return handler.install(source, *args, **kwargs)
        except UnhandledSource as e:
            log('Install source attempt unsuccessful: {}'.format(e),
                level='WARNING')
    if not installed_to:
        raise UnhandledSource("No handler found for source {}".format(source))
    return installed_to
    raise UnhandledSource("No handler found for source {}".format(source))


def install_from_config(config_var_name):
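
The net effect of this hunk: the first handler to succeed returns
immediately, and the exception is raised unconditionally when every handler
fails, rather than being gated on a sentinel value. A generic sketch of the
pattern, with hypothetical handlers standing in for the fetch plugins:

# Generic first-success-wins dispatch, as install_remote() now behaves.
# UnhandledSource and the handlers are stand-ins, not the charm-helpers ones.
class UnhandledSource(Exception):
    pass


def fetch(source, handlers):
    for handler in handlers:
        try:
            return handler(source)  # first success returns immediately
        except UnhandledSource as e:
            print('Install source attempt unsuccessful: {}'.format(e))
    # reached only if every handler raised
    raise UnhandledSource("No handler found for source {}".format(source))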


@@ -42,15 +42,23 @@ class BzrUrlFetchHandler(BaseFetchHandler):
        else:
            return True

    def branch(self, source, dest):
    def branch(self, source, dest, revno=None):
        if not self.can_handle(source):
            raise UnhandledSource("Cannot handle {}".format(source))
        cmd_opts = []
        if revno:
            cmd_opts += ['-r', str(revno)]
        if os.path.exists(dest):
            check_call(['bzr', 'pull', '--overwrite', '-d', dest, source])
            cmd = ['bzr', 'pull']
            cmd += cmd_opts
            cmd += ['--overwrite', '-d', dest, source]
        else:
            check_call(['bzr', 'branch', source, dest])
            cmd = ['bzr', 'branch']
            cmd += cmd_opts
            cmd += [source, dest]
        check_call(cmd)

    def install(self, source, dest=None):
    def install(self, source, dest=None, revno=None):
        url_parts = self.parse_url(source)
        branch_name = url_parts.path.strip("/").split("/")[-1]
        if dest:
@@ -59,10 +67,11 @@ class BzrUrlFetchHandler(BaseFetchHandler):
            dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                    branch_name)
        if not os.path.exists(dest_dir):
            mkdir(dest_dir, perms=0o755)
        if dest and not os.path.exists(dest):
            mkdir(dest, perms=0o755)
        try:
            self.branch(source, dest_dir)
            self.branch(source, dest_dir, revno)
        except OSError as e:
            raise UnhandledSource(e.strerror)
        return dest_dir
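
Usage with the new parameter might look like the following; the branch URL,
destination path and revision number are made up for illustration, and bzr
must be installed for the call to succeed:

# Hypothetical usage: pin a Bazaar checkout to a specific revision.
from charmhelpers.fetch.bzrurl import BzrUrlFetchHandler

handler = BzrUrlFetchHandler()
# Fetch revision 42 of an (illustrative) branch; with revno=None the
# behaviour is unchanged and the branch tip is fetched. The checkout lands
# in a branch-named directory under dest.
checkout = handler.install('lp:~example-team/example/trunk',
                           dest='/tmp/example-checkout', revno=42)
print(checkout)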


@@ -177,6 +177,12 @@ options:
    default: True
    description: Ldap identity server backend readonly to keystone.
  # HA configuration settings
  dns-ha:
    type: boolean
    default: False
    description: |
      Use DNS HA with MAAS 2.0. Note: if this is set, do not set the vip
      settings below.
  vip:
    type: string
    default:
@@ -278,6 +284,30 @@ options:
      create a public endpoint for keystone as:
      .
      https://keystone.example.com:5000/v2.0
  os-internal-hostname:
    type: string
    default:
    description: |
      The hostname or address of the internal endpoints created for keystone
      in the keystone identity provider (itself).
      .
      This value will be used for internal endpoints. For example, an
      os-internal-hostname set to 'keystone.internal.example.com' with SSL
      enabled will create an internal endpoint for keystone as:
      .
      https://keystone.internal.example.com:5000/v2.0
  os-admin-hostname:
    type: string
    default:
    description: |
      The hostname or address of the admin endpoints created for keystone
      in the keystone identity provider (itself).
      .
      This value will be used for admin endpoints. For example, an
      os-admin-hostname set to 'keystone.admin.example.com' with SSL enabled
      will create an admin endpoint for keystone as:
      .
      https://keystone.admin.example.com:5000/v2.0
  prefer-ipv6:
    type: boolean
    default: False


@@ -106,6 +106,10 @@ from charmhelpers.contrib.hahelpers.cluster import (
    https,
)

from charmhelpers.contrib.openstack.ha.utils import (
    update_dns_ha_resource_params,
)

from charmhelpers.payload.execd import execd_preinstall
from charmhelpers.contrib.peerstorage import (
    peer_retrieve_by_prefix,
@@ -592,41 +596,46 @@ def ha_joined(relation_id=None):
        'res_ks_haproxy': 'op monitor interval="5s"'
    }

    vip_group = []
    for vip in cluster_config['vip'].split():
        if is_ipv6(vip):
            res_ks_vip = 'ocf:heartbeat:IPv6addr'
            vip_params = 'ipv6addr'
        else:
            res_ks_vip = 'ocf:heartbeat:IPaddr2'
            vip_params = 'ip'
    if config('dns-ha'):
        update_dns_ha_resource_params(relation_id=relation_id,
                                      resources=resources,
                                      resource_params=resource_params)
    else:
        vip_group = []
        for vip in cluster_config['vip'].split():
            if is_ipv6(vip):
                res_ks_vip = 'ocf:heartbeat:IPv6addr'
                vip_params = 'ipv6addr'
            else:
                res_ks_vip = 'ocf:heartbeat:IPaddr2'
                vip_params = 'ip'

        iface = (get_iface_for_address(vip) or
                 config('vip_iface'))
        netmask = (get_netmask_for_address(vip) or
                   config('vip_cidr'))
            iface = (get_iface_for_address(vip) or
                     config('vip_iface'))
            netmask = (get_netmask_for_address(vip) or
                       config('vip_cidr'))

        if iface is not None:
            vip_key = 'res_ks_{}_vip'.format(iface)
            if vip_key in vip_group:
                log("Resource '%s' (vip='%s') already exists in "
                    "vip group - skipping" % (vip_key, vip),
                    WARNING)
                continue
            if iface is not None:
                vip_key = 'res_ks_{}_vip'.format(iface)
                if vip_key in vip_group:
                    log("Resource '%s' (vip='%s') already exists in "
                        "vip group - skipping" % (vip_key, vip),
                        WARNING)
                    continue

            vip_group.append(vip_key)
            resources[vip_key] = res_ks_vip
            resource_params[vip_key] = (
                'params {ip}="{vip}" cidr_netmask="{netmask}"'
                ' nic="{iface}"'.format(ip=vip_params,
                                        vip=vip,
                                        iface=iface,
                                        netmask=netmask)
            )
                vip_group.append(vip_key)
                resources[vip_key] = res_ks_vip
                resource_params[vip_key] = (
                    'params {ip}="{vip}" cidr_netmask="{netmask}"'
                    ' nic="{iface}"'.format(ip=vip_params,
                                            vip=vip,
                                            iface=iface,
                                            netmask=netmask)
                )

    if len(vip_group) >= 1:
        relation_set(relation_id=relation_id,
                     groups={CLUSTER_RES: ' '.join(vip_group)})
        if len(vip_group) >= 1:
            relation_set(relation_id=relation_id,
                         groups={CLUSTER_RES: ' '.join(vip_group)})

    init_services = {
        'res_ks_haproxy': 'haproxy'


@@ -56,6 +56,8 @@ TO_PATCH = [
    'configure_installation_source',
    # charmhelpers.contrib.openstack.ip
    'resolve_address',
    # charmhelpers.contrib.openstack.ha.utils
    'update_dns_ha_resource_params',
    # charmhelpers.contrib.hahelpers.cluster_utils
    'is_elected_leader',
    'get_hacluster_config',
@@ -832,6 +834,41 @@ class KeystoneRelationTests(CharmTestCase):
        }
        self.relation_set.assert_called_with(**args)

    def test_ha_joined_dns_ha(self):
        def _fake_update(resources, resource_params, relation_id=None):
            resources.update({'res_keystone_public_hostname': 'ocf:maas:dns'})
            resource_params.update({'res_keystone_public_hostname':
                                    'params fqdn="keystone.maas" '
                                    'ip_address="10.0.0.1"'})

        self.test_config.set('dns-ha', True)
        self.get_hacluster_config.return_value = {
            'vip': None,
            'ha-bindiface': 'em0',
            'ha-mcastport': '8080',
            'os-admin-hostname': None,
            'os-internal-hostname': None,
            'os-public-hostname': 'keystone.maas',
        }
        args = {
            'relation_id': None,
            'corosync_bindiface': 'em0',
            'corosync_mcastport': '8080',
            'init_services': {'res_ks_haproxy': 'haproxy'},
            'resources': {'res_keystone_public_hostname': 'ocf:maas:dns',
                          'res_ks_haproxy': 'lsb:haproxy'},
            'resource_params': {
                'res_keystone_public_hostname': 'params fqdn="keystone.maas" '
                                                'ip_address="10.0.0.1"',
                'res_ks_haproxy': 'op monitor interval="5s"'},
            'clones': {'cl_ks_haproxy': 'res_ks_haproxy'}
        }
        self.update_dns_ha_resource_params.side_effect = _fake_update

        hooks.ha_joined()
        self.assertTrue(self.update_dns_ha_resource_params.called)
        self.relation_set.assert_called_with(**args)

    @patch('keystone_utils.log')
    @patch('keystone_utils.ensure_ssl_cert_master')
    @patch('keystone_utils.synchronize_ca')