Rebase, resync

James Page 2014-10-02 10:22:36 +01:00
commit eee9635e0d
16 changed files with 491 additions and 100 deletions

View File

@ -1,4 +1,4 @@
branch: lp:~james-page/charm-helpers/multiple-https-networks
branch: lp:charm-helpers
destination: hooks/charmhelpers
include:
- core
@ -9,3 +9,4 @@ include:
- contrib.hahelpers
- payload.execd
- contrib.network.ip
- contrib.peerstorage

View File

@ -243,3 +243,15 @@ options:
.
Increasing this value will increase instance density on compute nodes
at the potential expense of instance performance.
prefer-ipv6:
type: boolean
default: False
description: |
If True, IPv6 support is enabled and the charm will expect network
interfaces to be configured with an IPv6 address. If set to False (the
default), IPv4 is expected.
.
NOTE: these charms do not currently support the IPv6 privacy extension.
For this charm to function correctly, the privacy extension must be
disabled and a non-temporary address must be configured/available on
your network interface.
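For example, the option can be toggled on a deployed service with the juju 1.x CLI (service name illustrative):

juju set nova-cloud-controller prefer-ipv6=true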

View File

@ -1,11 +1,16 @@
import glob
import re
import subprocess
import sys
from functools import partial
from charmhelpers.core.hookenv import unit_get
from charmhelpers.fetch import apt_install
from charmhelpers.core.hookenv import (
ERROR, log,
WARNING,
ERROR,
log
)
try:
@ -164,13 +169,14 @@ def format_ipv6_addr(address):
if is_ipv6(address):
address = "[%s]" % address
else:
log("Not an valid ipv6 address: %s" % address,
level=ERROR)
log("Not a valid ipv6 address: %s" % address, level=WARNING)
address = None
return address
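A minimal usage sketch of the helper above (addresses illustrative):

from charmhelpers.contrib.network.ip import format_ipv6_addr

format_ipv6_addr('2001:db8::1')  # -> '[2001:db8::1]'
format_ipv6_addr('10.0.0.1')     # -> None, and a WARNING is logged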
def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None):
def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
fatal=True, exc_list=None):
"""
Return the assigned IP address for a given interface, if any, or [].
"""
@ -210,26 +216,105 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=T
if 'addr' in entry and entry['addr'] not in exc_list:
addresses.append(entry['addr'])
if fatal and not addresses:
raise Exception("Interface '%s' doesn't have any %s addresses." % (iface, inet_type))
raise Exception("Interface '%s' doesn't have any %s addresses." %
(iface, inet_type))
return addresses
get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
def get_ipv6_addr(iface='eth0', inc_aliases=False, fatal=True, exc_list=None):
def get_iface_from_addr(addr):
"""Work out on which interface the provided address is configured."""
for iface in netifaces.interfaces():
addresses = netifaces.ifaddresses(iface)
for inet_type in addresses:
for _addr in addresses[inet_type]:
_addr = _addr['addr']
# link local
ll_key = re.compile("(.+)%.*")
raw = re.match(ll_key, _addr)
if raw:
_addr = raw.group(1)
if _addr == addr:
log("Address '%s' is configured on iface '%s'" %
(addr, iface))
return iface
msg = "Unable to infer net iface on which '%s' is configured" % (addr)
raise Exception(msg)
def sniff_iface(f):
"""If no iface provided, inject net iface inferred from unit private
address.
"""
Return the assigned IPv6 address for a given interface, if any, or [].
def iface_sniffer(*args, **kwargs):
if not kwargs.get('iface', None):
kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
return f(*args, **kwargs)
return iface_sniffer
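To illustrate the decorator (the wrapped function here is hypothetical):

@sniff_iface
def show_iface(iface=None):
    return iface

show_iface()             # iface injected from unit_get('private-address')
show_iface(iface='br0')  # an explicit keyword argument is left untouched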
@sniff_iface
def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
dynamic_only=True):
"""Get assigned IPv6 address for a given interface.
Returns list of addresses found. If no address found, returns empty list.
If iface is None, we infer the current primary interface by doing a reverse
lookup on the unit private-address.
We currently only support scope global IPv6 addresses i.e. non-temporary
addresses. If no global IPv6 address is found, return the first one found
in the ipv6 address list.
"""
addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
inc_aliases=inc_aliases, fatal=fatal,
exc_list=exc_list)
remotly_addressable = []
for address in addresses:
if not address.startswith('fe80'):
remotly_addressable.append(address)
if fatal and not remotly_addressable:
raise Exception("Interface '%s' doesn't have global ipv6 address." % iface)
return remotly_addressable
if addresses:
global_addrs = []
for addr in addresses:
key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
m = re.match(key_scope_link_local, addr)
if m:
eui_64_mac = m.group(1)
iface = m.group(2)
else:
global_addrs.append(addr)
if global_addrs:
# Make sure any found global addresses are not temporary
cmd = ['ip', 'addr', 'show', iface]
out = subprocess.check_output(cmd)
if dynamic_only:
key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
else:
key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
addrs = []
for line in out.split('\n'):
line = line.strip()
m = re.match(key, line)
if m and 'temporary' not in line:
# Return the first valid address we find
for addr in global_addrs:
if m.group(1) == addr:
if not dynamic_only or \
m.group(1).endswith(eui_64_mac):
addrs.append(addr)
if addrs:
return addrs
if fatal:
raise Exception("Interface '%s' doesn't have a scope global "
"non-temporary ipv6 address." % iface)
return []
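A hedged usage sketch for a unit with IPv6 configured (VIP value hypothetical):

from charmhelpers.contrib.network.ip import get_ipv6_addr

# First scope-global, non-temporary address on the inferred interface,
# excluding the VIP so it is never used as the unit's own address:
addr = get_ipv6_addr(exc_list=['2001:db8::100'])[0]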
def get_bridges(vnic_dir='/sys/devices/virtual/net'):

View File

@ -10,32 +10,62 @@ class OpenStackAmuletDeployment(AmuletDeployment):
that is specifically for use by OpenStack charms.
"""
def __init__(self, series=None, openstack=None, source=None):
def __init__(self, series=None, openstack=None, source=None, stable=True):
"""Initialize the deployment environment."""
super(OpenStackAmuletDeployment, self).__init__(series)
self.openstack = openstack
self.source = source
self.stable = stable
# Note(coreycb): this needs to be changed when new next branches come
# out.
self.current_next = "trusty"
def _determine_branch_locations(self, other_services):
"""Determine the branch locations for the other services.
Determine if the local branch being tested is derived from its
stable or next (dev) branch, and based on this, use the corresponding
stable or next branches for the other_services."""
base_charms = ['mysql', 'mongodb', 'rabbitmq-server']
if self.stable:
for svc in other_services:
temp = 'lp:charms/{}'
svc['location'] = temp.format(svc['name'])
else:
for svc in other_services:
if svc['name'] in base_charms:
temp = 'lp:charms/{}'
svc['location'] = temp.format(svc['name'])
else:
temp = 'lp:~openstack-charmers/charms/{}/{}/next'
svc['location'] = temp.format(self.current_next,
svc['name'])
return other_services
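A sketch of the resulting locations (service names illustrative):

d = OpenStackAmuletDeployment(series='trusty', stable=False)
d._determine_branch_locations([{'name': 'mysql'}, {'name': 'glance'}])
# -> mysql:  lp:charms/mysql (base charm, always stable)
#    glance: lp:~openstack-charmers/charms/trusty/glance/next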
def _add_services(self, this_service, other_services):
"""Add services to the deployment and set openstack-origin."""
"""Add services to the deployment and set openstack-origin/source."""
other_services = self._determine_branch_locations(other_services)
super(OpenStackAmuletDeployment, self)._add_services(this_service,
other_services)
name = 0
services = other_services
services.append(this_service)
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
'ceph-osd', 'ceph-radosgw']
if self.openstack:
for svc in services:
if svc[name] not in use_source:
if svc['name'] not in use_source:
config = {'openstack-origin': self.openstack}
self.d.configure(svc[name], config)
self.d.configure(svc['name'], config)
if self.source:
for svc in services:
if svc[name] in use_source:
if svc['name'] in use_source:
config = {'source': self.source}
self.d.configure(svc[name], config)
self.d.configure(svc['name'], config)
def _configure_services(self, configs):
"""Configure all of the services."""

View File

@ -187,15 +187,16 @@ class OpenStackAmuletUtils(AmuletUtils):
f = opener.open("http://download.cirros-cloud.net/version/released")
version = f.read().strip()
cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version)
cirros_img = "cirros-{}-x86_64-disk.img".format(version)
local_path = os.path.join('tests', cirros_img)
if not os.path.exists(cirros_img):
if not os.path.exists(local_path):
cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
version, cirros_img)
opener.retrieve(cirros_url, cirros_img)
opener.retrieve(cirros_url, local_path)
f.close()
with open(cirros_img) as f:
with open(local_path) as f:
image = glance.images.create(name=image_name, is_public=True,
disk_format='qcow2',
container_format='bare', data=f)

View File

@ -52,8 +52,9 @@ from charmhelpers.contrib.openstack.neutron import (
from charmhelpers.contrib.network.ip import (
get_address_in_network,
get_ipv6_addr,
is_address_in_network,
get_netmask_for_address
get_netmask_for_address,
format_ipv6_addr,
is_address_in_network
)
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
@ -175,8 +176,10 @@ class SharedDBContext(OSContextGenerator):
for rid in relation_ids('shared-db'):
for unit in related_units(rid):
rdata = relation_get(rid=rid, unit=unit)
host = rdata.get('db_host')
host = format_ipv6_addr(host) or host
ctxt = {
'database_host': rdata.get('db_host'),
'database_host': host,
'database': self.database,
'database_user': self.user,
'database_password': rdata.get(password_setting),
@ -252,10 +255,15 @@ class IdentityServiceContext(OSContextGenerator):
for rid in relation_ids('identity-service'):
for unit in related_units(rid):
rdata = relation_get(rid=rid, unit=unit)
serv_host = rdata.get('service_host')
serv_host = format_ipv6_addr(serv_host) or serv_host
auth_host = rdata.get('auth_host')
auth_host = format_ipv6_addr(auth_host) or auth_host
ctxt = {
'service_port': rdata.get('service_port'),
'service_host': rdata.get('service_host'),
'auth_host': rdata.get('auth_host'),
'service_host': serv_host,
'auth_host': auth_host,
'auth_port': rdata.get('auth_port'),
'admin_tenant_name': rdata.get('service_tenant'),
'admin_user': rdata.get('service_username'),
@ -304,11 +312,13 @@ class AMQPContext(OSContextGenerator):
for unit in related_units(rid):
if relation_get('clustered', rid=rid, unit=unit):
ctxt['clustered'] = True
ctxt['rabbitmq_host'] = relation_get('vip', rid=rid,
unit=unit)
vip = relation_get('vip', rid=rid, unit=unit)
vip = format_ipv6_addr(vip) or vip
ctxt['rabbitmq_host'] = vip
else:
ctxt['rabbitmq_host'] = relation_get('private-address',
rid=rid, unit=unit)
host = relation_get('private-address', rid=rid, unit=unit)
host = format_ipv6_addr(host) or host
ctxt['rabbitmq_host'] = host
ctxt.update({
'rabbitmq_user': username,
'rabbitmq_password': relation_get('password', rid=rid,
@ -347,8 +357,9 @@ class AMQPContext(OSContextGenerator):
and len(related_units(rid)) > 1:
rabbitmq_hosts = []
for unit in related_units(rid):
rabbitmq_hosts.append(relation_get('private-address',
rid=rid, unit=unit))
host = relation_get('private-address', rid=rid, unit=unit)
host = format_ipv6_addr(host) or host
rabbitmq_hosts.append(host)
ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)
if not context_complete(ctxt):
return {}
@ -377,6 +388,7 @@ class CephContext(OSContextGenerator):
ceph_addr = \
relation_get('ceph-public-address', rid=rid, unit=unit) or \
relation_get('private-address', rid=rid, unit=unit)
ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
mon_hosts.append(ceph_addr)
ctxt = {
@ -413,8 +425,9 @@ class HAProxyContext(OSContextGenerator):
return {}
l_unit = local_unit().replace('/', '-')
if config('prefer-ipv6'):
addr = get_ipv6_addr()
addr = get_ipv6_addr(exc_list=[config('vip')])[0]
else:
addr = unit_get('private-address')
@ -443,7 +456,7 @@ class HAProxyContext(OSContextGenerator):
# NOTE(jamespage) no split configurations found, just use
# private addresses
if len(cluster_hosts) < 1:
if not cluster_hosts:
cluster_hosts[addr] = {}
cluster_hosts[addr]['network'] = "{}/{}".format(
addr,
@ -463,6 +476,11 @@ class HAProxyContext(OSContextGenerator):
'frontends': cluster_hosts,
}
if config('haproxy-server-timeout'):
ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
if config('haproxy-client-timeout'):
ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
if config('prefer-ipv6'):
ctxt['local_host'] = 'ip6-localhost'
ctxt['haproxy_host'] = '::'
@ -870,3 +888,16 @@ class SyslogContext(OSContextGenerator):
'use_syslog': config('use-syslog')
}
return ctxt
class BindHostContext(OSContextGenerator):
def __call__(self):
if config('prefer-ipv6'):
return {
'bind_host': '::'
}
else:
return {
'bind_host': '0.0.0.0'
}

View File

@ -66,7 +66,7 @@ def resolve_address(endpoint_type=PUBLIC):
resolved_address = vip
else:
if config('prefer-ipv6'):
fallback_addr = get_ipv6_addr()
fallback_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
else:
fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
resolved_address = get_address_in_network(

View File

@ -14,8 +14,17 @@ defaults
retries 3
timeout queue 1000
timeout connect 1000
{% if haproxy_client_timeout -%}
timeout client {{ haproxy_client_timeout }}
{% else -%}
timeout client 30000
{% endif -%}
{% if haproxy_server_timeout -%}
timeout server {{ haproxy_server_timeout }}
{% else -%}
timeout server 30000
{% endif -%}
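With haproxy-client-timeout set to 60000 and no server timeout configured, the rendered defaults would include (values illustrative):

timeout client 60000
timeout server 30000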
listen stats {{ stat_port }}
mode http

View File

@ -4,6 +4,7 @@
from collections import OrderedDict
import subprocess
import json
import os
import socket
import sys
@ -13,7 +14,9 @@ from charmhelpers.core.hookenv import (
log as juju_log,
charm_dir,
ERROR,
INFO
INFO,
relation_ids,
relation_set
)
from charmhelpers.contrib.storage.linux.lvm import (
@ -22,6 +25,10 @@ from charmhelpers.contrib.storage.linux.lvm import (
remove_lvm_physical_volume,
)
from charmhelpers.contrib.network.ip import (
get_ipv6_addr
)
from charmhelpers.core.host import lsb_release, mounts, umount
from charmhelpers.fetch import apt_install, apt_cache
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
@ -457,3 +464,21 @@ def get_hostname(address, fqdn=True):
return result
else:
return result.split('.')[0]
def sync_db_with_multi_ipv6_addresses(database, database_user,
relation_prefix=None):
hosts = get_ipv6_addr(dynamic_only=False)
kwargs = {'database': database,
'username': database_user,
'hostname': json.dumps(hosts)}
if relation_prefix:
keys = kwargs.keys()
for key in keys:
kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
del kwargs[key]
for rid in relation_ids('shared-db'):
relation_set(relation_id=rid, **kwargs)
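For example, with relation_prefix='nova' the settings pushed onto each shared-db relation would be (addresses hypothetical):

# nova_database = 'nova'
# nova_username = 'nova'
# nova_hostname = '["2001:db8::10", "2001:db8::11"]'  # JSON-encoded host list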

View File

@ -12,6 +12,10 @@ from charmhelpers.contrib.hahelpers.cluster import (
is_clustered
)
from charmhelpers.contrib.network.ip import (
get_ipv6_addr
)
def context_complete(ctxt):
_missing = []
@ -158,10 +162,18 @@ def canonical_url(vip_setting='vip'):
scheme = 'http'
if https():
scheme = 'https'
if is_clustered():
addr = config(vip_setting)
if config('prefer-ipv6'):
if is_clustered():
addr = '[%s]' % config(vip_setting)
else:
addr = '[%s]' % get_ipv6_addr(exc_list=[config('vip')])[0]
else:
addr = unit_get('private-address')
if is_clustered():
addr = config(vip_setting)
else:
addr = unit_get('private-address')
return '%s://%s' % (scheme, addr)
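Illustrative results of the address selection above (addresses hypothetical; the scheme depends on https()):

# prefer-ipv6, clustered:      https://[2001:db8::100]  (the bracketed VIP)
# prefer-ipv6, not clustered:  https://[2001:db8::10]
# IPv4, not clustered:         http://10.0.0.10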
@ -202,6 +214,7 @@ class NeutronCCContext(context.NeutronContext):
ctxt['nvp_controllers_list'] = \
_config['nvp-controllers'].split()
ctxt['nova_url'] = "{}:8774/v2".format(canonical_url())
return ctxt
@ -221,6 +234,7 @@ class IdentityServiceContext(context.IdentityServiceContext):
)
ctxt['keystone_ec2_url'] = ec2_tokens
ctxt['region'] = config('region')
return ctxt
@ -253,3 +267,10 @@ class NovaConfigContext(WorkerConfigContext):
ctxt['cpu_allocation_ratio'] = config('cpu-allocation-ratio')
ctxt['ram_allocation_ratio'] = config('ram-allocation-ratio')
return ctxt
class NovaIPv6Context(context.BindHostContext):
def __call__(self):
ctxt = super(NovaIPv6Context, self).__call__()
ctxt['use_ipv6'] = config('prefer-ipv6')
return ctxt

View File

@ -29,16 +29,20 @@ from charmhelpers.core.host import (
restart_on_change,
service_running,
service_stop,
service_restart,
)
from charmhelpers.fetch import (
apt_install, apt_update,
apt_install,
apt_update,
filter_installed_packages
)
from charmhelpers.contrib.openstack.utils import (
configure_installation_source,
openstack_upgrade_available,
os_release,
sync_db_with_multi_ipv6_addresses
)
from charmhelpers.contrib.openstack.neutron import (
@ -66,7 +70,8 @@ from nova_cc_utils import (
do_openstack_upgrade,
enable_services,
keystone_ca_cert_b64,
migrate_database,
migrate_neutron_database,
migrate_nova_database,
neutron_plugin,
save_script_rc,
ssh_compute_add,
@ -84,6 +89,8 @@ from nova_cc_utils import (
console_attributes,
service_guard,
guard_map,
services,
setup_ipv6
)
from charmhelpers.contrib.hahelpers.cluster import (
@ -102,6 +109,8 @@ from charmhelpers.contrib.network.ip import (
get_iface_for_address,
get_netmask_for_address,
get_address_in_network,
get_ipv6_addr,
is_ipv6
)
from charmhelpers.contrib.openstack.context import ADDRESS_TYPES
@ -134,9 +143,17 @@ def install():
active=config('service-guard'))
@restart_on_change(restart_map(), stopstart=True)
def config_changed():
if config('prefer-ipv6'):
setup_ipv6()
sync_db_with_multi_ipv6_addresses(config('database'),
config('database-user'),
relation_prefix='nova')
global CONFIGS
if openstack_upgrade_available('nova-common'):
CONFIGS = do_openstack_upgrade()
[neutron_api_relation_joined(rid=rid, remote_restart=True)
for rid in relation_ids('neutron-api')]
save_script_rc()
configure_https()
CONFIGS.write_all()
@ -173,6 +190,23 @@ def amqp_changed():
CONFIGS.write(NEUTRON_CONF)
def conditional_neutron_migration():
if relation_ids('neutron-api'):
log('Not running neutron database migration as neutron-api service '
'is present.')
elif os_release('nova-common') <= 'icehouse':
log('Not running neutron database migration as migrations are handled '
'by the neutron-server process.')
else:
migrate_neutron_database()
# neutron-api service may have appeared while the migration was
# running so prod it just in case
[neutron_api_relation_joined(rid=rid, remote_restart=True)
for rid in relation_ids('neutron-api')]
if 'neutron-server' in services():
service_restart('neutron-server')
@hooks.hook('shared-db-relation-joined')
def db_joined():
if is_relation_made('pgsql-nova-db') or \
@ -183,14 +217,31 @@ def db_joined():
log(e, level=ERROR)
raise Exception(e)
relation_set(nova_database=config('database'),
nova_username=config('database-user'),
nova_hostname=unit_get('private-address'))
if network_manager() in ['quantum', 'neutron']:
# XXX: Renaming relations from quantum_* to neutron_* here.
relation_set(neutron_database=config('neutron-database'),
neutron_username=config('neutron-database-user'),
neutron_hostname=unit_get('private-address'))
config_neutron = True
else:
config_neutron = False
if config('prefer-ipv6'):
sync_db_with_multi_ipv6_addresses(config('database'),
config('database-user'),
relation_prefix='nova')
if config_neutron:
sync_db_with_multi_ipv6_addresses(config('neutron-database'),
config('neutron-database-user'),
relation_prefix='neutron')
else:
host = unit_get('private-address')
relation_set(nova_database=config('database'),
nova_username=config('database-user'),
nova_hostname=host)
if config_neutron:
# XXX: Renaming relations from quantum_* to neutron_* here.
relation_set(neutron_database=config('neutron-database'),
neutron_username=config('neutron-database-user'),
neutron_hostname=host)
@hooks.hook('pgsql-nova-db-relation-joined')
@ -235,10 +286,11 @@ def db_changed():
if allowed_units and local_unit() not in allowed_units.split():
log('Allowed_units list provided and this unit not present')
return
migrate_database()
migrate_nova_database()
log('Triggering remote cloud-compute restarts.')
[compute_joined(rid=rid, remote_restart=True)
for rid in relation_ids('cloud-compute')]
conditional_neutron_migration()
@hooks.hook('pgsql-nova-db-relation-changed')
@ -252,10 +304,11 @@ def postgresql_nova_db_changed():
CONFIGS.write_all()
if eligible_leader(CLUSTER_RES):
migrate_database()
migrate_nova_database()
log('Triggering remote cloud-compute restarts.')
[compute_joined(rid=rid, remote_restart=True)
for rid in relation_ids('cloud-compute')]
conditional_neutron_migration()
@hooks.hook('pgsql-neutron-db-relation-changed')
@ -544,6 +597,10 @@ def cluster_joined(relation_id=None):
relation_id=relation_id,
relation_settings={'{}-address'.format(addr_type): address}
)
if config('prefer-ipv6'):
private_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
relation_set(relation_id=relation_id,
relation_settings={'private-address': private_addr})
@hooks.hook('cluster-relation-changed',
@ -567,22 +624,31 @@ def cluster_changed():
@hooks.hook('ha-relation-joined')
def ha_joined():
config = get_hacluster_config()
cluster_config = get_hacluster_config()
resources = {
'res_nova_haproxy': 'lsb:haproxy',
}
resource_params = {
'res_nova_haproxy': 'op monitor interval="5s"'
}
vip_group = []
for vip in config['vip'].split():
for vip in cluster_config['vip'].split():
if is_ipv6(vip):
res_nova_vip = 'ocf:heartbeat:IPv6addr'
vip_params = 'ipv6addr'
else:
res_nova_vip = 'ocf:heartbeat:IPaddr2'
vip_params = 'ip'
iface = get_iface_for_address(vip)
if iface is not None:
vip_key = 'res_nova_{}_vip'.format(iface)
resources[vip_key] = 'ocf:heartbeat:IPaddr2'
resources[vip_key] = res_nova_vip
resource_params[vip_key] = (
'params ip="{vip}" cidr_netmask="{netmask}"'
' nic="{iface}"'.format(vip=vip,
'params {ip}="{vip}" cidr_netmask="{netmask}"'
' nic="{iface}"'.format(ip=vip_params,
vip=vip,
iface=iface,
netmask=get_netmask_for_address(vip))
)
@ -598,8 +664,8 @@ def ha_joined():
'cl_nova_haproxy': 'res_nova_haproxy'
}
relation_set(init_services=init_services,
corosync_bindiface=config['ha-bindiface'],
corosync_mcastport=config['ha-mcastport'],
corosync_bindiface=cluster_config['ha-bindiface'],
corosync_mcastport=cluster_config['ha-mcastport'],
resources=resources,
resource_params=resource_params,
clones=clones)
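For an IPv6 VIP the generated pacemaker primitive would look like (address and interface hypothetical):

# resources['res_nova_eth0_vip'] = 'ocf:heartbeat:IPv6addr'
# resource_params['res_nova_eth0_vip'] = (
#     'params ipv6addr="2001:db8::100" cidr_netmask="64" nic="eth0"')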
@ -707,7 +773,7 @@ def upgrade_charm():
@hooks.hook('neutron-api-relation-joined')
def neutron_api_relation_joined(rid=None):
def neutron_api_relation_joined(rid=None, remote_restart=False):
with open('/etc/init/neutron-server.override', 'wb') as out:
out.write('manual\n')
if os.path.isfile(NEUTRON_CONF):
@ -716,8 +782,12 @@ def neutron_api_relation_joined(rid=None):
service_stop('neutron-server')
for id_rid in relation_ids('identity-service'):
identity_joined(rid=id_rid)
nova_url = canonical_url(CONFIGS, INTERNAL) + ":8774/v2"
relation_set(relation_id=rid, nova_url=nova_url)
rel_settings = {
'nova_url': canonical_url(CONFIGS, INTERNAL) + ":8774/v2"
}
if remote_restart:
rel_settings['restart_trigger'] = str(uuid.uuid4())
relation_set(relation_id=rid, **rel_settings)
@hooks.hook('neutron-api-relation-changed')

View File

@ -27,6 +27,7 @@ from charmhelpers.fetch import (
apt_upgrade,
apt_update,
apt_install,
add_source
)
from charmhelpers.core.hookenv import (
@ -44,7 +45,12 @@ from charmhelpers.core.host import (
service,
service_start,
service_stop,
service_running
service_running,
lsb_release
)
from charmhelpers.contrib.network.ip import (
is_ipv6
)
import nova_cc_context
@ -113,6 +119,7 @@ BASE_RESOURCE_MAP = OrderedDict([
nova_cc_context.HAProxyContext(),
nova_cc_context.IdentityServiceContext(),
nova_cc_context.VolumeServiceContext(),
nova_cc_context.NovaIPv6Context(),
nova_cc_context.NeutronCCContext(),
nova_cc_context.NovaConfigContext()],
}),
@ -505,8 +512,12 @@ def _do_openstack_upgrade(new_src):
# NOTE(jamespage) upgrade with existing config files as the
# havana->icehouse migration enables new service_plugins which
# create issues with db upgrades
neutron_db_manage(['stamp', cur_os_rel])
neutron_db_manage(['upgrade', 'head'])
if relation_ids('neutron-api'):
log('Not running neutron database migration as neutron-api service '
'is present.')
else:
neutron_db_manage(['stamp', cur_os_rel])
migrate_neutron_database()
reset_os_release()
configs = register_configs(release=new_os_rel)
configs.write_all()
@ -516,7 +527,7 @@ def _do_openstack_upgrade(new_src):
ml2_migration()
if eligible_leader(CLUSTER_RES):
migrate_database()
migrate_nova_database()
[service_start(s) for s in services()]
disable_policy_rcd()
@ -543,7 +554,7 @@ def volume_service():
return 'cinder'
def migrate_database():
def migrate_nova_database():
'''Runs nova-manage to initialize a new database or migrate existing'''
log('Migrating the nova database.', level=INFO)
cmd = ['nova-manage', 'db', 'sync']
@ -556,6 +567,12 @@ def migrate_database():
cmd_all_services('start')
def migrate_neutron_database():
'''Runs neutron-db-manage to init a new database or migrate existing'''
log('Migrating the neutron database.', level=INFO)
neutron_db_manage(['upgrade', 'head'])
def auth_token_config(setting):
"""
Returns currently configured value for setting in api-paste.ini's
@ -658,16 +675,18 @@ def ssh_compute_add(public_key, rid=None, unit=None, user=None):
private_address = relation_get(rid=rid, unit=unit,
attribute='private-address')
hosts = [private_address]
if relation_get('hostname'):
hosts.append(relation_get('hostname'))
if not is_ip(private_address):
hosts.append(get_host_ip(private_address))
hosts.append(private_address.split('.')[0])
else:
hn = get_hostname(private_address)
hosts.append(hn)
hosts.append(hn.split('.')[0])
if not is_ipv6(private_address):
if relation_get('hostname'):
hosts.append(relation_get('hostname'))
if not is_ip(private_address):
hosts.append(get_host_ip(private_address))
hosts.append(private_address.split('.')[0])
else:
hn = get_hostname(private_address)
hosts.append(hn)
hosts.append(hn.split('.')[0])
for host in list(set(hosts)):
if not ssh_known_host_key(host, unit, user):
@ -895,3 +914,19 @@ def enable_services():
override_file = '/etc/init/{}.override'.format(svc)
if os.path.isfile(override_file):
os.remove(override_file)
def setup_ipv6():
ubuntu_rel = lsb_release()['DISTRIB_CODENAME'].lower()
if ubuntu_rel < "trusty":
raise Exception("IPv6 is not supported in the charms for Ubuntu "
"versions less than Trusty 14.04")
# NOTE(xianghui): haproxy (1.5.3) must be installed from trusty-backports
# to support IPv6 addresses, so a check is required to avoid breaking
# other versions; IPv6 is only supported on >= Trusty.
if ubuntu_rel == 'trusty':
add_source('deb http://archive.ubuntu.com/ubuntu trusty-backports'
' main')
apt_update()
apt_install('haproxy/trusty-backports', fatal=True)
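The backports install above is roughly equivalent to the following shell steps (a sketch, not the exact commands charm-helpers runs):

echo 'deb http://archive.ubuntu.com/ubuntu trusty-backports main' >> /etc/apt/sources.list
apt-get update
apt-get install haproxy/trusty-backports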

View File

@ -5,7 +5,7 @@
[DEFAULT]
state_path = /var/lib/neutron
lock_path = $state_path/lock
bind_host = 0.0.0.0
bind_host = {{ bind_host }}
auth_strategy = keystone
notification_driver = neutron.openstack.common.notifier.rpc_notifier
api_workers = {{ workers }}

View File

@ -21,6 +21,12 @@ volumes_path=/var/lib/nova/volumes
enabled_apis=ec2,osapi_compute,metadata
auth_strategy=keystone
compute_driver=libvirt.LibvirtDriver
use_ipv6 = {{ use_ipv6 }}
osapi_compute_listen = {{ bind_host }}
my_ip = {{ bind_host }}
metadata_host = {{ bind_host }}
s3_listen = {{ bind_host }}
ec2_listen = {{ bind_host }}
osapi_compute_workers = {{ workers }}
ec2_workers = {{ workers }}

View File

@ -39,6 +39,7 @@ TO_PATCH = [
'is_relation_made',
'local_unit',
'log',
'os_release',
'relation_get',
'relation_set',
'relation_ids',
@ -46,8 +47,10 @@ TO_PATCH = [
'ssh_known_hosts_lines',
'ssh_authorized_keys_lines',
'save_script_rc',
'service_restart',
'service_running',
'service_stop',
'services',
'execd_preinstall',
'network_manager',
'volume_service',
@ -55,7 +58,9 @@ TO_PATCH = [
'eligible_leader',
'keystone_ca_cert_b64',
'neutron_plugin',
'migrate_database',
'migrate_nova_database',
'migrate_neutron_database',
'uuid',
]
@ -99,11 +104,19 @@ class NovaCCHooksTests(CharmTestCase):
hooks.config_changed()
self.assertTrue(self.save_script_rc.called)
@patch.object(hooks, 'cluster_joined')
@patch.object(hooks, 'identity_joined')
@patch.object(hooks, 'neutron_api_relation_joined')
@patch.object(hooks, 'configure_https')
def test_config_changed_with_upgrade(self, conf_https):
def test_config_changed_with_upgrade(self, conf_https, neutron_api_joined,
identity_joined, cluster_joined):
self.openstack_upgrade_available.return_value = True
self.relation_ids.return_value = ['generic_rid']
hooks.config_changed()
self.assertTrue(self.do_openstack_upgrade.called)
self.assertTrue(neutron_api_joined.called)
self.assertTrue(identity_joined.called)
self.assertTrue(cluster_joined.called)
self.assertTrue(self.save_script_rc.called)
def test_compute_changed_ssh_migration(self):
@ -322,11 +335,13 @@ class NovaCCHooksTests(CharmTestCase):
configs.write = MagicMock()
hooks.postgresql_nova_db_changed()
@patch.object(hooks, 'conditional_neutron_migration')
@patch.object(hooks, 'CONFIGS')
def test_db_changed(self, configs):
def test_db_changed(self, configs, cond_neutron_mig):
self._shared_db_test(configs)
self.assertTrue(configs.write_all.called)
self.migrate_database.assert_called_with()
self.migrate_nova_database.assert_called_with()
cond_neutron_mig.assert_called_with()
@patch.object(hooks, 'CONFIGS')
def test_db_changed_allowed(self, configs):
@ -337,7 +352,7 @@ class NovaCCHooksTests(CharmTestCase):
self.local_unit.return_value = 'nova-cloud-controller/3'
self._shared_db_test(configs)
self.assertTrue(configs.write_all.called)
self.migrate_database.assert_called_with()
self.migrate_nova_database.assert_called_with()
@patch.object(hooks, 'CONFIGS')
def test_db_changed_not_allowed(self, configs):
@ -348,13 +363,13 @@ class NovaCCHooksTests(CharmTestCase):
self.local_unit.return_value = 'nova-cloud-controller/1'
self._shared_db_test(configs)
self.assertTrue(configs.write_all.called)
self.assertFalse(self.migrate_database.called)
self.assertFalse(self.migrate_nova_database.called)
@patch.object(hooks, 'CONFIGS')
def test_postgresql_db_changed(self, configs):
self._postgresql_db_test(configs)
self.assertTrue(configs.write_all.called)
self.migrate_database.assert_called_with()
self.migrate_nova_database.assert_called_with()
@patch.object(os, 'rename')
@patch.object(os.path, 'isfile')
@ -367,13 +382,15 @@ class NovaCCHooksTests(CharmTestCase):
_identity_joined = self.patch('identity_joined')
self.relation_ids.side_effect = ['relid']
self.canonical_url.return_value = 'http://novaurl'
self.uuid.uuid4.return_value = 'bob'
with patch_open() as (_open, _file):
hooks.neutron_api_relation_joined()
hooks.neutron_api_relation_joined(remote_restart=True)
self.service_stop.assert_called_with('neutron-server')
rename.assert_called_with(neutron_conf, neutron_conf + '_unused')
self.assertTrue(_identity_joined.called)
self.relation_set.assert_called_with(relation_id=None,
nova_url=nova_url)
nova_url=nova_url,
restart_trigger='bob')
@patch.object(hooks, 'CONFIGS')
def test_neutron_api_relation_changed(self, configs):
@ -484,3 +501,28 @@ class NovaCCHooksTests(CharmTestCase):
'console_keymap': 'en-us'
}
self.assertEqual(_con_sets, console_settings)
def test_conditional_neutron_migration_api_rel(self):
self.relation_ids.return_value = ['neutron-api/0']
hooks.conditional_neutron_migration()
self.log.assert_called_with(
'Not running neutron database migration as neutron-api service '
'is present.'
)
def test_conditional_neutron_migration_noapi_rel(self):
self.os_release.return_value = 'juno'
self.relation_ids.return_value = []
self.services.return_value = ['neutron-server']
hooks.conditional_neutron_migration()
self.migrate_neutron_database.assert_called_with()
self.service_restart.assert_called_with('neutron-server')
def test_conditional_neutron_migration_noapi_rel_juno(self):
self.os_release.return_value = 'icehouse'
self.relation_ids.return_value = []
hooks.conditional_neutron_migration()
self.log.assert_called_with(
'Not running neutron database migration as migrations are handled '
'by the neutron-server process.'
)

View File

@ -592,29 +592,29 @@ class NovaCCUtilsTests(CharmTestCase):
_known_hosts.assert_called_with('bar', None)
@patch('subprocess.check_output')
def test_migrate_database(self, check_output):
def test_migrate_nova_database(self, check_output):
"Migrate database with nova-manage"
self.relation_ids.return_value = []
utils.migrate_database()
utils.migrate_nova_database()
check_output.assert_called_with(['nova-manage', 'db', 'sync'])
self.enable_services.assert_called()
self.cmd_all_services.assert_called_with('start')
@patch('subprocess.check_output')
def test_migrate_database_cluster(self, check_output):
def test_migrate_nova_database_cluster(self, check_output):
"Migrate database with nova-manage in a clustered env"
self.relation_ids.return_value = ['cluster:1']
utils.migrate_database()
utils.migrate_nova_database()
check_output.assert_called_with(['nova-manage', 'db', 'sync'])
self.peer_store.assert_called_with('dbsync_state', 'complete')
self.enable_services.assert_called()
self.cmd_all_services.assert_called_with('start')
@patch.object(utils, 'get_step_upgrade_source')
@patch.object(utils, 'migrate_database')
@patch.object(utils, 'migrate_nova_database')
@patch.object(utils, 'determine_packages')
def test_upgrade_grizzly_icehouse(self, determine_packages,
migrate_database,
migrate_nova_database,
get_step_upgrade_source):
"Simulate a call to do_openstack_upgrade() for grizzly->icehouse"
get_step_upgrade_source.return_value = 'cloud:precise-havana'
@ -623,6 +623,7 @@ class NovaCCUtilsTests(CharmTestCase):
'havana',
'icehouse']
self.eligible_leader.return_value = True
self.relation_ids.return_value = []
utils.do_openstack_upgrade()
expected = [call(['stamp', 'grizzly']), call(['upgrade', 'head']),
call(['stamp', 'havana']), call(['upgrade', 'head'])]
@ -634,19 +635,20 @@ class NovaCCUtilsTests(CharmTestCase):
expected = [call(release='havana'), call(release='icehouse')]
self.assertEquals(self.register_configs.call_args_list, expected)
self.assertEquals(self.ml2_migration.call_count, 1)
self.assertTrue(migrate_database.call_count, 2)
self.assertTrue(migrate_nova_database.call_count, 2)
@patch.object(utils, 'get_step_upgrade_source')
@patch.object(utils, 'migrate_database')
@patch.object(utils, 'migrate_nova_database')
@patch.object(utils, 'determine_packages')
def test_upgrade_havana_icehouse(self, determine_packages,
migrate_database,
migrate_nova_database,
get_step_upgrade_source):
"Simulate a call to do_openstack_upgrade() for havana->icehouse"
get_step_upgrade_source.return_value = None
self.os_release.return_value = 'havana'
self.get_os_codename_install_source.return_value = 'icehouse'
self.eligible_leader.return_value = True
self.relation_ids.return_value = []
utils.do_openstack_upgrade()
self.neutron_db_manage.assert_called_with(['upgrade', 'head'])
self.apt_update.assert_called_with(fatal=True)
@ -655,7 +657,28 @@ class NovaCCUtilsTests(CharmTestCase):
self.apt_install.assert_called_with(determine_packages(), fatal=True)
self.register_configs.assert_called_with(release='icehouse')
self.assertEquals(self.ml2_migration.call_count, 1)
self.assertTrue(migrate_database.call_count, 1)
self.assertTrue(migrate_nova_database.call_count, 1)
@patch.object(utils, 'get_step_upgrade_source')
@patch.object(utils, 'migrate_nova_database')
@patch.object(utils, 'determine_packages')
def test_upgrade_havana_icehouse_apirel(self, determine_packages,
migrate_nova_database,
get_step_upgrade_source):
"Simulate a call to do_openstack_upgrade() for havana->icehouse api"
get_step_upgrade_source.return_value = None
self.os_release.return_value = 'havana'
self.get_os_codename_install_source.return_value = 'icehouse'
self.eligible_leader.return_value = True
self.relation_ids.return_value = ['neutron-api/0']
utils.do_openstack_upgrade()
self.apt_update.assert_called_with(fatal=True)
self.apt_upgrade.assert_called_with(options=DPKG_OPTS, fatal=True,
dist=True)
self.apt_install.assert_called_with(determine_packages(), fatal=True)
self.register_configs.assert_called_with(release='icehouse')
self.assertEquals(self.ml2_migration.call_count, 1)
self.assertTrue(migrate_nova_database.call_count, 1)
@patch.object(utils, '_do_openstack_upgrade')
def test_upgrade_grizzly_icehouse_source(self, _do_openstack_upgrade):