synced /next

Cory Benfield 2015-06-08 13:50:27 +01:00
commit 8c04757bc9
19 changed files with 581 additions and 237 deletions

150
README.md
View File

@ -27,14 +27,16 @@ This charm also supports scale out and high availability using the hacluster cha
The minimum openstack-origin-git config required to deploy from source is:
openstack-origin-git:
"repositories:
- {name: requirements,
repository: 'git://git.openstack.org/openstack/requirements',
branch: stable/juno}
- {name: neutron,
repository: 'git://git.openstack.org/openstack/neutron',
branch: stable/juno}"
openstack-origin-git: include-file://neutron-juno.yaml
neutron-juno.yaml
repositories:
- {name: requirements,
repository: 'git://github.com/openstack/requirements',
branch: stable/juno}
- {name: neutron,
repository: 'git://github.com/openstack/neutron',
branch: stable/juno}
Note that the charm only knows about two 'name' values: 'requirements'
and 'neutron'; the configured repositories must use these names.
@ -44,71 +46,73 @@ in the order in which they are specified.
The following is a full list of current tip repos (may not be up-to-date):
openstack-origin-git:
"repositories:
- {name: requirements,
repository: 'git://git.openstack.org/openstack/requirements',
branch: master}
- {name: oslo-concurrency,
repository: 'git://git.openstack.org/openstack/oslo.concurrency',
branch: master}
- {name: oslo-config,
repository: 'git://git.openstack.org/openstack/oslo.config',
branch: master}
- {name: oslo-context,
repository: 'git://git.openstack.org/openstack/oslo.context.git',
branch: master}
- {name: oslo-db,
repository: 'git://git.openstack.org/openstack/oslo.db',
branch: master}
- {name: oslo-i18n,
repository: 'git://git.openstack.org/openstack/oslo.i18n',
branch: master}
- {name: oslo-messaging,
repository: 'git://git.openstack.org/openstack/oslo.messaging.git',
branch: master}
- {name: oslo-middleware,
repository: 'git://git.openstack.org/openstack/oslo.middleware.git',
branch: master}
- {name: oslo-rootwrap,
repository: 'git://git.openstack.org/openstack/oslo.rootwrap.git',
branch: master}
- {name: oslo-serialization,
repository: 'git://git.openstack.org/openstack/oslo.serialization',
branch: master}
- {name: oslo-utils,
repository: 'git://git.openstack.org/openstack/oslo.utils',
branch: master}
- {name: pbr,
repository: 'git://git.openstack.org/openstack-dev/pbr',
branch: master}
- {name: stevedore,
repository: 'git://git.openstack.org/openstack/stevedore.git',
branch: 'master'}
- {name: python-keystoneclient,
repository: 'git://git.openstack.org/openstack/python-keystoneclient',
branch: master}
- {name: python-neutronclient,
repository: 'git://git.openstack.org/openstack/python-neutronclient.git',
branch: master}
- {name: python-novaclient,
repository: 'git://git.openstack.org/openstack/python-novaclient.git',
branch: master}
- {name: keystonemiddleware,
repository: 'git://git.openstack.org/openstack/keystonemiddleware',
branch: master}
- {name: neutron-fwaas,
repository: 'git://git.openstack.org/openstack/neutron-fwaas.git',
branch: master}
- {name: neutron-lbaas,
repository: 'git://git.openstack.org/openstack/neutron-lbaas.git',
branch: master}
- {name: neutron-vpnaas,
repository: 'git://git.openstack.org/openstack/neutron-vpnaas.git',
branch: master}
- {name: neutron,
repository: 'git://git.openstack.org/openstack/neutron',
branch: master}"
openstack-origin-git: include-file://neutron-master.yaml
neutron-master.yaml
repositories:
- {name: requirements,
repository: 'git://github.com/openstack/requirements',
branch: master}
- {name: oslo-concurrency,
repository: 'git://github.com/openstack/oslo.concurrency',
branch: master}
- {name: oslo-config,
repository: 'git://github.com/openstack/oslo.config',
branch: master}
- {name: oslo-context,
repository: 'git://github.com/openstack/oslo.context',
branch: master}
- {name: oslo-db,
repository: 'git://github.com/openstack/oslo.db',
branch: master}
- {name: oslo-i18n,
repository: 'git://github.com/openstack/oslo.i18n',
branch: master}
- {name: oslo-messaging,
repository: 'git://github.com/openstack/oslo.messaging',
branch: master}
- {name: oslo-middleware,
repository: 'git://github.com/openstack/oslo.middleware',
branch: master}
- {name: oslo-rootwrap,
repository: 'git://github.com/openstack/oslo.rootwrap',
branch: master}
- {name: oslo-serialization,
repository: 'git://github.com/openstack/oslo.serialization',
branch: master}
- {name: oslo-utils,
repository: 'git://github.com/openstack/oslo.utils',
branch: master}
- {name: pbr,
repository: 'git://github.com/openstack-dev/pbr',
branch: master}
- {name: stevedore,
repository: 'git://github.com/openstack/stevedore',
branch: 'master'}
- {name: python-keystoneclient,
repository: 'git://github.com/openstack/python-keystoneclient',
branch: master}
- {name: python-neutronclient,
repository: 'git://github.com/openstack/python-neutronclient',
branch: master}
- {name: python-novaclient,
repository: 'git://github.com/openstack/python-novaclient',
branch: master}
- {name: keystonemiddleware,
repository: 'git://github.com/openstack/keystonemiddleware',
branch: master}
- {name: neutron-fwaas,
repository: 'git://github.com/openstack/neutron-fwaas',
branch: master}
- {name: neutron-lbaas,
repository: 'git://github.com/openstack/neutron-lbaas',
branch: master}
- {name: neutron-vpnaas,
repository: 'git://github.com/openstack/neutron-vpnaas',
branch: master}
- {name: neutron,
repository: 'git://github.com/openstack/neutron',
branch: master}
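As a quick pre-deployment sanity check, the repository list can be validated against the ordering convention shown in the examples above (requirements first, the core neutron repository last). The following is only an illustrative PyYAML sketch, not the charm's own validation, and assumes the neutron-master.yaml file shown above:

    import yaml

    # Load the include-file referenced above and inspect the repository order.
    with open('neutron-master.yaml') as f:
        repos = yaml.safe_load(f)['repositories']

    names = [r['name'] for r in repos]
    # Repositories are installed in the order specified, so 'requirements'
    # should come first and the core 'neutron' repository last.
    assert names[0] == 'requirements'
    assert names[-1] == 'neutron'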
# Restrictions

View File

@ -85,11 +85,12 @@ options:
default: gre
type: string
description: |
Overlay network type to use choose one of:
Overlay network types to use, valid options include:
.
gre
vxlan
vxlan
.
Multiple types can be provided - field is space delimited.
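For example, setting overlay-network-type to "gre vxlan" enables both overlay types.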
flat-network-providers:
type: string
default:
@ -99,8 +100,10 @@ options:
type: string
default: "physnet1:1000:2000"
description: |
Space-delimited list of Neutron network-provider & vlan-id-ranges using
the following format "<provider>:<start>:<end> ...".
Space-delimited list of <physical_network>:<vlan_min>:<vlan_max> or
<physical_network> specifying physical_network names usable for VLAN
provider and tenant networks, as well as ranges of VLAN tags on each
available for allocation to tenant networks.
# Quota configuration settings
quota-security-group:
default: 10

View File

@ -52,6 +52,8 @@ from charmhelpers.core.strutils import (
bool_from_string,
)
DC_RESOURCE_NAME = 'DC'
class HAIncompleteConfig(Exception):
pass
@ -95,6 +97,27 @@ def is_clustered():
return False
def is_crm_dc():
"""
Determine leadership by querying the pacemaker Designated Controller
"""
cmd = ['crm', 'status']
try:
status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
if not isinstance(status, six.text_type):
status = six.text_type(status, "utf-8")
except subprocess.CalledProcessError:
return False
current_dc = ''
for line in status.split('\n'):
if line.startswith('Current DC'):
# Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
current_dc = line.split(':')[1].split()[0]
if current_dc == get_unit_hostname():
return True
return False
@retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound)
def is_crm_leader(resource, retry=False):
"""
@ -104,6 +127,8 @@ def is_crm_leader(resource, retry=False):
We allow this operation to be retried to avoid the possibility of getting a
false negative. See LP #1396246 for more info.
"""
if resource == DC_RESOURCE_NAME:
return is_crm_dc()
cmd = ['crm', 'resource', 'show', resource]
try:
status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
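A hook that should only act on the pacemaker Designated Controller can use the DC resource name added above. A minimal sketch, assuming this file is charmhelpers' contrib.hahelpers.cluster module:

    from charmhelpers.contrib.hahelpers.cluster import (
        DC_RESOURCE_NAME,
        is_crm_leader,
    )

    # Short-circuits to is_crm_dc(), i.e. True only on the unit that
    # 'crm status' reports as the Current DC.
    if is_crm_leader(DC_RESOURCE_NAME):
        pass  # DC-only work goes here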

View File

@ -109,7 +109,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
# Must be ordered by OpenStack release (not by Ubuntu release):
(self.precise_essex, self.precise_folsom, self.precise_grizzly,
self.precise_havana, self.precise_icehouse,
self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
self.trusty_kilo, self.vivid_kilo) = range(10)
releases = {

View File

@ -258,11 +258,14 @@ def network_manager():
def parse_mappings(mappings):
parsed = {}
if mappings:
mappings = mappings.split(' ')
mappings = mappings.split()
for m in mappings:
p = m.partition(':')
if p[1] == ':':
parsed[p[0].strip()] = p[2].strip()
key = p[0].strip()
if p[1]:
parsed[key] = p[2].strip()
else:
parsed[key] = ''
return parsed
@ -285,13 +288,13 @@ def parse_data_port_mappings(mappings, default_bridge='br-data'):
Returns dict of the form {bridge:port}.
"""
_mappings = parse_mappings(mappings)
if not _mappings:
if not _mappings or list(_mappings.values()) == ['']:
if not mappings:
return {}
# For backwards-compatibility we need to support port-only provided in
# config.
_mappings = {default_bridge: mappings.split(' ')[0]}
_mappings = {default_bridge: mappings.split()[0]}
bridges = _mappings.keys()
ports = _mappings.values()
@ -311,6 +314,8 @@ def parse_vlan_range_mappings(mappings):
Mappings must be a space-delimited list of provider:start:end mappings.
The start:end range is optional and may be omitted.
Returns dict of the form {provider: (start, end)}.
"""
_mappings = parse_mappings(mappings)
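The effect of the parsing changes above, shown as an illustrative session (assuming these helpers live in charmhelpers.contrib.openstack.neutron):

    from charmhelpers.contrib.openstack.neutron import (
        parse_data_port_mappings,
        parse_mappings,
    )

    # Keys with no value now parse to an empty string instead of being dropped.
    print(parse_mappings('physnet1:br-ex physnet2'))
    # {'physnet1': 'br-ex', 'physnet2': ''}

    # A bare port value is still accepted and mapped to the default bridge.
    print(parse_data_port_mappings('eth0'))
    # {'br-data': 'eth0'}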

View File

@ -53,9 +53,13 @@ from charmhelpers.contrib.network.ip import (
get_ipv6_addr
)
from charmhelpers.contrib.python.packages import (
pip_create_virtualenv,
pip_install,
)
from charmhelpers.core.host import lsb_release, mounts, umount
from charmhelpers.fetch import apt_install, apt_cache, install_remote
from charmhelpers.contrib.python.packages import pip_install
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
@ -497,7 +501,17 @@ def git_install_requested():
requirements_dir = None
def git_clone_and_install(projects_yaml, core_project):
def _git_yaml_load(projects_yaml):
"""
Load the specified yaml into a dictionary.
"""
if not projects_yaml:
return None
return yaml.load(projects_yaml)
def git_clone_and_install(projects_yaml, core_project, depth=1):
"""
Clone/install all specified OpenStack repositories.
@ -510,23 +524,22 @@ def git_clone_and_install(projects_yaml, core_project):
repository: 'git://git.openstack.org/openstack/requirements.git',
branch: 'stable/icehouse'}
directory: /mnt/openstack-git
http_proxy: http://squid.internal:3128
https_proxy: https://squid.internal:3128
http_proxy: squid-proxy-url
https_proxy: squid-proxy-url
The directory, http_proxy, and https_proxy keys are optional.
"""
global requirements_dir
parent_dir = '/mnt/openstack-git'
http_proxy = None
if not projects_yaml:
return
projects = yaml.load(projects_yaml)
projects = _git_yaml_load(projects_yaml)
_git_validate_projects_yaml(projects, core_project)
old_environ = dict(os.environ)
if 'http_proxy' in projects.keys():
http_proxy = projects['http_proxy']
os.environ['http_proxy'] = projects['http_proxy']
if 'https_proxy' in projects.keys():
os.environ['https_proxy'] = projects['https_proxy']
@ -534,15 +547,19 @@ def git_clone_and_install(projects_yaml, core_project):
if 'directory' in projects.keys():
parent_dir = projects['directory']
pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
for p in projects['repositories']:
repo = p['repository']
branch = p['branch']
if p['name'] == 'requirements':
repo_dir = _git_clone_and_install_single(repo, branch, parent_dir,
repo_dir = _git_clone_and_install_single(repo, branch, depth,
parent_dir, http_proxy,
update_requirements=False)
requirements_dir = repo_dir
else:
repo_dir = _git_clone_and_install_single(repo, branch, parent_dir,
repo_dir = _git_clone_and_install_single(repo, branch, depth,
parent_dir, http_proxy,
update_requirements=True)
os.environ = old_environ
@ -574,7 +591,8 @@ def _git_ensure_key_exists(key, keys):
error_out('openstack-origin-git key \'{}\' is missing'.format(key))
def _git_clone_and_install_single(repo, branch, parent_dir, update_requirements):
def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
update_requirements):
"""
Clone and install a single git repository.
"""
@ -587,7 +605,8 @@ def _git_clone_and_install_single(repo, branch, parent_dir, update_requirements)
if not os.path.exists(dest_dir):
juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
repo_dir = install_remote(repo, dest=parent_dir, branch=branch)
repo_dir = install_remote(repo, dest=parent_dir, branch=branch,
depth=depth)
else:
repo_dir = dest_dir
@ -598,7 +617,12 @@ def _git_clone_and_install_single(repo, branch, parent_dir, update_requirements)
_git_update_requirements(repo_dir, requirements_dir)
juju_log('Installing git repo from dir: {}'.format(repo_dir))
pip_install(repo_dir)
if http_proxy:
pip_install(repo_dir, proxy=http_proxy,
venv=os.path.join(parent_dir, 'venv'))
else:
pip_install(repo_dir,
venv=os.path.join(parent_dir, 'venv'))
return repo_dir
@ -621,16 +645,27 @@ def _git_update_requirements(package_dir, reqs_dir):
os.chdir(orig_dir)
def git_pip_venv_dir(projects_yaml):
"""
Return the pip virtualenv path.
"""
parent_dir = '/mnt/openstack-git'
projects = _git_yaml_load(projects_yaml)
if 'directory' in projects.keys():
parent_dir = projects['directory']
return os.path.join(parent_dir, 'venv')
def git_src_dir(projects_yaml, project):
"""
Return the directory where the specified project's source is located.
"""
parent_dir = '/mnt/openstack-git'
if not projects_yaml:
return
projects = yaml.load(projects_yaml)
projects = _git_yaml_load(projects_yaml)
if 'directory' in projects.keys():
parent_dir = projects['directory']
@ -640,3 +675,15 @@ def git_src_dir(projects_yaml, project):
return os.path.join(parent_dir, os.path.basename(p['repository']))
return None
def git_yaml_value(projects_yaml, key):
"""
Return the value in projects_yaml for the specified key.
"""
projects = _git_yaml_load(projects_yaml)
if key in projects.keys():
return projects[key]
return None
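Putting the new helpers together, an install hook might drive them roughly as follows. This is a sketch only, assuming this file is charmhelpers.contrib.openstack.utils; the YAML mirrors the docstring example above:

    from charmhelpers.contrib.openstack.utils import (
        git_clone_and_install,
        git_pip_venv_dir,
        git_yaml_value,
    )

    projects_yaml = """
    repositories:
      - {name: requirements,
         repository: 'git://github.com/openstack/requirements',
         branch: stable/juno}
      - {name: neutron,
         repository: 'git://github.com/openstack/neutron',
         branch: stable/juno}
    directory: /mnt/openstack-git
    """

    # Shallow-clone (depth=1) each repository and pip-install it into the
    # virtualenv that git_clone_and_install() creates under the parent dir.
    git_clone_and_install(projects_yaml, core_project='neutron', depth=1)

    print(git_pip_venv_dir(projects_yaml))             # /mnt/openstack-git/venv
    print(git_yaml_value(projects_yaml, 'directory'))  # /mnt/openstack-git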

View File

@ -17,8 +17,11 @@
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import os
import subprocess
from charmhelpers.fetch import apt_install, apt_update
from charmhelpers.core.hookenv import log
from charmhelpers.core.hookenv import charm_dir, log
try:
from pip import main as pip_execute
@ -51,11 +54,15 @@ def pip_install_requirements(requirements, **options):
pip_execute(command)
def pip_install(package, fatal=False, upgrade=False, **options):
def pip_install(package, fatal=False, upgrade=False, venv=None, **options):
"""Install a python package"""
command = ["install"]
if venv:
venv_python = os.path.join(venv, 'bin/pip')
command = [venv_python, "install"]
else:
command = ["install"]
available_options = ('proxy', 'src', 'log', "index-url", )
available_options = ('proxy', 'src', 'log', 'index-url', )
for option in parse_options(options, available_options):
command.append(option)
@ -69,7 +76,10 @@ def pip_install(package, fatal=False, upgrade=False, **options):
log("Installing {} package with options: {}".format(package,
command))
pip_execute(command)
if venv:
subprocess.check_call(command)
else:
pip_execute(command)
def pip_uninstall(package, **options):
@ -94,3 +104,16 @@ def pip_list():
"""Returns the list of current python installed packages
"""
return pip_execute(["list"])
def pip_create_virtualenv(path=None):
"""Create an isolated Python environment."""
apt_install('python-virtualenv')
if path:
venv_path = path
else:
venv_path = os.path.join(charm_dir(), 'venv')
if not os.path.exists(venv_path):
subprocess.check_call(['virtualenv', venv_path])
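The venv support added here is typically combined with pip_create_virtualenv(); a sketch, with a placeholder package name and proxy URL:

    from charmhelpers.contrib.python.packages import (
        pip_create_virtualenv,
        pip_install,
    )

    pip_create_virtualenv('/mnt/openstack-git/venv')  # no-op if it already exists
    # With venv set, the install runs <venv>/bin/pip via subprocess rather than
    # the pip module bundled with the charm.
    pip_install('pbr', venv='/mnt/openstack-git/venv',
                proxy='http://proxy.example.com:3128')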

View File

@ -21,12 +21,14 @@
# Charm Helpers Developers <juju@lists.ubuntu.com>
from __future__ import print_function
from functools import wraps
import os
import json
import yaml
import subprocess
import sys
import errno
import tempfile
from subprocess import CalledProcessError
import six
@ -58,15 +60,17 @@ def cached(func):
will cache the result of unit_get + 'test' for future calls.
"""
@wraps(func)
def wrapper(*args, **kwargs):
global cache
key = str((func, args, kwargs))
try:
return cache[key]
except KeyError:
res = func(*args, **kwargs)
cache[key] = res
return res
pass # Drop out of the exception handler scope.
res = func(*args, **kwargs)
cache[key] = res
return res
return wrapper
@ -178,7 +182,7 @@ def local_unit():
def remote_unit():
"""The remote unit for the current relation hook"""
return os.environ['JUJU_REMOTE_UNIT']
return os.environ.get('JUJU_REMOTE_UNIT', None)
def service_name():
@ -250,6 +254,12 @@ class Config(dict):
except KeyError:
return (self._prev_dict or {})[key]
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def keys(self):
prev_keys = []
if self._prev_dict is not None:
@ -353,18 +363,49 @@ def relation_set(relation_id=None, relation_settings=None, **kwargs):
"""Set relation information for the current unit"""
relation_settings = relation_settings if relation_settings else {}
relation_cmd_line = ['relation-set']
accepts_file = "--file" in subprocess.check_output(
relation_cmd_line + ["--help"], universal_newlines=True)
if relation_id is not None:
relation_cmd_line.extend(('-r', relation_id))
for k, v in (list(relation_settings.items()) + list(kwargs.items())):
if v is None:
relation_cmd_line.append('{}='.format(k))
else:
relation_cmd_line.append('{}={}'.format(k, v))
subprocess.check_call(relation_cmd_line)
settings = relation_settings.copy()
settings.update(kwargs)
for key, value in settings.items():
# Force value to be a string: it always should be, but some call
# sites pass in things like dicts or numbers.
if value is not None:
settings[key] = "{}".format(value)
if accepts_file:
# --file was introduced in Juju 1.23.2. Use it by default if
# available, since otherwise we'll break if the relation data is
# too big. Ideally we should tell relation-set to read the data from
# stdin, but that feature is broken in 1.23.2: Bug #1454678.
with tempfile.NamedTemporaryFile(delete=False) as settings_file:
settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
subprocess.check_call(
relation_cmd_line + ["--file", settings_file.name])
os.remove(settings_file.name)
else:
for key, value in settings.items():
if value is None:
relation_cmd_line.append('{}='.format(key))
else:
relation_cmd_line.append('{}={}'.format(key, value))
subprocess.check_call(relation_cmd_line)
# Flush cache of any relation-gets for local unit
flush(local_unit())
def relation_clear(r_id=None):
''' Clears any relation data already set on relation r_id '''
settings = relation_get(rid=r_id,
unit=local_unit())
for setting in settings:
if setting not in ['public-address', 'private-address']:
settings[setting] = None
relation_set(relation_id=r_id,
**settings)
@cached
def relation_ids(reltype=None):
"""A list of relation_ids"""
@ -509,6 +550,11 @@ def unit_get(attribute):
return None
def unit_public_ip():
"""Get this unit's public IP address"""
return unit_get('public-address')
def unit_private_ip():
"""Get this unit's private IP address"""
return unit_get('private-address')
@ -605,3 +651,94 @@ def action_fail(message):
The results set by action_set are preserved."""
subprocess.check_call(['action-fail', message])
def status_set(workload_state, message):
"""Set the workload state with a message
Use status-set to set the workload state with a message which is visible
to the user via juju status. If the status-set command is not found then
assume this is juju < 1.23 and juju-log the message instead.
workload_state -- valid juju workload state.
message -- status update message
"""
valid_states = ['maintenance', 'blocked', 'waiting', 'active']
if workload_state not in valid_states:
raise ValueError(
'{!r} is not a valid workload state'.format(workload_state)
)
cmd = ['status-set', workload_state, message]
try:
ret = subprocess.call(cmd)
if ret == 0:
return
except OSError as e:
if e.errno != errno.ENOENT:
raise
log_message = 'status-set failed: {} {}'.format(workload_state,
message)
log(log_message, level='INFO')
def status_get():
"""Retrieve the previously set juju workload state
If the status-set command is not found then assume this is juju < 1.23 and
return 'unknown'
"""
cmd = ['status-get']
try:
raw_status = subprocess.check_output(cmd, universal_newlines=True)
status = raw_status.rstrip()
return status
except OSError as e:
if e.errno == errno.ENOENT:
return 'unknown'
else:
raise
def translate_exc(from_exc, to_exc):
def inner_translate_exc1(f):
def inner_translate_exc2(*args, **kwargs):
try:
return f(*args, **kwargs)
except from_exc:
raise to_exc
return inner_translate_exc2
return inner_translate_exc1
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def is_leader():
"""Does the current unit hold the juju leadership
Uses juju to determine whether the current unit is the leader of its peers
"""
cmd = ['is-leader', '--format=json']
return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def leader_get(attribute=None):
"""Juju leader get value(s)"""
cmd = ['leader-get', '--format=json'] + [attribute or '-']
return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def leader_set(settings=None, **kwargs):
"""Juju leader set value(s)"""
log("Juju leader-set '%s'" % (settings), level=DEBUG)
cmd = ['leader-set']
settings = settings or {}
settings.update(kwargs)
for k, v in settings.items():
if v is None:
cmd.append('{}='.format(k))
else:
cmd.append('{}={}'.format(k, v))
subprocess.check_call(cmd)
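A rough sketch of how a hook might combine the new status and leadership helpers; on juju < 1.23 they fall back as described in the docstrings above:

    from charmhelpers.core.hookenv import (
        is_leader,
        status_set,
        unit_public_ip,
    )

    try:
        leader = is_leader()
    except NotImplementedError:
        # is-leader is not available on older juju versions.
        leader = False

    status_set('maintenance', 'configuring neutron-server')
    # ... configuration work ...
    status_set('active', 'ready on %s' % unit_public_ip())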

View File

@ -90,7 +90,7 @@ def service_available(service_name):
['service', service_name, 'status'],
stderr=subprocess.STDOUT).decode('UTF-8')
except subprocess.CalledProcessError as e:
return 'unrecognized service' not in e.output
return b'unrecognized service' not in e.output
else:
return True

View File

@ -15,9 +15,9 @@
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import json
from collections import Iterable
from inspect import getargspec
from collections import Iterable, OrderedDict
from charmhelpers.core import host
from charmhelpers.core import hookenv
@ -119,7 +119,7 @@ class ServiceManager(object):
"""
self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
self._ready = None
self.services = {}
self.services = OrderedDict()
for service in services or []:
service_name = service['service']
self.services[service_name] = service
@ -132,8 +132,8 @@ class ServiceManager(object):
if hook_name == 'stop':
self.stop_services()
else:
self.provide_data()
self.reconfigure_services()
self.provide_data()
cfg = hookenv.config()
if cfg.implicit_save:
cfg.save()
@ -145,15 +145,36 @@ class ServiceManager(object):
A provider must have a `name` attribute, which indicates which relation
to set data on, and a `provide_data()` method, which returns a dict of
data to set.
The `provide_data()` method can optionally accept two parameters:
* ``remote_service`` The name of the remote service that the data will
be provided to. The `provide_data()` method will be called once
for each connected service (not unit). This allows the method to
tailor its data to the given service.
* ``service_ready`` Whether or not the service definition had all of
its requirements met, and thus the ``data_ready`` callbacks run.
Note that the ``provided_data`` methods are now called **after** the
``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks
a chance to generate any data necessary to provide to the remote
services.
"""
hook_name = hookenv.hook_name()
for service in self.services.values():
for service_name, service in self.services.items():
service_ready = self.is_ready(service_name)
for provider in service.get('provided_data', []):
if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
data = provider.provide_data()
_ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data
if _ready:
hookenv.relation_set(None, data)
for relid in hookenv.relation_ids(provider.name):
units = hookenv.related_units(relid)
if not units:
continue
remote_service = units[0].split('/')[0]
argspec = getargspec(provider.provide_data)
if len(argspec.args) > 1:
data = provider.provide_data(remote_service, service_ready)
else:
data = provider.provide_data()
if data:
hookenv.relation_set(relid, data)
def reconfigure_services(self, *service_names):
"""

View File

@ -158,7 +158,7 @@ def filter_installed_packages(packages):
def apt_cache(in_memory=True):
"""Build and return an apt cache"""
import apt_pkg
from apt import apt_pkg
apt_pkg.init()
if in_memory:
apt_pkg.config.set("Dir::Cache::pkgcache", "")

View File

@ -45,14 +45,16 @@ class GitUrlFetchHandler(BaseFetchHandler):
else:
return True
def clone(self, source, dest, branch):
def clone(self, source, dest, branch, depth=None):
if not self.can_handle(source):
raise UnhandledSource("Cannot handle {}".format(source))
repo = Repo.clone_from(source, dest)
repo.git.checkout(branch)
if depth:
Repo.clone_from(source, dest, branch=branch, depth=depth)
else:
Repo.clone_from(source, dest, branch=branch)
def install(self, source, branch="master", dest=None):
def install(self, source, branch="master", dest=None, depth=None):
url_parts = self.parse_url(source)
branch_name = url_parts.path.strip("/").split("/")[-1]
if dest:
@ -63,7 +65,7 @@ class GitUrlFetchHandler(BaseFetchHandler):
if not os.path.exists(dest_dir):
mkdir(dest_dir, perms=0o755)
try:
self.clone(source, dest_dir, branch)
self.clone(source, dest_dir, branch, depth)
except GitCommandError as e:
raise UnhandledSource(e.message)
except OSError as e:
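With the depth parameter plumbed through, callers can request shallow clones. A sketch, assuming this handler lives in charmhelpers.fetch.giturl:

    from charmhelpers.fetch.giturl import GitUrlFetchHandler

    handler = GitUrlFetchHandler()
    # depth=1 asks GitPython for a shallow clone of just the requested branch.
    repo_dir = handler.install('git://github.com/openstack/neutron',
                               branch='master',
                               dest='/mnt/openstack-git',
                               depth=1)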

View File

@ -14,6 +14,11 @@ from charmhelpers.contrib.openstack.utils import (
os_release,
)
VLAN = 'vlan'
VXLAN = 'vxlan'
GRE = 'gre'
OVERLAY_NET_TYPES = [VXLAN, GRE]
def get_l2population():
plugin = config('neutron-plugin')
@ -21,10 +26,12 @@ def get_l2population():
def get_overlay_network_type():
overlay_net = config('overlay-network-type')
if overlay_net not in ['vxlan', 'gre']:
raise Exception('Unsupported overlay-network-type')
return overlay_net
overlay_networks = config('overlay-network-type').split()
for overlay_net in overlay_networks:
if overlay_net not in OVERLAY_NET_TYPES:
raise ValueError('Unsupported overlay-network-type %s'
% overlay_net)
return ','.join(overlay_networks)
def get_l3ha():
@ -32,10 +39,6 @@ def get_l3ha():
if os_release('neutron-server') < 'juno':
log('Disabling L3 HA, enable-l3ha is not valid before Juno')
return False
if config('overlay-network-type') not in ['vlan', 'gre', 'vxlan']:
log('Disabling L3 HA, enable-l3ha requires the use of the vxlan, '
'vlan or gre overlay network')
return False
if get_l2population():
log('Disabling L3 HA, l2-population must be disabled with L3 HA')
return False
@ -49,10 +52,11 @@ def get_dvr():
if os_release('neutron-server') < 'juno':
log('Disabling DVR, enable-dvr is not valid before Juno')
return False
if config('overlay-network-type') != 'vxlan':
log('Disabling DVR, enable-dvr requires the use of the vxlan '
'overlay network')
return False
if os_release('neutron-server') == 'juno':
if VXLAN not in config('overlay-network-type').split():
log('Disabling DVR, enable-dvr requires the use of the vxlan '
'overlay network for OpenStack Juno')
return False
if get_l3ha():
log('Disabling DVR, enable-l3ha must be disabled with dvr')
return False

View File

@ -12,7 +12,7 @@ description: |
etc.)
.
This charm provides the OpenStack Neutron API service.
categories:
tags:
- openstack
provides:
nrpe-external-master:

View File

@ -8,8 +8,8 @@
type_drivers = local,flat
mechanism_drivers = calico
{% else -%}
type_drivers = gre,vxlan,vlan,flat
tenant_network_types = gre,vxlan,vlan,flat
type_drivers = {{ overlay_network_type }},vlan,flat
tenant_network_types = {{ overlay_network_type }},vlan,flat
mechanism_drivers = openvswitch,hyperv,l2population
[ml2_type_gre]

View File

@ -4,8 +4,8 @@
# Configuration file maintained by Juju. Local changes may be overwritten.
###############################################################################
[ml2]
type_drivers = gre,vxlan,vlan,flat
tenant_network_types = gre,vxlan,vlan,flat
type_drivers = {{ overlay_network_type }},vlan,flat
tenant_network_types = {{ overlay_network_type }},vlan,flat
mechanism_drivers = openvswitch,l2population
[ml2_type_gre]
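For example, with overlay-network-type set to 'gre vxlan', get_overlay_network_type() in the context code above yields 'gre,vxlan', so both templates render type_drivers and tenant_network_types as 'gre,vxlan,vlan,flat'.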

View File

@ -1,7 +1,43 @@
#!/usr/bin/python
"""
Basic neutron-api functional test.
test_* methods are called in sort order.
Convention to ensure desired test order:
1xx service and endpoint checks
2xx relation checks
3xx config checks
4xx functional checks
9xx restarts and other final checks
Common relation definitions:
- [ neutron-api, mysql ]
- [ neutron-api, rabbitmq-server ]
- [ neutron-api, nova-cloud-controller ]
- [ neutron-api, neutron-openvswitch ]
- [ neutron-api, keystone ]
- [ neutron-api, neutron-gateway ]
Resultant relations of neutron-api service:
relations:
amqp:
- rabbitmq-server
cluster:
- neutron-api
identity-service:
- keystone
neutron-api:
- nova-cloud-controller
neutron-plugin-api: # not inspected due to
- neutron-openvswitch # bug 1421388
shared-db:
- mysql
"""
import amulet
import os
import time
import yaml
from charmhelpers.contrib.openstack.amulet.deployment import (
@ -76,10 +112,10 @@ class NeutronAPIBasicDeployment(OpenStackAmuletDeployment):
openstack_origin_git = {
'repositories': [
{'name': 'requirements',
'repository': 'git://git.openstack.org/openstack/requirements',
'repository': 'git://github.com/openstack/requirements',
'branch': branch},
{'name': 'neutron',
'repository': 'git://git.openstack.org/openstack/neutron',
'repository': 'git://github.com/openstack/neutron',
'branch': branch},
],
'directory': '/mnt/openstack-git',
@ -110,9 +146,51 @@ class NeutronAPIBasicDeployment(OpenStackAmuletDeployment):
self._get_openstack_release()))
u.log.debug('openstack release str: {}'.format(
self._get_openstack_release_string()))
# Let things settle a bit before moving forward
time.sleep(30)
def test_neutron_api_shared_db_relation(self):
def test_100_services(self):
"""Verify the expected services are running on the corresponding
service units."""
u.log.debug('Checking status of system services...')
# Fails vivid-kilo, bug 1454754
neutron_api_services = ['status neutron-server']
neutron_services = ['status neutron-dhcp-agent',
'status neutron-lbaas-agent',
'status neutron-metadata-agent',
'status neutron-plugin-openvswitch-agent',
'status neutron-ovs-cleanup']
if self._get_openstack_release() <= self.trusty_juno:
neutron_services.append('status neutron-vpn-agent')
if self._get_openstack_release() < self.trusty_kilo:
# Juno or earlier
neutron_services.append('status neutron-metering-agent')
nova_cc_services = ['status nova-api-ec2',
'status nova-api-os-compute',
'status nova-objectstore',
'status nova-cert',
'status nova-scheduler',
'status nova-conductor']
commands = {
self.mysql_sentry: ['status mysql'],
self.keystone_sentry: ['status keystone'],
self.nova_cc_sentry: nova_cc_services,
self.quantum_gateway_sentry: neutron_services,
self.neutron_api_sentry: neutron_api_services,
}
ret = u.validate_services(commands)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_200_neutron_api_shared_db_relation(self):
"""Verify the neutron-api to mysql shared-db relation data"""
u.log.debug('Checking neutron-api:mysql relation data...')
unit = self.neutron_api_sentry
relation = ['shared-db', 'mysql:shared-db']
expected = {
@ -127,8 +205,9 @@ class NeutronAPIBasicDeployment(OpenStackAmuletDeployment):
message = u.relation_error('neutron-api shared-db', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_shared_db_neutron_api_relation(self):
def test_201_shared_db_neutron_api_relation(self):
"""Verify the mysql to neutron-api shared-db relation data"""
u.log.debug('Checking mysql:neutron-api relation data...')
unit = self.mysql_sentry
relation = ['shared-db', 'neutron-api:shared-db']
expected = {
@ -149,8 +228,9 @@ class NeutronAPIBasicDeployment(OpenStackAmuletDeployment):
message = u.relation_error('mysql shared-db', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_neutron_api_amqp_relation(self):
def test_202_neutron_api_amqp_relation(self):
"""Verify the neutron-api to rabbitmq-server amqp relation data"""
u.log.debug('Checking neutron-api:amqp relation data...')
unit = self.neutron_api_sentry
relation = ['amqp', 'rabbitmq-server:amqp']
expected = {
@ -164,8 +244,9 @@ class NeutronAPIBasicDeployment(OpenStackAmuletDeployment):
message = u.relation_error('neutron-api amqp', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_amqp_neutron_api_relation(self):
def test_203_amqp_neutron_api_relation(self):
"""Verify the rabbitmq-server to neutron-api amqp relation data"""
u.log.debug('Checking amqp:neutron-api relation data...')
unit = self.rabbitmq_sentry
relation = ['amqp', 'neutron-api:amqp']
rel_data = unit.relation('amqp', 'neutron-api:amqp')
@ -179,8 +260,9 @@ class NeutronAPIBasicDeployment(OpenStackAmuletDeployment):
message = u.relation_error('rabbitmq amqp', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_neutron_api_identity_relation(self):
def test_204_neutron_api_identity_relation(self):
"""Verify the neutron-api to keystone identity-service relation data"""
u.log.debug('Checking neutron-api:keystone relation data...')
unit = self.neutron_api_sentry
relation = ['identity-service', 'keystone:identity-service']
api_ip = unit.relation('identity-service',
@ -200,8 +282,9 @@ class NeutronAPIBasicDeployment(OpenStackAmuletDeployment):
message = u.relation_error('neutron-api identity-service', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_keystone_neutron_api_identity_relation(self):
def test_205_keystone_neutron_api_identity_relation(self):
"""Verify the keystone to neutron-api identity-service relation data"""
u.log.debug('Checking keystone:neutron-api relation data...')
unit = self.keystone_sentry
relation = ['identity-service', 'neutron-api:identity-service']
id_relation = unit.relation('identity-service',
@ -220,8 +303,9 @@ class NeutronAPIBasicDeployment(OpenStackAmuletDeployment):
message = u.relation_error('neutron-api identity-service', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_neutron_api_plugin_relation(self):
def test_206_neutron_api_plugin_relation(self):
"""Verify neutron-api to neutron-openvswitch neutron-plugin-api"""
u.log.debug('Checking neutron-api:neutron-ovs relation data...')
unit = self.neutron_api_sentry
relation = ['neutron-plugin-api',
'neutron-openvswitch:neutron-plugin-api']
@ -233,33 +317,9 @@ class NeutronAPIBasicDeployment(OpenStackAmuletDeployment):
message = u.relation_error('neutron-api neutron-plugin-api', ret)
amulet.raise_status(amulet.FAIL, msg=message)
# XXX Missing test to examine the relation data neutron-openvswitch is
# receiving. Currently this data cannot be interrogated due to
# Bug#1421388
def test_z_restart_on_config_change(self):
"""Verify that the specified services are restarted when the config
is changed.
Note(coreycb): The method name with the _z_ is a little odd
but it forces the test to run last. It just makes things
easier because restarting services requires re-authorization.
"""
conf = '/etc/neutron/neutron.conf'
services = ['neutron-server']
self.d.configure('neutron-api', {'use-syslog': 'True'})
stime = 60
for s in services:
if not u.service_restarted(self.neutron_api_sentry, s, conf,
pgrep_full=True, sleep_time=stime):
self.d.configure('neutron-api', {'use-syslog': 'False'})
msg = "service {} didn't restart after config change".format(s)
amulet.raise_status(amulet.FAIL, msg=msg)
stime = 0
self.d.configure('neutron-api', {'use-syslog': 'False'})
def test_neutron_api_novacc_relation(self):
def test_207_neutron_api_novacc_relation(self):
"""Verify the neutron-api to nova-cloud-controller relation data"""
u.log.debug('Checking neutron-api:novacc relation data...')
unit = self.neutron_api_sentry
relation = ['neutron-api', 'nova-cloud-controller:neutron-api']
api_ip = unit.relation('identity-service',
@ -276,8 +336,9 @@ class NeutronAPIBasicDeployment(OpenStackAmuletDeployment):
message = u.relation_error('neutron-api neutron-api', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_novacc_neutron_api_relation(self):
def test_208_novacc_neutron_api_relation(self):
"""Verify the nova-cloud-controller to neutron-api relation data"""
u.log.debug('Checking novacc:neutron-api relation data...')
unit = self.nova_cc_sentry
relation = ['neutron-api', 'neutron-api:neutron-api']
cc_ip = unit.relation('neutron-api',
@ -292,8 +353,37 @@ class NeutronAPIBasicDeployment(OpenStackAmuletDeployment):
message = u.relation_error('nova-cc neutron-api', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_neutron_config(self):
# XXX Missing test to examine the relation data neutron-openvswitch is
# receiving. Currently this data cannot be interrogated due to
# Bug#1421388
def test_900_restart_on_config_change(self):
"""Verify that the specified services are restarted when the config
is changed.
Note(coreycb): The 900 prefix in the method name is a little odd
but it forces the test to run last. It just makes things
easier because restarting services requires re-authorization.
"""
u.log.debug('Checking that services restart on config change...')
conf = '/etc/neutron/neutron.conf'
services = ['neutron-server']
u.log.debug('Making config change on neutron-api service...')
self.d.configure('neutron-api', {'use-syslog': 'True'})
stime = 60
for s in services:
u.log.debug("Checking that service restarted: {}".format(s))
if not u.service_restarted(self.neutron_api_sentry, s, conf,
pgrep_full=True, sleep_time=stime):
self.d.configure('neutron-api', {'use-syslog': 'False'})
msg = "service {} didn't restart after config change".format(s)
amulet.raise_status(amulet.FAIL, msg=msg)
stime = 0
self.d.configure('neutron-api', {'use-syslog': 'False'})
def test_300_neutron_config(self):
"""Verify the data in the neutron config file."""
u.log.debug('Checking neutron.conf config file data...')
unit = self.neutron_api_sentry
cc_relation = self.nova_cc_sentry.relation('neutron-api',
'neutron-api:neutron-api')
@ -372,17 +462,18 @@ class NeutronAPIBasicDeployment(OpenStackAmuletDeployment):
message = "neutron config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_ml2_config(self):
def test_301_ml2_config(self):
"""Verify the data in the ml2 config file. This is only available
since icehouse."""
u.log.debug('Checking ml2 config file data...')
unit = self.neutron_api_sentry
conf = '/etc/neutron/plugins/ml2/ml2_conf.ini'
neutron_api_relation = unit.relation('shared-db', 'mysql:shared-db')
expected = {
'ml2': {
'type_drivers': 'gre,vxlan,vlan,flat',
'tenant_network_types': 'gre,vxlan,vlan,flat',
'type_drivers': 'gre,vlan,flat',
'tenant_network_types': 'gre,vlan,flat',
},
'ml2_type_gre': {
'tunnel_id_ranges': '1:1000'
@ -422,39 +513,3 @@ class NeutronAPIBasicDeployment(OpenStackAmuletDeployment):
if ret:
message = "ml2 config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_services(self):
"""Verify the expected services are running on the corresponding
service units."""
neutron_api_services = ['status neutron-server']
neutron_services = ['status neutron-dhcp-agent',
'status neutron-lbaas-agent',
'status neutron-metadata-agent',
'status neutron-plugin-openvswitch-agent',
'status neutron-ovs-cleanup']
if self._get_openstack_release() <= self.trusty_juno:
neutron_services.append('status neutron-vpn-agent')
if self._get_openstack_release() < self.trusty_kilo:
# Juno or earlier
neutron_services.append('status neutron-metering-agent')
nova_cc_services = ['status nova-api-ec2',
'status nova-api-os-compute',
'status nova-objectstore',
'status nova-cert',
'status nova-scheduler',
'status nova-conductor']
commands = {
self.mysql_sentry: ['status mysql'],
self.keystone_sentry: ['status keystone'],
self.nova_cc_sentry: nova_cc_services,
self.quantum_gateway_sentry: neutron_services,
self.neutron_api_sentry: neutron_api_services,
}
ret = u.validate_services(commands)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)

View File

@ -89,7 +89,11 @@ class AmuletUtils(object):
def _get_config(self, unit, filename):
"""Get a ConfigParser object for parsing a unit's config file."""
file_contents = unit.file_contents(filename)
config = ConfigParser.ConfigParser()
# NOTE(beisner): by default, ConfigParser does not handle options
# with no value, such as the flags used in the mysql my.cnf file.
# https://bugs.python.org/issue7005
config = ConfigParser.ConfigParser(allow_no_value=True)
config.readfp(io.StringIO(file_contents))
return config
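The difference matters for files such as mysql's my.cnf, which contain bare flags. A minimal Python 2 illustration, mirroring the code above:

    import io
    import ConfigParser

    content = u'[mysqld]\nskip-external-locking\n'
    parser = ConfigParser.ConfigParser(allow_no_value=True)
    parser.readfp(io.StringIO(content))
    # Without allow_no_value=True the bare flag raises ParsingError; with it,
    # the option parses and its value is None.
    print(parser.get('mysqld', 'skip-external-locking'))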

View File

@ -34,12 +34,16 @@ class GeneralTests(CharmTestCase):
self.test_config.set('overlay-network-type', 'gre')
self.assertEquals(context.get_overlay_network_type(), 'gre')
def test_get_overlay_network_type_multi(self):
self.test_config.set('overlay-network-type', 'gre vxlan')
self.assertEquals(context.get_overlay_network_type(), 'gre,vxlan')
def test_get_overlay_network_type_unsupported(self):
self.test_config.set('overlay-network-type', 'tokenring')
with self.assertRaises(Exception) as _exceptctxt:
with self.assertRaises(ValueError) as _exceptctxt:
context.get_overlay_network_type()
self.assertEqual(_exceptctxt.exception.message,
'Unsupported overlay-network-type')
'Unsupported overlay-network-type tokenring')
def test_get_l3ha(self):
self.test_config.set('enable-l3ha', True)
@ -65,14 +69,6 @@ class GeneralTests(CharmTestCase):
self.os_release.return_value = 'juno'
self.assertEquals(context.get_l3ha(), False)
def test_get_l3ha_badoverlay(self):
self.test_config.set('enable-l3ha', True)
self.test_config.set('overlay-network-type', 'tokenring')
self.test_config.set('neutron-plugin', 'ovs')
self.test_config.set('l2-population', False)
self.os_release.return_value = 'juno'
self.assertEquals(context.get_l3ha(), False)
def test_get_dvr(self):
self.test_config.set('enable-dvr', True)
self.test_config.set('enable-l3ha', False)
@ -109,6 +105,24 @@ class GeneralTests(CharmTestCase):
self.os_release.return_value = 'juno'
self.assertEquals(context.get_dvr(), False)
def test_get_dvr_gre_kilo(self):
self.test_config.set('enable-dvr', True)
self.test_config.set('enable-l3ha', False)
self.test_config.set('overlay-network-type', 'gre')
self.test_config.set('neutron-plugin', 'ovs')
self.test_config.set('l2-population', True)
self.os_release.return_value = 'kilo'
self.assertEquals(context.get_dvr(), True)
def test_get_dvr_vxlan_kilo(self):
self.test_config.set('enable-dvr', True)
self.test_config.set('enable-l3ha', False)
self.test_config.set('overlay-network-type', 'vxlan')
self.test_config.set('neutron-plugin', 'ovs')
self.test_config.set('l2-population', True)
self.os_release.return_value = 'kilo'
self.assertEquals(context.get_dvr(), True)
def test_get_dvr_l3ha_on(self):
self.test_config.set('enable-dvr', True)
self.test_config.set('enable-l3ha', True)