Merge "Switch charm to new format"
This commit is contained in:
commit
6c96dde8bd
|
@ -1,6 +1,8 @@
|
|||
bin
|
||||
build
|
||||
.settings
|
||||
.testrepository
|
||||
.tox
|
||||
*.pyc
|
||||
*.sw[nop]
|
||||
layers
|
||||
|
|
24
Makefile
24
Makefile
|
@ -1,24 +0,0 @@
|
|||
#!/usr/bin/make
|
||||
PYTHON := /usr/bin/env python
|
||||
CHARM_DIR := $(pwd)
|
||||
export CHARM_DIR
|
||||
|
||||
lint:
|
||||
@tox -e pep8
|
||||
|
||||
test:
|
||||
@echo Starting unit tests...
|
||||
@tox -e py27
|
||||
|
||||
functional_test:
|
||||
@echo Starting Amulet tests...
|
||||
@tox -e func27
|
||||
|
||||
bin/charm_helpers_sync.py:
|
||||
@mkdir -p bin
|
||||
@bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
|
||||
> bin/charm_helpers_sync.py
|
||||
|
||||
sync: bin/charm_helpers_sync.py
|
||||
@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
|
||||
@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
|
|
@ -1,7 +0,0 @@
|
|||
branch: http://bazaar.launchpad.net/~johnsca/charm-helpers/reactive
|
||||
destination: hooks/charmhelpers
|
||||
include:
|
||||
- core
|
||||
- cli
|
||||
- fetch
|
||||
- contrib.network
|
|
@ -1,5 +0,0 @@
|
|||
branch: lp:charm-helpers
|
||||
destination: tests/charmhelpers
|
||||
include:
|
||||
- contrib.amulet
|
||||
- contrib.openstack.amulet
|
|
@ -1,38 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Bootstrap charm-helpers, installing its dependencies if necessary using
|
||||
# only standard libraries.
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
try:
|
||||
import six # flake8: noqa
|
||||
except ImportError:
|
||||
if sys.version_info.major == 2:
|
||||
subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
|
||||
else:
|
||||
subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
|
||||
import six # flake8: noqa
|
||||
|
||||
try:
|
||||
import yaml # flake8: noqa
|
||||
except ImportError:
|
||||
if sys.version_info.major == 2:
|
||||
subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
|
||||
else:
|
||||
subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
|
||||
import yaml # flake8: noqa
|
|
@ -1,195 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import inspect
|
||||
import argparse
|
||||
import sys
|
||||
|
||||
from six.moves import zip
|
||||
|
||||
from charmhelpers.core import unitdata
|
||||
|
||||
|
||||
class OutputFormatter(object):
|
||||
def __init__(self, outfile=sys.stdout):
|
||||
self.formats = (
|
||||
"raw",
|
||||
"json",
|
||||
"py",
|
||||
"yaml",
|
||||
"csv",
|
||||
"tab",
|
||||
)
|
||||
self.outfile = outfile
|
||||
|
||||
def add_arguments(self, argument_parser):
|
||||
formatgroup = argument_parser.add_mutually_exclusive_group()
|
||||
choices = self.supported_formats
|
||||
formatgroup.add_argument("--format", metavar='FMT',
|
||||
help="Select output format for returned data, "
|
||||
"where FMT is one of: {}".format(choices),
|
||||
choices=choices, default='raw')
|
||||
for fmt in self.formats:
|
||||
fmtfunc = getattr(self, fmt)
|
||||
formatgroup.add_argument("-{}".format(fmt[0]),
|
||||
"--{}".format(fmt), action='store_const',
|
||||
const=fmt, dest='format',
|
||||
help=fmtfunc.__doc__)
|
||||
|
||||
@property
|
||||
def supported_formats(self):
|
||||
return self.formats
|
||||
|
||||
def raw(self, output):
|
||||
"""Output data as raw string (default)"""
|
||||
if isinstance(output, (list, tuple)):
|
||||
output = '\n'.join(map(str, output))
|
||||
self.outfile.write(str(output))
|
||||
|
||||
def py(self, output):
|
||||
"""Output data as a nicely-formatted python data structure"""
|
||||
import pprint
|
||||
pprint.pprint(output, stream=self.outfile)
|
||||
|
||||
def json(self, output):
|
||||
"""Output data in JSON format"""
|
||||
import json
|
||||
json.dump(output, self.outfile)
|
||||
|
||||
def yaml(self, output):
|
||||
"""Output data in YAML format"""
|
||||
import yaml
|
||||
yaml.safe_dump(output, self.outfile)
|
||||
|
||||
def csv(self, output):
|
||||
"""Output data as excel-compatible CSV"""
|
||||
import csv
|
||||
csvwriter = csv.writer(self.outfile)
|
||||
csvwriter.writerows(output)
|
||||
|
||||
def tab(self, output):
|
||||
"""Output data in excel-compatible tab-delimited format"""
|
||||
import csv
|
||||
csvwriter = csv.writer(self.outfile, dialect=csv.excel_tab)
|
||||
csvwriter.writerows(output)
|
||||
|
||||
def format_output(self, output, fmt='raw'):
|
||||
fmtfunc = getattr(self, fmt)
|
||||
fmtfunc(output)
|
||||
|
||||
|
||||
class CommandLine(object):
|
||||
argument_parser = None
|
||||
subparsers = None
|
||||
formatter = None
|
||||
exit_code = 0
|
||||
|
||||
def __init__(self):
|
||||
if not self.argument_parser:
|
||||
self.argument_parser = argparse.ArgumentParser(description='Perform common charm tasks')
|
||||
if not self.formatter:
|
||||
self.formatter = OutputFormatter()
|
||||
self.formatter.add_arguments(self.argument_parser)
|
||||
if not self.subparsers:
|
||||
self.subparsers = self.argument_parser.add_subparsers(help='Commands')
|
||||
|
||||
def subcommand(self, command_name=None):
|
||||
"""
|
||||
Decorate a function as a subcommand. Use its arguments as the
|
||||
command-line arguments"""
|
||||
def wrapper(decorated):
|
||||
cmd_name = command_name or decorated.__name__
|
||||
subparser = self.subparsers.add_parser(cmd_name,
|
||||
description=decorated.__doc__)
|
||||
for args, kwargs in describe_arguments(decorated):
|
||||
subparser.add_argument(*args, **kwargs)
|
||||
subparser.set_defaults(func=decorated)
|
||||
return decorated
|
||||
return wrapper
|
||||
|
||||
def test_command(self, decorated):
|
||||
"""
|
||||
Subcommand is a boolean test function, so bool return values should be
|
||||
converted to a 0/1 exit code.
|
||||
"""
|
||||
decorated._cli_test_command = True
|
||||
return decorated
|
||||
|
||||
def no_output(self, decorated):
|
||||
"""
|
||||
Subcommand is not expected to return a value, so don't print a spurious None.
|
||||
"""
|
||||
decorated._cli_no_output = True
|
||||
return decorated
|
||||
|
||||
def subcommand_builder(self, command_name, description=None):
|
||||
"""
|
||||
Decorate a function that builds a subcommand. Builders should accept a
|
||||
single argument (the subparser instance) and return the function to be
|
||||
run as the command."""
|
||||
def wrapper(decorated):
|
||||
subparser = self.subparsers.add_parser(command_name)
|
||||
func = decorated(subparser)
|
||||
subparser.set_defaults(func=func)
|
||||
subparser.description = description or func.__doc__
|
||||
return wrapper
|
||||
|
||||
def run(self):
|
||||
"Run cli, processing arguments and executing subcommands."
|
||||
arguments = self.argument_parser.parse_args()
|
||||
argspec = inspect.getargspec(arguments.func)
|
||||
vargs = []
|
||||
kwargs = {}
|
||||
for arg in argspec.args:
|
||||
vargs.append(getattr(arguments, arg))
|
||||
if argspec.varargs:
|
||||
vargs.extend(getattr(arguments, argspec.varargs))
|
||||
if argspec.keywords:
|
||||
for kwarg in argspec.keywords.items():
|
||||
kwargs[kwarg] = getattr(arguments, kwarg)
|
||||
output = arguments.func(*vargs, **kwargs)
|
||||
if getattr(arguments.func, '_cli_test_command', False):
|
||||
self.exit_code = 0 if output else 1
|
||||
output = ''
|
||||
if getattr(arguments.func, '_cli_no_output', False):
|
||||
output = ''
|
||||
self.formatter.format_output(output, arguments.format)
|
||||
if unitdata._KV:
|
||||
unitdata._KV.flush()
|
||||
|
||||
|
||||
cmdline = CommandLine()
|
||||
|
||||
|
||||
def describe_arguments(func):
|
||||
"""
|
||||
Analyze a function's signature and return a data structure suitable for
|
||||
passing in as arguments to an argparse parser's add_argument() method."""
|
||||
|
||||
argspec = inspect.getargspec(func)
|
||||
# we should probably raise an exception somewhere if func includes **kwargs
|
||||
if argspec.defaults:
|
||||
positional_args = argspec.args[:-len(argspec.defaults)]
|
||||
keyword_names = argspec.args[-len(argspec.defaults):]
|
||||
for arg, default in zip(keyword_names, argspec.defaults):
|
||||
yield ('--{}'.format(arg),), {'default': default}
|
||||
else:
|
||||
positional_args = argspec.args
|
||||
|
||||
for arg in positional_args:
|
||||
yield (arg,), {}
|
||||
if argspec.varargs:
|
||||
yield (argspec.varargs,), {'nargs': '*'}
|
|
@ -1,36 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from . import cmdline
|
||||
from charmhelpers.contrib.benchmark import Benchmark
|
||||
|
||||
|
||||
@cmdline.subcommand(command_name='benchmark-start')
|
||||
def start():
|
||||
Benchmark.start()
|
||||
|
||||
|
||||
@cmdline.subcommand(command_name='benchmark-finish')
|
||||
def finish():
|
||||
Benchmark.finish()
|
||||
|
||||
|
||||
@cmdline.subcommand_builder('benchmark-composite', description="Set the benchmark composite score")
|
||||
def service(subparser):
|
||||
subparser.add_argument("value", help="The composite score.")
|
||||
subparser.add_argument("units", help="The units the composite score represents, i.e., 'reads/sec'.")
|
||||
subparser.add_argument("direction", help="'asc' if a lower score is better, 'desc' if a higher score is better.")
|
||||
return Benchmark.set_composite_score
|
|
@ -1,35 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
"""
|
||||
This module loads sub-modules into the python runtime so they can be
|
||||
discovered via the inspect module. In order to prevent flake8 from (rightfully)
|
||||
telling us these are unused modules, throw a ' # noqa' at the end of each import
|
||||
so that the warning is suppressed.
|
||||
"""
|
||||
|
||||
from . import CommandLine # noqa
|
||||
|
||||
"""
|
||||
Import the sub-modules to be included by chlp.
|
||||
"""
|
||||
from . import host # noqa
|
||||
from . import benchmark # noqa
|
||||
from . import unitdata # noqa
|
||||
from . import reactive # noqa
|
||||
from charmhelpers.core import hookenv # noqa
|
||||
import charmhelpers.core.reactive.relations # noqa
|
||||
import charmhelpers.core.reactive.bus # noqa
|
|
@ -1,31 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from . import cmdline
|
||||
from charmhelpers.core import host
|
||||
|
||||
|
||||
@cmdline.subcommand()
|
||||
def mounts():
|
||||
"List mounts"
|
||||
return host.mounts()
|
||||
|
||||
|
||||
@cmdline.subcommand_builder('service', description="Control system services")
|
||||
def service(subparser):
|
||||
subparser.add_argument("action", help="The action to perform (start, stop, etc...)")
|
||||
subparser.add_argument("service_name", help="Name of the service to control")
|
||||
return host.service
|
|
@ -1,72 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from . import cmdline
|
||||
from charmhelpers.core.reactive import helpers
|
||||
|
||||
|
||||
@cmdline.subcommand()
|
||||
@cmdline.test_command
|
||||
def hook(*hook_patterns):
|
||||
"""
|
||||
Check if the current hook matches one of the patterns.
|
||||
"""
|
||||
return helpers._hook(hook_patterns)
|
||||
|
||||
|
||||
@cmdline.subcommand()
|
||||
@cmdline.test_command
|
||||
def when(handler_id, *desired_states):
|
||||
"""
|
||||
Check if all of the desired_states are active and have changed.
|
||||
"""
|
||||
return helpers._when(handler_id, desired_states, False)
|
||||
|
||||
|
||||
@cmdline.subcommand()
|
||||
@cmdline.test_command
|
||||
def when_not(handler_id, *desired_states):
|
||||
"""
|
||||
Check if not all of the desired_states are active and have changed.
|
||||
"""
|
||||
return helpers._when(handler_id, desired_states, True)
|
||||
|
||||
|
||||
@cmdline.subcommand()
|
||||
@cmdline.test_command
|
||||
def when_file_changed(*filenames):
|
||||
"""
|
||||
Check if files have changed since the last time they were checked.
|
||||
"""
|
||||
return helpers.any_file_changed(filenames)
|
||||
|
||||
|
||||
@cmdline.subcommand()
|
||||
@cmdline.test_command
|
||||
def only_once(handler_id):
|
||||
"""
|
||||
Check if handler has already been run in the past.
|
||||
"""
|
||||
return not helpers.was_invoked(handler_id)
|
||||
|
||||
|
||||
@cmdline.subcommand()
|
||||
@cmdline.no_output
|
||||
def mark_invoked(handler_id):
|
||||
"""
|
||||
Record that the handler has been invoked, for use with only_once.
|
||||
"""
|
||||
helpers.mark_invoked(handler_id)
|
|
@ -1,39 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from . import cmdline
|
||||
from charmhelpers.core import unitdata
|
||||
|
||||
|
||||
@cmdline.subcommand_builder('unitdata', description="Store and retrieve data")
|
||||
def unitdata_cmd(subparser):
|
||||
nested = subparser.add_subparsers()
|
||||
get_cmd = nested.add_parser('get', help='Retrieve data')
|
||||
get_cmd.add_argument('key', help='Key to retrieve the value of')
|
||||
get_cmd.set_defaults(action='get', value=None)
|
||||
set_cmd = nested.add_parser('set', help='Store data')
|
||||
set_cmd.add_argument('key', help='Key to set')
|
||||
set_cmd.add_argument('value', help='Value to store')
|
||||
set_cmd.set_defaults(action='set')
|
||||
|
||||
def _unitdata_cmd(action, key, value):
|
||||
if action == 'get':
|
||||
return unitdata.kv().get(key)
|
||||
elif action == 'set':
|
||||
unitdata.kv().set(key, value)
|
||||
unitdata.kv().flush()
|
||||
return ''
|
||||
return _unitdata_cmd
|
|
@ -1,15 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
@ -1,15 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
@ -1,450 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import glob
|
||||
import re
|
||||
import subprocess
|
||||
import six
|
||||
import socket
|
||||
|
||||
from functools import partial
|
||||
|
||||
from charmhelpers.core.hookenv import unit_get
|
||||
from charmhelpers.fetch import apt_install
|
||||
from charmhelpers.core.hookenv import (
|
||||
log,
|
||||
WARNING,
|
||||
)
|
||||
|
||||
try:
|
||||
import netifaces
|
||||
except ImportError:
|
||||
apt_install('python-netifaces')
|
||||
import netifaces
|
||||
|
||||
try:
|
||||
import netaddr
|
||||
except ImportError:
|
||||
apt_install('python-netaddr')
|
||||
import netaddr
|
||||
|
||||
|
||||
def _validate_cidr(network):
|
||||
try:
|
||||
netaddr.IPNetwork(network)
|
||||
except (netaddr.core.AddrFormatError, ValueError):
|
||||
raise ValueError("Network (%s) is not in CIDR presentation format" %
|
||||
network)
|
||||
|
||||
|
||||
def no_ip_found_error_out(network):
|
||||
errmsg = ("No IP address found in network: %s" % network)
|
||||
raise ValueError(errmsg)
|
||||
|
||||
|
||||
def get_address_in_network(network, fallback=None, fatal=False):
|
||||
"""Get an IPv4 or IPv6 address within the network from the host.
|
||||
|
||||
:param network (str): CIDR presentation format. For example,
|
||||
'192.168.1.0/24'.
|
||||
:param fallback (str): If no address is found, return fallback.
|
||||
:param fatal (boolean): If no address is found, fallback is not
|
||||
set and fatal is True then exit(1).
|
||||
"""
|
||||
if network is None:
|
||||
if fallback is not None:
|
||||
return fallback
|
||||
|
||||
if fatal:
|
||||
no_ip_found_error_out(network)
|
||||
else:
|
||||
return None
|
||||
|
||||
_validate_cidr(network)
|
||||
network = netaddr.IPNetwork(network)
|
||||
for iface in netifaces.interfaces():
|
||||
addresses = netifaces.ifaddresses(iface)
|
||||
if network.version == 4 and netifaces.AF_INET in addresses:
|
||||
addr = addresses[netifaces.AF_INET][0]['addr']
|
||||
netmask = addresses[netifaces.AF_INET][0]['netmask']
|
||||
cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
|
||||
if cidr in network:
|
||||
return str(cidr.ip)
|
||||
|
||||
if network.version == 6 and netifaces.AF_INET6 in addresses:
|
||||
for addr in addresses[netifaces.AF_INET6]:
|
||||
if not addr['addr'].startswith('fe80'):
|
||||
cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
|
||||
addr['netmask']))
|
||||
if cidr in network:
|
||||
return str(cidr.ip)
|
||||
|
||||
if fallback is not None:
|
||||
return fallback
|
||||
|
||||
if fatal:
|
||||
no_ip_found_error_out(network)
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def is_ipv6(address):
|
||||
"""Determine whether provided address is IPv6 or not."""
|
||||
try:
|
||||
address = netaddr.IPAddress(address)
|
||||
except netaddr.AddrFormatError:
|
||||
# probably a hostname - so not an address at all!
|
||||
return False
|
||||
|
||||
return address.version == 6
|
||||
|
||||
|
||||
def is_address_in_network(network, address):
|
||||
"""
|
||||
Determine whether the provided address is within a network range.
|
||||
|
||||
:param network (str): CIDR presentation format. For example,
|
||||
'192.168.1.0/24'.
|
||||
:param address: An individual IPv4 or IPv6 address without a net
|
||||
mask or subnet prefix. For example, '192.168.1.1'.
|
||||
:returns boolean: Flag indicating whether address is in network.
|
||||
"""
|
||||
try:
|
||||
network = netaddr.IPNetwork(network)
|
||||
except (netaddr.core.AddrFormatError, ValueError):
|
||||
raise ValueError("Network (%s) is not in CIDR presentation format" %
|
||||
network)
|
||||
|
||||
try:
|
||||
address = netaddr.IPAddress(address)
|
||||
except (netaddr.core.AddrFormatError, ValueError):
|
||||
raise ValueError("Address (%s) is not in correct presentation format" %
|
||||
address)
|
||||
|
||||
if address in network:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def _get_for_address(address, key):
|
||||
"""Retrieve an attribute of or the physical interface that
|
||||
the IP address provided could be bound to.
|
||||
|
||||
:param address (str): An individual IPv4 or IPv6 address without a net
|
||||
mask or subnet prefix. For example, '192.168.1.1'.
|
||||
:param key: 'iface' for the physical interface name or an attribute
|
||||
of the configured interface, for example 'netmask'.
|
||||
:returns str: Requested attribute or None if address is not bindable.
|
||||
"""
|
||||
address = netaddr.IPAddress(address)
|
||||
for iface in netifaces.interfaces():
|
||||
addresses = netifaces.ifaddresses(iface)
|
||||
if address.version == 4 and netifaces.AF_INET in addresses:
|
||||
addr = addresses[netifaces.AF_INET][0]['addr']
|
||||
netmask = addresses[netifaces.AF_INET][0]['netmask']
|
||||
network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
|
||||
cidr = network.cidr
|
||||
if address in cidr:
|
||||
if key == 'iface':
|
||||
return iface
|
||||
else:
|
||||
return addresses[netifaces.AF_INET][0][key]
|
||||
|
||||
if address.version == 6 and netifaces.AF_INET6 in addresses:
|
||||
for addr in addresses[netifaces.AF_INET6]:
|
||||
if not addr['addr'].startswith('fe80'):
|
||||
network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
|
||||
addr['netmask']))
|
||||
cidr = network.cidr
|
||||
if address in cidr:
|
||||
if key == 'iface':
|
||||
return iface
|
||||
elif key == 'netmask' and cidr:
|
||||
return str(cidr).split('/')[1]
|
||||
else:
|
||||
return addr[key]
|
||||
|
||||
return None
|
||||
|
||||
|
||||
get_iface_for_address = partial(_get_for_address, key='iface')
|
||||
|
||||
|
||||
get_netmask_for_address = partial(_get_for_address, key='netmask')
|
||||
|
||||
|
||||
def format_ipv6_addr(address):
|
||||
"""If address is IPv6, wrap it in '[]' otherwise return None.
|
||||
|
||||
This is required by most configuration files when specifying IPv6
|
||||
addresses.
|
||||
"""
|
||||
if is_ipv6(address):
|
||||
return "[%s]" % address
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
|
||||
fatal=True, exc_list=None):
|
||||
"""Return the assigned IP address for a given interface, if any."""
|
||||
# Extract nic if passed /dev/ethX
|
||||
if '/' in iface:
|
||||
iface = iface.split('/')[-1]
|
||||
|
||||
if not exc_list:
|
||||
exc_list = []
|
||||
|
||||
try:
|
||||
inet_num = getattr(netifaces, inet_type)
|
||||
except AttributeError:
|
||||
raise Exception("Unknown inet type '%s'" % str(inet_type))
|
||||
|
||||
interfaces = netifaces.interfaces()
|
||||
if inc_aliases:
|
||||
ifaces = []
|
||||
for _iface in interfaces:
|
||||
if iface == _iface or _iface.split(':')[0] == iface:
|
||||
ifaces.append(_iface)
|
||||
|
||||
if fatal and not ifaces:
|
||||
raise Exception("Invalid interface '%s'" % iface)
|
||||
|
||||
ifaces.sort()
|
||||
else:
|
||||
if iface not in interfaces:
|
||||
if fatal:
|
||||
raise Exception("Interface '%s' not found " % (iface))
|
||||
else:
|
||||
return []
|
||||
|
||||
else:
|
||||
ifaces = [iface]
|
||||
|
||||
addresses = []
|
||||
for netiface in ifaces:
|
||||
net_info = netifaces.ifaddresses(netiface)
|
||||
if inet_num in net_info:
|
||||
for entry in net_info[inet_num]:
|
||||
if 'addr' in entry and entry['addr'] not in exc_list:
|
||||
addresses.append(entry['addr'])
|
||||
|
||||
if fatal and not addresses:
|
||||
raise Exception("Interface '%s' doesn't have any %s addresses." %
|
||||
(iface, inet_type))
|
||||
|
||||
return sorted(addresses)
|
||||
|
||||
|
||||
get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
|
||||
|
||||
|
||||
def get_iface_from_addr(addr):
|
||||
"""Work out on which interface the provided address is configured."""
|
||||
for iface in netifaces.interfaces():
|
||||
addresses = netifaces.ifaddresses(iface)
|
||||
for inet_type in addresses:
|
||||
for _addr in addresses[inet_type]:
|
||||
_addr = _addr['addr']
|
||||
# link local
|
||||
ll_key = re.compile("(.+)%.*")
|
||||
raw = re.match(ll_key, _addr)
|
||||
if raw:
|
||||
_addr = raw.group(1)
|
||||
|
||||
if _addr == addr:
|
||||
log("Address '%s' is configured on iface '%s'" %
|
||||
(addr, iface))
|
||||
return iface
|
||||
|
||||
msg = "Unable to infer net iface on which '%s' is configured" % (addr)
|
||||
raise Exception(msg)
|
||||
|
||||
|
||||
def sniff_iface(f):
|
||||
"""Ensure decorated function is called with a value for iface.
|
||||
|
||||
If no iface provided, inject net iface inferred from unit private address.
|
||||
"""
|
||||
def iface_sniffer(*args, **kwargs):
|
||||
if not kwargs.get('iface', None):
|
||||
kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
|
||||
|
||||
return f(*args, **kwargs)
|
||||
|
||||
return iface_sniffer
|
||||
|
||||
|
||||
@sniff_iface
|
||||
def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
|
||||
dynamic_only=True):
|
||||
"""Get assigned IPv6 address for a given interface.
|
||||
|
||||
Returns list of addresses found. If no address found, returns empty list.
|
||||
|
||||
If iface is None, we infer the current primary interface by doing a reverse
|
||||
lookup on the unit private-address.
|
||||
|
||||
We currently only support scope global IPv6 addresses i.e. non-temporary
|
||||
addresses. If no global IPv6 address is found, return the first one found
|
||||
in the ipv6 address list.
|
||||
"""
|
||||
addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
|
||||
inc_aliases=inc_aliases, fatal=fatal,
|
||||
exc_list=exc_list)
|
||||
|
||||
if addresses:
|
||||
global_addrs = []
|
||||
for addr in addresses:
|
||||
key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
|
||||
m = re.match(key_scope_link_local, addr)
|
||||
if m:
|
||||
eui_64_mac = m.group(1)
|
||||
iface = m.group(2)
|
||||
else:
|
||||
global_addrs.append(addr)
|
||||
|
||||
if global_addrs:
|
||||
# Make sure any found global addresses are not temporary
|
||||
cmd = ['ip', 'addr', 'show', iface]
|
||||
out = subprocess.check_output(cmd).decode('UTF-8')
|
||||
if dynamic_only:
|
||||
key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
|
||||
else:
|
||||
key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
|
||||
|
||||
addrs = []
|
||||
for line in out.split('\n'):
|
||||
line = line.strip()
|
||||
m = re.match(key, line)
|
||||
if m and 'temporary' not in line:
|
||||
# Return the first valid address we find
|
||||
for addr in global_addrs:
|
||||
if m.group(1) == addr:
|
||||
if not dynamic_only or \
|
||||
m.group(1).endswith(eui_64_mac):
|
||||
addrs.append(addr)
|
||||
|
||||
if addrs:
|
||||
return addrs
|
||||
|
||||
if fatal:
|
||||
raise Exception("Interface '%s' does not have a scope global "
|
||||
"non-temporary ipv6 address." % iface)
|
||||
|
||||
return []
|
||||
|
||||
|
||||
def get_bridges(vnic_dir='/sys/devices/virtual/net'):
|
||||
"""Return a list of bridges on the system."""
|
||||
b_regex = "%s/*/bridge" % vnic_dir
|
||||
return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
|
||||
|
||||
|
||||
def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
    """Return a list of nics comprising a given bridge on the system."""
    # Each port attached to the bridge appears as an entry under brif/.
    port_glob = "%s/%s/brif/*" % (vnic_dir, bridge)
    nics = []
    for entry in glob.glob(port_glob):
        nics.append(entry.split('/')[-1])
    return nics
|
||||
|
||||
|
||||
def is_bridge_member(nic):
    """Check if a given nic is a member of a bridge."""
    # True as soon as any bridge on the host lists this nic as a port.
    return any(nic in get_bridge_nics(bridge) for bridge in get_bridges())
|
||||
|
||||
|
||||
def is_ip(address):
    """
    Returns True if address is a valid IP address.

    NOTE: only IPv4 is recognised here (socket.inet_aton); IPv6 literals
    return False.
    """
    try:
        socket.inet_aton(address)
    except socket.error:
        # Not parseable as a dotted-quad IPv4 address.
        return False
    else:
        return True
|
||||
|
||||
|
||||
def ns_query(address):
    """Resolve *address* via DNS and return the first answer as a string.

    A dns.name.Name is looked up as a PTR record, a plain string as an
    A record; any other type returns None, as does an empty answer set.
    """
    try:
        import dns.resolver
    except ImportError:
        # dnspython is not part of the base install; pull it in on demand.
        apt_install('python-dnspython')
        import dns.resolver

    if isinstance(address, dns.name.Name):
        record_type = 'PTR'
    elif isinstance(address, six.string_types):
        record_type = 'A'
    else:
        return None

    answers = dns.resolver.query(address, record_type)
    return str(answers[0]) if answers else None
|
||||
|
||||
|
||||
def get_host_ip(hostname, fallback=None):
    """
    Resolves the IP for a given hostname, or returns
    the input if it is already an IP.

    :param hostname: hostname (or IPv4 address, returned unchanged).
    :param fallback: value returned when the name cannot be resolved.
    :returns: resolved IP address string, or `fallback` on failure.
    """
    if is_ip(hostname):
        return hostname

    # Try DNS first, then the system resolver (/etc/hosts etc.).
    ip_addr = ns_query(hostname)
    if not ip_addr:
        try:
            ip_addr = socket.gethostbyname(hostname)
        except Exception:
            # Fix: was a bare 'except:', which also swallowed
            # SystemExit/KeyboardInterrupt; those now propagate.
            log("Failed to resolve hostname '%s'" % (hostname),
                level=WARNING)
            return fallback
    return ip_addr
|
||||
|
||||
|
||||
def get_hostname(address, fqdn=True):
    """
    Resolves hostname for given IP, or returns the input
    if it is already a hostname.

    :param address: IPv4 address to reverse-resolve; any non-IP input is
        treated as already being a hostname and only reformatted below.
    :param fqdn: if True return the full name (minus any trailing dot);
        otherwise return only the first dot-separated label.
    :returns: hostname string, or None if the reverse lookup yields nothing.
    """
    if is_ip(address):
        try:
            import dns.reversename
        except ImportError:
            # dnspython is installed on demand rather than declared up front.
            apt_install("python-dnspython")
            import dns.reversename

        # Reverse-resolve the IP via a PTR query on the in-addr.arpa name.
        rev = dns.reversename.from_address(address)
        result = ns_query(rev)
        if not result:
            return None
    else:
        result = address

    if fqdn:
        # strip trailing .
        if result.endswith('.'):
            return result[:-1]
        else:
            return result
    else:
        return result.split('.')[0]
|
|
@ -1,96 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
''' Helpers for interacting with OpenvSwitch '''
|
||||
import subprocess
|
||||
import os
|
||||
from charmhelpers.core.hookenv import (
|
||||
log, WARNING
|
||||
)
|
||||
from charmhelpers.core.host import (
|
||||
service
|
||||
)
|
||||
|
||||
|
||||
def add_bridge(name):
    ''' Add the named bridge to openvswitch '''
    log('Creating bridge {}'.format(name))
    # --may-exist makes the call idempotent.
    cmd = ["ovs-vsctl", "--", "--may-exist", "add-br", name]
    subprocess.check_call(cmd)
|
||||
|
||||
|
||||
def del_bridge(name):
    ''' Delete the named bridge from openvswitch '''
    log('Deleting bridge {}'.format(name))
    # --if-exists makes the call idempotent.
    cmd = ["ovs-vsctl", "--", "--if-exists", "del-br", name]
    subprocess.check_call(cmd)
|
||||
|
||||
|
||||
def add_bridge_port(name, port, promisc=False):
    ''' Add a port to the named openvswitch bridge '''
    log('Adding port {} to bridge {}'.format(port, name))
    subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-port",
                           name, port])
    subprocess.check_call(["ip", "link", "set", port, "up"])
    # Explicitly set promiscuous mode to the requested state either way.
    promisc_state = "on" if promisc else "off"
    subprocess.check_call(["ip", "link", "set", port, "promisc",
                           promisc_state])
|
||||
|
||||
|
||||
def del_bridge_port(name, port):
    ''' Delete a port from the named openvswitch bridge '''
    log('Deleting port {} from bridge {}'.format(port, name))
    # Detach from the bridge, then take the link down and out of promisc.
    commands = (
        ["ovs-vsctl", "--", "--if-exists", "del-port", name, port],
        ["ip", "link", "set", port, "down"],
        ["ip", "link", "set", port, "promisc", "off"],
    )
    for cmd in commands:
        subprocess.check_call(cmd)
|
||||
|
||||
|
||||
def set_manager(manager):
    ''' Set the controller for the local openvswitch '''
    log('Setting manager for local ovs to {}'.format(manager))
    # The manager is always contacted over SSL.
    target = 'ssl:{}'.format(manager)
    subprocess.check_call(['ovs-vsctl', 'set-manager', target])
|
||||
|
||||
|
||||
CERT_PATH = '/etc/openvswitch/ovsclient-cert.pem'
|
||||
|
||||
|
||||
def get_certificate():
    ''' Read openvswitch certificate from disk '''
    if not os.path.exists(CERT_PATH):
        log('Certificate not found', level=WARNING)
        return None

    log('Reading ovs certificate from {}'.format(CERT_PATH))
    with open(CERT_PATH, 'r') as cert:
        full_cert = cert.read()

    begin_marker = "-----BEGIN CERTIFICATE-----"
    end_marker = "-----END CERTIFICATE-----"
    begin_index = full_cert.find(begin_marker)
    end_index = full_cert.rfind(end_marker)
    if end_index == -1 or begin_index == -1:
        raise RuntimeError("Certificate does not contain valid begin"
                           " and end markers.")
    # Trim any surrounding text, keeping the markers themselves.
    return full_cert[begin_index:(end_index + len(end_marker))]
|
||||
|
||||
|
||||
def full_restart():
    ''' Full restart and reload of openvswitch '''
    force_reload_job = '/etc/init/openvswitch-force-reload-kmod.conf'
    if os.path.exists(force_reload_job):
        # Dedicated upstart job exists: start it to force a kmod reload.
        service('start', 'openvswitch-force-reload-kmod')
    else:
        service('force-reload-kmod', 'openvswitch-switch')
|
|
@ -1,319 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
"""
|
||||
This module contains helpers to add and remove ufw rules.
|
||||
|
||||
Examples:
|
||||
|
||||
- open SSH port for subnet 10.0.3.0/24:
|
||||
|
||||
>>> from charmhelpers.contrib.network import ufw
|
||||
>>> ufw.enable()
|
||||
>>> ufw.grant_access(src='10.0.3.0/24', dst='any', port='22', proto='tcp')
|
||||
|
||||
- open service by name as defined in /etc/services:
|
||||
|
||||
>>> from charmhelpers.contrib.network import ufw
|
||||
>>> ufw.enable()
|
||||
>>> ufw.service('ssh', 'open')
|
||||
|
||||
- close service by port number:
|
||||
|
||||
>>> from charmhelpers.contrib.network import ufw
|
||||
>>> ufw.enable()
|
||||
>>> ufw.service('4949', 'close') # munin
|
||||
"""
|
||||
import re
|
||||
import os
|
||||
import subprocess
|
||||
from charmhelpers.core import hookenv
|
||||
|
||||
__author__ = "Felipe Reyes <felipe.reyes@canonical.com>"
|
||||
|
||||
|
||||
class UFWError(Exception):
    """Base exception for failures raised by the ufw helpers."""
    pass
|
||||
|
||||
|
||||
class UFWIPv6Error(UFWError):
    """Raised when IPv6 firewall (ip6tables) support is broken."""
    pass
|
||||
|
||||
|
||||
def is_enabled():
    """
    Check if `ufw` is enabled

    :returns: True if ufw is enabled
    """
    # Force English output so the status-line match is locale independent.
    env = {'LANG': 'en_US', 'PATH': os.environ['PATH']}
    output = subprocess.check_output(['ufw', 'status'],
                                     universal_newlines=True,
                                     env=env)
    return bool(re.findall(r'^Status: active\n', output, re.M))
|
||||
|
||||
|
||||
def is_ipv6_ok(soft_fail=False):
    """
    Check if IPv6 support is present and ip6tables functional

    :param soft_fail: If set to True and IPv6 support is broken, then reports
                      that the host doesn't have IPv6 support, otherwise a
                      UFWIPv6Error exception is raised.
    :returns: True if IPv6 is working, False otherwise
    """
    if not os.path.isdir('/proc/sys/net/ipv6'):
        # the system doesn't have IPv6
        return False

    # is the ip6tables kernel module already loaded?
    lsmod = subprocess.check_output(['lsmod'], universal_newlines=True)
    if re.findall('^ip6_tables[ ]+', lsmod, re.M):
        # the module is present :)
        return True

    # ip6tables support isn't complete; try to load the module ourselves
    try:
        subprocess.check_output(['modprobe', 'ip6_tables'],
                                universal_newlines=True)
        # great, we could load the module
        return True
    except subprocess.CalledProcessError as ex:
        hookenv.log("Couldn't load ip6_tables module: %s" % ex.output,
                    level="WARN")
        # we are in a world where ip6tables isn't working
        if soft_fail:
            # report that the machine doesn't have (usable) IPv6
            return False
        raise UFWIPv6Error("IPv6 firewall support broken")
|
||||
|
||||
|
||||
def disable_ipv6():
    """
    Disable ufw IPv6 support in /etc/default/ufw
    """
    # Rewrite the IPV6= setting in place with sed.
    cmd = ['sed', '-i', 's/IPV6=.*/IPV6=no/g', '/etc/default/ufw']
    if subprocess.call(cmd) != 0:
        hookenv.log("Couldn't disable IPv6 support in ufw", level="ERROR")
        raise UFWError("Couldn't disable IPv6 support in ufw")
    hookenv.log('IPv6 support in ufw disabled', level='INFO')
|
||||
|
||||
|
||||
def enable(soft_fail=False):
    """
    Enable ufw

    :param soft_fail: If set to True silently disables IPv6 support in ufw,
                      otherwise a UFWIPv6Error exception is raised when IP6
                      support is broken.
    :returns: True if ufw is successfully enabled
    """
    if is_enabled():
        return True

    # Turning on ufw with broken ip6tables would fail; drop IPv6 first.
    if not is_ipv6_ok(soft_fail):
        disable_ipv6()

    env = {'LANG': 'en_US', 'PATH': os.environ['PATH']}
    output = subprocess.check_output(['ufw', 'enable'],
                                     universal_newlines=True,
                                     env=env)
    hookenv.log(output, level='DEBUG')

    matched = re.findall('^Firewall is active and enabled on system startup\n',
                         output, re.M)
    if not matched:
        hookenv.log("ufw couldn't be enabled", level='WARN')
        return False
    hookenv.log("ufw enabled", level='INFO')
    return True
|
||||
|
||||
|
||||
def disable():
    """
    Disable ufw

    :returns: True if ufw is successfully disabled
    """
    if not is_enabled():
        return True

    env = {'LANG': 'en_US', 'PATH': os.environ['PATH']}
    output = subprocess.check_output(['ufw', 'disable'],
                                     universal_newlines=True,
                                     env=env)
    hookenv.log(output, level='DEBUG')

    matched = re.findall(r'^Firewall stopped and disabled on system startup\n',
                         output, re.M)
    if not matched:
        hookenv.log("ufw couldn't be disabled", level='WARN')
        return False
    hookenv.log("ufw disabled", level='INFO')
    return True
|
||||
|
||||
|
||||
def default_policy(policy='deny', direction='incoming'):
    """
    Changes the default policy for traffic `direction`

    :param policy: allow, deny or reject
    :param direction: traffic direction, possible values: incoming, outgoing,
                      routed
    :raises UFWError: if policy or direction is not one of the valid values.
    """
    if policy not in ('allow', 'deny', 'reject'):
        raise UFWError(('Unknown policy %s, valid values: '
                        'allow, deny, reject') % policy)

    if direction not in ('incoming', 'outgoing', 'routed'):
        raise UFWError(('Unknown direction %s, valid values: '
                        'incoming, outgoing, routed') % direction)

    env = {'LANG': 'en_US', 'PATH': os.environ['PATH']}
    output = subprocess.check_output(['ufw', 'default', policy, direction],
                                     universal_newlines=True,
                                     env=env)
    hookenv.log(output, level='DEBUG')

    # ufw confirms the change on stdout; treat anything else as failure.
    expected = "^Default %s policy changed to '%s'\n" % (direction, policy)
    if not re.findall(expected, output, re.M):
        hookenv.log("ufw couldn't change the default policy to %s for %s"
                    % (policy, direction), level='WARN')
        return False
    hookenv.log("ufw default policy for %s changed to %s"
                % (direction, policy), level='INFO')
    return True
|
||||
|
||||
|
||||
def modify_access(src, dst='any', port=None, proto=None, action='allow',
                  index=None):
    """
    Grant access to an address or subnet

    :param src: address (e.g. 192.168.1.234) or subnet
                (e.g. 192.168.1.0/24).
    :param dst: destiny of the connection, if the machine has multiple IPs and
                connections to only one of those have to accepted this is the
                field has to be set.
    :param port: destiny port
    :param proto: protocol (tcp or udp)
    :param action: `allow` or `delete`
    :param index: if different from None the rule is inserted at the given
                  `index`.
    """
    if not is_enabled():
        hookenv.log('ufw is disabled, skipping modify_access()', level='WARN')
        return

    if action == 'delete':
        cmd = ['ufw', 'delete', 'allow']
    elif index is not None:
        cmd = ['ufw', 'insert', str(index), action]
    else:
        cmd = ['ufw', action]

    if src is not None:
        cmd += ['from', src]

    if dst is not None:
        cmd += ['to', dst]

    if port is not None:
        cmd += ['port', str(port)]

    if proto is not None:
        cmd += ['proto', proto]

    hookenv.log('ufw {}: {}'.format(action, ' '.join(cmd)), level='DEBUG')
    # Fix: capture stderr too. Previously stderr was not piped, so
    # communicate() always returned stderr=None and the error path below
    # logged None instead of ufw's actual error text.
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    (stdout, stderr) = p.communicate()

    hookenv.log(stdout, level='INFO')

    if p.returncode != 0:
        hookenv.log(stderr, level='ERROR')
        hookenv.log('Error running: {}, exit code: {}'.format(' '.join(cmd),
                                                              p.returncode),
                    level='ERROR')
|
||||
|
||||
|
||||
def grant_access(src, dst='any', port=None, proto=None, index=None):
    """
    Grant access to an address or subnet

    :param src: address (e.g. 192.168.1.234) or subnet
                (e.g. 192.168.1.0/24).
    :param dst: destiny of the connection, if the machine has multiple IPs and
                connections to only one of those have to accepted this is the
                field has to be set.
    :param port: destiny port
    :param proto: protocol (tcp or udp)
    :param index: if different from None the rule is inserted at the given
                  `index`.
    """
    # Thin wrapper: delegate to modify_access with the 'allow' action.
    return modify_access(src, dst=dst, port=port, proto=proto,
                         action='allow', index=index)
|
||||
|
||||
|
||||
def revoke_access(src, dst='any', port=None, proto=None):
    """
    Revoke access to an address or subnet

    :param src: address (e.g. 192.168.1.234) or subnet
                (e.g. 192.168.1.0/24).
    :param dst: destiny of the connection, if the machine has multiple IPs and
                connections to only one of those have to accepted this is the
                field has to be set.
    :param port: destiny port
    :param proto: protocol (tcp or udp)
    """
    # Thin wrapper: delegate to modify_access with the 'delete' action.
    return modify_access(src, dst=dst, port=port, proto=proto,
                         action='delete')
|
||||
|
||||
|
||||
def service(name, action):
    """
    Open/close access to a service

    :param name: could be a service name defined in `/etc/services` or a port
                 number.
    :param action: `open` or `close`
    :raises UFWError: if `action` is neither 'open' nor 'close'.
    """
    if action == 'open':
        subprocess.check_output(['ufw', 'allow', str(name)],
                                universal_newlines=True)
    elif action == 'close':
        subprocess.check_output(['ufw', 'delete', 'allow', str(name)],
                                universal_newlines=True)
    else:
        # Fix: the message previously said "use 'allow' or 'delete'", which
        # are not values this function accepts.
        raise UFWError(("'{}' not supported, use 'open' "
                        "or 'close'").format(action))
|
|
@ -1,15 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
@ -1,57 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
#
|
||||
# Copyright 2014 Canonical Ltd.
|
||||
#
|
||||
# Authors:
|
||||
# Edward Hope-Morley <opentastic@gmail.com>
|
||||
#
|
||||
|
||||
import time
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
log,
|
||||
INFO,
|
||||
)
|
||||
|
||||
|
||||
def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
    """If the decorated function raises exception exc_type, allow num_retries
    retry attempts before raise the exception.

    :param num_retries: number of retries after the first failure.
    :param base_delay: seconds to wait before the first retry; each further
        retry waits base_delay * attempt_number (linear backoff).
    :param exc_type: exception type (or tuple of types) that triggers a retry.
    """
    from functools import wraps

    def _retry_on_exception_inner_1(f):
        # Fix: preserve the wrapped function's __name__/__doc__ so logging
        # and introspection report the real function, not the wrapper.
        @wraps(f)
        def _retry_on_exception_inner_2(*args, **kwargs):
            retries = num_retries
            multiplier = 1
            while True:
                try:
                    return f(*args, **kwargs)
                except exc_type:
                    if not retries:
                        raise

                    delay = base_delay * multiplier
                    multiplier += 1
                    log("Retrying '%s' %d more times (delay=%s)" %
                        (f.__name__, retries, delay), level=INFO)
                    retries -= 1
                    if delay:
                        time.sleep(delay)

        return _retry_on_exception_inner_2

    return _retry_on_exception_inner_1
|
|
@ -1,134 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import io
|
||||
import os
|
||||
|
||||
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
|
||||
|
||||
|
||||
class Fstab(io.FileIO):
    """This class extends file in order to implement a file reader/writer
    for file `/etc/fstab`
    """

    class Entry(object):
        """Entry class represents a non-comment line on the `/etc/fstab` file
        """
        def __init__(self, device, mountpoint, filesystem,
                     options, d=0, p=0):
            self.device = device
            self.mountpoint = mountpoint
            self.filesystem = filesystem

            if not options:
                options = "defaults"

            self.options = options
            # d and p are the 5th and 6th fstab columns (dump flag and
            # fsck pass number); stored as ints.
            self.d = int(d)
            self.p = int(p)

        def __eq__(self, o):
            # Entries compare equal iff their rendered fstab lines match.
            return str(self) == str(o)

        def __str__(self):
            return "{} {} {} {} {} {}".format(self.device,
                                              self.mountpoint,
                                              self.filesystem,
                                              self.options,
                                              self.d,
                                              self.p)

    # Default file operated on when no path is given to the constructor.
    DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')

    def __init__(self, path=None):
        """Open the fstab file at `path` (default /etc/fstab) read/write."""
        if path:
            self._path = path
        else:
            self._path = self.DEFAULT_PATH
        # 'rb+' keeps existing content and allows in-place rewriting.
        super(Fstab, self).__init__(self._path, 'rb+')

    def _hydrate_entry(self, line):
        """Parse one raw fstab line into an Entry."""
        # NOTE: use split with no arguments to split on any
        # whitespace including tabs
        return Fstab.Entry(*filter(
            lambda x: x not in ('', None),
            line.strip("\n").split()))

    @property
    def entries(self):
        """Yield an Entry for every parseable non-comment, non-blank line."""
        self.seek(0)
        for line in self.readlines():
            line = line.decode('us-ascii')
            try:
                if line.strip() and not line.strip().startswith("#"):
                    yield self._hydrate_entry(line)
            except ValueError:
                # Malformed line (wrong number of fields) — skip it.
                pass

    def get_entry_by_attr(self, attr, value):
        """Return the first entry whose `attr` equals `value`, else None."""
        for entry in self.entries:
            e_attr = getattr(entry, attr)
            if e_attr == value:
                return entry
        return None

    def add_entry(self, entry):
        """Write `entry` to the file; returns False if its device already
        has an entry, otherwise the entry itself."""
        if self.get_entry_by_attr('device', entry.device):
            return False

        self.write((str(entry) + '\n').encode('us-ascii'))
        self.truncate()
        return entry

    def remove_entry(self, entry):
        """Remove `entry` from the file; returns True if it was found."""
        self.seek(0)

        lines = [l.decode('us-ascii') for l in self.readlines()]

        found = False
        for index, line in enumerate(lines):
            if line.strip() and not line.strip().startswith("#"):
                if self._hydrate_entry(line) == entry:
                    found = True
                    break

        if not found:
            return False

        # `line` still holds the matched line from the loop above.
        lines.remove(line)

        self.seek(0)
        self.write(''.join(lines).encode('us-ascii'))
        self.truncate()
        return True

    @classmethod
    def remove_by_mountpoint(cls, mountpoint, path=None):
        """Remove the entry mounted at `mountpoint`; True on success."""
        fstab = cls(path=path)
        entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
        if entry:
            return fstab.remove_entry(entry)
        return False

    @classmethod
    def add(cls, device, mountpoint, filesystem, options=None, path=None):
        """Add a new entry; returns the Entry, or False if the device is
        already present."""
        return cls(path=path).add_entry(Fstab.Entry(device,
                                                    mountpoint, filesystem,
                                                    options=options))
|
|
@ -1,884 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
"Interactions with the Juju environment"
|
||||
# Copyright 2013 Canonical Ltd.
|
||||
#
|
||||
# Authors:
|
||||
# Charm Helpers Developers <juju@lists.ubuntu.com>
|
||||
|
||||
from __future__ import print_function
|
||||
from distutils.version import LooseVersion
|
||||
from functools import wraps
|
||||
import glob
|
||||
import os
|
||||
import json
|
||||
import yaml
|
||||
import subprocess
|
||||
import sys
|
||||
import errno
|
||||
import tempfile
|
||||
from subprocess import CalledProcessError
|
||||
|
||||
from charmhelpers.cli import cmdline
|
||||
|
||||
import six
|
||||
if not six.PY3:
|
||||
from UserDict import UserDict
|
||||
else:
|
||||
from collections import UserDict
|
||||
|
||||
CRITICAL = "CRITICAL"
|
||||
ERROR = "ERROR"
|
||||
WARNING = "WARNING"
|
||||
INFO = "INFO"
|
||||
DEBUG = "DEBUG"
|
||||
MARKER = object()
|
||||
|
||||
cache = {}
|
||||
|
||||
|
||||
def cached(func):
    """Cache return values for multiple executions of func + args

    For example::

        @cached
        def unit_get(attribute):
            pass

        unit_get('test')

    will cache the result of unit_get + 'test' for future calls.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        global cache
        # Key on the function plus its full argument list.
        key = str((func, args, kwargs))
        if key not in cache:
            cache[key] = func(*args, **kwargs)
        return cache[key]
    return wrapper
|
||||
|
||||
|
||||
def flush(key):
    """Flushes any entries from function cache where the
    key is found in the function+args """
    # Collect matches first: the cache must not change size while iterated.
    stale = [item for item in cache if key in item]
    for item in stale:
        del cache[item]
|
||||
|
||||
|
||||
def log(message, level=None):
    """Write a message to the juju log.

    :param message: text to log; non-strings are converted with repr().
    :param level: optional severity string passed to juju-log via -l.
    """
    command = ['juju-log']
    if level:
        command += ['-l', level]
    if not isinstance(message, six.string_types):
        message = repr(message)
    command += [message]
    # Missing juju-log should not cause failures in unit tests
    # Send log output to stderr
    try:
        subprocess.call(command)
    except OSError as e:
        if e.errno == errno.ENOENT:
            # juju-log binary not on PATH (e.g. running outside a hook):
            # fall back to printing the message on stderr.
            if level:
                message = "{}: {}".format(level, message)
            message = "juju-log: {}".format(message)
            print(message, file=sys.stderr)
        else:
            # Any other OS error is unexpected — propagate it.
            raise
|
||||
|
||||
|
||||
class Serializable(UserDict):
    """Wrapper, an object that can be serialized to yaml or json.

    Attribute access falls through to the wrapped object first, then to
    its mapping interface (see __getattr__).
    """

    def __init__(self, obj):
        # wrap the object
        UserDict.__init__(self)
        self.data = obj

    def __getattr__(self, attr):
        # See if this object has attribute.
        if attr in ("json", "yaml", "data"):
            return self.__dict__[attr]
        # Check for attribute in wrapped object.
        got = getattr(self.data, attr, MARKER)
        if got is not MARKER:
            return got
        # Proxy to the wrapped object via dict interface.
        try:
            return self.data[attr]
        except KeyError:
            raise AttributeError(attr)

    def __getstate__(self):
        # Pickle as a standard dictionary.
        return self.data

    def __setstate__(self, state):
        # Unpickle into our wrapper.
        self.data = state

    def json(self):
        """Serialize the object to json"""
        return json.dumps(self.data)

    def yaml(self):
        """Serialize the object to yaml"""
        return yaml.dump(self.data)
|
||||
|
||||
|
||||
def execution_environment():
    """A convenient bundling of the current execution context.

    Returns a dict with the charm config ('conf'), the local unit name
    ('unit'), all relations ('rels'), the process environment ('env'),
    and — only when running inside a relation hook — the relation type,
    id and settings ('reltype', 'relid', 'rel').
    """
    context = {}
    context['conf'] = config()
    # Relation details are only available when running in a relation hook.
    if relation_id():
        context['reltype'] = relation_type()
        context['relid'] = relation_id()
        context['rel'] = relation_get()
    context['unit'] = local_unit()
    context['rels'] = relations()
    context['env'] = os.environ
    return context
|
||||
|
||||
|
||||
def in_relation_hook():
    """Determine whether we're running in a relation hook"""
    # juju exports JUJU_RELATION only for relation hooks.
    return os.environ.get('JUJU_RELATION') is not None
|
||||
|
||||
|
||||
def relation_type():
    """The scope for the current relation hook"""
    # None when not executing inside a relation hook.
    return os.environ.get('JUJU_RELATION')
|
||||
|
||||
|
||||
@cmdline.subcommand()
@cached
def relation_id(relation_name=None, service_or_unit=None):
    """The relation ID for the current or a specified relation.

    With no arguments, returns JUJU_RELATION_ID from the environment (the
    relation the running hook belongs to, or None outside a relation hook).
    With both arguments, searches `relation_name`'s relation ids for the one
    whose remote service matches `service_or_unit`; returns None if no match.

    :raises ValueError: if exactly one of the two arguments is supplied.
    """
    if not relation_name and not service_or_unit:
        return os.environ.get('JUJU_RELATION_ID', None)
    elif relation_name and service_or_unit:
        # A unit name like 'mysql/0' reduces to its service name 'mysql'.
        service_name = service_or_unit.split('/')[0]
        for relid in relation_ids(relation_name):
            remote_service = remote_service_name(relid)
            if remote_service == service_name:
                return relid
    else:
        raise ValueError('Must specify neither or both of relation_name and service_or_unit')
|
||||
|
||||
|
||||
def local_unit():
    """Local unit ID"""
    # JUJU_UNIT_NAME is set by juju for every hook execution; a KeyError
    # here means we are not running under juju.
    return os.environ['JUJU_UNIT_NAME']
|
||||
|
||||
|
||||
def remote_unit():
    """The remote unit for the current relation hook"""
    # None outside relation hooks.
    return os.environ.get('JUJU_REMOTE_UNIT')
|
||||
|
||||
|
||||
@cmdline.subcommand()
def service_name():
    """The name service group this unit belongs to"""
    # 'myservice/0' -> 'myservice'
    unit = local_unit()
    return unit.split('/')[0]
|
||||
|
||||
|
||||
@cmdline.subcommand()
@cached
def remote_service_name(relid=None):
    """The remote service name for a given relation-id (or the current relation)"""
    if relid is None:
        # Current relation: derive from this hook's remote unit.
        unit = remote_unit()
    else:
        # Explicit relation id: any related unit will do, since all units
        # on the remote side belong to the same service.
        units = related_units(relid)
        unit = units[0] if units else None
    # 'mysql/0' -> 'mysql'; None when no remote unit could be determined.
    return unit.split('/')[0] if unit else None
|
||||
|
||||
|
||||
def hook_name():
    """The name of the currently executing hook"""
    # Prefer the JUJU_HOOK_NAME environment variable; fall back to the
    # basename of the executing script.
    fallback = os.path.basename(sys.argv[0])
    return os.environ.get('JUJU_HOOK_NAME', fallback)
|
||||
|
||||
|
||||
class Config(dict):
|
||||
"""A dictionary representation of the charm's config.yaml, with some
|
||||
extra features:
|
||||
|
||||
- See which values in the dictionary have changed since the previous hook.
|
||||
- For values that have changed, see what the previous value was.
|
||||
- Store arbitrary data for use in a later hook.
|
||||
|
||||
NOTE: Do not instantiate this object directly - instead call
|
||||
``hookenv.config()``, which will return an instance of :class:`Config`.
|
||||
|
||||
Example usage::
|
||||
|
||||
>>> # inside a hook
|
||||
>>> from charmhelpers.core import hookenv
|
||||
>>> config = hookenv.config()
|
||||
>>> config['foo']
|
||||
'bar'
|
||||
>>> # store a new key/value for later use
|
||||
>>> config['mykey'] = 'myval'
|
||||
|
||||
|
||||
>>> # user runs `juju set mycharm foo=baz`
|
||||
>>> # now we're inside subsequent config-changed hook
|
||||
>>> config = hookenv.config()
|
||||
>>> config['foo']
|
||||
'baz'
|
||||
>>> # test to see if this val has changed since last hook
|
||||
>>> config.changed('foo')
|
||||
True
|
||||
>>> # what was the previous value?
|
||||
>>> config.previous('foo')
|
||||
'bar'
|
||||
>>> # keys/values that we add are preserved across hooks
|
||||
>>> config['mykey']
|
||||
'myval'
|
||||
|
||||
"""
|
||||
CONFIG_FILE_NAME = '.juju-persistent-config'
|
||||
|
||||
def __init__(self, *args, **kw):
|
||||
super(Config, self).__init__(*args, **kw)
|
||||
self.implicit_save = True
|
||||
self._prev_dict = None
|
||||
self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
|
||||
if os.path.exists(self.path):
|
||||
self.load_previous()
|
||||
atexit(self._implicit_save)
|
||||
|
||||
def load_previous(self, path=None):
|
||||
"""Load previous copy of config from disk.
|
||||
|
||||
In normal usage you don't need to call this method directly - it
|
||||
is called automatically at object initialization.
|
||||
|
||||
:param path:
|
||||
|
||||
File path from which to load the previous config. If `None`,
|
||||
config is loaded from the default location. If `path` is
|
||||
specified, subsequent `save()` calls will write to the same
|
||||
path.
|
||||
|
||||
"""
|
||||
self.path = path or self.path
|
||||
with open(self.path) as f:
|
||||
self._prev_dict = json.load(f)
|
||||
for k, v in self._prev_dict.items():
|
||||
if k not in self:
|
||||
self[k] = v
|
||||
|
||||
def changed(self, key):
|
||||
"""Return True if the current value for this key is different from
|
||||
the previous value.
|
||||
|
||||
"""
|
||||
if self._prev_dict is None:
|
||||
return True
|
||||
return self.previous(key) != self.get(key)
|
||||
|
||||
def previous(self, key):
|
||||
"""Return previous value for this key, or None if there
|
||||
is no previous value.
|
||||
|
||||
"""
|
||||
if self._prev_dict:
|
||||
return self._prev_dict.get(key)
|
||||
return None
|
||||
|
||||
def save(self):
|
||||
"""Save this config to disk.
|
||||
|
||||
If the charm is using the :mod:`Services Framework <services.base>`
|
||||
or :meth:'@hook <Hooks.hook>' decorator, this
|
||||
is called automatically at the end of successful hook execution.
|
||||
Otherwise, it should be called directly by user code.
|
||||
|
||||
To disable automatic saves, set ``implicit_save=False`` on this
|
||||
instance.
|
||||
|
||||
"""
|
||||
with open(self.path, 'w') as f:
|
||||
json.dump(self, f)
|
||||
|
||||
def _implicit_save(self):
|
||||
if self.implicit_save:
|
||||
self.save()
|
||||
|
||||
|
||||
@cached
def config(scope=None):
    """Juju charm configuration"""
    cmd = ['config-get']
    if scope is not None:
        cmd.append(scope)
    cmd.append('--format=json')
    try:
        data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
        # A scoped query returns the raw value; the full config is wrapped
        # in a Config object so change tracking works.
        return data if scope is not None else Config(data)
    except ValueError:
        # config-get produced no parsable JSON.
        return None
|
||||
|
||||
|
||||
@cached
def relation_get(attribute=None, unit=None, rid=None):
    """Get relation information"""
    cmd = ['relation-get', '--format=json']
    if rid:
        cmd.extend(['-r', rid])
    cmd.append(attribute if attribute else '-')
    if unit:
        cmd.append(unit)
    try:
        return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
    except ValueError:
        return None
    except CalledProcessError as e:
        # Exit code 2 is treated as "no data" rather than a hard error.
        if e.returncode == 2:
            return None
        raise
|
||||
|
||||
|
||||
def relation_set(relation_id=None, relation_settings=None, **kwargs):
    """Set relation information for the current unit.

    :param relation_id: relation to modify; ``None`` means the current
        relation context.
    :param relation_settings: dict of key/value pairs to set.
    :param kwargs: additional key/value pairs, merged over
        ``relation_settings`` (kwargs win on conflict).
    """
    relation_settings = relation_settings if relation_settings else {}
    relation_cmd_line = ['relation-set']
    # Probe the installed relation-set for --file support by inspecting
    # its --help output.
    accepts_file = "--file" in subprocess.check_output(
        relation_cmd_line + ["--help"], universal_newlines=True)
    if relation_id is not None:
        relation_cmd_line.extend(('-r', relation_id))
    settings = relation_settings.copy()
    settings.update(kwargs)
    for key, value in settings.items():
        # Force value to be a string: it always should, but some call
        # sites pass in things like dicts or numbers.
        if value is not None:
            settings[key] = "{}".format(value)
    if accepts_file:
        # --file was introduced in Juju 1.23.2. Use it by default if
        # available, since otherwise we'll break if the relation data is
        # too big. Ideally we should tell relation-set to read the data from
        # stdin, but that feature is broken in 1.23.2: Bug #1454678.
        with tempfile.NamedTemporaryFile(delete=False) as settings_file:
            settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
        subprocess.check_call(
            relation_cmd_line + ["--file", settings_file.name])
        os.remove(settings_file.name)
    else:
        # Older juju: pass every pair on the command line; a bare "key="
        # unsets the key.
        for key, value in settings.items():
            if value is None:
                relation_cmd_line.append('{}='.format(key))
            else:
                relation_cmd_line.append('{}={}'.format(key, value))
        subprocess.check_call(relation_cmd_line)
    # Flush cache of any relation-gets for local unit
    flush(local_unit())
|
||||
|
||||
|
||||
def relation_clear(r_id=None):
    """Clears any relation data already set on relation r_id"""
    settings = relation_get(rid=r_id, unit=local_unit())
    for key in settings:
        # The address keys are left untouched; everything else is unset.
        if key not in ('public-address', 'private-address'):
            settings[key] = None
    relation_set(relation_id=r_id, **settings)
|
||||
|
||||
|
||||
@cached
def relation_ids(reltype=None):
    """A list of relation_ids"""
    reltype = reltype or relation_type()
    if reltype is None:
        # No type given and no relation context to infer one from.
        return []
    cmd = ['relation-ids', '--format=json', reltype]
    return json.loads(subprocess.check_output(cmd).decode('UTF-8')) or []
|
||||
|
||||
|
||||
@cached
def related_units(relid=None):
    """A list of related units"""
    relid = relid or relation_id()
    cmd = ['relation-list', '--format=json']
    if relid is not None:
        cmd.extend(('-r', relid))
    # relation-list may emit JSON null; normalise that to an empty list.
    return json.loads(subprocess.check_output(cmd).decode('UTF-8')) or []
|
||||
|
||||
|
||||
@cached
def relation_for_unit(unit=None, rid=None):
    """Get the json representation of a unit's relation"""
    unit = unit or remote_unit()
    rel_data = relation_get(unit=unit, rid=rid)
    for key, value in rel_data.items():
        # Keys ending in -list hold whitespace-separated values.
        if key.endswith('-list'):
            rel_data[key] = value.split()
    rel_data['__unit__'] = unit
    return rel_data
|
||||
|
||||
|
||||
@cached
def relations_for_id(relid=None):
    """Get relations of a specific relation ID"""
    relation_data = []
    # NOTE(review): when relid is None this falls back to relation_ids(),
    # which returns a *list* of ids, not a single id — that list is then
    # passed to related_units()/relation_for_unit() as if it were one id.
    # Looks suspect (relation_id() would be the single-id fallback);
    # confirm against callers before changing.
    relid = relid or relation_ids()
    for unit in related_units(relid):
        unit_data = relation_for_unit(unit, relid)
        unit_data['__relid__'] = relid
        relation_data.append(unit_data)
    return relation_data
|
||||
|
||||
|
||||
@cached
def relations_of_type(reltype=None):
    """Get relations of a specific type"""
    reltype = reltype or relation_type()
    results = []
    for relid in relation_ids(reltype):
        for relation in relations_for_id(relid):
            # Tag each relation dict with the id it came from.
            relation['__relid__'] = relid
            results.append(relation)
    return results
|
||||
|
||||
|
||||
@cached
def metadata():
    """Get the current charm metadata.yaml contents as a python object"""
    metadata_path = os.path.join(charm_dir(), 'metadata.yaml')
    with open(metadata_path) as md:
        return yaml.safe_load(md)
|
||||
|
||||
|
||||
@cached
def relation_types():
    """Get a list of relation types supported by this charm"""
    md = metadata()
    rel_types = []
    # Relations may appear under any of these three metadata sections.
    for section_name in ('provides', 'requires', 'peers'):
        rel_types.extend((md.get(section_name) or {}).keys())
    return rel_types
|
||||
|
||||
|
||||
@cached
def relation_to_interface(relation_name):
    """
    Given the name of a relation, return the interface that relation uses.

    :returns: The interface name, or ``None``.
    """
    _, interface = relation_to_role_and_interface(relation_name)
    return interface
|
||||
|
||||
|
||||
@cached
def relation_to_role_and_interface(relation_name):
    """
    Given the name of a relation, return the role and the name of the interface
    that relation uses (where role is one of ``provides``, ``requires``, or ``peer``).

    :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
    """
    md = metadata()
    for role in ('provides', 'requires', 'peer'):
        relation = md.get(role, {}).get(relation_name, {})
        interface = relation.get('interface')
        if interface:
            return role, interface
    return None, None
|
||||
|
||||
|
||||
@cached
def role_and_interface_to_relations(role, interface_name):
    """
    Given a role and interface name, return a list of relation names for the
    current charm that use that interface under that role (where role is one
    of ``provides``, ``requires``, or ``peer``).

    :returns: A list of relation names.
    """
    md = metadata()
    return [
        name
        for name, relation in md.get(role, {}).items()
        if relation['interface'] == interface_name
    ]
|
||||
|
||||
|
||||
@cached
def interface_to_relations(interface_name):
    """
    Given an interface, return a list of relation names for the current
    charm that use that interface.

    :returns: A list of relation names.
    """
    results = []
    for role in ('provides', 'requires', 'peer'):
        results += role_and_interface_to_relations(role, interface_name)
    return results
|
||||
|
||||
|
||||
@cached
def charm_name():
    """Get the name of the current charm as is specified on metadata.yaml"""
    md = metadata()
    return md.get('name')
|
||||
|
||||
|
||||
@cached
def relations():
    """Get a nested dictionary of relation data for all related units"""
    rels = {}
    for reltype in relation_types():
        rels[reltype] = {}
        for relid in relation_ids(reltype):
            # Seed with our own unit's data, then add every remote unit's.
            units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
            for unit in related_units(relid):
                units[unit] = relation_get(unit=unit, rid=relid)
            rels[reltype][relid] = units
    return rels
|
||||
|
||||
|
||||
@cached
def is_relation_made(relation, keys='private-address'):
    """Determine whether a relation is established by checking for
    presence of key(s).  If a list of keys is provided, they
    must all be present for the relation to be identified as made.
    """
    key_list = [keys] if isinstance(keys, str) else keys
    for r_id in relation_ids(relation):
        for unit in related_units(r_id):
            values = [relation_get(k, rid=r_id, unit=unit) for k in key_list]
            # Every requested key must be set on at least one remote unit.
            if all(v is not None for v in values):
                return True
    return False
|
||||
|
||||
|
||||
def open_port(port, protocol="TCP"):
    """Open a service network port"""
    subprocess.check_call(['open-port', '{}/{}'.format(port, protocol)])
|
||||
|
||||
|
||||
def close_port(port, protocol="TCP"):
    """Close a service network port"""
    subprocess.check_call(['close-port', '{}/{}'.format(port, protocol)])
|
||||
|
||||
|
||||
@cached
def unit_get(attribute):
    """Get the named attribute of this unit via the ``unit-get`` hook tool.

    (The original docstring claimed this queried the *remote* unit; the
    command issued here is plain ``unit-get <attribute>``, which reports
    on the local unit — see e.g. unit_public_ip/unit_private_ip below.)

    :returns: the JSON-decoded value, or None if the output is not JSON.
    """
    _args = ['unit-get', '--format=json', attribute]
    try:
        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
    except ValueError:
        return None
|
||||
|
||||
|
||||
def unit_public_ip():
    """Get this unit's public IP address"""
    # Thin wrapper over unit_get; returns None if unit-get output
    # is not valid JSON (see unit_get).
    return unit_get('public-address')
|
||||
|
||||
|
||||
def unit_private_ip():
    """Get this unit's private IP address"""
    # Thin wrapper over unit_get; returns None if unit-get output
    # is not valid JSON (see unit_get).
    return unit_get('private-address')
|
||||
|
||||
|
||||
class UnregisteredHookError(Exception):
    """Raised when an undefined hook is called"""
    # Hooks.execute() raises this with the hook name as the sole argument.
    pass
|
||||
|
||||
|
||||
class Hooks(object):
    """A convenient handler for hook functions.

    Example::

        hooks = Hooks()

        # register a hook, taking its name from the function name
        @hooks.hook()
        def install():
            pass  # your code here

        # register a hook, providing a custom hook name
        @hooks.hook("config-changed")
        def config_changed():
            pass  # your code here

        if __name__ == "__main__":
            # execute a hook based on the name the program is called by
            hooks.execute(sys.argv)
    """

    def __init__(self, config_save=None):
        super(Hooks, self).__init__()
        # Maps hook name -> registered callable.
        self._hooks = {}

        # For unknown reasons, we allow the Hooks constructor to override
        # config().implicit_save.
        if config_save is not None:
            config().implicit_save = config_save

    def register(self, name, function):
        """Register a hook"""
        self._hooks[name] = function

    def execute(self, args):
        """Execute a registered hook based on args[0]"""
        _run_atstart()
        hook_name = os.path.basename(args[0])
        hook_func = self._hooks.get(hook_name)
        if hook_func is None:
            raise UnregisteredHookError(hook_name)
        try:
            hook_func()
        except SystemExit as x:
            # A clean sys.exit() still counts as success: run atexit hooks
            # before letting the SystemExit propagate.
            if x.code is None or x.code == 0:
                _run_atexit()
            raise
        _run_atexit()

    def hook(self, *hook_names):
        """Decorator, registering them as hooks"""
        def wrapper(decorated):
            for name in hook_names:
                self.register(name, decorated)
            # The function is always registered under its own name as
            # well, plus a dashed alias when the name has underscores
            # (hook files use dashes, Python identifiers use underscores).
            self.register(decorated.__name__, decorated)
            if '_' in decorated.__name__:
                self.register(
                    decorated.__name__.replace('_', '-'), decorated)
            return decorated
        return wrapper
|
||||
|
||||
|
||||
def charm_dir():
    """Return the root directory of the current charm"""
    # Juju exports CHARM_DIR into every hook's environment; None outside
    # of a hook context.
    charm_root = os.environ.get('CHARM_DIR')
    return charm_root
|
||||
|
||||
|
||||
@cached
def action_get(key=None):
    """Gets the value of an action parameter, or all key/value param pairs"""
    cmd = ['action-get']
    if key is not None:
        cmd.append(key)
    cmd.append('--format=json')
    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
|
||||
|
||||
|
||||
def action_set(values):
    """Sets the values to be returned after the action finishes"""
    cmd = ['action-set'] + ['{}={}'.format(k, v) for k, v in values.items()]
    subprocess.check_call(cmd)
|
||||
|
||||
|
||||
def action_fail(message):
    """Sets the action status to failed and sets the error message.

    The results set by action_set are preserved."""
    # Delegates to the action-fail hook tool with the message verbatim.
    subprocess.check_call(['action-fail', message])
|
||||
|
||||
|
||||
def status_set(workload_state, message):
    """Set the workload state with a message.

    Use status-set to set the workload state with a message which is visible
    to the user via juju status. If the status-set command is not found then
    assume this is juju < 1.23 and juju-log the message instead.

    :param workload_state: one of 'maintenance', 'blocked', 'waiting',
        'active'; anything else raises ValueError.
    :param message: status update message
    """
    valid_states = ['maintenance', 'blocked', 'waiting', 'active']
    if workload_state not in valid_states:
        raise ValueError(
            '{!r} is not a valid workload state'.format(workload_state)
        )
    cmd = ['status-set', workload_state, message]
    try:
        ret = subprocess.call(cmd)
        if ret == 0:
            # status-set exists and accepted the update; nothing to log.
            return
    except OSError as e:
        # ENOENT means the tool is absent (old juju) — fall through to
        # logging; any other OSError is a real failure.
        if e.errno != errno.ENOENT:
            raise
    # Fallback path: either status-set is missing or returned non-zero.
    log_message = 'status-set failed: {} {}'.format(workload_state,
                                                    message)
    log(log_message, level='INFO')
|
||||
|
||||
|
||||
def status_get():
    """Retrieve the previously set juju workload state.

    If the status-get command is not found then assume this is juju < 1.23
    and return 'unknown'.
    """
    try:
        raw_status = subprocess.check_output(
            ['status-get'], universal_newlines=True)
    except OSError as e:
        if e.errno == errno.ENOENT:
            # Old juju: the status-get hook tool does not exist.
            return 'unknown'
        raise
    return raw_status.rstrip()
|
||||
|
||||
|
||||
def translate_exc(from_exc, to_exc):
    """Decorator factory: re-raise *from_exc* from the wrapped call as *to_exc*.

    Used below to map environment errors (e.g. OSError when a juju hook
    tool is missing) onto a more meaningful exception type.

    :param from_exc: exception type (or tuple of types) to intercept.
    :param to_exc: exception type raised in its place.
    """
    from functools import wraps

    def inner_translate_exc1(f):
        # Fix: without functools.wraps the wrapper clobbered the wrapped
        # function's __name__/__doc__, hurting introspection and logs.
        @wraps(f)
        def inner_translate_exc2(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except from_exc:
                raise to_exc

        return inner_translate_exc2

    return inner_translate_exc1
|
||||
|
||||
|
||||
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def is_leader():
    """Does the current unit hold the juju leadership

    Uses juju to determine whether the current unit is the leader of its peers
    """
    out = subprocess.check_output(['is-leader', '--format=json'])
    return json.loads(out.decode('UTF-8'))
|
||||
|
||||
|
||||
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def leader_get(attribute=None):
    """Juju leader get value(s)"""
    # '-' asks leader-get for the full settings map.
    cmd = ['leader-get', '--format=json', attribute or '-']
    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
|
||||
|
||||
|
||||
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def leader_set(settings=None, **kwargs):
    """Juju leader set value(s)"""
    # Don't log secrets.
    # log("Juju leader-set '%s'" % (settings), level=DEBUG)
    cmd = ['leader-set']
    settings = settings or {}
    settings.update(kwargs)
    for k, v in settings.items():
        # A bare "key=" unsets the key.
        cmd.append('{}='.format(k) if v is None else '{}={}'.format(k, v))
    subprocess.check_call(cmd)
|
||||
|
||||
|
||||
@cached
def juju_version():
    """Full version string (eg. '1.23.3.1-trusty-amd64')"""
    # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
    jujud_path = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
    return subprocess.check_output(
        [jujud_path, 'version'], universal_newlines=True).strip()
|
||||
|
||||
|
||||
@cached
def has_juju_version(minimum_version):
    """Return True if the Juju version is at least the provided version"""
    current = LooseVersion(juju_version())
    return current >= LooseVersion(minimum_version)
|
||||
|
||||
|
||||
# Callbacks registered via atexit(); run in reverse registration order by
# _run_atexit() on successful hook completion.
_atexit = []
# Callbacks registered via atstart(); run in registration order by
# _run_atstart() before the main hook body.
_atstart = []
|
||||
|
||||
|
||||
def atstart(callback, *args, **kwargs):
    """Schedule a callback to run before the main hook.

    Callbacks are run in the order they were added.

    This is useful for modules and classes to perform initialization
    and inject behavior. In particular:

    - Run common code before all of your hooks, such as logging
      the hook name or interesting relation data.
    - Defer object or module initialization that requires a hook
      context until we know there actually is a hook context,
      making testing easier.
    - Rather than requiring charm authors to include boilerplate to
      invoke your helper's behavior, have it run automatically if
      your object is instantiated or module imported.

    This is not at all useful after your hook framework has been launched.
    """
    _atstart.append((callback, args, kwargs))
|
||||
|
||||
|
||||
def atexit(callback, *args, **kwargs):
    """Schedule a callback to run on successful hook completion.

    Callbacks are run in the reverse order that they were added.
    """
    _atexit.append((callback, args, kwargs))
|
||||
|
||||
|
||||
def _run_atstart():
    """Hook frameworks must invoke this before running the main hook body."""
    for callback, args, kwargs in _atstart:
        callback(*args, **kwargs)
    # Clear in place so every registrant holding a reference sees the reset.
    del _atstart[:]
|
||||
|
||||
|
||||
def _run_atexit():
    """Hook frameworks must invoke this after the main hook body has
    successfully completed. Do not invoke it if the hook fails."""
    # LIFO: callbacks run in the reverse order of registration.
    for callback, args, kwargs in reversed(_atexit):
        callback(*args, **kwargs)
    # Clear in place so every registrant holding a reference sees the reset.
    del _atexit[:]
|
|
@ -1,468 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
"""Tools for working with the host system"""
|
||||
# Copyright 2012 Canonical Ltd.
|
||||
#
|
||||
# Authors:
|
||||
# Nick Moffitt <nick.moffitt@canonical.com>
|
||||
# Matthew Wedgwood <matthew.wedgwood@canonical.com>
|
||||
|
||||
import os
|
||||
import re
|
||||
import pwd
|
||||
import glob
|
||||
import grp
|
||||
import random
|
||||
import string
|
||||
import subprocess
|
||||
import hashlib
|
||||
from contextlib import contextmanager
|
||||
from collections import OrderedDict
|
||||
|
||||
import six
|
||||
|
||||
from .hookenv import log
|
||||
from .fstab import Fstab
|
||||
|
||||
|
||||
def service_start(service_name):
    """Start a system service.

    :returns: True if the ``service`` command exited 0 (see service()).
    """
    return service('start', service_name)
|
||||
|
||||
|
||||
def service_stop(service_name):
    """Stop a system service.

    :returns: True if the ``service`` command exited 0 (see service()).
    """
    return service('stop', service_name)
|
||||
|
||||
|
||||
def service_restart(service_name):
    """Restart a system service.

    :returns: True if the ``service`` command exited 0 (see service()).
    """
    return service('restart', service_name)
|
||||
|
||||
|
||||
def service_reload(service_name, restart_on_failure=False):
    """Reload a system service, optionally falling back to restart if
    reload fails"""
    reloaded = service('reload', service_name)
    if not reloaded and restart_on_failure:
        reloaded = service('restart', service_name)
    return reloaded
|
||||
|
||||
|
||||
def service(action, service_name):
    """Control a system service"""
    # True iff the service(8) invocation exited successfully.
    return subprocess.call(['service', service_name, action]) == 0
|
||||
|
||||
|
||||
def service_running(service):
    """Determine whether a system service is running"""
    try:
        output = subprocess.check_output(
            ['service', service, 'status'],
            stderr=subprocess.STDOUT).decode('UTF-8')
    except subprocess.CalledProcessError:
        # A non-zero exit from `service status` means not running.
        return False
    # Upstart reports "start/running"; sysvinit scripts say "is running".
    return "start/running" in output or "is running" in output
|
||||
|
||||
|
||||
def service_available(service_name):
    """Determine whether a system service is available"""
    try:
        subprocess.check_output(
            ['service', service_name, 'status'],
            stderr=subprocess.STDOUT).decode('UTF-8')
    except subprocess.CalledProcessError as e:
        # Non-zero status is fine (service may just be stopped) unless the
        # tool reports the service as unknown altogether.
        return b'unrecognized service' not in e.output
    return True
|
||||
|
||||
|
||||
def adduser(username, password=None, shell='/bin/bash', system_user=False):
    """Add a user to the system.

    No-op (beyond a log message) if the user already exists.

    :param password: value handed to ``useradd --password``
        (NOTE(review): useradd expects an *encrypted* password there —
        confirm call sites pass a crypt hash, not plaintext).
    :param system_user: create a ``--system`` account; also implied when
        *password* is None.
    :returns: the ``pwd`` struct for the (possibly pre-existing) user.
    """
    try:
        user_info = pwd.getpwnam(username)
        log('user {0} already exists!'.format(username))
    except KeyError:
        log('creating user {0}'.format(username))
        cmd = ['useradd']
        if system_user or password is None:
            cmd.append('--system')
        else:
            cmd.extend([
                '--create-home',
                '--shell', shell,
                '--password', password,
            ])
        cmd.append(username)
        subprocess.check_call(cmd)
        user_info = pwd.getpwnam(username)
    return user_info
|
||||
|
||||
|
||||
def add_group(group_name, system_group=False):
    """Add a group to the system.

    No-op (beyond a log message) if the group already exists.

    :param system_group: create the group with ``addgroup --system``.
    :returns: the ``grp`` struct for the (possibly pre-existing) group.
    """
    try:
        group_info = grp.getgrnam(group_name)
        log('group {0} already exists!'.format(group_name))
    except KeyError:
        log('creating group {0}'.format(group_name))
        cmd = ['addgroup']
        if system_group:
            cmd.append('--system')
        else:
            cmd.extend([
                '--group',
            ])
        cmd.append(group_name)
        subprocess.check_call(cmd)
        group_info = grp.getgrnam(group_name)
    return group_info
|
||||
|
||||
|
||||
def add_user_to_group(username, group):
    """Add a user to a group"""
    log("Adding user {} to group {}".format(username, group))
    subprocess.check_call(['gpasswd', '-a', username, group])
|
||||
|
||||
|
||||
def rsync(from_path, to_path, flags='-r', options=None):
    """Replicate the contents of a path"""
    options = options or ['--delete', '--executability']
    cmd = ['/usr/bin/rsync', flags] + options + [from_path, to_path]
    log(" ".join(cmd))
    return subprocess.check_output(cmd).decode('UTF-8').strip()
|
||||
|
||||
|
||||
def symlink(source, destination):
    """Create a symbolic link"""
    log("Symlinking {} as {}".format(source, destination))
    # -f replaces any existing link/file at the destination.
    subprocess.check_call(['ln', '-sf', source, destination])
|
||||
|
||||
|
||||
def mkdir(path, owner='root', group='root', perms=0o555, force=False):
    """Create a directory owned by *owner*:*group* with mode *perms*.

    :param force: when True and a non-directory exists at *path*, remove
        it and create the directory in its place.
    """
    log("Making dir {} {}:{} {:o}".format(path, owner, group,
                                          perms))
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid
    realpath = os.path.abspath(path)
    path_exists = os.path.exists(realpath)
    if path_exists and force:
        if not os.path.isdir(realpath):
            log("Removing non-directory file {} prior to mkdir()".format(path))
            os.unlink(realpath)
            os.makedirs(realpath, perms)
    elif not path_exists:
        os.makedirs(realpath, perms)
    # Ownership and mode are (re)applied even when the directory existed.
    os.chown(realpath, uid, gid)
    os.chmod(realpath, perms)
|
||||
|
||||
|
||||
def write_file(path, content, owner='root', group='root', perms=0o444):
    """Create or overwrite a file with the contents of a byte string."""
    log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
    owner_uid = pwd.getpwnam(owner).pw_uid
    group_gid = grp.getgrnam(group).gr_gid
    with open(path, 'wb') as target:
        # Set ownership first, then mode, then write the payload.
        os.fchown(target.fileno(), owner_uid, group_gid)
        os.fchmod(target.fileno(), perms)
        target.write(content)
|
||||
|
||||
|
||||
def fstab_remove(mp):
    """Remove the given mountpoint entry from /etc/fstab
    """
    # Delegates to the charmhelpers Fstab helper (from .fstab import).
    return Fstab.remove_by_mountpoint(mp)
|
||||
|
||||
|
||||
def fstab_add(dev, mp, fs, options=None):
    """Adds the given device entry to the /etc/fstab file
    """
    # Delegates to the charmhelpers Fstab helper (from .fstab import).
    return Fstab.add(dev, mp, fs, options=options)
|
||||
|
||||
|
||||
def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
    """Mount a filesystem at a particular mountpoint"""
    cmd_args = ['mount']
    if options is not None:
        cmd_args += ['-o', options]
    cmd_args += [device, mountpoint]
    try:
        subprocess.check_output(cmd_args)
    except subprocess.CalledProcessError as e:
        log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
        return False

    if not persist:
        return True
    # Record the mount in /etc/fstab so it survives a reboot.
    return fstab_add(device, mountpoint, filesystem, options=options)
|
||||
|
||||
|
||||
def umount(mountpoint, persist=False):
    """Unmount a filesystem"""
    try:
        subprocess.check_output(['umount', mountpoint])
    except subprocess.CalledProcessError as e:
        log('Error unmounting {}\n{}'.format(mountpoint, e.output))
        return False

    if not persist:
        return True
    # Drop the corresponding /etc/fstab entry as well.
    return fstab_remove(mountpoint)
|
||||
|
||||
|
||||
def mounts():
    """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
    with open('/proc/mounts') as f:
        # /proc/mounts lines start "device mountpoint ..."; the [1::-1]
        # slice yields [mountpoint, device].
        return [line.strip().split()[1::-1] for line in f]
|
||||
|
||||
|
||||
def file_hash(path, hash_type='md5'):
    """
    Generate a hash checksum of the contents of 'path' or None if not found.

    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
                          such as md5, sha1, sha256, sha512, etc.
    """
    if not os.path.exists(path):
        return None
    digest = getattr(hashlib, hash_type)()
    with open(path, 'rb') as source:
        digest.update(source.read())
    return digest.hexdigest()
|
||||
|
||||
|
||||
def path_hash(path):
    """
    Generate a hash checksum of all files matching 'path'. Standard wildcards
    like '*' and '?' are supported, see documentation for the 'glob' module for
    more information.

    :return: dict: A { filename: hash } dictionary for all matched files.
                   Empty if none found.
    """
    return {fname: file_hash(fname) for fname in glob.iglob(path)}
|
||||
|
||||
|
||||
def check_hash(path, checksum, hash_type='md5'):
    """
    Validate a file using a cryptographic checksum.

    :param str checksum: Value of the checksum used to validate the file.
    :param str hash_type: Hash algorithm used to generate `checksum`.
        Can be any hash algorithm supported by :mod:`hashlib`,
        such as md5, sha1, sha256, sha512, etc.
    :raises ChecksumError: If the file fails the checksum

    """
    actual = file_hash(path, hash_type)
    if actual != checksum:
        raise ChecksumError("'%s' != '%s'" % (checksum, actual))
|
||||
|
||||
|
||||
class ChecksumError(ValueError):
    """Raised by check_hash() when a file fails checksum validation."""
    pass
|
||||
|
||||
|
||||
def restart_on_change(restart_map, stopstart=False):
    """Restart services based on configuration files changing

    This function is used a decorator, for example::

        @restart_on_change({
            '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
            '/etc/apache/sites-enabled/*': [ 'apache2' ]
        })
        def config_changed():
            pass  # your code here

    In this example, the cinder-api and cinder-volume services
    would be restarted if /etc/ceph/ceph.conf is changed by the
    ceph_client_changed function. The apache2 service would be
    restarted if any file matching the pattern got changed, created
    or removed. Standard wildcards are supported, see documentation
    for the 'glob' module for more information.

    :param stopstart: when True, issue an explicit stop then start instead
        of a single restart.
    """
    def wrap(f):
        def wrapped_f(*args, **kwargs):
            # Snapshot the hash of every watched path before running f.
            checksums = {path: path_hash(path) for path in restart_map}
            f(*args, **kwargs)
            # Collect the services whose watched files changed.
            restarts = []
            for path in restart_map:
                if path_hash(path) != checksums[path]:
                    restarts += restart_map[path]
            # De-duplicate while preserving registration order.
            services_list = list(OrderedDict.fromkeys(restarts))
            if not stopstart:
                for service_name in services_list:
                    service('restart', service_name)
            else:
                # Stop ALL affected services before starting any of them.
                for action in ['stop', 'start']:
                    for service_name in services_list:
                        service(action, service_name)
        return wrapped_f
    return wrap
|
||||
|
||||
|
||||
def lsb_release():
    """Return /etc/lsb-release as a dict.

    Each ``KEY=value`` line of the file becomes one entry, with whitespace
    stripped from both key and value.

    :returns: dict mapping release keys (e.g. DISTRIB_CODENAME) to values.
    """
    d = {}
    with open('/etc/lsb-release', 'r') as lsb:
        for l in lsb:
            # Split on the FIRST '=' only: values such as
            # DISTRIB_DESCRIPTION may legitimately contain '=' characters,
            # and an unbounded split would raise ValueError on unpacking.
            k, v = l.split('=', 1)
            d[k.strip()] = v.strip()
    return d
|
||||
|
||||
|
||||
def pwgen(length=None):
    """Generate a random password.

    :param int length: Desired password length; when omitted, a random
        length between 35 and 44 characters is chosen.
    :returns: str password drawn from letters and digits, excluding
        easily-confused characters and vowels.
    """
    if length is None:
        # The length itself is not secret, so the default (weak) PRNG
        # is acceptable for choosing it.
        length = random.choice(range(35, 45))
    allowed = [
        ch for ch in (string.ascii_letters + string.digits)
        if ch not in 'l0QD1vAEIOUaeiou']
    # The password characters come from a crypto-friendly PRNG
    # (e.g. /dev/urandom via random.SystemRandom).
    secure_rng = random.SystemRandom()
    return ''.join(secure_rng.choice(allowed) for _ in range(length))
|
||||
|
||||
|
||||
def list_nics(nic_type):
    '''Return a list of nics of given type(s)

    :param nic_type: A single interface-name prefix (str) or an iterable of
        prefixes, e.g. 'eth' or ['eth', 'bond'].
    :returns: list of matching interface names, as parsed from
        ``ip addr show label <prefix>*`` output.
    '''
    # Accept either a single prefix or a collection of prefixes.
    if isinstance(nic_type, six.string_types):
        int_types = [nic_type]
    else:
        int_types = nic_type
    interfaces = []
    for int_type in int_types:
        cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
        ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
        # Drop empty lines (e.g. the trailing newline's empty split result).
        ip_output = (line for line in ip_output if line)
        for line in ip_output:
            if line.split()[1].startswith(int_type):
                # VLAN sub-interfaces appear as e.g. "eth0.10@eth0"; capture
                # just the "eth0.10" part.
                matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line)
                if matched:
                    interface = matched.groups()[0]
                else:
                    # Plain interface line: second field is "name:".
                    interface = line.split()[1].replace(":", "")
                interfaces.append(interface)

    return interfaces
|
||||
|
||||
|
||||
def set_nic_mtu(nic, mtu):
    '''Set MTU on a network interface

    :param str nic: Interface name.
    :param mtu: MTU value, passed straight to ``ip link set``.
    :raises subprocess.CalledProcessError: If the ``ip`` command fails.
    '''
    subprocess.check_call(['ip', 'link', 'set', nic, 'mtu', mtu])
|
||||
|
||||
|
||||
def get_nic_mtu(nic):
    """Return the MTU of *nic* as a string, or '' if it cannot be parsed.

    Parses the ``mtu <value>`` token pair from ``ip addr show`` output.
    """
    output = subprocess.check_output(['ip', 'addr', 'show', nic]).decode('UTF-8')
    mtu = ""
    for line in output.split('\n'):
        words = line.split()
        if 'mtu' in words:
            # The MTU value immediately follows the 'mtu' token.
            mtu = words[words.index("mtu") + 1]
    return mtu
|
||||
|
||||
|
||||
def get_nic_hwaddr(nic):
    """Return the hardware (MAC) address of *nic*, or '' if not found.

    Parses the token following ``link/ether`` in ``ip -o -0 addr show``
    output.
    """
    output = subprocess.check_output(
        ['ip', '-o', '-0', 'addr', 'show', nic]).decode('UTF-8')
    words = output.split()
    if 'link/ether' in words:
        return words[words.index('link/ether') + 1]
    return ""
|
||||
|
||||
|
||||
def cmp_pkgrevno(package, revno, pkgcache=None):
    '''Compare supplied revno with the revno of the installed package

    * positive => Installed revno is greater than supplied arg
    * 0 => Installed revno is the same as supplied arg
    * negative => Installed revno is less than supplied arg

    (apt_pkg.version_compare returns an arbitrary positive/negative int,
    not necessarily +1/-1.)

    This function imports apt_cache function from charmhelpers.fetch if
    the pkgcache argument is None. Be sure to add charmhelpers.fetch if
    you call this function, or pass an apt_pkg.Cache() instance.

    :param str package: Name of the installed package to inspect.
    :param str revno: Version string to compare against.
    :param pkgcache: Optional pre-built apt_pkg.Cache-like mapping.
    :raises KeyError: If *package* is not present in the cache.
    '''
    # Imported lazily: apt_pkg is only available on Debian-based systems.
    import apt_pkg
    if not pkgcache:
        from charmhelpers.fetch import apt_cache
        pkgcache = apt_cache()
    pkg = pkgcache[package]
    return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
|
||||
|
||||
|
||||
@contextmanager
def chdir(d):
    """Context manager: change the working directory to *d* for the duration
    of the ``with`` block, restoring the original directory afterwards.

    Yields None (the return value of ``os.chdir``).
    """
    previous = os.getcwd()
    try:
        yield os.chdir(d)
    finally:
        # Always restore, even if the body raised.
        os.chdir(previous)
|
||||
|
||||
|
||||
def chownr(path, owner, group, follow_links=True):
    """Recursively change ownership of everything below *path*.

    Note: the entries under *path* are chowned; *path* itself is not.

    :param str path: Directory tree root.
    :param str owner: User name (resolved via pwd).
    :param str group: Group name (resolved via grp).
    :param bool follow_links: When True use os.chown (follows symlinks);
        when False use os.lchown (operates on the link itself).
    :raises KeyError: If *owner* or *group* does not exist.
    """
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid
    chown = os.chown if follow_links else os.lchown

    for root, dirs, files in os.walk(path):
        for name in dirs + files:
            full = os.path.join(root, name)
            # Skip dangling symlinks: chown-following them would raise.
            if os.path.lexists(full) and not os.path.exists(full):
                continue
            chown(full, uid, gid)
|
||||
|
||||
|
||||
def lchownr(path, owner, group):
    """Recursively change ownership under *path* without following symlinks.

    Convenience wrapper for :func:`chownr` with ``follow_links=False``,
    i.e. symlinks themselves are chowned (os.lchown) rather than their
    targets.
    """
    chownr(path, owner, group, follow_links=False)
|
|
@ -1,48 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from .bus import set_state # noqa
|
||||
from .bus import remove_state # noqa
|
||||
from .relations import scopes # noqa
|
||||
from .relations import RelationBase # noqa
|
||||
from .decorators import hook # noqa
|
||||
from .decorators import when # noqa
|
||||
from .decorators import when_not # noqa
|
||||
from .decorators import not_unless # noqa
|
||||
from .decorators import only_once # noqa
|
||||
|
||||
from . import bus
|
||||
from charmhelpers.core import hookenv
|
||||
from charmhelpers.core import unitdata
|
||||
|
||||
|
||||
def main(relation_name=None):
    """
    This is the main entry point for the reactive framework.  It calls
    :func:`~bus.discover` to find and load all reactive handlers (e.g.,
    :func:`@when <decorators.when>` decorated blocks), and then
    :func:`~bus.dispatch` to trigger hook and state handlers until the
    state settles out.  Finally,
    :meth:`unitdata.kv().flush <charmhelpers.core.unitdata.Storage.flush>`
    is called to persist the state.

    :param str relation_name: Optional name of the relation which is being handled.
    """
    hookenv.log('Reactive main running for hook %s' % hookenv.hook_name(), level=hookenv.INFO)
    bus.discover()
    bus.dispatch()
    # Flush only if a kv store was actually instantiated during dispatch.
    # NOTE(review): this reaches into unitdata's private _KV singleton to
    # avoid creating a store just to flush it — confirm against unitdata's
    # implementation before relying on this elsewhere.
    if unitdata._KV:
        unitdata._KV.flush()
|
|
@ -1,459 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import subprocess
|
||||
from itertools import chain
|
||||
from functools import partial
|
||||
|
||||
from six.moves import range
|
||||
|
||||
from charmhelpers.core import hookenv
|
||||
from charmhelpers.core import unitdata
|
||||
from charmhelpers.cli import cmdline
|
||||
|
||||
try:
    # Python 3.
    # NOTE: SourceFileLoader lives in importlib.machinery, NOT in the
    # importlib package itself; the previous ``from importlib import
    # SourceFileLoader`` always raised ImportError, silently forcing the
    # Python 2 ``imp`` fallback (which no longer exists on Python >= 3.12).
    # Use importlib.util, which also avoids the long-deprecated
    # Loader.load_module() API.
    from importlib.util import spec_from_file_location, module_from_spec

    def load_source(modname, realpath):
        """Import the Python source file at *realpath* as module *modname*.

        Returns the new module object; the caller is responsible for
        registering it in ``sys.modules`` if desired.
        """
        spec = spec_from_file_location(modname, realpath)
        module = module_from_spec(spec)
        spec.loader.exec_module(module)
        return module
except ImportError:
    # Python 2
    from imp import load_source
|
||||
|
||||
|
||||
_log_opts = os.environ.get('REACTIVE_LOG_OPTS', '').split(',')
|
||||
LOG_OPTS = {
|
||||
'register': 'register' in _log_opts,
|
||||
}
|
||||
|
||||
|
||||
@cmdline.subcommand()
@cmdline.no_output
def set_state(state, value=None):
    """Set the given state as active, optionally associating with a relation

    :param str state: Name of the state to activate.
    :param value: Optional value to store alongside the state.
    """
    old_states = get_states()
    unitdata.kv().update({state: value}, prefix='reactive.states.')
    # Only notify watchers on a genuine inactive->active transition,
    # not when re-setting an already-active state.
    if state not in old_states:
        StateWatch.change(state)
|
||||
|
||||
|
||||
@cmdline.subcommand()
@cmdline.no_output
def remove_state(state):
    """Remove / deactivate a state

    :param str state: Name of the state to deactivate.
    """
    old_states = get_states()
    unitdata.kv().unset('reactive.states.%s' % state)
    # Flag the removal so dispatch() re-tests the remaining matched
    # handlers before invoking them.
    unitdata.kv().set('reactive.dispatch.removed_state', True)
    # Only notify watchers on a genuine active->inactive transition.
    if state in old_states:
        StateWatch.change(state)
|
||||
|
||||
|
||||
@cmdline.subcommand()
def get_states():
    """Return a mapping of all active states to their values"""
    # getrange returns None when nothing matches; normalize to {}.
    return unitdata.kv().getrange('reactive.states.', strip=True) or {}
|
||||
|
||||
|
||||
class StateWatch(object):
    """Tracks state changes across dispatch iterations.

    A two-phase record is kept in the unit's kv store under ``key``:
    changes accumulate in ``pending`` via :meth:`change` and become
    visible in ``changes`` when :meth:`commit` is called at the end of
    an iteration.
    """
    key = 'reactive.state_watch'

    @classmethod
    def _store(cls):
        # Indirection point for the kv store (eases testing).
        return unitdata.kv()

    @classmethod
    def _get(cls):
        # Default record used before any state change has been seen.
        return cls._store().get(cls.key, {
            'iteration': 0,
            'changes': [],
            'pending': [],
        })

    @classmethod
    def _set(cls, data):
        cls._store().set(cls.key, data)

    @classmethod
    def reset(cls):
        """Discard all watch bookkeeping."""
        cls._store().unset(cls.key)

    @classmethod
    def iteration(cls, i):
        """Record the current dispatch iteration number."""
        data = cls._get()
        data['iteration'] = i
        cls._set(data)

    @classmethod
    def watch(cls, watcher, states):
        """Return True if the watcher should run: always on the first
        iteration, otherwise only when one of *states* changed in the
        previous iteration.

        NOTE(review): *watcher* is currently unused by this implementation.
        """
        data = cls._get()
        iteration = data['iteration']
        changed = bool(set(states) & set(data['changes']))
        return iteration == 0 or changed

    @classmethod
    def change(cls, state):
        """Queue *state* as changed; visible to watchers after commit()."""
        data = cls._get()
        data['pending'].append(state)
        cls._set(data)

    @classmethod
    def commit(cls):
        """Promote pending changes to the visible change set."""
        data = cls._get()
        data['changes'] = data['pending']
        data['pending'] = []
        cls._set(data)
|
||||
|
||||
|
||||
def get_state(state, default=None):
    """Return the value associated with an active state, or *default*
    (None unless overridden) if the state is not active."""
    return unitdata.kv().get('reactive.states.%s' % state, default)
|
||||
|
||||
|
||||
@cmdline.subcommand()
@cmdline.test_command
def all_states(*desired_states):
    """Assert that all desired_states are active

    :returns: True only if every given state is currently active.
    """
    active_states = get_states()
    return all(state in active_states for state in desired_states)
|
||||
|
||||
|
||||
@cmdline.subcommand()
@cmdline.test_command
def any_states(*desired_states):
    """Assert that any of the desired_states are active

    :returns: True if at least one given state is currently active.
    """
    active_states = get_states()
    return any(state in active_states for state in desired_states)
|
||||
|
||||
|
||||
@cmdline.subcommand()
@cmdline.test_command
def any_hook(*hook_patterns):
    """
    Assert that the currently executing hook matches one of the given patterns.

    Each pattern will match one or more hooks, and can use the following
    special syntax:

    * ``db-relation-{joined,changed}`` can be used to match multiple hooks
      (in this case, ``db-relation-joined`` and ``db-relation-changed``).
    * ``{provides:mysql}-relation-joined`` can be used to match a relation
      hook by the role and interface instead of the relation name.  The role
      must be one of ``provides``, ``requires``, or ``peer``.
    * The previous two can be combined, of course: ``{provides:mysql}-relation-{joined,changed}``
    """
    current_hook = hookenv.hook_name()

    # expand {role:interface} patterns into concrete relation names first,
    # since their contents would otherwise also match the {A,B} pattern
    i_pat = re.compile(r'{([^:}]+):([^}]+)}')
    hook_patterns = _expand_replacements(i_pat, hookenv.role_and_interface_to_relations, hook_patterns)

    # expand {A,B,C,...} alternation patterns
    c_pat = re.compile(r'{((?:[^:,}]+,?)+)}')
    hook_patterns = _expand_replacements(c_pat, lambda v: v.split(','), hook_patterns)

    # after expansion, patterns are literal hook names
    return current_hook in hook_patterns
|
||||
|
||||
|
||||
def _expand_replacements(pat, subf, values):
|
||||
while any(pat.search(r) for r in values):
|
||||
new_values = []
|
||||
for value in values:
|
||||
m = pat.search(value)
|
||||
if not m:
|
||||
new_values.append(value)
|
||||
continue
|
||||
whole_match = m.group(0)
|
||||
selected_groups = m.groups()
|
||||
for replacement in subf(*selected_groups):
|
||||
# have to replace one match at a time, or we'll lose combinations
|
||||
# e.g., '{A,B}{A,B}' would turn to ['AA', 'BB'] instead of
|
||||
# ['A{A,B}', 'B{A,B}'] -> ['AA', 'AB', 'BA', 'BB']
|
||||
new_values.append(value.replace(whole_match, replacement, 1))
|
||||
values = new_values
|
||||
return values
|
||||
|
||||
|
||||
def _action_id(action):
|
||||
return "%s:%s:%s" % (action.__code__.co_filename,
|
||||
action.__code__.co_firstlineno,
|
||||
action.__code__.co_name)
|
||||
|
||||
|
||||
def _short_action_id(action):
    """Like :func:`_action_id`, but with the filename shown relative to the
    charm directory (for more readable log output)."""
    code = action.__code__
    filepath = os.path.relpath(code.co_filename, hookenv.charm_dir())
    return "%s:%s:%s" % (filepath,
                         code.co_firstlineno,
                         code.co_name)
|
||||
|
||||
|
||||
class Handler(object):
    """
    Class representing a reactive state handler.

    Handlers are registered in the class-level ``_HANDLERS`` registry,
    keyed by the action's :func:`_action_id`.
    """
    _HANDLERS = {}

    @classmethod
    def get(cls, action):
        """
        Get or register a handler for the given action.

        :param func action: Callback that is called when invoking the Handler
        :returns: The (possibly newly created) Handler for *action*.
        """
        action_id = _action_id(action)
        if action_id not in cls._HANDLERS:
            if LOG_OPTS['register']:
                hookenv.log('Registering reactive handler for %s' % _short_action_id(action), level=hookenv.DEBUG)
            cls._HANDLERS[action_id] = cls(action)
        return cls._HANDLERS[action_id]

    @classmethod
    def get_handlers(cls):
        """
        Return all registered handlers.
        """
        return cls._HANDLERS.values()

    @classmethod
    def clear(cls):
        """
        Clear all registered handlers.
        """
        cls._HANDLERS = {}

    def __init__(self, action):
        """
        Create a new Handler.

        :param func action: Callback that is called when invoking the Handler
        """
        self._action_id = _short_action_id(action)
        self._action = action
        self._args = []
        self._predicates = []

    def id(self):
        # Short, human-readable identifier used in log messages.
        return self._action_id

    def add_args(self, args):
        """
        Add arguments to be passed to the action when invoked.

        :param args: Any sequence or iterable, which will be lazily evaluated
            to provide args.  Subsequent calls to :meth:`add_args` can be used
            to add additional arguments.
        """
        self._args.append(args)

    def add_predicate(self, predicate):
        """
        Add a new predicate callback to this handler.
        """
        _predicate = predicate
        if isinstance(predicate, partial):
            # Render partials readably for the registration log line.
            _predicate = 'partial(%s, %s, %s)' % (predicate.func, predicate.args, predicate.keywords)
        if LOG_OPTS['register']:
            hookenv.log('  Adding predicate for %s: %s' % (self.id(), _predicate), level=hookenv.DEBUG)
        self._predicates.append(predicate)

    def test(self):
        """
        Check the predicate(s) and return True if this handler should be invoked.
        """
        # Vacuously True when no predicates were registered.
        return all(predicate() for predicate in self._predicates)

    def _get_args(self):
        """
        Lazily evaluate the args.
        """
        # Flatten all registered arg iterables into one positional list.
        return list(chain.from_iterable(self._args))

    def invoke(self):
        """
        Invoke this handler.
        """
        args = self._get_args()
        self._action(*args)
|
||||
|
||||
|
||||
class ExternalHandler(Handler):
    """
    A variant Handler for external executable actions (such as bash scripts).

    External handlers must adhere to the following protocol:

    * The handler can be any executable

    * When invoked with the ``--test`` command-line flag, it should exit with
      an exit code of zero to indicate that the handler should be invoked, and
      a non-zero exit code to indicate that it need not be invoked.  It can
      also provide a line of output to be passed to the ``--invoke`` call, e.g.,
      to indicate which sub-handlers should be invoked.  The handler should
      **not** perform its action when given this flag.

    * When invoked with the ``--invoke`` command-line flag (which will be
      followed by any output returned by the ``--test`` call), the handler
      should perform its action(s).
    """
    @classmethod
    def register(cls, filepath):
        """Get or register an external handler, keyed by its file path."""
        if filepath not in Handler._HANDLERS:
            _filepath = os.path.relpath(filepath, hookenv.charm_dir())
            if LOG_OPTS['register']:
                hookenv.log('Registering external reactive handler for %s' % _filepath, level=hookenv.DEBUG)
            Handler._HANDLERS[filepath] = cls(filepath)
        return Handler._HANDLERS[filepath]

    def __init__(self, filepath):
        # Deliberately does not call Handler.__init__: external handlers
        # implement test/invoke via subprocess rather than predicates/args.
        self._filepath = filepath
        # Output captured from the --test call, replayed to --invoke.
        # NOTE(review): Popen.communicate returns bytes on Python 3 while
        # this initializes to str — confirm downstream handling.
        self._test_output = ''

    def id(self):
        """Identifier for log messages: relative path plus --test output."""
        _filepath = os.path.relpath(self._filepath, hookenv.charm_dir())
        return '%s "%s"' % (_filepath, self._test_output)

    def test(self):
        """
        Call the external handler to test whether it should be invoked.
        """
        # flush to ensure external process can see states as they currently
        # are, and write states (flush releases lock)
        unitdata.kv().flush()
        proc = subprocess.Popen([self._filepath, '--test'], stdout=subprocess.PIPE, env=os.environ)
        self._test_output, _ = proc.communicate()
        return proc.returncode == 0

    def invoke(self):
        """
        Call the external handler to be invoked.
        """
        # flush to ensure external process can see states as they currently
        # are, and write states (flush releases lock)
        unitdata.kv().flush()
        subprocess.check_call([self._filepath, '--invoke', self._test_output], env=os.environ)
|
||||
|
||||
|
||||
def dispatch():
    """
    Dispatch registered handlers.

    Handlers are dispatched according to the following rules:

    * Handlers are repeatedly tested and invoked in iterations, until the system
      settles into quiescence (that is, until no new handlers match to be invoked).

    * In the first iteration, :func:`@hook <charmhelpers.core.reactive.decorators.hook>`
      and :func:`@action <charmhelpers.core.reactive.decorators.action>` handlers will
      be invoked, if they match.

    * In subsequent iterations, other handlers are invoked, if they match.

    * Added states will not trigger new handlers until the next iteration,
      to ensure that chained states are invoked in a predictable order.

    * Removed states will cause the current set of matched handlers to be
      re-tested, to ensure that no handler is invoked after its matching
      state has been removed.

    * Other than the guarantees mentioned above, the order in which matching
      handlers are invoked is undefined.

    * States are preserved between hook and action invocations, and all matching
      handlers are re-invoked for every hook and action.  There are
      :doc:`decorators <charmhelpers.core.reactive.decorators>` and
      :doc:`helpers <charmhelpers.core.reactive.helpers>`
      to prevent unnecessary reinvocations, such as
      :func:`~charmhelpers.core.reactive.decorators.only_once`.
    """
    StateWatch.reset()

    def _test(to_test):
        # Keep only handlers whose predicates currently pass.
        return list(filter(lambda h: h.test(), to_test))

    def _invoke(to_invoke):
        # Invoke handlers, restarting the pass whenever a state removal is
        # observed so stale handlers are filtered out before running.
        while to_invoke:
            unitdata.kv().set('reactive.dispatch.removed_state', False)
            for handler in list(to_invoke):
                to_invoke.remove(handler)
                hookenv.log('Invoking reactive handler: %s' % handler.id(), level=hookenv.INFO)
                handler.invoke()
                if unitdata.kv().get('reactive.dispatch.removed_state'):
                    # re-test remaining handlers
                    to_invoke = _test(to_invoke)
                    break
            StateWatch.commit()

    # Phase 1: hook-matched handlers only (see _hook in helpers).
    unitdata.kv().set('reactive.dispatch.phase', 'hooks')
    hook_handlers = _test(Handler.get_handlers())
    _invoke(hook_handlers)

    # Phase 2: state-matched handlers, iterated to quiescence.
    # The range(100) bound is a safety cap against non-settling state loops.
    unitdata.kv().set('reactive.dispatch.phase', 'other')
    for i in range(100):
        StateWatch.iteration(i)
        other_handlers = _test(Handler.get_handlers())
        if not other_handlers:
            break
        _invoke(other_handlers)

    StateWatch.reset()
|
||||
|
||||
|
||||
def discover():
    """
    Discover handlers based on convention.

    Handlers will be loaded from the following directories and their
    subdirectories:

    * ``$CHARM_DIR/reactive/``
    * ``$CHARM_DIR/hooks/reactive/``
    * ``$CHARM_DIR/hooks/relations/``

    They can be Python files, in which case they will be imported and
    decorated functions registered.  Or they can be executables, in which
    case they must adhere to the :class:`ExternalHandler` protocol.
    """
    for search_dir in ('reactive', 'hooks/reactive', 'hooks/relations'):
        search_path = os.path.join(hookenv.charm_dir(), search_dir)
        # os.walk on a missing directory simply yields nothing.
        for dirpath, _dirnames, filenames in os.walk(search_path):
            for filename in filenames:
                _register_handlers_from_file(os.path.join(dirpath, filename))
|
||||
|
||||
|
||||
def _load_module(filepath):
    """Import the Python file at *filepath*, reusing an existing module if
    one was already loaded from the same real path.

    :returns: The (existing or newly loaded) module object.
    """
    realpath = os.path.realpath(filepath)
    for module in sys.modules.values():
        if not hasattr(module, '__file__'):
            continue  # ignore builtins
        # Compare against the .py source even if the module was loaded
        # from a compiled .pyc file.
        modpath = os.path.realpath(re.sub(r'\.pyc$', '.py', module.__file__))
        if realpath == modpath:
            return module
    else:
        # for/else: the loop exits via return on a match, so this branch
        # runs exactly when no already-loaded module matched.
        # Derive a sys.modules-safe name from the path (no dots/separators).
        modname = realpath.replace('.', '_').replace(os.sep, '_')
        sys.modules[modname] = load_source(modname, realpath)
        return sys.modules[modname]
|
||||
|
||||
|
||||
def _register_handlers_from_file(filepath):
    """Register handlers found in a single discovered file.

    Python files are imported (their decorators self-register handlers);
    other executable files are registered as :class:`ExternalHandler`.
    Non-Python, non-executable files are ignored.
    """
    if filepath.endswith('.py'):
        _load_module(filepath)
    elif os.access(filepath, os.X_OK):
        ExternalHandler.register(filepath)
|
|
@ -1,158 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from six.moves import filter, map
|
||||
from functools import wraps, partial
|
||||
|
||||
from charmhelpers.core import hookenv
|
||||
from charmhelpers.core.reactive.bus import Handler
|
||||
from charmhelpers.core.reactive.bus import get_states
|
||||
from charmhelpers.core.reactive.bus import _action_id
|
||||
from charmhelpers.core.reactive.relations import RelationBase
|
||||
from charmhelpers.core.reactive.helpers import _hook
|
||||
from charmhelpers.core.reactive.helpers import _when
|
||||
from charmhelpers.core.reactive.helpers import any_file_changed
|
||||
from charmhelpers.core.reactive.helpers import was_invoked
|
||||
from charmhelpers.core.reactive.helpers import mark_invoked
|
||||
|
||||
|
||||
def hook(*hook_patterns):
    """
    Register the decorated function to run when the current hook matches any of
    the ``hook_patterns``.

    The hook patterns can use the ``{interface:...}`` and ``{A,B,...}`` syntax
    supported by :func:`~charmhelpers.core.reactive.bus.any_hook`.

    If the hook is a relation hook, an instance of that relation class will be
    passed in to the decorated function.

    For example, to match any joined or changed hook for the relation providing
    the ``mysql`` interface::

        class MySQLRelation(RelationBase):
            @hook('{provides:mysql}-relation-{joined,changed}')
            def joined_or_changed(self):
                pass
    """
    def _register(action):
        def arg_gen():
            # use a generator to defer calling of hookenv.relation_type, for tests
            rel = RelationBase.from_name(hookenv.relation_type())
            if rel:
                yield rel

        handler = Handler.get(action)
        handler.add_predicate(partial(_hook, hook_patterns))
        handler.add_args(arg_gen())
        # Return the action unchanged so it remains directly callable.
        return action
    return _register
|
||||
|
||||
|
||||
def when(*desired_states):
    """
    Register the decorated function to run when all ``desired_states`` are active.

    This decorator will pass zero or more relation instances to the handler, if
    any of the states are associated with relations.  If so, they will be passed
    in in the same order that the states are given to the decorator.

    Note that handlers whose conditions match are triggered at least once per
    hook invocation.
    """
    def _register(action):
        handler = Handler.get(action)
        # invert=False: predicate passes when all states are active.
        handler.add_predicate(partial(_when, _action_id(action), desired_states, False))
        # Pass relation instances (if any) for states backed by relations;
        # filter(None, ...) drops states with no associated relation.
        handler.add_args(filter(None, map(RelationBase.from_state, desired_states)))
        return action
    return _register
|
||||
|
||||
|
||||
def when_not(*desired_states):
    """
    Register the decorated function to run when **not** all desired_states are active.

    This decorator will never cause arguments to be passed to the handler.

    Note that handlers whose conditions match are triggered at least once per
    hook invocation.
    """
    def _register(action):
        handler = Handler.get(action)
        # invert=True: predicate passes when the states are NOT all active.
        handler.add_predicate(partial(_when, _action_id(action), desired_states, True))
        return action
    return _register
|
||||
|
||||
|
||||
def when_file_changed(*filenames, **kwargs):
    """
    Register the decorated function to run when one or more files have changed.

    :param list filenames: The names of one or more files to check for changes.
    :param str hash_type: The type of hash to use for determining if a file has
        changed.  Defaults to 'md5'.  Must be given as a kwarg.
    """
    def _register(action):
        handler = Handler.get(action)
        # any_file_changed also records the new hashes as a side effect,
        # so the handler only fires once per actual change.
        handler.add_predicate(partial(any_file_changed, filenames, **kwargs))
        return action
    return _register
|
||||
|
||||
|
||||
def not_unless(*desired_states):
    """
    Assert that the decorated function can only be called if the desired_states
    are active.

    Note that, unlike :func:`when`, this does **not** trigger the decorated
    function if the states match.  If the function is called while any of the
    states are inactive, a warning is logged identifying the call site and the
    missing states — the call still proceeds either way.

    This is primarily for informational purposes and as a guard clause.
    """
    def _decorator(func):
        @wraps(func)
        def _wrapped(*args, **kwargs):
            active_states = get_states()
            missing_states = [state for state in desired_states if state not in active_states]
            if missing_states:
                # Identify the offending function as file:line:name.
                func_id = "%s:%s:%s" % (func.__code__.co_filename,
                                        func.__code__.co_firstlineno,
                                        func.__code__.co_name)
                hookenv.log('%s called before state%s: %s' % (
                    func_id,
                    's' if len(missing_states) > 1 else '',
                    ', '.join(missing_states)), hookenv.WARNING)
            # The call proceeds regardless; this is a warning, not a gate.
            return func(*args, **kwargs)
        return _wrapped
    return _decorator
|
||||
|
||||
|
||||
def only_once(action):
    """
    Ensure that the decorated function is only executed the first time it is called.

    This can be used on reactive handlers to ensure that they are only triggered
    once, even if their conditions continue to match on subsequent calls, even
    across hook invocations.
    """
    @wraps(action)
    def wrapper(*args, **kwargs):
        action_id = _action_id(action)
        if not was_invoked(action_id):
            # Run first, mark after: if the action raises, it will be
            # attempted again on the next invocation.
            action(*args, **kwargs)
            mark_invoked(action_id)
    return wrapper
|
|
@ -1,98 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import json
|
||||
import hashlib
|
||||
|
||||
from charmhelpers.core import host
|
||||
from charmhelpers.core import unitdata
|
||||
from charmhelpers.core.reactive.bus import any_hook
|
||||
from charmhelpers.core.reactive.bus import all_states
|
||||
from charmhelpers.core.reactive.bus import StateWatch
|
||||
|
||||
|
||||
def any_file_changed(filenames, hash_type='md5'):
    """
    Check if any of the given files have changed since the last time this
    was called.

    :param list filenames: Names of files to check.
    :param str hash_type: Algorithm to use to check the files.
    :returns: True if at least one file's hash differs from the stored one.
    """
    changed = False
    for filename in filenames:
        old_hash = unitdata.kv().get('reactive.files_changed.%s' % filename)
        new_hash = host.file_hash(filename, hash_type=hash_type)
        if old_hash != new_hash:
            unitdata.kv().set('reactive.files_changed.%s' % filename, new_hash)
            changed = True  # mark as changed, but keep updating hashes
    return changed
|
||||
|
||||
|
||||
def was_invoked(invocation_id):
    """
    Returns whether the given ID has been invoked before, as per :func:`mark_invoked`.

    This is useful for ensuring that a given block only runs one time::

        def foo():
            if was_invoked('foo'):
                return
            do_something()
            mark_invoked('foo')

    This is also available as a decorator at
    :func:`~charmhelpers.core.reactive.decorators.only_once`.
    """
    # Defaults to False for IDs that were never marked.
    return unitdata.kv().get('reactive.invoked.%s' % invocation_id, False)
|
||||
|
||||
|
||||
def mark_invoked(invocation_id):
    """
    Mark the given ID as having been invoked, for use with :func:`was_invoked`.

    The flag persists across hook invocations via the unit's kv store.
    """
    unitdata.kv().set('reactive.invoked.%s' % invocation_id, True)
|
||||
|
||||
|
||||
def data_changed(data_id, data, hash_type='md5'):
    """
    Check if the given set of data has changed since the previous call.

    This works by hashing the JSON-serialization of the data.  Note that,
    while the data will be serialized using ``sort_keys=True``, some types
    of data structures, such as sets, may lead to false positives.

    :param str data_id: Unique identifier for this set of data.
    :param data: JSON-serializable data.
    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`.
    :returns: True on the first call for *data_id* or when the hash differs.
    """
    key = 'reactive.data_changed.%s' % data_id
    alg = getattr(hashlib, hash_type)
    serialized = json.dumps(data, sort_keys=True).encode('utf8')
    old_hash = unitdata.kv().get(key)
    new_hash = alg(serialized).hexdigest()
    # Always record the latest hash, so repeated calls settle to False.
    unitdata.kv().set(key, new_hash)
    return old_hash != new_hash
|
||||
|
||||
|
||||
def _hook(hook_patterns):
    """Predicate for hook-based handlers: match only during the 'hooks' phase."""
    if unitdata.kv().get('reactive.dispatch.phase') != 'hooks':
        return False
    return any_hook(*hook_patterns)
|
||||
|
||||
|
||||
def _when(handler_id, states, invert):
    # Predicate for state-based (@when / @when_not) handlers: only true
    # during the non-hook ("other") dispatch phase.
    dispatch_phase = unitdata.kv().get('reactive.dispatch.phase')
    # NOTE(review): the order of this `and` chain appears significant --
    # StateWatch.watch() is presumably side-effecting (registering the
    # handler's watched states) and is deliberately short-circuited out
    # during the 'hooks' phase; confirm before reordering. The final XOR
    # with `invert` flips the condition for the @when_not variant.
    return dispatch_phase == 'other' and StateWatch.watch(handler_id, states) and (all_states(*states) ^ invert)
|
|
@ -1,578 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import os
|
||||
from inspect import isclass
|
||||
|
||||
from six import with_metaclass
|
||||
|
||||
from charmhelpers.core import hookenv
|
||||
from charmhelpers.core import unitdata
|
||||
from charmhelpers.cli import cmdline
|
||||
from charmhelpers.core.reactive.bus import get_state
|
||||
from charmhelpers.core.reactive.bus import set_state
|
||||
from charmhelpers.core.reactive.bus import remove_state
|
||||
from charmhelpers.core.reactive.bus import _load_module
|
||||
|
||||
|
||||
# Sentinel value -- presumably used where a service name is expected to mean
# "all services"; confirm against callers (not visible in this module).
ALL = '__ALL_SERVICES__'
|
||||
|
||||
|
||||
class scopes(object):
    """
    These are the recommended scope values for relation implementations.

    To use, simply set the ``scope`` class variable to one of these::

        class MyRelationClient(RelationBase):
            scope = scopes.SERVICE
    """

    # NOTE: elsewhere in this module these constants are compared with `is`
    # (e.g. ``scope is scopes.UNIT``), so always reference them through this
    # class rather than constructing equal string literals.
    GLOBAL = 'global'
    """
    All connected services and units for this relation will share a single
    conversation. The same data will be broadcast to every remote unit, and
    retrieved data will be aggregated across all remote units and is expected
    to either eventually agree or be set by a single leader.
    """

    SERVICE = 'service'
    """
    Each connected service for this relation will have its own conversation.
    The same data will be broadcast to every unit of each service's conversation,
    and data from all units of each service will be aggregated and is expected
    to either eventually agree or be set by a single leader.
    """

    UNIT = 'unit'
    """
    Each connected unit for this relation will have its own conversation. This
    is the default scope. Each unit's data will be retrieved individually, but
    note that due to how Juju works, the same data is still broadcast to all
    units of a single service.
    """
|
||||
|
||||
|
||||
class AutoAccessors(type):
    """
    Metaclass that converts fields referenced by ``auto_accessors`` into
    accessor methods with very basic doc strings.
    """
    def __new__(cls, name, parents, dct):
        # Install one generated accessor per declared field before the
        # class object is actually created.
        for field_name in dct.get('auto_accessors', []):
            accessor = cls._accessor(field_name)
            accessor.__name__ = field_name.replace('-', '_')
            accessor.__module__ = dct.get('__module__')
            accessor.__doc__ = 'Get the %s, if available, or None.' % field_name
            dct[accessor.__name__] = accessor
        return super(AutoAccessors, cls).__new__(cls, name, parents, dct)

    @staticmethod
    def _accessor(field):
        # Close over the raw (possibly dashed) field name; the accessor
        # simply delegates to the instance's get_remote().
        def __accessor(self):
            return self.get_remote(field)
        return __accessor
|
||||
|
||||
|
||||
class RelationBase(with_metaclass(AutoAccessors, object)):
    """
    The base class for all relation implementations.
    """
    # Maps relation_name -> implementation class; populated lazily by
    # from_name() so the filesystem search only happens once per relation.
    _cache = {}

    scope = scopes.UNIT
    """
    Conversation scope for this relation.

    The conversation scope controls how communication with connected units
    is aggregated into related :class:`Conversations <Conversation>`, and
    can be any of the predefined :class:`scopes`, or any arbitrary string.
    Connected units which share the same scope will be considered part of
    the same conversation. Data sent to a conversation is sent to all units
    that are a part of that conversation, and units that are part of a
    conversation are expected to agree on the data that they send, whether
    via eventual consistency or by having a single leader set the data.

    The default scope is :attr:`scopes.UNIT`.
    """

    auto_accessors = []
    """
    Remote field names to be automatically converted into accessors with
    basic documentation.

    These accessors will just call :meth:`get_remote` using the
    :meth:`default conversation <conversation>`. Note that it is highly
    recommended that this be used only with :attr:`scopes.GLOBAL` scope.
    """

    def __init__(self, relation_name, conversations=None):
        # Joining a conversation touches unitdata, so only do it when the
        # caller did not hand us an explicit conversation list.
        self._relation_name = relation_name
        self._conversations = conversations or [Conversation.join(self.scope)]

    @property
    def relation_name(self):
        """
        Name of the relation this instance is handling.
        """
        return self._relation_name

    @classmethod
    def from_state(cls, state):
        """
        Find relation implementation in the current charm, based on the
        name of an active state.

        :return: A Relation instance, or None if the state is not active.
        """
        value = get_state(state)
        if value is None:
            return None
        relation_name = value['relation']
        conversations = Conversation.load(value['conversations'])
        return cls.from_name(relation_name, conversations)

    @classmethod
    def from_name(cls, relation_name, conversations=None):
        """
        Find relation implementation in the current charm, based on the
        ID of the relation.

        :return: A Relation instance, or None
        """
        if relation_name is None:
            return None
        # Fast path: an implementation class already found for this relation.
        relation_class = cls._cache.get(relation_name)
        if relation_class:
            return relation_class(relation_name, conversations)
        role, interface = hookenv.relation_to_role_and_interface(relation_name)
        if role and interface:
            relation_class = cls._find_impl(role, interface)
            if relation_class:
                cls._cache[relation_name] = relation_class
                return relation_class(relation_name, conversations)
        return None

    @classmethod
    def _find_impl(cls, role, interface):
        """
        Find relation implementation based on its role and interface.

        Looks for the first file matching:
        ``$CHARM_DIR/hooks/relations/{interface}/{provides,requires,peer}.py``
        """
        hooks_dir = os.path.join(hookenv.charm_dir(), 'hooks')
        try:
            filepath = os.path.join(hooks_dir, 'relations', interface, role + '.py')
            module = _load_module(filepath)
            return cls._find_subclass(module)
        except ImportError:
            # No implementation module for this interface/role; not an error.
            return None

    @classmethod
    def _find_subclass(cls, module):
        """
        Attempt to find subclass of :class:`RelationBase` in the given module.

        Note: This means strictly subclasses and not :class:`RelationBase` itself.
        This is to prevent picking up :class:`RelationBase` being imported to be
        used as the base class.
        """
        for attr in dir(module):
            candidate = getattr(module, attr)
            if isclass(candidate) and issubclass(candidate, cls) and candidate is not cls:
                return candidate
        return None

    def conversations(self):
        """
        Return a list of the conversations that this relation is currently handling.

        Note that "currently handling" means for the current state or hook context,
        and not all conversations that might be active for this relation for other
        states.
        """
        return list(self._conversations)

    def conversation(self, scope=None):
        """
        Get a single conversation, by scope, that this relation is currently handling.

        If the scope is not given, the correct scope is inferred by the current
        hook execution context. If there is no current hook execution context, it
        is assumed that there is only a single global conversation scope for this
        relation. If this relation's scope is not global and there is no current
        hook execution context, then an error is raised.

        :raises ValueError: if the scope cannot be inferred or no conversation
            matches it.
        """
        if scope is None:
            if self.scope is scopes.UNIT:
                scope = hookenv.remote_unit()
            elif self.scope is scopes.SERVICE:
                # NOTE(review): Conversation.join() uses
                # hookenv.remote_service_name() for the same purpose;
                # confirm hookenv.remote_service() exists / is equivalent.
                scope = hookenv.remote_service()
            else:
                # Custom (or GLOBAL) scope strings are used verbatim.
                scope = self.scope
            if scope is None:
                raise ValueError('Unable to determine default scope: no current hook or global scope')
        # for/else: the else only runs when no conversation matched.
        for conversation in self._conversations:
            if conversation.scope == scope:
                return conversation
        else:
            raise ValueError("Conversation with scope '%s' not found" % scope)

    def set_state(self, state, scope=None):
        """
        Set the state for the :class:`Conversation` with the given scope.

        In Python, this is equivalent to::

            relation.conversation(scope).set_state(state)

        See :meth:`conversation` and :meth:`Conversation.set_state`.
        """
        self.conversation(scope).set_state(state)

    def remove_state(self, state, scope=None):
        """
        Remove the state for the :class:`Conversation` with the given scope.

        In Python, this is equivalent to::

            relation.conversation(scope).remove_state(state)

        See :meth:`conversation` and :meth:`Conversation.remove_state`.
        """
        self.conversation(scope).remove_state(state)

    def set_remote(self, key=None, value=None, data=None, scope=None, **kwdata):
        """
        Set data for the remote end(s) of the :class:`Conversation` with the given scope.

        In Python, this is equivalent to::

            relation.conversation(scope).set_remote(data, scope, **kwdata)

        See :meth:`conversation` and :meth:`Conversation.set_remote`.
        """
        self.conversation(scope).set_remote(key, value, data, **kwdata)

    def get_remote(self, key, default=None, scope=None):
        """
        Get data from the remote end(s) of the :class:`Conversation` with the given scope.

        In Python, this is equivalent to::

            relation.conversation(scope).get_remote(key, default)

        See :meth:`conversation` and :meth:`Conversation.get_remote`.
        """
        return self.conversation(scope).get_remote(key, default)

    def set_local(self, key=None, value=None, data=None, scope=None, **kwdata):
        """
        Locally store some data, namespaced by the current or given :class:`Conversation` scope.

        In Python, this is equivalent to::

            relation.conversation(scope).set_local(data, scope, **kwdata)

        See :meth:`conversation` and :meth:`Conversation.set_local`.
        """
        self.conversation(scope).set_local(key, value, data, **kwdata)

    def get_local(self, key, default=None, scope=None):
        """
        Retrieve some data previously set via :meth:`set_local`.

        In Python, this is equivalent to::

            relation.conversation(scope).get_local(key, default)

        See :meth:`conversation` and :meth:`Conversation.get_local`.
        """
        return self.conversation(scope).get_local(key, default)
|
||||
|
||||
|
||||
class Conversation(object):
    """
    Conversations are the persistent, evolving, two-way communication between
    this service and one or more remote services.

    Conversations are not limited to a single Juju hook context. They represent
    the entire set of interactions between the end-points from the time the
    relation is joined until it is departed.

    Conversations evolve over time, moving from one semantic state to the next
    as the communication progresses.

    Conversations may encompass multiple remote services or units. While a
    database client would connect to only a single database, that database will
    likely serve several other services. On the other hand, while the database
    is only concerned about providing a database to each service as a whole, a
    load-balancing proxy must consider each unit of each service individually.

    Conversations use the idea of :class:`scope` to determine how units and
    services are grouped together.
    """
    def __init__(self, relation_name=None, units=None, scope=None):
        # All three default to values from the active hook context.
        self.relation_name = relation_name or hookenv.relation_type()
        self.units = set(units or [hookenv.remote_unit()])
        self.scope = scope or hookenv.remote_unit()

    @property
    def key(self):
        """
        The key under which this conversation will be stored.
        """
        return 'reactive.conversations.%s.%s' % (self.relation_name, self.scope)

    @property
    @hookenv.cached
    def relation_ids(self):
        """
        The set of IDs of the specific relation instances that this conversation
        is communicating with.
        """
        relation_ids = []
        # A unit name is "<service>/<number>", so the part before the slash
        # is the service name.
        services = set(unit.split('/')[0] for unit in self.units)
        for relation_id in hookenv.relation_ids(self.relation_name):
            if hookenv.remote_service_name(relation_id) in services:
                relation_ids.append(relation_id)
        return relation_ids

    @classmethod
    def join(cls, scope):
        """
        Get or create a conversation for the given scope and active hook context.

        The current remote unit for the active hook context will be added to
        the conversation.

        Note: This uses :mod:`charmhelpers.core.unitdata` and requires that
        :meth:`~charmhelpers.core.unitdata.Storage.flush` be called.
        """
        relation_name = hookenv.relation_type()
        unit = hookenv.remote_unit()
        service = hookenv.remote_service_name()
        # Resolve the symbolic UNIT/SERVICE scopes to concrete names;
        # GLOBAL (or any custom string) is used as-is.
        if scope is scopes.UNIT:
            scope = unit
        elif scope is scopes.SERVICE:
            scope = service
        key = 'reactive.conversations.%s.%s' % (relation_name, scope)
        conversation = cls.deserialize(unitdata.kv().get(key, {'scope': scope}))
        conversation.units.add(unit)
        unitdata.kv().set(key, cls.serialize(conversation))
        return conversation

    def depart(self):
        """
        Remove the current remote unit, for the active hook context, from
        this conversation. This should be called from a `-departed` hook.

        TODO: Need to figure out a way to have this called implicitly, to
        ensure cleaning up of conversations that are no longer needed.
        """
        unit = hookenv.remote_unit()
        # NOTE(review): set.remove() raises KeyError if the unit was never
        # part of this conversation -- presumably depart() is only called
        # for joined units; confirm.
        self.units.remove(unit)
        if self.units:
            unitdata.kv().set(self.key, self.serialize(self))
        else:
            # Last unit gone: drop the stored conversation entirely.
            unitdata.kv().unset(self.key)

    @classmethod
    def deserialize(cls, conversation):
        """
        Deserialize a :meth:`serialized <serialize>` conversation.
        """
        return cls(**conversation)

    @classmethod
    def serialize(cls, conversation):
        """
        Serialize a conversation instance for storage.
        """
        return {
            'relation_name': conversation.relation_name,
            'units': list(conversation.units),
            'scope': conversation.scope,
        }

    @classmethod
    def load(cls, keys):
        """
        Load a set of conversations by their keys.

        Keys with no stored data are silently skipped.
        """
        conversations = []
        for key in keys:
            conversation = unitdata.kv().get(key)
            if conversation:
                conversations.append(cls.deserialize(conversation))
        return conversations

    def set_state(self, state):
        """
        Activate and put this conversation into the given state.

        The relation name will be interpolated in the state name, and it is
        recommended that it be included to avoid conflicts with states from
        other relations. For example::

            conversation.set_state('{relation_name}.state')

        If called from a conversation handling the relation "foo", this will
        activate the "foo.state" state, and will add this conversation to
        that state.

        Note: This uses :mod:`charmhelpers.core.unitdata` and requires that
        :meth:`~charmhelpers.core.unitdata.Storage.flush` be called.
        """
        state = state.format(relation_name=self.relation_name)
        value = get_state(state, {
            'relation': self.relation_name,
            'conversations': [],
        })
        value['conversations'].append(self.key)
        set_state(state, value)

    def remove_state(self, state):
        """
        Remove this conversation from the given state, and potentially
        deactivate the state if no more conversations are in it.

        The relation name will be interpolated in the state name, and it is
        recommended that it be included to avoid conflicts with states from
        other relations. For example::

            conversation.remove_state('{relation_name}.state')

        If called from a conversation handling the relation "foo", this will
        remove the conversation from the "foo.state" state, and, if no more
        conversations are in this the state, will deactivate it.
        """
        state = state.format(relation_name=self.relation_name)
        value = get_state(state)
        if not value:
            return
        if self.key in value['conversations']:
            value['conversations'].remove(self.key)
        if value['conversations']:
            set_state(state, value)
        else:
            remove_state(state)

    def set_remote(self, key=None, value=None, data=None, **kwdata):
        """
        Set data for the remote end(s) of this conversation.

        Data can be passed in either as a single dict, or as key-word args.

        Note that, in Juju, setting relation data is inherently service scoped.
        That is, if the conversation only includes a single unit, the data will
        still be set for that unit's entire service.

        However, if this conversation's scope encompasses multiple services,
        the data will be set for all of those services.

        :param str key: The name of a field to set.
        :param value: A value to set.
        :param dict data: A mapping of keys to values.
        :param \*\*kwdata: A mapping of keys to values, as keyword arguments.
        """
        if data is None:
            data = {}
        if key is not None:
            data[key] = value
        data.update(kwdata)
        if not data:
            # Nothing to send; avoid a pointless relation-set call.
            return
        for relation_id in self.relation_ids:
            hookenv.relation_set(relation_id, data)

    def get_remote(self, key, default=None):
        """
        Get a value from the remote end(s) of this conversation.

        Note that if a conversation's scope encompasses multiple units, then
        those units are expected to agree on their data, whether that is through
        relying on a single leader to set the data or by all units eventually
        converging to identical data. Thus, this method returns the first
        value that it finds set by any of its units.
        """
        for relation_id in self.relation_ids:
            for unit in hookenv.related_units(relation_id):
                if unit not in self.units:
                    continue
                value = hookenv.relation_get(key, unit, relation_id)
                # Truthiness check: falsy values (e.g. empty string) are
                # treated as unset and the search continues.
                if value:
                    return value
        return default

    def set_local(self, key=None, value=None, data=None, **kwdata):
        """
        Locally store some data associated with this conversation.

        Data can be passed in either as a single dict, or as key-word args.

        For example, if you need to store the previous value of a remote field
        to determine if it has changed, you can use the following::

            prev = conversation.get_local('field')
            curr = conversation.get_remote('field')
            if prev != curr:
                handle_change(prev, curr)
                conversation.set_local('field', curr)

        Note: This uses :mod:`charmhelpers.core.unitdata` and requires that
        :meth:`~charmhelpers.core.unitdata.Storage.flush` be called.

        :param str key: The name of a field to set.
        :param value: A value to set.
        :param dict data: A mapping of keys to values.
        :param \*\*kwdata: A mapping of keys to values, as keyword arguments.
        """
        if data is None:
            data = {}
        if key is not None:
            data[key] = value
        data.update(kwdata)
        if not data:
            return
        # Namespace local data under this conversation's storage key.
        unitdata.kv().update(data, prefix='%s.%s.' % (self.key, 'local-data'))

    def get_local(self, key, default=None):
        """
        Retrieve some data previously set via :meth:`set_local` for this conversation.
        """
        key = '%s.%s.%s' % (self.key, 'local-data', key)
        return unitdata.kv().get(key, default)
|
||||
|
||||
|
||||
@cmdline.subcommand()
def relation_call(method, relation_name=None, state=None, *args):
    """Invoke a method on the class implementing a relation via the CLI"""
    # Resolve the relation either by its name or by an active state name.
    if relation_name:
        relation, label = RelationBase.from_name(relation_name), relation_name
    elif state:
        relation, label = RelationBase.from_state(state), state
    else:
        raise ValueError('Must specify either relation_name or state')
    if relation is None:
        raise ValueError('Relation not found: %s' % label)
    result = getattr(relation, method)(*args)
    if method == 'conversations':
        # special case for conversations to make them work from CLI
        result = [c.scope for c in result]
    return result
|
|
@ -1,36 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from charmhelpers.cli import cmdline
|
||||
|
||||
|
||||
class RelationBase(object):
    @classmethod
    def from_name(cls, relation_name):
        """
        Find relation implementation in the current charm, based on the
        name of the relation.

        :return: A Relation instance, or None
        """
        # Stub implementation: no lookup is performed yet, so there is
        # never a matching implementation.
        return None
|
||||
|
||||
|
||||
@cmdline.subcommand()
def relation_call(relation_name, method, *args):
    """Invoke a method on the class implementing a relation

    :param str relation_name: Name of the relation to look up.
    :param str method: Name of the method to invoke on the implementation.
    :param \*args: Positional arguments forwarded to the method.
    :raises ValueError: if no implementation exists for the relation.
    """
    relation = RelationBase.from_name(relation_name)
    # from_name() returns None for unknown relations; without this check the
    # getattr() below would raise a confusing AttributeError on NoneType.
    if relation is None:
        raise ValueError('Relation not found: %s' % relation_name)
    return getattr(relation, method)(*args)
|
|
@ -1,18 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from .base import * # NOQA
|
||||
from .helpers import * # NOQA
|
|
@ -1,353 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import os
|
||||
import json
|
||||
from inspect import getargspec
|
||||
from collections import Iterable, OrderedDict
|
||||
|
||||
from charmhelpers.core import host
|
||||
from charmhelpers.core import hookenv
|
||||
|
||||
|
||||
__all__ = ['ServiceManager', 'ManagerCallback',
|
||||
'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
|
||||
'service_restart', 'service_stop']
|
||||
|
||||
|
||||
class ServiceManager(object):
|
||||
def __init__(self, services=None):
|
||||
"""
|
||||
Register a list of services, given their definitions.
|
||||
|
||||
Service definitions are dicts in the following formats (all keys except
|
||||
'service' are optional)::
|
||||
|
||||
{
|
||||
"service": <service name>,
|
||||
"required_data": <list of required data contexts>,
|
||||
"provided_data": <list of provided data contexts>,
|
||||
"data_ready": <one or more callbacks>,
|
||||
"data_lost": <one or more callbacks>,
|
||||
"start": <one or more callbacks>,
|
||||
"stop": <one or more callbacks>,
|
||||
"ports": <list of ports to manage>,
|
||||
}
|
||||
|
||||
The 'required_data' list should contain dicts of required data (or
|
||||
dependency managers that act like dicts and know how to collect the data).
|
||||
Only when all items in the 'required_data' list are populated are the list
|
||||
of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
|
||||
information.
|
||||
|
||||
The 'provided_data' list should contain relation data providers, most likely
|
||||
a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
|
||||
that will indicate a set of data to set on a given relation.
|
||||
|
||||
The 'data_ready' value should be either a single callback, or a list of
|
||||
callbacks, to be called when all items in 'required_data' pass `is_ready()`.
|
||||
Each callback will be called with the service name as the only parameter.
|
||||
After all of the 'data_ready' callbacks are called, the 'start' callbacks
|
||||
are fired.
|
||||
|
||||
The 'data_lost' value should be either a single callback, or a list of
|
||||
callbacks, to be called when a 'required_data' item no longer passes
|
||||
`is_ready()`. Each callback will be called with the service name as the
|
||||
only parameter. After all of the 'data_lost' callbacks are called,
|
||||
the 'stop' callbacks are fired.
|
||||
|
||||
The 'start' value should be either a single callback, or a list of
|
||||
callbacks, to be called when starting the service, after the 'data_ready'
|
||||
callbacks are complete. Each callback will be called with the service
|
||||
name as the only parameter. This defaults to
|
||||
`[host.service_start, services.open_ports]`.
|
||||
|
||||
The 'stop' value should be either a single callback, or a list of
|
||||
callbacks, to be called when stopping the service. If the service is
|
||||
being stopped because it no longer has all of its 'required_data', this
|
||||
will be called after all of the 'data_lost' callbacks are complete.
|
||||
Each callback will be called with the service name as the only parameter.
|
||||
This defaults to `[services.close_ports, host.service_stop]`.
|
||||
|
||||
The 'ports' value should be a list of ports to manage. The default
|
||||
'start' handler will open the ports after the service is started,
|
||||
and the default 'stop' handler will close the ports prior to stopping
|
||||
the service.
|
||||
|
||||
|
||||
Examples:
|
||||
|
||||
The following registers an Upstart service called bingod that depends on
|
||||
a mongodb relation and which runs a custom `db_migrate` function prior to
|
||||
restarting the service, and a Runit service called spadesd::
|
||||
|
||||
manager = services.ServiceManager([
|
||||
{
|
||||
'service': 'bingod',
|
||||
'ports': [80, 443],
|
||||
'required_data': [MongoRelation(), config(), {'my': 'data'}],
|
||||
'data_ready': [
|
||||
services.template(source='bingod.conf'),
|
||||
services.template(source='bingod.ini',
|
||||
target='/etc/bingod.ini',
|
||||
owner='bingo', perms=0400),
|
||||
],
|
||||
},
|
||||
{
|
||||
'service': 'spadesd',
|
||||
'data_ready': services.template(source='spadesd_run.j2',
|
||||
target='/etc/sv/spadesd/run',
|
||||
perms=0555),
|
||||
'start': runit_start,
|
||||
'stop': runit_stop,
|
||||
},
|
||||
])
|
||||
manager.manage()
|
||||
"""
|
||||
self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
|
||||
self._ready = None
|
||||
self.services = OrderedDict()
|
||||
for service in services or []:
|
||||
service_name = service['service']
|
||||
self.services[service_name] = service
|
||||
|
||||
def manage(self):
|
||||
"""
|
||||
Handle the current hook by doing The Right Thing with the registered services.
|
||||
"""
|
||||
hookenv._run_atstart()
|
||||
try:
|
||||
hook_name = hookenv.hook_name()
|
||||
if hook_name == 'stop':
|
||||
self.stop_services()
|
||||
else:
|
||||
self.reconfigure_services()
|
||||
self.provide_data()
|
||||
except SystemExit as x:
|
||||
if x.code is None or x.code == 0:
|
||||
hookenv._run_atexit()
|
||||
hookenv._run_atexit()
|
||||
|
||||
def provide_data(self):
|
||||
"""
|
||||
Set the relation data for each provider in the ``provided_data`` list.
|
||||
|
||||
A provider must have a `name` attribute, which indicates which relation
|
||||
to set data on, and a `provide_data()` method, which returns a dict of
|
||||
data to set.
|
||||
|
||||
The `provide_data()` method can optionally accept two parameters:
|
||||
|
||||
* ``remote_service`` The name of the remote service that the data will
|
||||
be provided to. The `provide_data()` method will be called once
|
||||
for each connected service (not unit). This allows the method to
|
||||
tailor its data to the given service.
|
||||
* ``service_ready`` Whether or not the service definition had all of
|
||||
its requirements met, and thus the ``data_ready`` callbacks run.
|
||||
|
||||
Note that the ``provided_data`` methods are now called **after** the
|
||||
``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks
|
||||
a chance to generate any data necessary for the providing to the remote
|
||||
services.
|
||||
"""
|
||||
for service_name, service in self.services.items():
|
||||
service_ready = self.is_ready(service_name)
|
||||
for provider in service.get('provided_data', []):
|
||||
for relid in hookenv.relation_ids(provider.name):
|
||||
units = hookenv.related_units(relid)
|
||||
if not units:
|
||||
continue
|
||||
remote_service = units[0].split('/')[0]
|
||||
argspec = getargspec(provider.provide_data)
|
||||
if len(argspec.args) > 1:
|
||||
data = provider.provide_data(remote_service, service_ready)
|
||||
else:
|
||||
data = provider.provide_data()
|
||||
if data:
|
||||
hookenv.relation_set(relid, data)
|
||||
|
||||
    def reconfigure_services(self, *service_names):
        """
        Update all files for one or more registered services, and,
        if ready, optionally restart them.

        If no service names are given, reconfigures all registered services.
        """
        for service_name in service_names or self.services.keys():
            if self.is_ready(service_name):
                # All required data is available: render/update and
                # (re)start, then persist the ready state for future hooks.
                self.fire_event('data_ready', service_name)
                self.fire_event('start', service_name, default=[
                    service_restart,
                    manage_ports])
                self.save_ready(service_name)
            else:
                # Only fire data_lost on a transition from ready to
                # not-ready, so repeated hook runs don't re-signal it.
                if self.was_ready(service_name):
                    self.fire_event('data_lost', service_name)
                self.fire_event('stop', service_name, default=[
                    manage_ports,
                    service_stop])
                self.save_lost(service_name)
def stop_services(self, *service_names):
|
||||
"""
|
||||
Stop one or more registered services, by name.
|
||||
|
||||
If no service names are given, stops all registered services.
|
||||
"""
|
||||
for service_name in service_names or self.services.keys():
|
||||
self.fire_event('stop', service_name, default=[
|
||||
manage_ports,
|
||||
service_stop])
|
||||
|
||||
def get_service(self, service_name):
|
||||
"""
|
||||
Given the name of a registered service, return its service definition.
|
||||
"""
|
||||
service = self.services.get(service_name)
|
||||
if not service:
|
||||
raise KeyError('Service not registered: %s' % service_name)
|
||||
return service
|
||||
|
||||
    def fire_event(self, event_name, service_name, default=None):
        """
        Fire a data_ready, data_lost, start, or stop event on a given service.
        """
        service = self.get_service(service_name)
        # Fall back to the framework-supplied default callbacks when the
        # service definition doesn't define this event.
        callbacks = service.get(event_name, default)
        if not callbacks:
            return
        # A single bare callable is allowed; normalize to a list.
        if not isinstance(callbacks, Iterable):
            callbacks = [callbacks]
        for callback in callbacks:
            if isinstance(callback, ManagerCallback):
                # Manager-aware callbacks receive the full event context.
                callback(self, service_name, event_name)
            else:
                # Plain callables only receive the service name.
                callback(service_name)
def is_ready(self, service_name):
|
||||
"""
|
||||
Determine if a registered service is ready, by checking its 'required_data'.
|
||||
|
||||
A 'required_data' item can be any mapping type, and is considered ready
|
||||
if `bool(item)` evaluates as True.
|
||||
"""
|
||||
service = self.get_service(service_name)
|
||||
reqs = service.get('required_data', [])
|
||||
return all(bool(req) for req in reqs)
|
||||
|
||||
def _load_ready_file(self):
|
||||
if self._ready is not None:
|
||||
return
|
||||
if os.path.exists(self._ready_file):
|
||||
with open(self._ready_file) as fp:
|
||||
self._ready = set(json.load(fp))
|
||||
else:
|
||||
self._ready = set()
|
||||
|
||||
    def _save_ready_file(self):
        # Persist the cached ready-set; a no-op if it was never loaded.
        if self._ready is None:
            return
        with open(self._ready_file, 'w') as fp:
            json.dump(list(self._ready), fp)
    def save_ready(self, service_name):
        """
        Save an indicator that the given service is now data_ready.
        """
        self._load_ready_file()
        self._ready.add(service_name)
        self._save_ready_file()
    def save_lost(self, service_name):
        """
        Save an indicator that the given service is no longer data_ready.
        """
        self._load_ready_file()
        self._ready.discard(service_name)
        self._save_ready_file()
    def was_ready(self, service_name):
        """
        Determine if the given service was previously data_ready.
        """
        self._load_ready_file()
        return service_name in self._ready
class ManagerCallback(object):
    """
    Special case of a callback that takes the `ServiceManager` instance
    in addition to the service name.

    Subclasses should implement `__call__` which should accept three parameters:

    * `manager`       The `ServiceManager` instance
    * `service_name`  The name of the service it's being triggered for
    * `event_name`    The name of the event that this callback is handling
    """

    def __call__(self, manager, service_name, event_name):
        # Abstract hook: concrete callbacks must provide an implementation.
        raise NotImplementedError()
class PortManagerCallback(ManagerCallback):
    """
    Callback class that will open or close ports, for use as either
    a start or stop action.
    """
    def __call__(self, manager, service_name, event_name):
        service = manager.get_service(service_name)
        new_ports = service.get('ports', [])
        # Ports opened in a previous hook run are cached in a dot-file in
        # the charm directory so stale ones can be closed on reconfigure.
        port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
        if os.path.exists(port_file):
            with open(port_file) as fp:
                old_ports = fp.read().split(',')
            for old_port in old_ports:
                if bool(old_port):  # skip the empty string from a blank file
                    old_port = int(old_port)
                    if old_port not in new_ports:
                        # Previously open, no longer wanted: close it.
                        hookenv.close_port(old_port)
        # Rewrite the cache with the currently requested port list.
        with open(port_file, 'w') as fp:
            fp.write(','.join(str(port) for port in new_ports))
        for port in new_ports:
            if event_name == 'start':
                hookenv.open_port(port)
            elif event_name == 'stop':
                hookenv.close_port(port)
def service_stop(service_name):
    """
    Wrapper around host.service_stop to prevent spurious "unknown service"
    messages in the logs.
    """
    if not host.service_running(service_name):
        return
    host.service_stop(service_name)
def service_restart(service_name):
    """
    Wrapper around host.service_restart to prevent spurious "unknown service"
    messages in the logs.
    """
    if not host.service_available(service_name):
        return
    # Restart if running, otherwise start fresh.
    if host.service_running(service_name):
        host.service_restart(service_name)
    else:
        host.service_start(service_name)
# Convenience aliases: a single shared PortManagerCallback instance handles
# both opening (on 'start') and closing (on 'stop') of service ports.
open_ports = close_ports = manage_ports = PortManagerCallback()
|
|
@ -1,267 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import os
|
||||
import yaml
|
||||
from charmhelpers.core import hookenv
|
||||
from charmhelpers.core import templating
|
||||
|
||||
from charmhelpers.core.services.base import ManagerCallback
|
||||
|
||||
|
||||
__all__ = ['RelationContext', 'TemplateCallback',
|
||||
'render_template', 'template']
|
||||
|
||||
|
||||
class RelationContext(dict):
    """
    Base class for a context generator that gets relation data from juju.

    Subclasses must provide the attributes `name`, which is the name of the
    interface of interest, `interface`, which is the type of the interface of
    interest, and `required_keys`, which is the set of keys required for the
    relation to be considered complete.  The data for all interfaces matching
    the `name` attribute that are complete will used to populate the dictionary
    values (see `get_data`, below).

    The generated context will be namespaced under the relation :attr:`name`,
    to prevent potential naming conflicts.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    # Subclasses set these to the relation name and interface type.
    name = None
    interface = None

    def __init__(self, name=None, additional_required_keys=None):
        # Default required_keys here rather than as a class attribute, so
        # subclasses that assign it in their own __init__ are not clobbered.
        if not hasattr(self, 'required_keys'):
            self.required_keys = []

        if name is not None:
            self.name = name
        if additional_required_keys:
            self.required_keys.extend(additional_required_keys)
        self.get_data()

    def __bool__(self):
        """
        Returns True if all of the required_keys are available.
        """
        return self.is_ready()

    __nonzero__ = __bool__  # Python 2 truthiness protocol

    def __repr__(self):
        return super(RelationContext, self).__repr__()

    def is_ready(self):
        """
        Returns True if all of the `required_keys` are available from any units.
        """
        # get_data() only stores unit data that passed _is_ready(), so a
        # non-empty list means at least one complete unit exists.
        ready = len(self.get(self.name, [])) > 0
        if not ready:
            hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
        return ready

    def _is_ready(self, unit_data):
        """
        Helper method that tests a set of relation data and returns True if
        all of the `required_keys` are present.
        """
        return set(unit_data.keys()).issuperset(set(self.required_keys))

    def get_data(self):
        """
        Retrieve the relation data for each unit involved in a relation and,
        if complete, store it in a list under `self[self.name]`.  This
        is automatically called when the RelationContext is instantiated.

        The units are sorted lexographically first by the service ID, then by
        the unit ID.  Thus, if an interface has two other services, 'db:1'
        and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
        and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
        set of data, the relation data for the units will be stored in the
        order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.

        If you only care about a single unit on the relation, you can just
        access it as `{{ interface[0]['key'] }}`.  However, if you can at all
        support multiple units on a relation, you should iterate over the list,
        like::

            {% for unit in interface -%}
                {{ unit['key'] }}{% if not loop.last %},{% endif %}
            {%- endfor %}

        Note that since all sets of relation data from all related services and
        units are in a single list, if you need to know which service or unit a
        set of data came from, you'll need to extend this class to preserve
        that information.
        """
        if not hookenv.relation_ids(self.name):
            return

        ns = self.setdefault(self.name, [])
        # Sorted iteration gives the deterministic ordering documented above.
        for rid in sorted(hookenv.relation_ids(self.name)):
            for unit in sorted(hookenv.related_units(rid)):
                reldata = hookenv.relation_get(rid=rid, unit=unit)
                if self._is_ready(reldata):
                    ns.append(reldata)

    def provide_data(self):
        """
        Return data to be relation_set for this interface.
        """
        return {}
class MysqlRelation(RelationContext):
    """
    Relation context for the `mysql` interface.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    name = 'db'
    interface = 'mysql'

    def __init__(self, *args, **kwargs):
        # Seed the base requirements before delegating, so the parent can
        # extend them with any additional_required_keys.
        self.required_keys = ['host', 'user', 'password', 'database']
        super(MysqlRelation, self).__init__(*args, **kwargs)
class HttpRelation(RelationContext):
    """
    Relation context for the `http` interface.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    name = 'website'
    interface = 'http'

    def __init__(self, *args, **kwargs):
        # Seed the base requirements before the parent extends them.
        self.required_keys = ['host', 'port']
        super(HttpRelation, self).__init__(*args, **kwargs)

    def provide_data(self):
        # Advertise this unit's address on the conventional HTTP port.
        return {
            'host': hookenv.unit_get('private-address'),
            'port': 80,
        }
class RequiredConfig(dict):
    """
    Data context that loads config options with one or more mandatory options.

    Once the required options have been changed from their default values, all
    config options will be available, namespaced under `config` to prevent
    potential naming conflicts (for example, between a config option and a
    relation property).

    :param list *args: List of options that must be changed from their default values.
    """

    def __init__(self, *args):
        self.required_options = args
        self['config'] = hookenv.config()
        with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
            # safe_load: config.yaml is plain data; the full yaml.load can
            # instantiate arbitrary Python objects and is unsafe on input
            # the charm author doesn't fully control.
            self.config = yaml.safe_load(fp).get('options', {})

    def __bool__(self):
        """True only when every required option differs from its default."""
        for option in self.required_options:
            if option not in self['config']:
                return False
            current_value = self['config'][option]
            default_value = self.config[option].get('default')
            if current_value == default_value:
                return False
            # Treat None and '' as interchangeable "unset" values.
            if current_value in (None, '') and default_value in (None, ''):
                return False
        return True

    def __nonzero__(self):
        # Python 2 truthiness protocol.
        return self.__bool__()
class StoredContext(dict):
    """
    A data context that always returns the data that it was first created with.

    This is useful to do a one-time generation of things like passwords, that
    will thereafter use the same value that was originally generated, instead
    of generating a new value each time it is run.
    """
    def __init__(self, file_name, config_data):
        """
        If the file exists, populate `self` with the data from the file.
        Otherwise, populate with the given data and persist it to the file.
        """
        if os.path.exists(file_name):
            self.update(self.read_context(file_name))
        else:
            self.store_context(file_name, config_data)
            self.update(config_data)

    def store_context(self, file_name, config_data):
        """Persist `config_data` as YAML to `file_name` (relative paths are
        resolved against the charm directory)."""
        if not os.path.isabs(file_name):
            file_name = os.path.join(hookenv.charm_dir(), file_name)
        with open(file_name, 'w') as file_stream:
            # Tighten permissions before writing: the stored context may
            # contain generated secrets such as passwords.
            os.fchmod(file_stream.fileno(), 0o600)
            yaml.dump(config_data, file_stream)

    def read_context(self, file_name):
        """Load and return the previously stored context from `file_name`.

        :raises OSError: if the file exists but contains no data.
        """
        if not os.path.isabs(file_name):
            file_name = os.path.join(hookenv.charm_dir(), file_name)
        with open(file_name, 'r') as file_stream:
            # safe_load: the file only ever holds plain data written by
            # store_context; the full yaml.load can construct arbitrary
            # Python objects and is unsafe.
            data = yaml.safe_load(file_stream)
        if not data:
            raise OSError("%s is empty" % file_name)
        return data
class TemplateCallback(ManagerCallback):
    """
    Callback class that will render a Jinja2 template, for use as a ready
    action.

    :param str source: The template source file, relative to
        `$CHARM_DIR/templates`
    :param str target: The target to write the rendered template to
    :param str owner: The owner of the rendered file
    :param str group: The group of the rendered file
    :param int perms: The permissions of the rendered file
    """
    def __init__(self, source, target,
                 owner='root', group='root', perms=0o444):
        self.source = source
        self.target = target
        self.owner = owner
        self.group = group
        self.perms = perms

    def __call__(self, manager, service_name, event_name):
        # Merge every required_data context of the service into one flat
        # template context, then render it to the configured target.
        context = {}
        service = manager.get_service(service_name)
        for data_ctx in service.get('required_data', []):
            context.update(data_ctx)
        templating.render(self.source, self.target, context,
                          self.owner, self.group, self.perms)
# Convenience aliases for templates: both names construct a TemplateCallback.
render_template = template = TemplateCallback
|
|
@ -1,42 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import six
|
||||
|
||||
|
||||
def bool_from_string(value):
    """Interpret string value as boolean.

    Returns True if value translates to True otherwise False.
    """
    if not isinstance(value, six.string_types):
        msg = "Unable to interpret non-string value '%s' as boolean" % (value)
        raise ValueError(msg)

    normalized = six.text_type(value).strip().lower()

    if normalized in ('y', 'yes', 'true', 't', 'on'):
        return True
    if normalized in ('n', 'no', 'false', 'f', 'off'):
        return False

    msg = "Unable to interpret string value '%s' as boolean" % (normalized)
    raise ValueError(msg)
|
@ -1,56 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import yaml
|
||||
|
||||
from subprocess import check_call
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
log,
|
||||
DEBUG,
|
||||
ERROR,
|
||||
)
|
||||
|
||||
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
|
||||
|
||||
|
||||
def create(sysctl_dict, sysctl_file):
    """Creates a sysctl.conf file from a YAML associative array

    :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
    :type sysctl_dict: str
    :param sysctl_file: path to the sysctl file to be saved
    :type sysctl_file: str or unicode
    :returns: None
    """
    try:
        parsed = yaml.safe_load(sysctl_dict)
    except yaml.YAMLError:
        log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
            level=ERROR)
        return

    lines = ["{}={}\n".format(key, value) for key, value in parsed.items()]
    with open(sysctl_file, "w") as fd:
        fd.writelines(lines)

    log("Updating sysctl_file: %s values: %s" % (sysctl_file, parsed),
        level=DEBUG)

    # Apply the new settings immediately.
    check_call(["sysctl", "-p", sysctl_file])
|
|
@ -1,68 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import os
|
||||
|
||||
from charmhelpers.core import host
|
||||
from charmhelpers.core import hookenv
|
||||
|
||||
|
||||
def render(source, target, context, owner='root', group='root',
           perms=0o444, templates_dir=None, encoding='UTF-8'):
    """
    Render a template.

    The `source` path, if not absolute, is relative to the `templates_dir`.

    The `target` path should be absolute.

    The context should be a dict containing the values to be replaced in the
    template.

    The `owner`, `group`, and `perms` options will be passed to `write_file`.

    If omitted, `templates_dir` defaults to the `templates` folder in the charm.

    Note: Using this requires python-jinja2; if it is not installed, calling
    this will attempt to use charmhelpers.fetch.apt_install to install it.
    """
    try:
        from jinja2 import FileSystemLoader, Environment, exceptions
    except ImportError:
        try:
            from charmhelpers.fetch import apt_install
        except ImportError:
            hookenv.log('Could not import jinja2, and could not import '
                        'charmhelpers.fetch to install it',
                        level=hookenv.ERROR)
            raise
        # Install jinja2 on demand, then retry the import.
        apt_install('python-jinja2', fatal=True)
        from jinja2 import FileSystemLoader, Environment, exceptions

    if templates_dir is None:
        templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
    # Build a jinja2 Environment rooted at the templates directory.
    # (The previous local was misleadingly named 'loader'; it is an
    # Environment.  Also removed a dead 'source = source' statement.)
    template_env = Environment(loader=FileSystemLoader(templates_dir))
    try:
        template = template_env.get_template(source)
    except exceptions.TemplateNotFound as e:
        hookenv.log('Could not load template %s from %s.' %
                    (source, templates_dir),
                    level=hookenv.ERROR)
        raise e
    content = template.render(context)
    # Ensure the target directory exists before writing the rendered file.
    host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
    host.write_file(target, content.encode(encoding), owner, group, perms)
|
|
@ -1,521 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
#
|
||||
# Authors:
|
||||
# Kapil Thangavelu <kapil.foss@gmail.com>
|
||||
#
|
||||
"""
|
||||
Intro
|
||||
-----
|
||||
|
||||
A simple way to store state in units. This provides a key value
|
||||
storage with support for versioned, transactional operation,
|
||||
and can calculate deltas from previous values to simplify unit logic
|
||||
when processing changes.
|
||||
|
||||
|
||||
Hook Integration
|
||||
----------------
|
||||
|
||||
There are several extant frameworks for hook execution, including
|
||||
|
||||
- charmhelpers.core.hookenv.Hooks
|
||||
- charmhelpers.core.services.ServiceManager
|
||||
|
||||
The storage classes are framework agnostic, one simple integration is
|
||||
via the HookData contextmanager. It will record the current hook
|
||||
execution environment (including relation data, config data, etc.),
|
||||
setup a transaction and allow easy access to the changes from
|
||||
previously seen values. One consequence of the integration is the
|
||||
reservation of particular keys ('rels', 'unit', 'env', 'config',
|
||||
'charm_revisions') for their respective values.
|
||||
|
||||
Here's a fully worked integration example using hookenv.Hooks::
|
||||
|
||||
from charmhelper.core import hookenv, unitdata
|
||||
|
||||
hook_data = unitdata.HookData()
|
||||
db = unitdata.kv()
|
||||
hooks = hookenv.Hooks()
|
||||
|
||||
@hooks.hook
|
||||
def config_changed():
|
||||
# Print all changes to configuration from previously seen
|
||||
# values.
|
||||
for changed, (prev, cur) in hook_data.conf.items():
|
||||
print('config changed', changed,
|
||||
'previous value', prev,
|
||||
'current value', cur)
|
||||
|
||||
# Get some unit specific bookeeping
|
||||
if not db.get('pkg_key'):
|
||||
key = urllib.urlopen('https://example.com/pkg_key').read()
|
||||
db.set('pkg_key', key)
|
||||
|
||||
# Directly access all charm config as a mapping.
|
||||
conf = db.getrange('config', True)
|
||||
|
||||
# Directly access all relation data as a mapping
|
||||
rels = db.getrange('rels', True)
|
||||
|
||||
if __name__ == '__main__':
|
||||
with hook_data():
|
||||
hook.execute()
|
||||
|
||||
|
||||
A more basic integration is via the hook_scope context manager which simply
|
||||
manages transaction scope (and records hook name, and timestamp)::
|
||||
|
||||
>>> from unitdata import kv
|
||||
>>> db = kv()
|
||||
>>> with db.hook_scope('install'):
|
||||
... # do work, in transactional scope.
|
||||
... db.set('x', 1)
|
||||
>>> db.get('x')
|
||||
1
|
||||
|
||||
|
||||
Usage
|
||||
-----
|
||||
|
||||
Values are automatically json de/serialized to preserve basic typing
|
||||
and complex data struct capabilities (dicts, lists, ints, booleans, etc).
|
||||
|
||||
Individual values can be manipulated via get/set::
|
||||
|
||||
>>> kv.set('y', True)
|
||||
>>> kv.get('y')
|
||||
True
|
||||
|
||||
# We can set complex values (dicts, lists) as a single key.
|
||||
>>> kv.set('config', {'a': 1, 'b': True'})
|
||||
|
||||
# Also supports returning dictionaries as a record which
|
||||
# provides attribute access.
|
||||
>>> config = kv.get('config', record=True)
|
||||
>>> config.b
|
||||
True
|
||||
|
||||
|
||||
Groups of keys can be manipulated with update/getrange::
|
||||
|
||||
>>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
|
||||
>>> kv.getrange('gui.', strip=True)
|
||||
{'z': 1, 'y': 2}
|
||||
|
||||
When updating values, its very helpful to understand which values
|
||||
have actually changed and how have they changed. The storage
|
||||
provides a delta method to provide for this::
|
||||
|
||||
>>> data = {'debug': True, 'option': 2}
|
||||
>>> delta = kv.delta(data, 'config.')
|
||||
>>> delta.debug.previous
|
||||
None
|
||||
>>> delta.debug.current
|
||||
True
|
||||
>>> delta
|
||||
{'debug': (None, True), 'option': (None, 2)}
|
||||
|
||||
Note the delta method does not persist the actual change, it needs to
|
||||
be explicitly saved via 'update' method::
|
||||
|
||||
>>> kv.update(data, 'config.')
|
||||
|
||||
Values modified in the context of a hook scope retain historical values
|
||||
associated to the hookname.
|
||||
|
||||
>>> with db.hook_scope('config-changed'):
|
||||
... db.set('x', 42)
|
||||
>>> db.gethistory('x')
|
||||
[(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
|
||||
(2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
|
||||
|
||||
"""
|
||||
|
||||
import collections
|
||||
import contextlib
|
||||
import datetime
|
||||
import itertools
|
||||
import json
|
||||
import os
|
||||
import pprint
|
||||
import sqlite3
|
||||
import sys
|
||||
|
||||
__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
|
||||
|
||||
|
||||
class Storage(object):
|
||||
"""Simple key value database for local unit state within charms.
|
||||
|
||||
Modifications are not persisted unless :meth:`flush` is called.
|
||||
|
||||
To support dicts, lists, integer, floats, and booleans values
|
||||
are automatically json encoded/decoded.
|
||||
"""
|
||||
    def __init__(self, path=None):
        """Open (or create) the backing sqlite database.

        :param str path: Location of the database file.  If omitted, uses
            ``$UNIT_STATE_DB`` when set, otherwise ``.unit-state.db`` in
            ``$CHARM_DIR``.
        """
        self.db_path = path
        if path is None:
            if 'UNIT_STATE_DB' in os.environ:
                self.db_path = os.environ['UNIT_STATE_DB']
            else:
                self.db_path = os.path.join(
                    os.environ.get('CHARM_DIR', ''), '.unit-state.db')
        self.conn = sqlite3.connect('%s' % self.db_path)
        self.cursor = self.conn.cursor()
        # Revision id of the current hook-scope transaction, if any;
        # None means no history bookkeeping is performed.
        self.revision = None
        self._closed = False
        self._init()
||||
    def close(self):
        """Flush pending changes and close the connection (idempotent)."""
        if self._closed:
            return
        # Commit outstanding work without opening a new transaction.
        self.flush(False)
        self.cursor.close()
        self.conn.close()
        self._closed = True
||||
def get(self, key, default=None, record=False):
|
||||
self.cursor.execute('select data from kv where key=?', [key])
|
||||
result = self.cursor.fetchone()
|
||||
if not result:
|
||||
return default
|
||||
if record:
|
||||
return Record(json.loads(result[0]))
|
||||
return json.loads(result[0])
|
||||
|
||||
def getrange(self, key_prefix, strip=False):
|
||||
"""
|
||||
Get a range of keys starting with a common prefix as a mapping of
|
||||
keys to values.
|
||||
|
||||
:param str key_prefix: Common prefix among all keys
|
||||
:param bool strip: Optionally strip the common prefix from the key
|
||||
names in the returned dict
|
||||
:return dict: A (possibly empty) dict of key-value mappings
|
||||
"""
|
||||
self.cursor.execute("select key, data from kv where key like ?",
|
||||
['%s%%' % key_prefix])
|
||||
result = self.cursor.fetchall()
|
||||
|
||||
if not result:
|
||||
return {}
|
||||
if not strip:
|
||||
key_prefix = ''
|
||||
return dict([
|
||||
(k[len(key_prefix):], json.loads(v)) for k, v in result])
|
||||
|
||||
def update(self, mapping, prefix=""):
|
||||
"""
|
||||
Set the values of multiple keys at once.
|
||||
|
||||
:param dict mapping: Mapping of keys to values
|
||||
:param str prefix: Optional prefix to apply to all keys in `mapping`
|
||||
before setting
|
||||
"""
|
||||
for k, v in mapping.items():
|
||||
self.set("%s%s" % (prefix, k), v)
|
||||
|
||||
    def unset(self, key):
        """
        Remove a key from the database entirely.
        """
        self.cursor.execute('delete from kv where key=?', [key])
        # Record the deletion in the revision history, but only when a
        # hook-scope transaction is active and a row was actually removed.
        if self.revision and self.cursor.rowcount:
            self.cursor.execute(
                'insert into kv_revisions values (?, ?, ?)',
                [key, self.revision, json.dumps('DELETED')])
def unsetrange(self, keys=None, prefix=""):
|
||||
"""
|
||||
Remove a range of keys starting with a common prefix, from the database
|
||||
entirely.
|
||||
|
||||
:param list keys: List of keys to remove.
|
||||
:param str prefix: Optional prefix to apply to all keys in ``keys``
|
||||
before removing.
|
||||
"""
|
||||
if keys is not None:
|
||||
keys = ['%s%s' % (prefix, key) for key in keys]
|
||||
self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
|
||||
if self.revision and self.cursor.rowcount:
|
||||
self.cursor.execute(
|
||||
'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
|
||||
list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
|
||||
else:
|
||||
self.cursor.execute('delete from kv where key like ?',
|
||||
['%s%%' % prefix])
|
||||
if self.revision and self.cursor.rowcount:
|
||||
self.cursor.execute(
|
||||
'insert into kv_revisions values (?, ?, ?)',
|
||||
['%s%%' % prefix, self.revision, json.dumps('DELETED')])
|
||||
|
||||
    def set(self, key, value):
        """
        Set a value in the database.

        :param str key: Key to set the value for
        :param value: Any JSON-serializable value to be set
        :return: the value passed in, unchanged
        """
        serialized = json.dumps(value)

        # Check whether the key already exists so we can decide between
        # insert and update (and skip no-op writes entirely).
        self.cursor.execute('select data from kv where key=?', [key])
        exists = self.cursor.fetchone()

        # Skip mutations to the same value
        if exists:
            if exists[0] == serialized:
                return value

        if not exists:
            self.cursor.execute(
                'insert into kv (key, data) values (?, ?)',
                (key, serialized))
        else:
            self.cursor.execute('''
            update kv
            set data = ?
            where key = ?''', [serialized, key])

        # Save
        # Outside of a hook_scope() there is no revision history to update.
        if not self.revision:
            return value

        # Upsert the value into the history table for the current revision
        # (a key may be set multiple times within one hook execution).
        self.cursor.execute(
            'select 1 from kv_revisions where key=? and revision=?',
            [key, self.revision])
        exists = self.cursor.fetchone()

        if not exists:
            self.cursor.execute(
                '''insert into kv_revisions (
                revision, key, data) values (?, ?, ?)''',
                (self.revision, key, serialized))
        else:
            self.cursor.execute(
                '''
                update kv_revisions
                set data = ?
                where key = ?
                and revision = ?''',
                [serialized, key, self.revision])

        return value
|
||||
|
||||
def delta(self, mapping, prefix):
|
||||
"""
|
||||
return a delta containing values that have changed.
|
||||
"""
|
||||
previous = self.getrange(prefix, strip=True)
|
||||
if not previous:
|
||||
pk = set()
|
||||
else:
|
||||
pk = set(previous.keys())
|
||||
ck = set(mapping.keys())
|
||||
delta = DeltaSet()
|
||||
|
||||
# added
|
||||
for k in ck.difference(pk):
|
||||
delta[k] = Delta(None, mapping[k])
|
||||
|
||||
# removed
|
||||
for k in pk.difference(ck):
|
||||
delta[k] = Delta(previous[k], None)
|
||||
|
||||
# changed
|
||||
for k in pk.intersection(ck):
|
||||
c = mapping[k]
|
||||
p = previous[k]
|
||||
if c != p:
|
||||
delta[k] = Delta(p, c)
|
||||
|
||||
return delta
|
||||
|
||||
    @contextlib.contextmanager
    def hook_scope(self, name=""):
        """Scope all future interactions to the current hook execution
        revision."""
        # Nested scopes are not supported: a revision must not already be
        # active.
        assert not self.revision
        # Record this hook execution; its autoincrement id becomes the
        # revision for every set/unset performed inside the scope.
        self.cursor.execute(
            'insert into hooks (hook, date) values (?, ?)',
            (name or sys.argv[0],
             datetime.datetime.utcnow().isoformat()))
        self.revision = self.cursor.lastrowid
        try:
            yield self.revision
            self.revision = None
        except:
            # Bare except is deliberate: roll back (flush(False)) and clear
            # the scope for *any* exception, including BaseException, then
            # re-raise it unchanged.
            self.flush(False)
            self.revision = None
            raise
        else:
            # Clean exit: commit everything written during the scope.
            self.flush()
|
||||
|
||||
    def flush(self, save=True):
        """Commit (save=True) or roll back (save=False) the current
        transaction.

        NOTE(review): a commit is attempted even when ``_closed`` is set --
        only the rollback path checks ``_closed``. Confirm this asymmetry is
        intended.
        """
        if save:
            self.conn.commit()
        elif self._closed:
            # Store already closed: nothing to roll back.
            return
        else:
            self.conn.rollback()
|
||||
|
||||
    def _init(self):
        """Create the database schema if it does not already exist."""
        # Live values: one row per key.
        self.cursor.execute('''
            create table if not exists kv (
               key text,
               data text,
               primary key (key)
               )''')
        # Historical values: one row per (key, hook revision).
        self.cursor.execute('''
            create table if not exists kv_revisions (
               key text,
               revision integer,
               data text,
               primary key (key, revision)
               )''')
        # One row per hook execution; 'version' is the autoincrement id used
        # as the revision number by hook_scope().
        self.cursor.execute('''
            create table if not exists hooks (
               version integer primary key autoincrement,
               hook text,
               date text
               )''')
        self.conn.commit()
|
||||
|
||||
    def gethistory(self, key, deserialize=False):
        """Return the revision history for `key`.

        Each row is (revision, key, data, hook, date). With
        deserialize=True the rows are passed through _parse_history, which
        JSON-decodes the data and parses the date.

        NOTE(review): under Python 3, map() returns a lazy iterator rather
        than a list -- callers of the deserialize=True path may need to
        wrap the result in list().
        """
        self.cursor.execute(
            '''
            select kv.revision, kv.key, kv.data, h.hook, h.date
            from kv_revisions kv,
                 hooks h
            where kv.key=?
             and kv.revision = h.version
            ''', [key])
        if deserialize is False:
            return self.cursor.fetchall()
        return map(_parse_history, self.cursor.fetchall())
|
||||
|
||||
def debug(self, fh=sys.stderr):
|
||||
self.cursor.execute('select * from kv')
|
||||
pprint.pprint(self.cursor.fetchall(), stream=fh)
|
||||
self.cursor.execute('select * from kv_revisions')
|
||||
pprint.pprint(self.cursor.fetchall(), stream=fh)
|
||||
|
||||
|
||||
def _parse_history(d):
|
||||
return (d[0], d[1], json.loads(d[2]), d[3],
|
||||
datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"))
|
||||
|
||||
|
||||
class HookData(object):
    """Simple integration for existing hook exec frameworks.

    Records all unit information, and stores deltas for processing
    by the hook.

    Sample::

       from charmhelper.core import hookenv, unitdata

       changes = unitdata.HookData()
       db = unitdata.kv()
       hooks = hookenv.Hooks()

       @hooks.hook
       def config_changed():
           # View all changes to configuration
           for changed, (prev, cur) in changes.conf.items():
               print('config changed', changed,
                     'previous value', prev,
                     'current value', cur)

           # Get some unit specific bookkeeping
           if not db.get('pkg_key'):
               key = urllib.urlopen('https://example.com/pkg_key').read()
               db.set('pkg_key', key)

       if __name__ == '__main__':
           with changes():
               hook.execute()

    """
    def __init__(self):
        # Shared Storage singleton; conf/rels deltas are populated by
        # _record_hook() each time the context manager is entered.
        self.kv = kv()
        self.conf = None
        self.rels = None

    @contextlib.contextmanager
    def __call__(self):
        """Enter a hook scope and yield (kv, config delta, relation delta)."""
        from charmhelpers.core import hookenv
        hook_name = hookenv.hook_name()

        with self.kv.hook_scope(hook_name):
            self._record_charm_version(hookenv.charm_dir())
            delta_config, delta_relation = self._record_hook(hookenv)
            yield self.kv, delta_config, delta_relation

    def _record_charm_version(self, charm_dir):
        """Record the charm's revision file content in the kv store."""
        # Record revisions.. charm revisions are meaningless
        # to charm authors as they don't control the revision.
        # so logic dependent on revision is not particularly
        # useful, however it is useful for debugging analysis.
        # Fix: use a context manager so the file handle is closed
        # deterministically (the original open(...).read() leaked it).
        with open(os.path.join(charm_dir, 'revision')) as rev_file:
            charm_rev = rev_file.read().strip()
        charm_rev = charm_rev or '0'
        revs = self.kv.get('charm_revisions', [])
        if charm_rev not in revs:
            revs.append(charm_rev.strip() or '0')
            self.kv.set('charm_revisions', revs)

    def _record_hook(self, hookenv):
        """Snapshot the hook execution environment and return the config
        and relation deltas against the previously stored values."""
        data = hookenv.execution_environment()
        self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
        self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
        self.kv.set('env', dict(data['env']))
        self.kv.set('unit', data['unit'])
        self.kv.set('relid', data.get('relid'))
        return conf_delta, rels_delta
|
||||
|
||||
|
||||
class Record(dict):
    """A dict whose keys are also readable as attributes."""

    __slots__ = ()

    def __getattr__(self, k):
        # Fall back to dict lookup; missing keys surface as the
        # conventional AttributeError rather than KeyError.
        try:
            return self[k]
        except KeyError:
            raise AttributeError(k)
|
||||
|
||||
|
||||
class DeltaSet(Record):
    """Mapping of key -> Delta(previous, current), as produced by the
    delta() method above; attribute access is inherited from Record."""

    __slots__ = ()
|
||||
|
||||
|
||||
# A single value change: previous is None for added keys, current is None
# for removed keys.
Delta = collections.namedtuple('Delta', ['previous', 'current'])


# Module-level Storage singleton, created lazily by kv().
_KV = None
|
||||
|
||||
|
||||
def kv():
    """Return the process-wide Storage singleton, creating it lazily on
    first use."""
    global _KV
    if not _KV:
        _KV = Storage()
    return _KV
|
|
@ -1,448 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import importlib
|
||||
from tempfile import NamedTemporaryFile
|
||||
import time
|
||||
from yaml import safe_load
|
||||
from charmhelpers.core.host import (
|
||||
lsb_release
|
||||
)
|
||||
import subprocess
|
||||
from charmhelpers.core.hookenv import (
|
||||
config,
|
||||
log,
|
||||
)
|
||||
import os
|
||||
|
||||
import six
|
||||
if six.PY3:
|
||||
from urllib.parse import urlparse, urlunparse
|
||||
else:
|
||||
from urlparse import urlparse, urlunparse
|
||||
|
||||
|
||||
# sources.list template for an Ubuntu Cloud Archive pocket; {} is the pocket.
CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
"""
# sources.list template for the distro's proposed pocket; {} is the release.
PROPOSED_POCKET = """# Proposed
deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
"""
# Maps every accepted 'cloud:' spelling (bare release name, series-prefixed,
# /updates and /proposed variants) to the canonical cloud-archive pocket.
CLOUD_ARCHIVE_POCKETS = {
    # Folsom
    'folsom': 'precise-updates/folsom',
    'precise-folsom': 'precise-updates/folsom',
    'precise-folsom/updates': 'precise-updates/folsom',
    'precise-updates/folsom': 'precise-updates/folsom',
    'folsom/proposed': 'precise-proposed/folsom',
    'precise-folsom/proposed': 'precise-proposed/folsom',
    'precise-proposed/folsom': 'precise-proposed/folsom',
    # Grizzly
    'grizzly': 'precise-updates/grizzly',
    'precise-grizzly': 'precise-updates/grizzly',
    'precise-grizzly/updates': 'precise-updates/grizzly',
    'precise-updates/grizzly': 'precise-updates/grizzly',
    'grizzly/proposed': 'precise-proposed/grizzly',
    'precise-grizzly/proposed': 'precise-proposed/grizzly',
    'precise-proposed/grizzly': 'precise-proposed/grizzly',
    # Havana
    'havana': 'precise-updates/havana',
    'precise-havana': 'precise-updates/havana',
    'precise-havana/updates': 'precise-updates/havana',
    'precise-updates/havana': 'precise-updates/havana',
    'havana/proposed': 'precise-proposed/havana',
    'precise-havana/proposed': 'precise-proposed/havana',
    'precise-proposed/havana': 'precise-proposed/havana',
    # Icehouse
    'icehouse': 'precise-updates/icehouse',
    'precise-icehouse': 'precise-updates/icehouse',
    'precise-icehouse/updates': 'precise-updates/icehouse',
    'precise-updates/icehouse': 'precise-updates/icehouse',
    'icehouse/proposed': 'precise-proposed/icehouse',
    'precise-icehouse/proposed': 'precise-proposed/icehouse',
    'precise-proposed/icehouse': 'precise-proposed/icehouse',
    # Juno
    'juno': 'trusty-updates/juno',
    'trusty-juno': 'trusty-updates/juno',
    'trusty-juno/updates': 'trusty-updates/juno',
    'trusty-updates/juno': 'trusty-updates/juno',
    'juno/proposed': 'trusty-proposed/juno',
    'trusty-juno/proposed': 'trusty-proposed/juno',
    'trusty-proposed/juno': 'trusty-proposed/juno',
    # Kilo
    'kilo': 'trusty-updates/kilo',
    'trusty-kilo': 'trusty-updates/kilo',
    'trusty-kilo/updates': 'trusty-updates/kilo',
    'trusty-updates/kilo': 'trusty-updates/kilo',
    'kilo/proposed': 'trusty-proposed/kilo',
    'trusty-kilo/proposed': 'trusty-proposed/kilo',
    'trusty-proposed/kilo': 'trusty-proposed/kilo',
}

# The order of this list is very important. Handlers should be listed in from
# least- to most-specific URL matching.
FETCH_HANDLERS = (
    'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
    'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
    'charmhelpers.fetch.giturl.GitUrlFetchHandler',
)

APT_NO_LOCK = 100  # The return code for "couldn't acquire lock" in APT.
APT_NO_LOCK_RETRY_DELAY = 10  # Wait 10 seconds between apt lock checks.
APT_NO_LOCK_RETRY_COUNT = 30  # Retry to acquire the lock X times.
|
||||
|
||||
|
||||
class SourceConfigError(Exception):
    """Raised when a configured package source or key is invalid."""
    pass


class UnhandledSource(Exception):
    """Raised when no fetch handler can process a given source URL."""
    pass


class AptLockError(Exception):
    """Raised when the APT lock cannot be acquired."""
    pass
|
||||
|
||||
|
||||
class BaseFetchHandler(object):

    """Base class for FetchHandler implementations in fetch plugins"""

    def can_handle(self, source):
        """Returns True if the source can be handled. Otherwise returns
        a string explaining why it cannot"""
        return "Wrong source type"

    def install(self, source):
        """Try to download and unpack the source. Return the path to the
        unpacked files or raise UnhandledSource."""
        raise UnhandledSource("Wrong source type {}".format(source))

    def parse_url(self, url):
        """Parse `url` into its urlparse components."""
        return urlparse(url)

    def base_url(self, url):
        """Return url without querystring or fragment"""
        # Keep scheme, netloc, path and params; blank out query and fragment.
        parts = self.parse_url(url)
        return urlunparse(tuple(parts[:4]) + ('', ''))
|
||||
|
||||
|
||||
def filter_installed_packages(packages):
    """Return the subset of `packages` that still require installation.

    Packages unknown to apt (no installation candidate) are logged at
    WARNING level and still included in the result.
    """
    cache = apt_cache()
    _pkgs = []
    for package in packages:
        try:
            # Replaces the original's `p.current_ver or _pkgs.append(...)`
            # or-for-side-effect idiom with an explicit conditional.
            if not cache[package].current_ver:
                _pkgs.append(package)
        except KeyError:
            log('Package {} has no installation candidate.'.format(package),
                level='WARNING')
            _pkgs.append(package)
    return _pkgs
|
||||
|
||||
|
||||
def apt_cache(in_memory=True):
    """Build and return an apt cache.

    :param bool in_memory: when True, blank the on-disk cache paths so the
        package cache is built only in memory.
    """
    # Imported lazily: python-apt is only needed when the cache is used.
    from apt import apt_pkg
    apt_pkg.init()
    if in_memory:
        apt_pkg.config.set("Dir::Cache::pkgcache", "")
        apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
    return apt_pkg.Cache()
|
||||
|
||||
|
||||
def apt_install(packages, options=None, fatal=False):
    """Install one or more packages"""
    # Default to keeping existing config files on conffile prompts.
    if options is None:
        options = ['--option=Dpkg::Options::=--force-confold']

    cmd = ['apt-get', '--assume-yes']
    cmd += options
    cmd.append('install')
    # `packages` may be a single name or an iterable of names.
    if isinstance(packages, six.string_types):
        cmd.append(packages)
    else:
        cmd += packages
    log("Installing {} with options: {}".format(packages,
                                                options))
    _run_apt_command(cmd, fatal)
|
||||
|
||||
|
||||
def apt_upgrade(options=None, fatal=False, dist=False):
    """Upgrade all packages.

    :param list options: extra apt-get options; defaults to keeping old
        config files on conffile prompts.
    :param bool fatal: when True, raise on failure (with lock retries).
    :param bool dist: when True, run 'dist-upgrade' instead of 'upgrade'.
    """
    if options is None:
        options = ['--option=Dpkg::Options::=--force-confold']

    cmd = ['apt-get', '--assume-yes']
    cmd.extend(options)
    if dist:
        cmd.append('dist-upgrade')
    else:
        cmd.append('upgrade')
    log("Upgrading with options: {}".format(options))
    _run_apt_command(cmd, fatal)
|
||||
|
||||
|
||||
def apt_update(fatal=False):
    """Update local apt cache"""
    _run_apt_command(['apt-get', 'update'], fatal)
|
||||
|
||||
|
||||
def apt_purge(packages, fatal=False):
    """Purge one or more packages.

    :param packages: a single package name or an iterable of names.
    :param bool fatal: when True, raise on failure (with lock retries).
    """
    cmd = ['apt-get', '--assume-yes', 'purge']
    if isinstance(packages, six.string_types):
        cmd.append(packages)
    else:
        cmd.extend(packages)
    log("Purging {}".format(packages))
    _run_apt_command(cmd, fatal)
|
||||
|
||||
|
||||
def apt_mark(packages, mark, fatal=False):
    """Flag one or more packages using apt-mark.

    :param packages: a single package name or an iterable of names.
    :param str mark: the apt-mark action, e.g. 'hold' or 'unhold'.
    :param bool fatal: when True, raise CalledProcessError on failure.
    """
    cmd = ['apt-mark', mark]
    if isinstance(packages, six.string_types):
        cmd.append(packages)
    else:
        cmd.extend(packages)
    # Fix: the original always logged "Holding {}", which is wrong when
    # mark is e.g. 'unhold'; report the actual mark being applied.
    log("Marking {} as {}".format(packages, mark))

    if fatal:
        subprocess.check_call(cmd, universal_newlines=True)
    else:
        subprocess.call(cmd, universal_newlines=True)
|
||||
|
||||
|
||||
def apt_hold(packages, fatal=False):
    """Mark one or more packages as held back (apt-mark hold)."""
    return apt_mark(packages, 'hold', fatal=fatal)
|
||||
|
||||
|
||||
def apt_unhold(packages, fatal=False):
    """Remove the hold mark from one or more packages (apt-mark unhold)."""
    return apt_mark(packages, 'unhold', fatal=fatal)
|
||||
|
||||
|
||||
def add_source(source, key=None):
    """Add a package source to this system.

    @param source: a URL or sources.list entry, as supported by
    add-apt-repository(1). Examples::

        ppa:charmers/example
        deb https://stub:key@private.example.com/ubuntu trusty main

    In addition:
        'proposed:' may be used to enable the standard 'proposed'
        pocket for the release.
        'cloud:' may be used to activate official cloud archive pockets,
        such as 'cloud:icehouse'
        'distro' may be used as a noop

    @param key: A key to be added to the system's APT keyring and used
    to verify the signatures on packages. Ideally, this should be an
    ASCII format GPG public key including the block headers. A GPG key
    id may also be used, but be aware that only insecure protocols are
    available to retrieve the actual public key from a public keyserver
    placing your Juju environment at risk. ppa and cloud archive keys
    are securely added automatically, so should not be provided.
    """
    if source is None:
        log('Source is not present. Skipping')
        return

    # Plain URLs, deb lines, ppa: and cloud-archive: specs are handed
    # straight to add-apt-repository.
    if (source.startswith('ppa:') or
            source.startswith('http') or
            source.startswith('deb ') or
            source.startswith('cloud-archive:')):
        subprocess.check_call(['add-apt-repository', '--yes', source])
    elif source.startswith('cloud:'):
        # Cloud archive pockets need the keyring package and a dedicated
        # sources.list.d entry built from CLOUD_ARCHIVE.
        apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
                    fatal=True)
        pocket = source.split(':')[-1]
        if pocket not in CLOUD_ARCHIVE_POCKETS:
            raise SourceConfigError(
                'Unsupported cloud: source option %s' %
                pocket)
        actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
            apt.write(CLOUD_ARCHIVE.format(actual_pocket))
    elif source == 'proposed':
        # Enable the current release's proposed pocket.
        release = lsb_release()['DISTRIB_CODENAME']
        with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
            apt.write(PROPOSED_POCKET.format(release))
    elif source == 'distro':
        # Deliberate noop: use whatever the distro already provides.
        pass
    else:
        # Unknown source types are logged but not fatal.
        log("Unknown source: {!r}".format(source))

    if key:
        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
            # Full ASCII-armored key: feed it to apt-key via a temp file
            # on stdin.
            with NamedTemporaryFile('w+') as key_file:
                key_file.write(key)
                key_file.flush()
                key_file.seek(0)
                subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
        else:
            # Note that hkp: is in no way a secure protocol. Using a
            # GPG key id is pointless from a security POV unless you
            # absolutely trust your network and DNS.
            subprocess.check_call(['apt-key', 'adv', '--keyserver',
                                   'hkp://keyserver.ubuntu.com:80', '--recv',
                                   key])
|
||||
|
||||
|
||||
def configure_sources(update=False,
                      sources_var='install_sources',
                      keys_var='install_keys'):
    """
    Configure multiple sources from charm configuration.

    The lists are encoded as yaml fragments in the configuration.
    The fragment needs to be included as a string. Sources and their
    corresponding keys are of the types supported by add_source().

    Example config:
        install_sources: |
          - "ppa:foo"
          - "http://example.com/repo precise main"
        install_keys: |
          - null
          - "a1b2c3d4"

    Note that 'null' (a.k.a. None) should not be quoted.

    :param bool update: run apt_update(fatal=True) after adding sources.
    """
    sources = safe_load((config(sources_var) or '').strip()) or []
    keys = safe_load((config(keys_var) or '').strip()) or None

    # A single source string is treated as a one-element list.
    if isinstance(sources, six.string_types):
        sources = [sources]

    if keys is None:
        # No keys configured at all: add every source unkeyed.
        for source in sources:
            add_source(source, None)
    else:
        if isinstance(keys, six.string_types):
            keys = [keys]

        # Sources and keys are matched positionally, so the lists must
        # line up exactly.
        if len(sources) != len(keys):
            raise SourceConfigError(
                'Install sources and keys lists are different lengths')
        for source, key in zip(sources, keys):
            add_source(source, key)
    if update:
        apt_update(fatal=True)
|
||||
|
||||
|
||||
def install_remote(source, *args, **kwargs):
    """
    Install a file tree from a remote source

    The specified source should be a url of the form:
        scheme://[host]/path[#[option=value][&...]]

    Schemes supported are based on this modules submodules.
    Options supported are submodule-specific.
    Additional arguments are passed through to the submodule.

    For example::

        dest = install_remote('http://example.com/archive.tgz',
                              checksum='deadbeef',
                              hash_type='sha1')

    This will download `archive.tgz`, validate it using SHA1 and, if
    the file is ok, extract it and return the directory in which it
    was extracted.  If the checksum fails, it will raise
    :class:`charmhelpers.core.host.ChecksumError`.
    """
    # We ONLY check for True here because can_handle may return a string
    # explaining why it can't handle a given source.
    handlers = [h for h in plugins() if h.can_handle(source) is True]
    installed_to = None
    # Try each capable handler in turn until one succeeds.
    for handler in handlers:
        try:
            installed_to = handler.install(source, *args, **kwargs)
        except UnhandledSource as e:
            log('Install source attempt unsuccessful: {}'.format(e),
                level='WARNING')
    if not installed_to:
        raise UnhandledSource("No handler found for source {}".format(source))
    return installed_to
|
||||
|
||||
|
||||
def install_from_config(config_var_name):
    """Install the remote source whose URL is held in the charm config
    option named by `config_var_name`."""
    return install_remote(config()[config_var_name])
|
||||
|
||||
|
||||
def plugins(fetch_handlers=None):
    """Instantiate and return the fetch handler plugins.

    :param fetch_handlers: optional iterable of dotted class paths;
        defaults to FETCH_HANDLERS.
    """
    if not fetch_handlers:
        fetch_handlers = FETCH_HANDLERS
    plugin_list = []
    for handler_name in fetch_handlers:
        package, classname = handler_name.rsplit('.', 1)
        try:
            handler_class = getattr(
                importlib.import_module(package),
                classname)
            plugin_list.append(handler_class())
        except (ImportError, AttributeError):
            # Skip missing plugins so that they can be omitted from
            # installation if desired
            log("FetchHandler {} not found, skipping plugin".format(
                handler_name))
    return plugin_list
|
||||
|
||||
|
||||
def _run_apt_command(cmd, fatal=False):
    """
    Run an APT command, checking output and retrying if the fatal flag is set
    to True.

    :param list cmd: The apt command to run, as an argv list.
    :param bool fatal: Whether the command's output should be checked and
        retried.
    """
    env = os.environ.copy()

    # Keep dpkg/apt from prompting interactively unless the caller has
    # already chosen a frontend.
    if 'DEBIAN_FRONTEND' not in env:
        env['DEBIAN_FRONTEND'] = 'noninteractive'

    if fatal:
        retry_count = 0
        result = None

        # If the command is considered "fatal", we need to retry if the apt
        # lock was not acquired.

        while result is None or result == APT_NO_LOCK:
            try:
                result = subprocess.check_call(cmd, env=env)
            except subprocess.CalledProcessError as e:
                retry_count = retry_count + 1
                # Give up after APT_NO_LOCK_RETRY_COUNT attempts; any
                # non-lock failure also ends up here and is retried.
                if retry_count > APT_NO_LOCK_RETRY_COUNT:
                    raise
                result = e.returncode
                log("Couldn't acquire DPKG lock. Will retry in {} seconds."
                    "".format(APT_NO_LOCK_RETRY_DELAY))
                time.sleep(APT_NO_LOCK_RETRY_DELAY)

    else:
        # Best effort: run once and ignore the exit status.
        subprocess.call(cmd, env=env)
|
|
@ -1,167 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import os
|
||||
import hashlib
|
||||
import re
|
||||
|
||||
from charmhelpers.fetch import (
|
||||
BaseFetchHandler,
|
||||
UnhandledSource
|
||||
)
|
||||
from charmhelpers.payload.archive import (
|
||||
get_archive_handler,
|
||||
extract,
|
||||
)
|
||||
from charmhelpers.core.host import mkdir, check_hash
|
||||
|
||||
import six
|
||||
if six.PY3:
|
||||
from urllib.request import (
|
||||
build_opener, install_opener, urlopen, urlretrieve,
|
||||
HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
|
||||
)
|
||||
from urllib.parse import urlparse, urlunparse, parse_qs
|
||||
from urllib.error import URLError
|
||||
else:
|
||||
from urllib import urlretrieve
|
||||
from urllib2 import (
|
||||
build_opener, install_opener, urlopen,
|
||||
HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
|
||||
URLError
|
||||
)
|
||||
from urlparse import urlparse, urlunparse, parse_qs
|
||||
|
||||
|
||||
def splituser(host):
    """Split 'user@host' into (user, host); user is None when absent.

    Replacement for urllib.splituser(), whose six support seems broken.
    """
    match = re.match('^(.*)@(.*)$', host)
    return match.group(1, 2) if match else (None, host)
|
||||
|
||||
|
||||
def splitpasswd(user):
    """Split 'user:password' into (user, password); password is None when
    absent.

    Replacement for urllib.splitpasswd(), which six does not provide.
    """
    match = re.match('^([^:]*):(.*)$', user, re.S)
    return match.group(1, 2) if match else (user, None)
|
||||
|
||||
|
||||
class ArchiveUrlFetchHandler(BaseFetchHandler):
    """
    Handler to download archive files from arbitrary URLs.

    Can fetch from http, https, ftp, and file URLs.

    Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.

    Installs the contents of the archive in $CHARM_DIR/fetched/.
    """
    def can_handle(self, source):
        """Return True for archive URLs on a supported scheme, otherwise
        a string/False explaining why not."""
        url_parts = self.parse_url(source)
        if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
            # XXX: Why is this returning a boolean and a string? It's
            # doomed to fail since "bool(can_handle('foo://'))" will be True.
            return "Wrong source type"
        if get_archive_handler(self.base_url(source)):
            return True
        return False

    def download(self, source, dest):
        """
        Download an archive file.

        :param str source: URL pointing to an archive file.
        :param str dest: Local path location to download archive file to.
        """
        # propagate all exceptions
        # URLError, OSError, etc
        proto, netloc, path, params, query, fragment = urlparse(source)
        if proto in ('http', 'https'):
            # Strip any user:password from the netloc and install a basic
            # auth handler for it instead.
            auth, barehost = splituser(netloc)
            if auth is not None:
                source = urlunparse((proto, barehost, path, params, query, fragment))
                username, password = splitpasswd(auth)
                passman = HTTPPasswordMgrWithDefaultRealm()
                # Realm is set to None in add_password to force the username and password
                # to be used whatever the realm
                passman.add_password(None, source, username, password)
                authhandler = HTTPBasicAuthHandler(passman)
                opener = build_opener(authhandler)
                install_opener(opener)
        response = urlopen(source)
        try:
            # Fix: open in binary mode. response.read() returns bytes under
            # Python 3, so the original text-mode 'w' open raised TypeError.
            with open(dest, 'wb') as dest_file:
                dest_file.write(response.read())
        except Exception as e:
            # Remove any partial download before re-raising.
            if os.path.isfile(dest):
                os.unlink(dest)
            raise e

    # Mandatory file validation via Sha1 or MD5 hashing.
    def download_and_validate(self, url, hashsum, validate="sha1"):
        """Download `url` to a temp file, verify `hashsum`, and return the
        temp file path."""
        tempfile, headers = urlretrieve(url)
        check_hash(tempfile, hashsum, validate)
        return tempfile

    def install(self, source, dest=None, checksum=None, hash_type='sha1'):
        """
        Download and install an archive file, with optional checksum validation.

        The checksum can also be given on the `source` URL's fragment.
        For example::

            handler.install('http://example.com/file.tgz#sha1=deadbeef')

        :param str source: URL pointing to an archive file.
        :param str dest: Local destination path to install to. If not given,
            installs to `$CHARM_DIR/archives/archive_file_name`.
        :param str checksum: If given, validate the archive file after download.
        :param str hash_type: Algorithm used to generate `checksum`.
            Can be any hash algorithm supported by :mod:`hashlib`,
            such as md5, sha1, sha256, sha512, etc.

        """
        url_parts = self.parse_url(source)
        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
        if not os.path.exists(dest_dir):
            mkdir(dest_dir, perms=0o755)
        dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
        try:
            self.download(source, dld_file)
        except URLError as e:
            raise UnhandledSource(e.reason)
        except OSError as e:
            raise UnhandledSource(e.strerror)
        # Any URL-fragment options whose key names a hashlib algorithm are
        # treated as expected checksums for the download.
        options = parse_qs(url_parts.fragment)
        for key, value in options.items():
            if not six.PY3:
                algorithms = hashlib.algorithms
            else:
                algorithms = hashlib.algorithms_available
            if key in algorithms:
                if len(value) != 1:
                    raise TypeError(
                        "Expected 1 hash value, not %d" % len(value))
                expected = value[0]
                check_hash(dld_file, expected, key)
        if checksum:
            check_hash(dld_file, checksum, hash_type)
        return extract(dld_file, dest)
|
|
@ -1,78 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import os
|
||||
from charmhelpers.fetch import (
|
||||
BaseFetchHandler,
|
||||
UnhandledSource
|
||||
)
|
||||
from charmhelpers.core.host import mkdir
|
||||
|
||||
import six
|
||||
if six.PY3:
|
||||
raise ImportError('bzrlib does not support Python3')
|
||||
|
||||
try:
|
||||
from bzrlib.branch import Branch
|
||||
from bzrlib import bzrdir, workingtree, errors
|
||||
except ImportError:
|
||||
from charmhelpers.fetch import apt_install
|
||||
apt_install("python-bzrlib")
|
||||
from bzrlib.branch import Branch
|
||||
from bzrlib import bzrdir, workingtree, errors
|
||||
|
||||
|
||||
class BzrUrlFetchHandler(BaseFetchHandler):
    """Handler for bazaar branches via generic and lp URLs"""
    def can_handle(self, source):
        """Return True for bzr+ssh:// and lp: URLs only."""
        url_parts = self.parse_url(source)
        if url_parts.scheme not in ('bzr+ssh', 'lp'):
            return False
        else:
            return True

    def branch(self, source, dest):
        """Mirror the branch at `source` into the local directory `dest`."""
        url_parts = self.parse_url(source)
        # If we use lp:branchname scheme we need to load plugins
        if not self.can_handle(source):
            raise UnhandledSource("Cannot handle {}".format(source))
        if url_parts.scheme == "lp":
            from bzrlib.plugin import load_plugins
            load_plugins()
        # Create the destination branch, or reuse it if it already exists.
        try:
            local_branch = bzrdir.BzrDir.create_branch_convenience(dest)
        except errors.AlreadyControlDirError:
            local_branch = Branch.open(dest)
        # Push the remote branch into the local one and update the working
        # tree to match.
        try:
            remote_branch = Branch.open(source)
            remote_branch.push(local_branch)
            tree = workingtree.WorkingTree.open(dest)
            tree.update()
        except Exception as e:
            raise e

    def install(self, source):
        """Fetch `source` into $CHARM_DIR/fetched/<branch_name> and return
        that directory."""
        url_parts = self.parse_url(source)
        # Use the last path component of the URL as the local directory name.
        branch_name = url_parts.path.strip("/").split("/")[-1]
        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                branch_name)
        if not os.path.exists(dest_dir):
            mkdir(dest_dir, perms=0o755)
        try:
            self.branch(source, dest_dir)
        except OSError as e:
            raise UnhandledSource(e.strerror)
        return dest_dir
|
|
@ -1,73 +0,0 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# This file is part of charm-helpers.
|
||||
#
|
||||
# charm-helpers is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# charm-helpers is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public License
|
||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import os
|
||||
from charmhelpers.fetch import (
|
||||
BaseFetchHandler,
|
||||
UnhandledSource
|
||||
)
|
||||
from charmhelpers.core.host import mkdir
|
||||
|
||||
import six
|
||||
if six.PY3:
|
||||
raise ImportError('GitPython does not support Python 3')
|
||||
|
||||
try:
|
||||
from git import Repo
|
||||
except ImportError:
|
||||
from charmhelpers.fetch import apt_install
|
||||
apt_install("python-git")
|
||||
from git import Repo
|
||||
|
||||
from git.exc import GitCommandError # noqa E402
|
||||
|
||||
|
||||
class GitUrlFetchHandler(BaseFetchHandler):
    """Handler for git branches via generic and github URLs"""

    def can_handle(self, source):
        """Return True for http(s)/git URLs."""
        # TODO (mattyw) no support for ssh git@ yet
        scheme = self.parse_url(source).scheme
        return scheme in ('http', 'https', 'git')

    def clone(self, source, dest, branch, depth=None):
        """Clone *source* at *branch* into *dest*, shallow when depth given."""
        if not self.can_handle(source):
            raise UnhandledSource("Cannot handle {}".format(source))

        kwargs = {'branch': branch}
        if depth:
            kwargs['depth'] = depth
        Repo.clone_from(source, dest, **kwargs)

    def install(self, source, branch="master", dest=None, depth=None):
        """Clone *source* and return the directory it was placed in."""
        branch_name = self.parse_url(source).path.strip("/").split("/")[-1]
        if dest:
            dest_dir = os.path.join(dest, branch_name)
        else:
            dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                    branch_name)
        if not os.path.exists(dest_dir):
            mkdir(dest_dir, perms=0o755)
        try:
            self.clone(source, dest_dir, branch, depth)
        except GitCommandError as e:
            raise UnhandledSource(e)
        except OSError as e:
            raise UnhandledSource(e.strerror)
        return dest_dir
|
|
@ -1 +0,0 @@
|
|||
hooks.py
|
|
@ -1 +0,0 @@
|
|||
hooks.py
|
|
@ -1 +0,0 @@
|
|||
hooks.py
|
|
@ -1 +0,0 @@
|
|||
hooks.py
|
|
@ -1 +0,0 @@
|
|||
hooks.py
|
|
@ -1 +0,0 @@
|
|||
hooks.py
|
157
hooks/lib/ODL.py
157
hooks/lib/ODL.py
|
@ -1,157 +0,0 @@
|
|||
'''ODL Controller API integration'''
|
||||
import requests
|
||||
from jinja2 import Environment, FileSystemLoader
|
||||
from charmhelpers.core.hookenv import log
|
||||
from charmhelpers.core.decorators import retry_on_exception
|
||||
|
||||
|
||||
class ODLInteractionFatalError(Exception):
    """Generic exception for failures in interaction with ODL."""
|
||||
|
||||
|
||||
class ODLConfig(requests.Session):
    """HTTP session for talking to an OpenDayLight controller's REST API.

    Wraps requests.Session with basic-auth credentials, retrying transport,
    and the RESTCONF URLs used by the charm (config/operational trees,
    neutron device map, node inventory).
    """

    def __init__(self, username, password, host, port='8181'):
        super(ODLConfig, self).__init__()
        # Retry transient connection failures at the HTTP transport level.
        self.mount("http://", requests.adapters.HTTPAdapter(max_retries=5))
        self.base_url = 'http://{}:{}'.format(host, port)
        self.auth = (username, password)
        self.proxies = {}
        self.timeout = 10
        self.conf_url = self.base_url + '/restconf/config'
        self.oper_url = self.base_url + '/restconf/operational'
        # Map of neutron networks to registered devices/interfaces.
        self.netmap_url = self.conf_url + '/neutron-device-map:neutron_net_map'
        self.node_query_url = self.oper_url + '/opendaylight-inventory:nodes/'
        yang_mod_path = ('/opendaylight-inventory:nodes/node/'
                         'controller-config/yang-ext:mount/config:modules')
        self.node_mount_url = self.conf_url + yang_mod_path

    @retry_on_exception(5, base_delay=30,
                        exc_type=requests.exceptions.ConnectionError)
    def contact_odl(self, request_type, url, headers=None, data=None,
                    whitelist_rcs=None, retry_rcs=None):
        """Issue a request to ODL and return the response.

        :param request_type: HTTP verb, e.g. 'GET' or 'POST'.
        :param whitelist_rcs: extra status codes to treat as success.
        :param retry_rcs: extra status codes that trigger a retry (raised as
            ConnectionError so the retry_on_exception decorator re-invokes).
        :raises ODLInteractionFatalError: for any other non-success code.
        """
        response = self.request(request_type, url, data=data, headers=headers)
        ok_codes = [requests.codes.ok, requests.codes.no_content]
        retry_codes = [requests.codes.service_unavailable]
        if whitelist_rcs:
            ok_codes.extend(whitelist_rcs)
        if retry_rcs:
            retry_codes.extend(retry_rcs)
        if response.status_code not in ok_codes:
            if response.status_code in retry_codes:
                msg = "Recieved {} from ODL on {}".format(response.status_code,
                                                          url)
                # Raised so the decorator above retries the whole call.
                raise requests.exceptions.ConnectionError(msg)
            else:
                msg = "Contact failed status_code={}, {}".format(
                    response.status_code, url)
                raise ODLInteractionFatalError(msg)
        return response

    def get_networks(self):
        """Return the neutron_net_map dict from ODL, or {} if absent."""
        log('Querying macs registered with odl')
        # No netmap may have been registered yet, so 404 is ok
        odl_req = self.contact_odl(
            'GET', self.netmap_url, whitelist_rcs=[requests.codes.not_found])
        if not odl_req:
            log('neutron_net_map not found in ODL')
            return {}
        odl_json = odl_req.json()
        if odl_json.get('neutron_net_map'):
            log('neutron_net_map returned by ODL')
            return odl_json['neutron_net_map']
        else:
            log('neutron_net_map NOT returned by ODL')
            return {}

    def delete_net_device_entry(self, net, device_name):
        """Remove *device_name*'s entry for *net* from the netmap."""
        obj_url = self.netmap_url + \
            'physicalNetwork/{}/device/{}'.format(net, device_name)
        self.contact_odl('DELETE', obj_url)

    def get_odl_registered_nodes(self):
        """Return the list of node ids registered in ODL's inventory."""
        log('Querying nodes registered with odl')
        odl_req = self.contact_odl('GET', self.node_query_url)
        odl_json = odl_req.json()
        odl_node_ids = []
        if odl_json.get('nodes'):
            odl_nodes = odl_json['nodes'].get('node', [])
            odl_node_ids = [entry['id'] for entry in odl_nodes]
        log('Following nodes are registered: ' + ' '.join(odl_node_ids))
        return odl_node_ids

    def odl_register_node(self, device_name, ip):
        """Register this host as a node with ODL via rendered XML."""
        log('Registering node {} ({}) with ODL'.format(device_name, ip))
        payload = self.render_node_xml(device_name, ip)
        headers = {'Content-Type': 'application/xml'}
        # Strictly a client should not retry on recipt of a bad_request (400)
        # but ODL return 400s while it is initialising
        self.contact_odl(
            'POST', self.node_mount_url, headers=headers, data=payload,
            retry_rcs=[requests.codes.bad_request])

    def odl_register_macs(self, device_name, network, interface, mac,
                          device_type='vhostuser'):
        """Register an interface/MAC against *network* in the netmap."""
        log('Registering {} and {} on {}'.format(network, interface, mac))
        payload = self.render_mac_xml(device_name, network, interface, mac,
                                      device_type)
        # NOTE(review): Content-Type is application/json although the
        # template is named *_xml — presumably the template emits JSON;
        # confirm against templates/mac_registration.
        headers = {'Content-Type': 'application/json'}
        self.contact_odl(
            'POST', self.netmap_url, headers=headers, data=payload)

    def get_macs_networks(self, mac):
        """Return names of networks that *mac* is registered on."""
        registered_networks = self.get_networks()
        nets = []
        phy_nets = registered_networks.get('physicalNetwork')
        if phy_nets:
            for network in phy_nets:
                for device in network.get('device', []):
                    for interface in device['interface']:
                        if interface['macAddress'] == mac:
                            nets.append(network['name'])
        return nets

    def is_device_registered(self, device_name):
        """True if *device_name* appears in ODL's node inventory."""
        return device_name in self.get_odl_registered_nodes()

    def is_net_device_registered(self, net_name, device_name, interface_name,
                                 mac, device_type='vhostuser'):
        """True if the given net/device/interface/mac combo is registered."""
        networks = self.get_networks()
        phy_nets = networks.get('physicalNetwork')
        if phy_nets:
            for net in phy_nets:
                if net_name == net['name']:
                    for dev in net.get('device', []):
                        if device_name == dev['device-name'] \
                                and dev['device-type'] == device_type:
                            for interface in dev['interface']:
                                if (interface_name ==
                                        interface['interface-name'] and
                                        mac == interface['macAddress']):
                                    return True
        return False

    def render_node_xml(self, device_name, ip, user='admin', password='admin'):
        """Render the node-registration payload from templates/."""
        env = Environment(loader=FileSystemLoader('templates'))
        template = env.get_template('odl_registration')
        node_xml = template.render(
            vpp_host=device_name,
            vpp_ip=ip,
            vpp_username=user,
            vpp_password=password,
        )
        return node_xml

    def render_mac_xml(self, device_name, network, interface, mac,
                       device_type='vhostuser'):
        """Render the mac-registration payload from templates/."""
        env = Environment(loader=FileSystemLoader('templates'))
        template = env.get_template('mac_registration')
        mac_xml = template.render(
            vpp_host=device_name,
            network=network,
            interface=interface,
            mac=mac,
            device_type=device_type,
        )
        return mac_xml
|
|
@ -1,327 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
import re
|
||||
import os
|
||||
import glob
|
||||
import subprocess
|
||||
from charmhelpers.core.decorators import retry_on_exception
|
||||
import shlex
|
||||
from charmhelpers.core.hookenv import(
|
||||
log,
|
||||
config,
|
||||
)
|
||||
|
||||
|
||||
def format_pci_addr(pci_addr):
    """Return *pci_addr* in canonical zero-padded DDDD:BB:SS.F form.

    e.g. '0:6:1.2' -> '0000:06:01.2'
    """
    domain, bus, slot_func = pci_addr.split(':')
    slot, func = slot_func.split('.')
    padded = (domain.zfill(4), bus.zfill(2), slot.zfill(2), func)
    return '{}:{}:{}.{}'.format(*padded)
|
||||
|
||||
|
||||
class PCINetDevice(object):
    """A single PCI network device and its kernel-driver binding state."""

    def __init__(self, pci_address):
        self.pci_address = pci_address
        self.update_attributes()

    def update_attributes(self):
        """Refresh loaded kmod, modalias kmod and interface information."""
        self.update_loaded_kmod()
        self.update_modalias_kmod()
        self.update_interface_info()

    def update_loaded_kmod(self):
        """Record the kernel driver currently bound (None when unbound)."""
        cmd = ['lspci', '-ks', self.pci_address]
        lspci_output = subprocess.check_output(cmd)
        kdrive = None
        for line in lspci_output.split('\n'):
            if 'Kernel driver' in line:
                kdrive = line.split(':')[1].strip()
        log('Loaded kmod for {} is {}'.format(self.pci_address, kdrive))
        self.loaded_kmod = kdrive

    def update_modalias_kmod(self):
        """Record the module that modules.alias says should drive this dev."""
        cmd = ['lspci', '-ns', self.pci_address]
        lspci_output = subprocess.check_output(cmd).split()
        vendor_device = lspci_output[2]
        vendor, device = vendor_device.split(':')
        pci_string = 'pci:v{}d{}'.format(vendor.zfill(8), device.zfill(8))
        kernel_name = self.get_kernel_name()
        alias_files = '/lib/modules/{}/modules.alias'.format(kernel_name)
        kmod = None
        with open(alias_files, 'r') as f:
            for line in f.readlines():
                if pci_string in line:
                    kmod = line.split()[-1]
        log('module.alias kmod for {} is {}'.format(self.pci_address, kmod))
        self.modalias_kmod = kmod

    def update_interface_info(self):
        """Populate interface_name/mac_address/state for this device."""
        if self.loaded_kmod:
            if self.loaded_kmod == 'igb_uio':
                # igb_uio means the device is owned by VPE/DPDK, not the
                # kernel network stack — query the confd CLI instead.
                return self.update_interface_info_vpe()
            else:
                return self.update_interface_info_eth()
        else:
            self.interface_name = None
            self.mac_address = None
            self.state = 'unbound'

    def get_kernel_name(self):
        """Return `uname -r` output (running kernel release)."""
        return subprocess.check_output(['uname', '-r']).strip()

    def pci_rescan(self):
        """Ask the kernel to rescan the PCI bus."""
        rescan_file = '/sys/bus/pci/rescan'
        with open(rescan_file, 'w') as f:
            f.write('1')

    def bind(self, kmod):
        """Bind this device to driver *kmod* via sysfs and refresh state."""
        bind_file = '/sys/bus/pci/drivers/{}/bind'.format(kmod)
        log('Binding {} to {}'.format(self.pci_address, bind_file))
        with open(bind_file, 'w') as f:
            f.write(self.pci_address)
        self.pci_rescan()
        self.update_attributes()

    def unbind(self):
        """Unbind this device from its current driver, if any."""
        if not self.loaded_kmod:
            return
        unbind_file = '/sys/bus/pci/drivers/{}/unbind'.format(self.loaded_kmod)
        log('Unbinding {} from {}'.format(self.pci_address, unbind_file))
        with open(unbind_file, 'w') as f:
            f.write(self.pci_address)
        self.pci_rescan()
        self.update_attributes()

    def update_interface_info_vpe(self):
        """Populate interface info from the VPE confd CLI."""
        vpe_devices = self.get_vpe_interfaces_and_macs()
        device_info = {}
        for interface in vpe_devices:
            if self.pci_address == interface['pci_address']:
                device_info['interface'] = interface['interface']
                device_info['macAddress'] = interface['macAddress']
        if device_info:
            self.interface_name = device_info['interface']
            self.mac_address = device_info['macAddress']
            self.state = 'vpebound'
        else:
            self.interface_name = None
            self.mac_address = None
            self.state = None

    @retry_on_exception(5, base_delay=10,
                        exc_type=subprocess.CalledProcessError)
    def get_vpe_cli_out(self):
        """Run the confd CLI and return its raw output."""
        echo_cmd = [
            'echo', '-e', 'show interfaces-state interface phys-address\nexit']
        cli_cmd = ['/opt/cisco/vpe/bin/confd_cli', '-N', '-C', '-u', 'system']
        echo = subprocess.Popen(echo_cmd, stdout=subprocess.PIPE)
        cli_output = subprocess.check_output(cli_cmd, stdin=echo.stdout)
        echo.wait()
        # BUG FIX: was `echo.terminate` (attribute access, never called);
        # the process was never actually terminated.
        echo.terminate()
        log('confd_cli: ' + cli_output)
        return cli_output

    def get_vpe_interfaces_and_macs(self):
        """Parse confd CLI output into interface/mac/pci_address dicts.

        :raises subprocess.CalledProcessError: when the CLI output looks
            bogus (missing local0), so the retry decorator re-invokes.
        """
        cli_output = self.get_vpe_cli_out()
        vpe_devs = []
        if 'local0' not in cli_output:
            log('local0 missing from confd_cli output, assuming things went '
                'wrong')
            # BUG FIX: was `raise subprocess.CalledProcessError` with no
            # arguments, which raises TypeError (the constructor requires
            # returncode and cmd) instead of the intended exception.
            raise subprocess.CalledProcessError(1, 'confd_cli')
        for line in cli_output.split('\n'):
            # Lines containing a MAC address are interface entries.
            if re.search(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', line, re.I):
                interface, mac = line.split()
                pci_addr = self.extract_pci_addr_from_vpe_interface(interface)
                vpe_devs.append({
                    'interface': interface,
                    'macAddress': mac,
                    'pci_address': pci_addr,
                })
        return vpe_devs

    def extract_pci_addr_from_vpe_interface(self, nic):
        ''' Convert a str from nic postfix format to padded format

        eg 6/1/2 -> 0000:06:01.2'''
        log('Extracting pci address from {}'.format(nic))
        addr = re.sub(r'^.*Ethernet', '', nic, re.IGNORECASE)
        bus, slot, func = addr.split('/')
        domain = '0000'
        pci_addr = format_pci_addr(
            '{}:{}:{}.{}'.format(domain, bus, slot, func))
        log('pci address for {} is {}'.format(nic, pci_addr))
        return pci_addr

    def update_interface_info_eth(self):
        """Populate interface info from /sys/class/net."""
        net_devices = self.get_sysnet_interfaces_and_macs()
        for interface in net_devices:
            if self.pci_address == interface['pci_address']:
                self.interface_name = interface['interface']
                self.mac_address = interface['macAddress']
                self.state = interface['state']

    def get_sysnet_interfaces_and_macs(self):
        """Walk /sys/class/net and return info dicts for PCI-backed NICs."""
        net_devs = []
        for sdir in glob.glob('/sys/class/net/*'):
            sym_link = sdir + "/device"
            if os.path.islink(sym_link):
                fq_path = os.path.realpath(sym_link)
                path = fq_path.split('/')
                # virtio devices have an extra path component after the
                # PCI address.
                if 'virtio' in path[-1]:
                    pci_address = path[-2]
                else:
                    pci_address = path[-1]
                net_devs.append({
                    'interface': self.get_sysnet_interface(sdir),
                    'macAddress': self.get_sysnet_mac(sdir),
                    'pci_address': pci_address,
                    'state': self.get_sysnet_device_state(sdir),
                })
        return net_devs

    def get_sysnet_mac(self, sysdir):
        """Return the MAC address read from *sysdir*/address."""
        mac_addr_file = sysdir + '/address'
        with open(mac_addr_file, 'r') as f:
            read_data = f.read()
        mac = read_data.strip()
        log('mac from {} is {}'.format(mac_addr_file, mac))
        return mac

    def get_sysnet_device_state(self, sysdir):
        """Return the operstate ('up'/'down'/...) from *sysdir*/operstate."""
        state_file = sysdir + '/operstate'
        with open(state_file, 'r') as f:
            read_data = f.read()
        state = read_data.strip()
        log('state from {} is {}'.format(state_file, state))
        return state

    def get_sysnet_interface(self, sysdir):
        """Return the interface name (last path component of *sysdir*)."""
        return sysdir.split('/')[-1]
|
||||
|
||||
|
||||
class PCINetDevices(object):
    """Collection of all PCI Ethernet devices found on this host."""

    def __init__(self):
        # Wrap every discovered PCI ethernet controller.
        self.pci_devices = [PCINetDevice(addr)
                            for addr in self.get_pci_ethernet_addresses()]

    def get_pci_ethernet_addresses(self):
        """Return padded PCI addresses of all Ethernet controllers."""
        output = subprocess.check_output(['lspci', '-m', '-D'])
        addresses = []
        for line in output.split('\n'):
            fields = shlex.split(line)
            if len(fields) > 1 and fields[1] == 'Ethernet controller':
                addresses.append(format_pci_addr(fields[0]))
        return addresses

    def update_devices(self):
        """Refresh cached kmod/interface info on every device."""
        for dev in self.pci_devices:
            dev.update_attributes()

    def get_macs(self):
        """MAC addresses of all devices that currently have one."""
        return [dev.mac_address for dev in self.pci_devices
                if dev.mac_address]

    def get_device_from_mac(self, mac):
        """Return the device whose MAC matches *mac*, or None."""
        for dev in self.pci_devices:
            if dev.mac_address == mac:
                return dev

    def get_device_from_pci_address(self, pci_addr):
        """Return the device at *pci_addr*, or None."""
        for dev in self.pci_devices:
            if dev.pci_address == pci_addr:
                return dev

    def rebind_orphans(self):
        """Unbind then rebind every orphaned device."""
        self.unbind_orphans()
        self.bind_orphans()

    def unbind_orphans(self):
        """Unbind all orphans from their current drivers."""
        for orphan in self.get_orphans():
            orphan.unbind()
        self.update_devices()

    def bind_orphans(self):
        """Bind all orphans to their modules.alias driver."""
        for orphan in self.get_orphans():
            orphan.bind(orphan.modalias_kmod)
        self.update_devices()

    def get_orphans(self):
        """Devices with no driver (or igb_uio) and no usable interface."""
        return [dev for dev in self.pci_devices
                if (not dev.loaded_kmod or dev.loaded_kmod == 'igb_uio') and
                not dev.interface_name and not dev.mac_address]
|
||||
|
||||
|
||||
class PCIInfo(dict):
    """Dict of PCI device info derived from the mac-network-map config.

    Keys populated: 'local_macs' (all MACs on this host), 'local_config'
    (mac -> [{'net': ..., 'interface': ...}]), and 'pci_devs' (a
    'dev <addr> dev <addr>' string, or 'no-pci').
    """

    def __init__(self):
        ''' Generate pci info '''
        if not self.is_ready():
            log('PCIInfo not ready')
            return
        self.user_requested_config = self.get_user_requested_config()
        net_devices = PCINetDevices()
        self['local_macs'] = net_devices.get_macs()
        pci_addresses = []
        self['local_config'] = {}
        for mac in self.user_requested_config.keys():
            log('Checking if {} is on this host'.format(mac))
            if mac in self['local_macs']:
                log('{} is on this host'.format(mac))
                device = net_devices.get_device_from_mac(mac)
                log('{} is {} and is currently {}'.format(mac,
                    device.pci_address, device.interface_name))
                # Devices that are already 'up' are in use by the kernel
                # network stack — leave them alone.
                if device.state == 'up':
                    log('Refusing to add {} to device list as it is {}'.format(
                        device.pci_address, device.state))
                else:
                    pci_addresses.append(device.pci_address)
                    self['local_config'][mac] = []
                    for conf in self.user_requested_config[mac]:
                        self['local_config'][mac].append({
                            'net': conf.get('net'),
                            'interface': device.interface_name,
                        })
        if pci_addresses:
            self['pci_devs'] = 'dev ' + ' dev '.join(pci_addresses)
        else:
            self['pci_devs'] = 'no-pci'
        log('pci_devs {}'.format(self['pci_devs']))

    def is_ready(self):
        '''Override this for SDN specific integrations'''
        return True

    def get_user_requested_config(self):
        ''' Parse the user requested config str
        mac=<mac>;net=<net>;vlan=<vlan> and return a dict'''
        # Result maps mac -> list of {'net': <net or None>} dicts.
        mac_net_config = {}
        mac_map = config('mac-network-map')
        if mac_map:
            for line in mac_map.split():
                # Each whitespace-separated token is a ';'-separated list
                # of key=value entries.
                entries = line.split(';')
                tmp_dict = {}
                for entry in entries:
                    if '=' in entry:
                        key, value = entry.split('=')
                        tmp_dict[key] = value
                keys = tmp_dict.keys()
                if 'mac' in keys:
                    if tmp_dict['mac'] in mac_net_config:
                        mac_net_config[tmp_dict['mac']].append({
                            'net': tmp_dict.get('net'),
                        })
                    else:
                        mac_net_config[tmp_dict['mac']] = [{
                            'net': tmp_dict.get('net'),
                        }]
        return mac_net_config
|
|
@ -1,25 +0,0 @@
|
|||
import subprocess
|
||||
|
||||
from charmhelpers.core.hookenv import cached
|
||||
|
||||
|
||||
def set_manager(connection_url):
    '''Configure the OVSDB manager for the switch'''
    cmd = ['ovs-vsctl', 'set-manager', connection_url]
    subprocess.check_call(cmd)
|
||||
|
||||
|
||||
@cached
def _get_ovstbl():
    '''Return the UUID of the root Open_vSwitch table row (cached).'''
    cmd = ['ovs-vsctl', 'get', 'Open_vSwitch', '.', '_uuid']
    return subprocess.check_output(cmd).strip()
|
||||
|
||||
|
||||
def set_config(key, value, table='other_config'):
    '''Set key value pairs in the other_config table'''
    setting = '{}:{}={}'.format(table, key, value)
    cmd = ['ovs-vsctl', 'set', 'Open_vSwitch', _get_ovstbl(), setting]
    subprocess.check_call(cmd)
|
|
@ -1 +0,0 @@
|
|||
hooks.py
|
|
@ -1 +0,0 @@
|
|||
hooks.py
|
|
@ -1 +0,0 @@
|
|||
hooks.py
|
|
@ -1 +0,0 @@
|
|||
hooks.py
|
|
@ -1 +0,0 @@
|
|||
hooks.py
|
|
@ -1 +0,0 @@
|
|||
hooks.py
|
|
@ -1 +0,0 @@
|
|||
hooks.py
|
|
@ -1 +0,0 @@
|
|||
hooks.py
|
|
@ -1,144 +0,0 @@
|
|||
import subprocess
|
||||
|
||||
from socket import gethostname
|
||||
|
||||
import lib.ODL as ODL
|
||||
import lib.PCIDev as PCIDev
|
||||
import lib.ovs as ovs
|
||||
|
||||
from charmhelpers.contrib.network.ip import get_address_in_network
|
||||
from charmhelpers.core.hookenv import config
|
||||
from charmhelpers.core.hookenv import log
|
||||
from charmhelpers.core.hookenv import status_set
|
||||
from charmhelpers.core.hookenv import unit_private_ip
|
||||
from charmhelpers.core.reactive import hook
|
||||
from charmhelpers.core.reactive import when
|
||||
from charmhelpers.core.reactive import when_not
|
||||
from charmhelpers.core.unitdata import kv
|
||||
|
||||
from charmhelpers.fetch import apt_install
|
||||
from charmhelpers.fetch import apt_purge
|
||||
from charmhelpers.fetch import filter_installed_packages
|
||||
|
||||
# Packages to install/remove
|
||||
PACKAGES = ['openvswitch-switch']
|
||||
|
||||
|
||||
@when('ovsdb-manager.access.available')
def configure_openvswitch(odl_ovsdb):
    """Point the local Open vSwitch at the ODL OVSDB controller."""
    db = kv()
    # NOTE(jamespage): Check connection string as well
    # broken/departed seems busted right now
    if db.get('installed') and odl_ovsdb.connection_string():
        log("Configuring OpenvSwitch with ODL OVSDB controller: %s" %
            odl_ovsdb.connection_string())
        # Local tunnel endpoint on the os-data-network, when configured.
        local_ip = get_address_in_network(config('os-data-network'),
                                          unit_private_ip())
        ovs.set_config('local_ip', local_ip)
        ovs.set_config('controller-ips', odl_ovsdb.private_address(),
                       table='external_ids')
        ovs.set_config('host-id', gethostname(),
                       table='external_ids')
        ovs.set_manager(odl_ovsdb.connection_string())
        status_set('active', 'Open vSwitch configured and ready')
|
||||
|
||||
|
||||
@when_not('ovsdb-manager.access.available')
def unconfigure_openvswitch(odl_ovsdb=None):
    """Remove the OVSDB manager and all bridge controllers when the
    relation goes away."""
    db = kv()
    if db.get('installed'):
        log("Unconfiguring OpenvSwitch")
        subprocess.check_call(['ovs-vsctl', 'del-manager'])
        bridges = subprocess.check_output(['ovs-vsctl',
                                           'list-br']).split()
        # Each bridge must have its controller removed individually.
        for bridge in bridges:
            subprocess.check_call(['ovs-vsctl',
                                   'del-controller', bridge])
        status_set('waiting',
                   'Open vSwitch not configured with an ODL OVSDB controller')
|
||||
|
||||
|
||||
@when_not('ovsdb-manager.connected')
def no_ovsdb_manager(odl_ovsdb=None):
    """Flag the missing ODL OVSDB relation in workload status."""
    status_set('blocked', 'Not related to an OpenDayLight OVSDB controller')
|
||||
|
||||
|
||||
@when('neutron-plugin.connected')
def configure_neutron_plugin(neutron_plugin):
    """Publish the ovs-odl plugin config to the nova-compute principal.

    The subordinate_configuration below is injected into
    /etc/nova/nova.conf by the principal charm.
    """
    neutron_plugin.configure_plugin(
        plugin='ovs-odl',
        config={
            "nova-compute": {
                "/etc/nova/nova.conf": {
                    "sections": {
                        'DEFAULT': [
                            ('firewall_driver',
                             'nova.virt.firewall.'
                             'NoopFirewallDriver'),
                            ('libvirt_vif_driver',
                             'nova.virt.libvirt.vif.'
                             'LibvirtGenericVIFDriver'),
                            ('security_group_api', 'neutron'),
                        ],
                    }
                }
            }
        })
|
||||
|
||||
|
||||
@hook('install')
def install_packages():
    """Install Open vSwitch on first install (idempotent via kv flag)."""
    db = kv()
    if db.get('installed'):
        return
    status_set('maintenance', 'Installing packages')
    apt_install(filter_installed_packages(PACKAGES))
    db.set('installed', True)
|
||||
|
||||
|
||||
@hook('stop')
def uninstall_packages():
    """Purge Open vSwitch when the unit stops (idempotent via kv flag)."""
    db = kv()
    if not db.get('installed'):
        return
    status_set('maintenance', 'Purging packages')
    apt_purge(PACKAGES)
    db.unset('installed')
|
||||
|
||||
|
||||
@when('controller-api.access.available')
def odl_node_registration(controller=None):
    """ Register node with ODL if not registered already """
    if controller and controller.connection():
        odl = ODL.ODLConfig(**controller.connection())
        device_name = gethostname()
        if odl.is_device_registered(device_name):
            log('{} is already registered in odl'.format(device_name))
        else:
            # Register using the data-network address when configured.
            local_ip = get_address_in_network(config('os-data-network'),
                                              unit_private_ip())
            log('Registering {} ({}) in odl'.format(
                device_name, local_ip))
            odl.odl_register_node(device_name, local_ip)
|
||||
|
||||
|
||||
@when('controller-api.access.available')
def odl_register_macs(controller=None):
    """ Register local interfaces and their networks with ODL """
    if controller and controller.connection():
        log('Looking for macs to register with networks in odl')
        odl = ODL.ODLConfig(**controller.connection())
        device_name = gethostname()
        # local_config maps mac -> [{'net': ..., 'interface': ...}] as
        # built by PCIDev.PCIInfo from the mac-network-map charm config.
        requested_config = PCIDev.PCIInfo()['local_config']
        for mac in requested_config.keys():
            for requested_net in requested_config[mac]:
                net = requested_net['net']
                interface = requested_net['interface']
                # Registration is idempotent: skip combos ODL already has.
                if not odl.is_net_device_registered(net, device_name,
                                                    interface, mac,
                                                    device_type='ovs'):
                    log('Registering {} and {} on '
                        '{}'.format(net, interface, mac))
                    odl.odl_register_macs(device_name, net, interface, mac,
                                          device_type='ovs')
                else:
                    log('{} already registered for {} on '
                        '{}'.format(net, interface, device_name))
|
|
@ -1,25 +0,0 @@
|
|||
import json
|
||||
|
||||
from charmhelpers.core.reactive import hook
|
||||
from charmhelpers.core.reactive import RelationBase
|
||||
from charmhelpers.core.reactive import scopes
|
||||
|
||||
|
||||
class NeutronPluginProvides(RelationBase):
    """Provides side of the neutron-plugin subordinate interface."""

    scope = scopes.GLOBAL

    @hook('{provides:neutron-plugin}-relation-{joined,changed}')
    def changed(self):
        # Principal charm is present on the relation.
        self.set_state('{relation_name}.connected')

    @hook('{provides:neutron-plugin}-relation-{broken,departed}')
    def broken(self):
        self.remove_state('{relation_name}.connected')

    def configure_plugin(self, plugin, config):
        """Publish the plugin name and its subordinate configuration."""
        conv = self.conversation()
        conv.set_remote(**{
            'neutron-plugin': plugin,
            'subordinate_configuration': json.dumps(config),
        })
|
|
@ -1,38 +0,0 @@
|
|||
from charmhelpers.core.reactive import hook
|
||||
from charmhelpers.core.reactive import RelationBase
|
||||
from charmhelpers.core.reactive import scopes
|
||||
|
||||
|
||||
class ControllerAPIRequires(RelationBase):
    """Requires side of the odl-controller-api interface."""

    scope = scopes.GLOBAL
    # auto_accessors causes RelationBase to generate host()/port()/... style
    # accessor methods for these remote relation keys — presumably returning
    # None until the remote unit publishes them (confirm in RelationBase).
    auto_accessors = ['private-address', 'host', 'port',
                      'username', 'password']

    @hook('{requires:odl-controller-api}-relation-{joined,changed,departed}')
    def changed(self):
        self.set_state('{relation_name}.connected')
        # Access is only 'available' once a full credential set is present.
        if self.connection():
            self.set_state('{relation_name}.access.available')
        else:
            self.remove_state('{relation_name}.access.available')

    @hook('{requires:odl-controller-api}-relation-broken')
    def broken(self):
        self.remove_state('{relation_name}.connected')
        self.remove_state('{relation_name}.access.available')

    def connection(self):
        """OpenDayLight Controller Access Details

        Returns a dict of key value pairs for accessing the ODL controller
        API, or None when any of host/port/username/password is missing.
        """
        data = {
            'host': self.host() or self.private_address(),
            'port': self.port() or '8181',
            'username': self.username(),
            'password': self.password(),
        }
        if all(data.values()):
            return data
        else:
            return None
|
|
@ -1,38 +0,0 @@
|
|||
from charmhelpers.core.reactive import hook
|
||||
from charmhelpers.core.reactive import RelationBase
|
||||
from charmhelpers.core.reactive import scopes
|
||||
|
||||
|
||||
class OVSDBManagerRequires(RelationBase):
    """Requires side of the ovsdb-manager interface (ODL OVSDB)."""

    scope = scopes.GLOBAL
    auto_accessors = ['protocol', 'private-address', 'host', 'port']

    @hook('{requires:ovsdb-manager}-relation-{joined,changed,departed}')
    def changed(self):
        self.set_state('{relation_name}.connected')
        # Access is only available once a full connection string can be built.
        if self.connection_string():
            self.set_state('{relation_name}.access.available')
        else:
            self.remove_state('{relation_name}.access.available')

    @hook('{requires:ovsdb-manager}-relation-broken')
    def broken(self):
        for state in ('{relation_name}.connected',
                      '{relation_name}.access.available'):
            self.remove_state(state)

    def connection_string(self):
        """Open vSwitch connection string

        Returns the connection string to use for Open vSwitch or None
        if the remote ODL controller has not presented this data yet.
        """
        protocol = self.protocol()
        host = self.host() or self.private_address()
        port = self.port() or '6640'
        if protocol and host and port:
            return "{}:{}:{}".format(protocol, host, port)
        return None
|
|
@ -1 +0,0 @@
|
|||
hooks.py
|
|
@ -1 +0,0 @@
|
|||
hooks.py
|
Before Width: | Height: | Size: 18 KiB After Width: | Height: | Size: 18 KiB |
|
@ -0,0 +1,5 @@
|
|||
includes: ['layer:openstack', 'interface:neutron-plugin', 'interface:ovsdb-manager', 'interface:odl-controller-api']
|
||||
options:
|
||||
basic:
|
||||
use_venv: True
|
||||
include_system_packages: True
|
|
@ -0,0 +1,102 @@
|
|||
import socket
|
||||
import subprocess
|
||||
|
||||
import charmhelpers.core.hookenv as hookenv
|
||||
import charmhelpers.contrib.network.ip as ch_ip
|
||||
import charms_openstack.charm
|
||||
import charms_openstack.sdn.odl as odl
|
||||
import charms_openstack.sdn.ovs as ovs
|
||||
import charms_openstack.devices.pci as pci
|
||||
|
||||
|
||||
class OVSODLCharm(charms_openstack.charm.OpenStackCharm):
|
||||
|
||||
# Internal name of charm + keystone endpoint
|
||||
service_name = name = 'openvswitch_odl'
|
||||
|
||||
# First release supported
|
||||
release = 'liberty'
|
||||
|
||||
packages = ['openvswitch-switch']
|
||||
|
||||
def configure_openvswitch(self, odl_ovsdb):
|
||||
hookenv.log("Configuring OpenvSwitch with ODL OVSDB controller: %s" %
|
||||
odl_ovsdb.connection_string())
|
||||
local_ip = ch_ip.get_address_in_network(
|
||||
self.config.get('os-data-network'),
|
||||
hookenv.unit_private_ip())
|
||||
ovs.set_config('local_ip', local_ip)
|
||||
ovs.set_config('controller-ips', odl_ovsdb.private_address(),
|
||||
table='external_ids')
|
||||
ovs.set_config('host-id', socket.gethostname(),
|
||||
table='external_ids')
|
||||
ovs.set_manager(odl_ovsdb.connection_string())
|
||||
hookenv.status_set('active', 'Open vSwitch configured and ready')
|
||||
|
||||
def unconfigure_openvswitch(self, odl_ovsdb):
|
||||
hookenv.log("Unconfiguring OpenvSwitch")
|
||||
subprocess.check_call(['ovs-vsctl', 'del-manager'])
|
||||
bridges = subprocess.check_output(['ovs-vsctl',
|
||||
'list-br']).split()
|
||||
for bridge in bridges:
|
||||
subprocess.check_call(['ovs-vsctl',
|
||||
'del-controller', bridge])
|
||||
hookenv.status_set(
|
||||
'waiting',
|
||||
'Open vSwitch not configured with an ODL OVSDB controller')
|
||||
|
||||
def configure_neutron_plugin(self, neutron_plugin):
|
||||
neutron_plugin.configure_plugin(
|
||||
plugin='ovs-odl',
|
||||
config={
|
||||
"nova-compute": {
|
||||
"/etc/nova/nova.conf": {
|
||||
"sections": {
|
||||
'DEFAULT': [
|
||||
('firewall_driver',
|
||||
'nova.virt.firewall.'
|
||||
'NoopFirewallDriver'),
|
||||
('libvirt_vif_driver',
|
||||
'nova.virt.libvirt.vif.'
|
||||
'LibvirtGenericVIFDriver'),
|
||||
('security_group_api', 'neutron'),
|
||||
],
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
def odl_node_registration(self, controller):
|
||||
""" Register node with ODL if not registered already """
|
||||
odl_conn = odl.ODLConfig(**controller.connection())
|
||||
device_name = socket.gethostname()
|
||||
if odl_conn.is_device_registered(device_name):
|
||||
hookenv.log('{} is already registered in odl'.format(device_name))
|
||||
else:
|
||||
local_ip = ch_ip.get_address_in_network(
|
||||
self.config('os-data-network'),
|
||||
hookenv.unit_private_ip())
|
||||
hookenv.log('Registering {} ({}) in odl'.format(
|
||||
device_name, local_ip))
|
||||
odl_conn.odl_register_node(device_name, local_ip)
|
||||
|
||||
def odl_register_macs(self, controller):
|
||||
""" Register local interfaces and their networks with ODL """
|
||||
hookenv.log('Looking for macs to register with networks in odl')
|
||||
odl_conn = odl.ODLConfig(**controller.connection())
|
||||
device_name = socket.gethostname()
|
||||
requested_config = pci.PCIInfo()['local_config']
|
||||
for mac in requested_config.keys():
|
||||
for requested_net in requested_config[mac]:
|
||||
net = requested_net['net']
|
||||
interface = requested_net['interface']
|
||||
if not odl_conn.is_net_device_registered(net, device_name,
|
||||
interface, mac,
|
||||
device_type='ovs'):
|
||||
hookenv.log('Registering {} and {} on '
|
||||
'{}'.format(net, interface, mac))
|
||||
odl_conn.odl_register_macs(device_name, net, interface,
|
||||
mac, device_type='ovs')
|
||||
else:
|
||||
hookenv.log('{} already registered for {} on '
|
||||
'{}'.format(net, interface, device_name))
|
|
@ -0,0 +1,59 @@
|
|||
# Copyright 2016 Canonical Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import charms_openstack.charm as charm
|
||||
import charms.reactive as reactive
|
||||
|
||||
# This charm's library contains all of the handler code associated with
|
||||
# ovs_odl
|
||||
import charm.openstack.openvswitch_odl as ovs_odl # noqa
|
||||
|
||||
charm.use_defaults(
|
||||
'charm.installed',
|
||||
'config.changed',
|
||||
'update-status')
|
||||
|
||||
|
||||
@reactive.when('ovsdb-manager.access.available')
|
||||
@reactive.when('charm.installed')
|
||||
def configure_openvswitch(odl_ovsdb):
|
||||
with charm.provide_charm_instance() as ovs_odl_charm:
|
||||
ovs_odl_charm.configure_openvswitch(odl_ovsdb)
|
||||
reactive.set_state('ovs.configured')
|
||||
|
||||
|
||||
@reactive.when('ovs.configured')
|
||||
@reactive.when_not('ovsdb-manager.access.available')
|
||||
def unconfigure_openvswitch(odl_ovsdb=None):
|
||||
with charm.provide_charm_instance() as ovs_odl_charm:
|
||||
ovs_odl_charm.unconfigure_openvswitch(odl_ovsdb)
|
||||
reactive.remove_state('ovs.configured')
|
||||
|
||||
|
||||
@reactive.when('neutron-plugin.connected')
|
||||
def configure_neutron_plugin(neutron_plugin):
|
||||
with charm.provide_charm_instance() as ovs_odl_charm:
|
||||
ovs_odl_charm.configure_neutron_plugin(neutron_plugin)
|
||||
|
||||
|
||||
@reactive.when('controller-api.access.available')
|
||||
def odl_node_registration(controller):
|
||||
with charm.provide_charm_instance() as ovs_odl_charm:
|
||||
ovs_odl_charm.register_node(controller)
|
||||
|
||||
|
||||
@reactive.when('controller-api.access.available')
|
||||
def odl_mac_registration(controller):
|
||||
with charm.provide_charm_instance() as ovs_odl_charm:
|
||||
ovs_odl_charm.register_macs(controller)
|
|
@ -0,0 +1,23 @@
|
|||
# charm-proof
|
||||
charm-tools>=2.0.0
|
||||
# amulet deployment helpers
|
||||
bzr+lp:charm-helpers#egg=charmhelpers
|
||||
# BEGIN: Amulet OpenStack Charm Helper Requirements
|
||||
# Liberty client lower constraints
|
||||
amulet>=1.14.3,<2.0
|
||||
bundletester>=0.6.1,<1.0
|
||||
aodhclient>=0.1.0
|
||||
python-ceilometerclient>=1.5.0,<2.0
|
||||
python-cinderclient>=1.4.0,<2.0
|
||||
python-glanceclient>=1.1.0,<2.0
|
||||
python-heatclient>=0.8.0,<1.0
|
||||
python-keystoneclient>=1.7.1,<2.0
|
||||
python-neutronclient>=3.1.0,<4.0
|
||||
python-novaclient>=2.30.1,<3.0
|
||||
python-openstackclient>=1.7.0,<2.0
|
||||
python-swiftclient>=2.6.0,<3.0
|
||||
pika>=0.10.0,<1.0
|
||||
distro-info
|
||||
# END: Amulet OpenStack Charm Helper Requirements
|
||||
# NOTE: workaround for 14.04 pip/tox
|
||||
pytz
|
|
@ -0,0 +1,9 @@
|
|||
# Overview
|
||||
|
||||
This directory provides Amulet tests to verify basic deployment functionality
|
||||
from the perspective of this charm, its requirements and its features, as
|
||||
exercised in a subset of the full OpenStack deployment test bundle topology.
|
||||
|
||||
For full details on functional testing of OpenStack charms please refer to
|
||||
the [functional testing](http://docs.openstack.org/developer/charm-guide/testing.html#functional-testing)
|
||||
section of the OpenStack Charm Guide.
|
|
@ -0,0 +1,264 @@
|
|||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2016 Canonical Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import amulet
|
||||
import os
|
||||
from neutronclient.v2_0 import client as neutronclient
|
||||
|
||||
from charmhelpers.contrib.openstack.amulet.deployment import (
|
||||
OpenStackAmuletDeployment
|
||||
)
|
||||
|
||||
from charmhelpers.contrib.openstack.amulet.utils import (
|
||||
OpenStackAmuletUtils,
|
||||
DEBUG,
|
||||
)
|
||||
|
||||
# Use DEBUG to turn on debug logging
|
||||
u = OpenStackAmuletUtils(DEBUG)
|
||||
|
||||
ODL_QUERY_PATH = '/restconf/operational/opendaylight-inventory:nodes/'
|
||||
ODL_PROFILES = {
|
||||
'helium': {
|
||||
'location': 'AMULET_ODL_LOCATION',
|
||||
'profile': 'openvswitch-odl'
|
||||
},
|
||||
'beryllium': {
|
||||
'location': 'AMULET_ODL_BE_LOCATION',
|
||||
'profile': 'openvswitch-odl-beryllium'
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
class OVSODLBasicDeployment(OpenStackAmuletDeployment):
|
||||
"""Amulet tests on a basic neutron-openvswtich deployment."""
|
||||
|
||||
def __init__(self, series, openstack=None, source=None, git=False,
|
||||
stable=False, odl_version='helium'):
|
||||
"""Deploy the entire test environment."""
|
||||
super(OVSODLBasicDeployment, self).__init__(series, openstack,
|
||||
source, stable)
|
||||
self.odl_version = odl_version
|
||||
self._add_services()
|
||||
self._add_relations()
|
||||
self._configure_services()
|
||||
self._deploy()
|
||||
self._initialize_tests()
|
||||
|
||||
def _add_services(self):
|
||||
"""Add services
|
||||
|
||||
Add the services that we're testing, where openvswitch-odl is local,
|
||||
and the rest of the service are from lp branches that are
|
||||
compatible with the local charm (e.g. stable or next).
|
||||
"""
|
||||
this_service = {'name': 'openvswitch-odl'}
|
||||
other_services = [
|
||||
{
|
||||
'name': 'nova-compute',
|
||||
'constraints': {'mem': '4G'},
|
||||
},
|
||||
{
|
||||
'name': 'neutron-api',
|
||||
},
|
||||
{
|
||||
'name': 'neutron-gateway',
|
||||
},
|
||||
{
|
||||
'name': 'odl-controller',
|
||||
'constraints': {'mem': '8G'},
|
||||
},
|
||||
{
|
||||
'name': 'neutron-api-odl',
|
||||
},
|
||||
{'name': 'mysql'},
|
||||
{'name': 'rabbitmq-server'},
|
||||
{'name': 'keystone'},
|
||||
{'name': 'nova-cloud-controller'},
|
||||
{'name': 'glance'},
|
||||
]
|
||||
super(OVSODLBasicDeployment, self)._add_services(this_service,
|
||||
other_services)
|
||||
|
||||
def _add_relations(self):
|
||||
"""Add all of the relations for the services."""
|
||||
relations = {
|
||||
'neutron-api:neutron-plugin-api-subordinate':
|
||||
'neutron-api-odl:neutron-plugin-api-subordinate',
|
||||
'nova-compute:neutron-plugin': 'openvswitch-odl:neutron-plugin',
|
||||
'openvswitch-odl:ovsdb-manager': 'odl-controller:ovsdb-manager',
|
||||
'neutron-api-odl:odl-controller': 'odl-controller:controller-api',
|
||||
'keystone:shared-db': 'mysql:shared-db',
|
||||
'nova-cloud-controller:shared-db': 'mysql:shared-db',
|
||||
'nova-cloud-controller:amqp': 'rabbitmq-server:amqp',
|
||||
'nova-cloud-controller:image-service': 'glance:image-service',
|
||||
'nova-cloud-controller:identity-service':
|
||||
'keystone:identity-service',
|
||||
'nova-compute:cloud-compute':
|
||||
'nova-cloud-controller:cloud-compute',
|
||||
'nova-compute:amqp': 'rabbitmq-server:amqp',
|
||||
'nova-compute:image-service': 'glance:image-service',
|
||||
'glance:shared-db': 'mysql:shared-db',
|
||||
'glance:identity-service': 'keystone:identity-service',
|
||||
'glance:amqp': 'rabbitmq-server:amqp',
|
||||
'neutron-api:shared-db': 'mysql:shared-db',
|
||||
'neutron-api:amqp': 'rabbitmq-server:amqp',
|
||||
'neutron-api:neutron-api': 'nova-cloud-controller:neutron-api',
|
||||
'neutron-api:identity-service': 'keystone:identity-service',
|
||||
'neutron-gateway:amqp': 'rabbitmq-server:amqp',
|
||||
'neutron-gateway:neutron-plugin-api':
|
||||
'neutron-api:neutron-plugin-api',
|
||||
'neutron-gateway:quantum-network-service':
|
||||
'nova-cloud-controller:quantum-network-service',
|
||||
'neutron-gateway:juju-info': 'openvswitch-odl:container',
|
||||
}
|
||||
super(OVSODLBasicDeployment, self)._add_relations(relations)
|
||||
|
||||
def _configure_services(self):
|
||||
"""Configure all of the services."""
|
||||
neutron_api = {
|
||||
'neutron-security-groups': False,
|
||||
}
|
||||
nova_compute = {
|
||||
'enable-live-migration': False,
|
||||
}
|
||||
keystone = {
|
||||
'admin-password': 'openstack',
|
||||
'admin-token': 'ubuntutesting',
|
||||
}
|
||||
mysql = {
|
||||
'dataset-size': '50%',
|
||||
}
|
||||
odl_controller = {}
|
||||
if os.environ.get(ODL_PROFILES[self.odl_version]['location']):
|
||||
odl_controller['install-url'] = \
|
||||
os.environ.get(ODL_PROFILES[self.odl_version]['location'])
|
||||
if os.environ.get('AMULET_HTTP_PROXY'):
|
||||
odl_controller['http-proxy'] = \
|
||||
os.environ['AMULET_HTTP_PROXY']
|
||||
if os.environ.get('AMULET_HTTP_PROXY'):
|
||||
odl_controller['https-proxy'] = \
|
||||
os.environ['AMULET_HTTP_PROXY']
|
||||
odl_controller['profile'] = \
|
||||
ODL_PROFILES[self.odl_version]['profile']
|
||||
neutron_gateway = {
|
||||
'plugin': 'ovs-odl'
|
||||
}
|
||||
neutron_api_odl = {
|
||||
'overlay-network-type': 'vxlan gre',
|
||||
}
|
||||
nova_cc = {
|
||||
'network-manager': 'Neutron',
|
||||
}
|
||||
configs = {
|
||||
'neutron-api': neutron_api,
|
||||
'nova-compute': nova_compute,
|
||||
'keystone': keystone,
|
||||
'mysql': mysql,
|
||||
'odl-controller': odl_controller,
|
||||
'neutron-api-odl': neutron_api_odl,
|
||||
'neutron-gateway': neutron_gateway,
|
||||
'nova-cloud-controller': nova_cc,
|
||||
}
|
||||
super(OVSODLBasicDeployment, self)._configure_services(configs)
|
||||
|
||||
def _initialize_tests(self):
|
||||
"""Perform final initialization before tests get run."""
|
||||
# Access the sentries for inspecting service units
|
||||
self.compute_sentry = self.d.sentry['nova-compute'][0]
|
||||
self.neutron_api_sentry = self.d.sentry['neutron-api'][0]
|
||||
self.ovsodl_sentry = self.d.sentry['openvswitch-odl'][0]
|
||||
self.mysql_sentry = self.d.sentry['mysql'][0]
|
||||
self.rabbitmq_server_sentry = self.d.sentry['rabbitmq-server'][0]
|
||||
self.keystone_sentry = self.d.sentry['keystone'][0]
|
||||
self.glance_sentry = self.d.sentry['glance'][0]
|
||||
self.nova_cc_sentry = self.d.sentry['nova-cloud-controller'][0]
|
||||
self.neutron_api_odl_sentry = self.d.sentry['neutron-api-odl'][0]
|
||||
self.odl_controller_sentry = self.d.sentry['odl-controller'][0]
|
||||
self.gateway_sentry = self.d.sentry['neutron-gateway'][0]
|
||||
self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
|
||||
user='admin',
|
||||
password='openstack',
|
||||
tenant='admin')
|
||||
ep = self.keystone.service_catalog.url_for(service_type='identity',
|
||||
endpoint_type='publicURL')
|
||||
self.neutron = neutronclient.Client(auth_url=ep,
|
||||
username='admin',
|
||||
password='openstack',
|
||||
tenant_name='admin',
|
||||
region_name='RegionOne')
|
||||
|
||||
def test_services(self):
|
||||
"""Verify the expected services are running on the corresponding
|
||||
service units."""
|
||||
|
||||
commands = {
|
||||
self.compute_sentry: ['nova-compute',
|
||||
'openvswitch-switch'],
|
||||
self.gateway_sentry: ['openvswitch-switch',
|
||||
'neutron-dhcp-agent',
|
||||
'neutron-l3-agent',
|
||||
'neutron-metadata-agent',
|
||||
'neutron-metering-agent',
|
||||
'neutron-lbaas-agent',
|
||||
'nova-api-metadata'],
|
||||
self.odl_controller_sentry: ['odl-controller'],
|
||||
}
|
||||
|
||||
ret = u.validate_services_by_name(commands)
|
||||
if ret:
|
||||
amulet.raise_status(amulet.FAIL, msg=ret)
|
||||
|
||||
def test_gateway_bridges(self):
|
||||
"""Ensure that all bridges are present and configured with the
|
||||
ODL controller as their NorthBound controller URL."""
|
||||
odl_ip = self.odl_controller_sentry.relation(
|
||||
'ovsdb-manager',
|
||||
'openvswitch-odl:ovsdb-manager'
|
||||
)['private-address']
|
||||
# NOTE: 6633 is legacy 6653 is IANA assigned
|
||||
if self.odl_version == 'helium':
|
||||
controller_url = "tcp:{}:6633".format(odl_ip)
|
||||
check_bridges = ['br-int', 'br-ex', 'br-data']
|
||||
else:
|
||||
controller_url = "tcp:{}:6653".format(odl_ip)
|
||||
# NOTE: later ODL releases only manage br-int
|
||||
check_bridges = ['br-int']
|
||||
cmd = 'ovs-vsctl list-br'
|
||||
output, _ = self.gateway_sentry.run(cmd)
|
||||
bridges = output.split()
|
||||
for bridge in check_bridges:
|
||||
if bridge not in bridges:
|
||||
amulet.raise_status(
|
||||
amulet.FAIL,
|
||||
msg="Missing bridge {} from gateway unit".format(bridge)
|
||||
)
|
||||
cmd = 'ovs-vsctl get-controller {}'.format(bridge)
|
||||
br_controllers, _ = self.gateway_sentry.run(cmd)
|
||||
# Beware of duplicate entries:
|
||||
# https://bugs.opendaylight.org/show_bug.cgi?id=960
|
||||
br_controllers = list(set(br_controllers.split('\n')))
|
||||
if len(br_controllers) != 1 or br_controllers[0] != controller_url:
|
||||
status, _ = self.gateway_sentry.run('ovs-vsctl show')
|
||||
amulet.raise_status(
|
||||
amulet.FAIL,
|
||||
msg="Controller configuration on bridge"
|
||||
" {} incorrect: {} != {}\n"
|
||||
"{}".format(bridge,
|
||||
br_controllers,
|
||||
controller_url,
|
||||
status)
|
||||
)
|
|
@ -14,5 +14,10 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import charmhelpers.core.reactive as reactive
|
||||
reactive.main()
|
||||
"""Amulet tests on a basic openvswitch ODL deployment on trusty-icehouse."""
|
||||
|
||||
from basic_deployment import OVSODLBasicDeployment
|
||||
|
||||
if __name__ == '__main__':
|
||||
deployment = OVSODLBasicDeployment(series='trusty')
|
||||
deployment.run_tests()
|
|
@ -0,0 +1,25 @@
|
|||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright 2016 Canonical Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Amulet tests on a basic openvswitch ODL deployment on trusty-kilo."""
|
||||
|
||||
from basic_deployment import OVSODLBasicDeployment
|
||||
|
||||
if __name__ == '__main__':
|
||||
deployment = OVSODLBasicDeployment(series='trusty',
|
||||
openstack='cloud:trusty-kilo',
|
||||
source='cloud:trusty-updates/kilo')
|
||||
deployment.run_tests()
|
|
@ -0,0 +1,25 @@
|
|||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright 2016 Canonical Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Amulet tests on a basic openvswitch ODL deployment on trusty-liberty."""
|
||||
|
||||
from basic_deployment import OVSODLBasicDeployment
|
||||
|
||||
if __name__ == '__main__':
|
||||
deployment = OVSODLBasicDeployment(series='trusty',
|
||||
openstack='cloud:trusty-liberty',
|
||||
source='cloud:trusty-updates/liberty')
|
||||
deployment.run_tests()
|
|
@ -0,0 +1,25 @@
|
|||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright 2016 Canonical Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Amulet tests on a basic openvswitch ODL deployment on trusty-mitaka."""
|
||||
|
||||
from basic_deployment import OVSODLBasicDeployment
|
||||
|
||||
if __name__ == '__main__':
|
||||
deployment = OVSODLBasicDeployment(series='trusty',
|
||||
openstack='cloud:trusty-mitaka',
|
||||
source='cloud:trusty-updates/mitaka')
|
||||
deployment.run_tests()
|
|
@ -1,3 +1,5 @@
|
|||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright 2016 Canonical Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
|
@ -11,3 +13,12 @@
|
|||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Amulet tests on a basic openvswitch ODL deployment on xenial-mitaka."""
|
||||
|
||||
from basic_deployment import OVSODLBasicDeployment
|
||||
|
||||
if __name__ == '__main__':
|
||||
deployment = OVSODLBasicDeployment(series='xenial',
|
||||
odl_version='beryllium')
|
||||
deployment.run_tests()
|
|
@ -0,0 +1,17 @@
|
|||
# Bootstrap the model if necessary.
|
||||
bootstrap: True
|
||||
# Re-use bootstrap node instead of destroying/re-bootstrapping.
|
||||
reset: True
|
||||
# Use tox/requirements to drive the venv instead of bundletester's venv feature.
|
||||
virtualenv: False
|
||||
# Leave makefile empty, otherwise unit/lint tests will rerun ahead of amulet.
|
||||
makefile: []
|
||||
# Do not specify juju PPA sources. Juju is presumed to be pre-installed
|
||||
# and configured in all test runner environments.
|
||||
#sources:
|
||||
# Do not specify or rely on system packages.
|
||||
#packages:
|
||||
# Do not specify python packages here. Use test-requirements.txt
|
||||
# and tox instead. ie. The venv is constructed before bundletester
|
||||
# is invoked.
|
||||
#python-packages:
|
|
@ -0,0 +1,53 @@
|
|||
# Source charm: ./src/tox.ini
|
||||
# This file is managed centrally by release-tools and should not be modified
|
||||
# within individual charm repos.
|
||||
[tox]
|
||||
envlist = pep8
|
||||
skipsdist = True
|
||||
|
||||
[testenv]
|
||||
setenv = VIRTUAL_ENV={envdir}
|
||||
PYTHONHASHSEED=0
|
||||
AMULET_SETUP_TIMEOUT=2700
|
||||
whitelist_externals = juju
|
||||
passenv = HOME TERM AMULET_*
|
||||
deps = -r{toxinidir}/test-requirements.txt
|
||||
install_command =
|
||||
pip install --allow-unverified python-apt {opts} {packages}
|
||||
|
||||
[testenv:pep8]
|
||||
basepython = python2.7
|
||||
commands = charm-proof
|
||||
|
||||
[testenv:func27-noop]
|
||||
# DRY RUN - For Debug
|
||||
basepython = python2.7
|
||||
commands =
|
||||
bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" -n --no-destroy
|
||||
|
||||
[testenv:func27]
|
||||
# Run all gate tests which are +x (expected to always pass)
|
||||
basepython = python2.7
|
||||
commands =
|
||||
bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" --no-destroy
|
||||
|
||||
[testenv:func27-smoke]
|
||||
# Run a specific test as an Amulet smoke test (expected to always pass)
|
||||
basepython = python2.7
|
||||
commands =
|
||||
bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-mitaka --no-destroy
|
||||
|
||||
[testenv:func27-dfs]
|
||||
# Run all deploy-from-source tests which are +x (may not always pass!)
|
||||
basepython = python2.7
|
||||
commands =
|
||||
bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dfs-*" --no-destroy
|
||||
|
||||
[testenv:func27-dev]
|
||||
# Run all development test targets which are +x (may not always pass!)
|
||||
basepython = python2.7
|
||||
commands =
|
||||
bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dev-*" --no-destroy
|
||||
|
||||
[testenv:venv]
|
||||
commands = {posargs}
|
|
@ -1,25 +1,8 @@
|
|||
# The order of packages is significant, because pip processes them in the order
|
||||
# of appearance. Changing the order has an impact on the overall integration
|
||||
# process, which may cause wedges in the gate later.
|
||||
testtools
|
||||
coverage==3.7.1
|
||||
mock==1.0.1
|
||||
flake8==2.1.0
|
||||
# No version required
|
||||
charm-tools>=2.0.0
|
||||
os-testr
|
||||
requests==2.6.0
|
||||
# BEGIN: Amulet OpenStack Charm Helper Requirements
|
||||
# Liberty client lower constraints
|
||||
amulet>=1.14.3,<2.0
|
||||
bundletester>=0.6.1,<1.0
|
||||
python-ceilometerclient>=1.5.0,<2.0
|
||||
python-cinderclient>=1.4.0,<2.0
|
||||
python-glanceclient>=1.1.0,<2.0
|
||||
python-heatclient>=0.8.0,<1.0
|
||||
python-novaclient>=2.30.1,<3.0
|
||||
python-openstackclient>=1.7.0,<2.0
|
||||
python-swiftclient>=2.6.0,<3.0
|
||||
pika>=0.10.0,<1.0
|
||||
distro-info
|
||||
# END: Amulet OpenStack Charm Helper Requirements
|
||||
# Lint and unit test requirements
|
||||
flake8
|
||||
os-testr>=0.4.1
|
||||
charms.reactive
|
||||
mock>=1.2
|
||||
coverage>=3.6
|
||||
requests
|
||||
git+https://github.com/openstack/charms.openstack.git#egg=charms-openstack
|
||||
|
|
|
@ -56,6 +56,11 @@ class OVSODLBasicDeployment(OpenStackAmuletDeployment):
|
|||
self._add_relations()
|
||||
self._configure_services()
|
||||
self._deploy()
|
||||
|
||||
u.log.info('Waiting on extended status checks...')
|
||||
exclude_services = ['mysql']
|
||||
self._auto_wait_for_status(exclude_services=exclude_services)
|
||||
|
||||
self._initialize_tests()
|
||||
|
||||
def _add_services(self):
|
||||
|
|
89
tox.ini
89
tox.ini
|
@ -1,80 +1,47 @@
|
|||
# Classic charm: ./tox.ini
|
||||
# Source charm: ./tox.ini
|
||||
# This file is managed centrally by release-tools and should not be modified
|
||||
# within individual charm repos.
|
||||
[tox]
|
||||
envlist = pep8,py27
|
||||
skipsdist = True
|
||||
envlist = pep8,py34,py35
|
||||
skip_missing_interpreters = True
|
||||
|
||||
[testenv]
|
||||
setenv = VIRTUAL_ENV={envdir}
|
||||
PYTHONHASHSEED=0
|
||||
CHARM_DIR={envdir}
|
||||
AMULET_SETUP_TIMEOUT=2700
|
||||
TERM=linux
|
||||
LAYER_PATH={toxinidir}/layers
|
||||
INTERFACE_PATH={toxinidir}/interfaces
|
||||
JUJU_REPOSITORY={toxinidir}/build
|
||||
passenv = http_proxy https_proxy
|
||||
install_command =
|
||||
pip install --allow-unverified python-apt {opts} {packages}
|
||||
commands = ostestr {posargs}
|
||||
whitelist_externals = juju
|
||||
passenv = HOME TERM AMULET_*
|
||||
pip install {opts} {packages}
|
||||
deps =
|
||||
-r{toxinidir}/requirements.txt
|
||||
|
||||
[testenv:py27]
|
||||
[testenv:build]
|
||||
basepython = python2.7
|
||||
deps = -r{toxinidir}/requirements.txt
|
||||
-r{toxinidir}/test-requirements.txt
|
||||
commands =
|
||||
charm-build --log-level DEBUG -o {toxinidir}/build src {posargs}
|
||||
|
||||
[testenv:py34]
|
||||
basepython = python3.4
|
||||
deps = -r{toxinidir}/test-requirements.txt
|
||||
commands = ostestr {posargs}
|
||||
|
||||
[testenv:py35]
|
||||
basepython = python3.5
|
||||
deps = -r{toxinidir}/test-requirements.txt
|
||||
commands = ostestr {posargs}
|
||||
|
||||
[testenv:pep8]
|
||||
basepython = python2.7
|
||||
deps = -r{toxinidir}/requirements.txt
|
||||
-r{toxinidir}/test-requirements.txt
|
||||
commands = flake8 {posargs} hooks unit_tests tests actions lib
|
||||
charm-proof
|
||||
deps = -r{toxinidir}/test-requirements.txt
|
||||
commands = flake8 {posargs} src unit_tests
|
||||
|
||||
[testenv:venv]
|
||||
commands = {posargs}
|
||||
|
||||
[testenv:func27-noop]
|
||||
# DRY RUN - For Debug
|
||||
basepython = python2.7
|
||||
deps = -r{toxinidir}/requirements.txt
|
||||
-r{toxinidir}/test-requirements.txt
|
||||
commands =
|
||||
bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" -n --no-destroy
|
||||
|
||||
[testenv:func27]
|
||||
# Charm Functional Test
|
||||
# Run all gate tests which are +x (expected to always pass)
|
||||
basepython = python2.7
|
||||
deps = -r{toxinidir}/requirements.txt
|
||||
-r{toxinidir}/test-requirements.txt
|
||||
commands =
|
||||
bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" --no-destroy
|
||||
|
||||
[testenv:func27-smoke]
|
||||
# Charm Functional Test
|
||||
# Run a specific test as an Amulet smoke test (expected to always pass)
|
||||
basepython = python2.7
|
||||
deps = -r{toxinidir}/requirements.txt
|
||||
-r{toxinidir}/test-requirements.txt
|
||||
commands =
|
||||
bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-mitaka --no-destroy
|
||||
|
||||
[testenv:func27-dfs]
|
||||
# Charm Functional Test
|
||||
# Run all deploy-from-source tests which are +x (may not always pass!)
|
||||
basepython = python2.7
|
||||
deps = -r{toxinidir}/requirements.txt
|
||||
-r{toxinidir}/test-requirements.txt
|
||||
commands =
|
||||
bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dfs-*" --no-destroy
|
||||
|
||||
[testenv:func27-dev]
|
||||
# Charm Functional Test
|
||||
# Run all development test targets which are +x (may not always pass!)
|
||||
basepython = python2.7
|
||||
deps = -r{toxinidir}/requirements.txt
|
||||
-r{toxinidir}/test-requirements.txt
|
||||
commands =
|
||||
bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dev-*" --no-destroy
|
||||
|
||||
[flake8]
|
||||
ignore = E402,E226
|
||||
exclude = */charmhelpers
|
||||
# E402 ignore necessary for path append before sys module import in actions
|
||||
ignore = E402
|
||||
|
|
|
@ -13,5 +13,34 @@
|
|||
# limitations under the License.
|
||||
|
||||
import sys
|
||||
import mock
|
||||
|
||||
sys.path.append('hooks')
|
||||
sys.path.append('src')
|
||||
sys.path.append('src/lib')
|
||||
|
||||
# Mock out charmhelpers so that we can test without it.
|
||||
# also stops sideeffects from occuring.
|
||||
charmhelpers = mock.MagicMock()
|
||||
apt_pkg = mock.MagicMock()
|
||||
sys.modules['apt_pkg'] = apt_pkg
|
||||
sys.modules['charmhelpers'] = charmhelpers
|
||||
sys.modules['charmhelpers.core'] = charmhelpers.core
|
||||
sys.modules['charmhelpers.core.decorators'] = charmhelpers.core.decorators
|
||||
sys.modules['charmhelpers.core.hookenv'] = charmhelpers.core.hookenv
|
||||
sys.modules['charmhelpers.core.host'] = charmhelpers.core.host
|
||||
sys.modules['charmhelpers.core.unitdata'] = charmhelpers.core.unitdata
|
||||
sys.modules['charmhelpers.core.templating'] = charmhelpers.core.templating
|
||||
sys.modules['charmhelpers.contrib'] = charmhelpers.contrib
|
||||
sys.modules['charmhelpers.contrib.openstack'] = charmhelpers.contrib.openstack
|
||||
sys.modules['charmhelpers.contrib.openstack.utils'] = (
|
||||
charmhelpers.contrib.openstack.utils)
|
||||
sys.modules['charmhelpers.contrib.openstack.templating'] = (
|
||||
charmhelpers.contrib.openstack.templating)
|
||||
sys.modules['charmhelpers.contrib.network'] = charmhelpers.contrib.network
|
||||
sys.modules['charmhelpers.contrib.network.ip'] = (
|
||||
charmhelpers.contrib.network.ip)
|
||||
sys.modules['charmhelpers.fetch'] = charmhelpers.fetch
|
||||
sys.modules['charmhelpers.cli'] = charmhelpers.cli
|
||||
sys.modules['charmhelpers.contrib.hahelpers'] = charmhelpers.contrib.hahelpers
|
||||
sys.modules['charmhelpers.contrib.hahelpers.cluster'] = (
|
||||
charmhelpers.contrib.hahelpers.cluster)
|
||||
|
|
|
@ -0,0 +1,168 @@
|
|||
# Copyright 2016 Canonical Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from __future__ import absolute_import
|
||||
from __future__ import print_function
|
||||
|
||||
import unittest
|
||||
|
||||
import mock
|
||||
|
||||
import charm.openstack.openvswitch_odl as openvswitch_odl
|
||||
|
||||
# OVSDB manager connection string advertised by the mocked ODL relation.
CONN_STRING = 'tcp:odl-controller:6640'
# Address returned for all of the unit's private-address/network lookups.
LOCALHOST = '10.1.1.1'
|
||||
|
||||
|
||||
class Helper(unittest.TestCase):
    """Base TestCase that manages a registry of mock.patch.object patches.

    ``patch()`` starts a patcher, records it, and exposes the started mock
    as an attribute on the test case named after the patched attribute;
    ``tearDown`` stops every patcher and clears the registry.
    """

    def setUp(self):
        # name -> patcher object, and name -> started mock, respectively.
        self._patches = {}
        self._patches_start = {}

    def tearDown(self):
        # Stop every active patcher and drop the mock attributes that
        # patch() attached to this instance.
        for name, patcher in self._patches.items():
            patcher.stop()
            setattr(self, name, None)
        self._patches = None
        self._patches_start = None

    def patch(self, obj, attr, return_value=None, **kwargs):
        """Patch ``obj.attr``, start it, and expose the mock as self.<attr>."""
        patcher = mock.patch.object(obj, attr, **kwargs)
        self._patches[attr] = patcher
        active = patcher.start()
        active.return_value = return_value
        self._patches_start[attr] = active
        setattr(self, attr, active)
|
||||
|
||||
|
||||
class TestOpenStackOVSODLCharm(Helper):
    """Unit tests for charm.openstack.openvswitch_odl.OVSODLCharm."""

    def test_configure_openvswitch(self):
        # Patch out all hookenv/ovs helpers the charm calls, then verify
        # the OVS manager and external_ids configuration it applies.
        odl_ovsdb = mock.MagicMock()
        self.patch(openvswitch_odl.ch_ip, 'get_address_in_network')
        self.patch(openvswitch_odl.hookenv, 'log')
        self.patch(openvswitch_odl.hookenv, 'status_set')
        self.patch(openvswitch_odl.hookenv, 'unit_private_ip')
        self.patch(openvswitch_odl.ovs, 'set_config')
        self.patch(openvswitch_odl.ovs, 'set_manager')
        self.patch(openvswitch_odl.socket, 'gethostname')
        self.gethostname.return_value = 'ovs-host'
        self.unit_private_ip.return_value = LOCALHOST
        self.get_address_in_network.return_value = LOCALHOST
        odl_ovsdb.connection_string.return_value = CONN_STRING
        odl_ovsdb.private_address.return_value = 'odl-controller'
        a = openvswitch_odl.OVSODLCharm()
        a.configure_openvswitch(odl_ovsdb)
        self.set_manager.assert_called_with(CONN_STRING)
        self.set_config.assert_has_calls([
            mock.call('local_ip', '10.1.1.1'),
            mock.call('controller-ips', 'odl-controller',
                      table='external_ids'),
            mock.call('host-id', 'ovs-host',
                      table='external_ids'),
        ])
        self.get_address_in_network.assert_called_with(mock.ANY, '10.1.1.1')
        self.status_set.assert_called_with('active',
                                           'Open vSwitch configured and ready')

    def test_unconfigure_openvswitch(self):
        # The charm should drop the manager and the controller of every
        # bridge reported by `ovs-vsctl list-br`.
        odl_ovsdb = mock.MagicMock()
        self.patch(openvswitch_odl.hookenv, 'log')
        self.patch(openvswitch_odl.hookenv, 'status_set')
        self.patch(openvswitch_odl.subprocess, 'check_call')
        self.patch(openvswitch_odl.subprocess, 'check_output')
        self.check_output.return_value = "br-data\nbr-ex\nbr-int\n"
        a = openvswitch_odl.OVSODLCharm()
        a.unconfigure_openvswitch(odl_ovsdb)
        check_call_calls = [
            mock.call(['ovs-vsctl', 'del-manager']),
            mock.call(['ovs-vsctl', 'del-controller', 'br-data']),
            mock.call(['ovs-vsctl', 'del-controller', 'br-ex']),
            mock.call(['ovs-vsctl', 'del-controller', 'br-int'])]
        self.check_call.assert_has_calls(check_call_calls)
        self.check_output.assert_called_once_with(['ovs-vsctl', 'list-br'])
        # BUG FIX: the original bare attribute access
        # ``self.status_set.assert_called`` never executed an assertion;
        # it must be *called* for the check to be enforced.
        self.status_set.assert_called()

    def test_configure_neutron_plugin(self):
        # Verify the plugin name and nova.conf section payload handed to
        # the neutron-plugin relation.
        neutron_plugin = mock.MagicMock()
        a = openvswitch_odl.OVSODLCharm()
        a.configure_neutron_plugin(neutron_plugin)
        neutron_plugin.configure_plugin.assert_called_with(
            plugin='ovs-odl',
            config={
                "nova-compute": {
                    "/etc/nova/nova.conf": {
                        "sections": {
                            'DEFAULT': [
                                ('firewall_driver',
                                 'nova.virt.firewall.NoopFirewallDriver'),
                                ('libvirt_vif_driver',
                                 'nova.virt.libvirt.vif.'
                                 'LibvirtGenericVIFDriver'),
                                ('security_group_api', 'neutron'),
                            ],
                        }
                    }
                }
            }
        )

    def test_odl_node_registration(self):
        # Unregistered device: the charm registers this host with ODL.
        controller = mock.MagicMock()
        odl = mock.MagicMock()
        self.patch(openvswitch_odl.socket, 'gethostname')
        self.patch(openvswitch_odl.odl, 'ODLConfig')
        self.patch(openvswitch_odl.ch_ip, 'get_address_in_network')
        self.patch(openvswitch_odl.hookenv, 'log')
        self.patch(openvswitch_odl.hookenv, 'unit_private_ip')
        self.gethostname.return_value = 'ovs-host'
        self.unit_private_ip.return_value = LOCALHOST
        self.get_address_in_network.return_value = LOCALHOST
        self.ODLConfig.return_value = odl
        odl.is_device_registered.return_value = False
        a = openvswitch_odl.OVSODLCharm()
        a.odl_node_registration(controller)
        self.get_address_in_network.assert_called_once_with(mock.ANY,
                                                            '10.1.1.1')
        odl.odl_register_node.assert_called_once_with('ovs-host', '10.1.1.1')

    def test_odl_node_registration_already_registered(self):
        # Already-registered device: registration must be skipped.
        controller = mock.MagicMock()
        odl = mock.MagicMock()
        self.patch(openvswitch_odl.socket, 'gethostname')
        self.patch(openvswitch_odl.odl, 'ODLConfig')
        self.ODLConfig.return_value = odl
        odl.is_device_registered.return_value = True
        a = openvswitch_odl.OVSODLCharm()
        a.odl_node_registration(controller)
        self.assertFalse(odl.odl_register_node.called)

    def test_odl_register_macs(self):
        # Each locally configured MAC/interface pair should be registered
        # with ODL as an 'ovs' device.
        controller = mock.MagicMock()
        odl = mock.MagicMock()
        odl.is_net_device_registered.return_value = False
        self.patch(openvswitch_odl.socket, 'gethostname')
        self.patch(openvswitch_odl.odl, 'ODLConfig')
        self.patch(openvswitch_odl.pci, 'PCIInfo')
        self.ODLConfig.return_value = odl
        self.gethostname.return_value = 'ovs-host'
        self.PCIInfo.return_value = {
            'local_config': {
                'mac1': [
                    {'net': 'net1', 'interface': 'eth1'}]}}
        a = openvswitch_odl.OVSODLCharm()
        a.odl_register_macs(controller)
        odl.odl_register_macs.assert_called_once_with(
            'ovs-host', 'net1', 'eth1', 'mac1', device_type='ovs')
|
|
@ -0,0 +1,111 @@
|
|||
# Copyright 2016 Canonical Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from __future__ import absolute_import
|
||||
from __future__ import print_function
|
||||
|
||||
import mock
|
||||
|
||||
import reactive.openvswitch_odl_handlers as handlers
|
||||
|
||||
import charms_openstack.test_utils as test_utils
|
||||
|
||||
|
||||
class TestRegisteredHooks(test_utils.TestRegisteredHooks):
    """Verify the reactive handlers are registered on the expected states."""

    def test_hooks(self):
        # States covered by the framework's default handlers.
        defaults = [
            'charm.installed',
            'config.changed',
            'update-status']
        # Expected @when / @when_not decorations per handler function.
        hook_set = {
            'when': {
                'configure_openvswitch': (
                    'charm.installed',
                    'ovsdb-manager.access.available',),
                'unconfigure_openvswitch': (
                    'ovs.configured',),
                'configure_neutron_plugin': (
                    'neutron-plugin.connected',),
                'odl_node_registration': (
                    'controller-api.access.available',),
                'odl_mac_registration': (
                    'controller-api.access.available',),
            },
            'when_not': {
                'unconfigure_openvswitch': (
                    'ovsdb-manager.access.available',),
            }
        }
        # test that the hooks were registered via the
        # reactive.openvswitch_odl_handlers module
        self.registered_hooks_test_helper(handlers, hook_set, defaults)
|
||||
|
||||
|
||||
class TestConfigureOpenvswitch(test_utils.PatchHelper):
    """Unit tests for the reactive.openvswitch_odl_handlers functions.

    Each test replaces charm.provide_charm_instance (a context manager)
    with a MagicMock whose __enter__ yields a mock charm instance, then
    checks the handler delegates to the right charm method.
    """

    def test_configure_openvswitch(self):
        ovs_odl_charm = mock.MagicMock()
        self.patch_object(handlers.reactive, 'set_state')
        self.patch_object(handlers.charm, 'provide_charm_instance',
                          new=mock.MagicMock())
        # Make the context manager yield our mock charm instance.
        self.provide_charm_instance().__enter__.return_value = ovs_odl_charm
        self.provide_charm_instance().__exit__.return_value = None

        handlers.configure_openvswitch('arg1')
        ovs_odl_charm.configure_openvswitch.assert_called_once_with(('arg1'))
        # Handler must flag OVS as configured afterwards.
        self.set_state.assert_called_once_with('ovs.configured')

    def test_unconfigure_openvswitch(self):
        ovs_odl_charm = mock.MagicMock()
        self.patch_object(handlers.reactive, 'remove_state')
        self.patch_object(handlers.charm, 'provide_charm_instance',
                          new=mock.MagicMock())
        self.provide_charm_instance().__enter__.return_value = ovs_odl_charm
        self.provide_charm_instance().__exit__.return_value = None

        handlers.unconfigure_openvswitch()
        # The handler passes no relation, hence the charm sees None.
        ovs_odl_charm.unconfigure_openvswitch.assert_called_once_with(None)
        # Handler must drop the configured flag afterwards.
        self.remove_state.assert_called_once_with('ovs.configured')

    def test_configure_neutron_plugin(self):
        ovs_odl_charm = mock.MagicMock()
        self.patch_object(handlers.charm, 'provide_charm_instance',
                          new=mock.MagicMock())
        self.provide_charm_instance().__enter__.return_value = ovs_odl_charm
        self.provide_charm_instance().__exit__.return_value = None

        handlers.configure_neutron_plugin('arg1')
        ovs_odl_charm.configure_neutron_plugin.assert_called_once_with(
            ('arg1'))

    def test_odl_node_registration(self):
        ovs_odl_charm = mock.MagicMock()
        self.patch_object(handlers.charm, 'provide_charm_instance',
                          new=mock.MagicMock())
        self.provide_charm_instance().__enter__.return_value = ovs_odl_charm
        self.provide_charm_instance().__exit__.return_value = None

        handlers.odl_node_registration('arg1')
        ovs_odl_charm.register_node.assert_called_once_with(('arg1'))

    def test_odl_mac_registration(self):
        ovs_odl_charm = mock.MagicMock()
        self.patch_object(handlers.charm, 'provide_charm_instance',
                          new=mock.MagicMock())
        self.provide_charm_instance().__enter__.return_value = ovs_odl_charm
        self.provide_charm_instance().__exit__.return_value = None

        handlers.odl_mac_registration('arg1')
        ovs_odl_charm.register_macs.assert_called_once_with(('arg1'))
|
|
@ -1,163 +0,0 @@
|
|||
# Copyright 2016 Canonical Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import testtools
|
||||
|
||||
from mock import call
|
||||
from mock import patch
|
||||
from mock import MagicMock
|
||||
|
||||
import reactive.main as ovs_odl_main
|
||||
|
||||
# Names patched out of reactive.main for every TestOVSODL test; each one
# appears as an attribute (a started mock) on the test case instance.
TO_PATCH = [
    'get_address_in_network',
    'config',
    'log',
    'status_set',
    'unit_private_ip',
    'when',
    'when_not',
    'kv',
    'subprocess',
    'ovs',
    'gethostname',
]

# OVSDB manager connection string advertised by the mocked ODL relation.
CONN_STRING = 'tcp:odl-controller:6640'
# Address returned for all of the unit's private-address/network lookups.
LOCALHOST = '10.1.1.1'
|
||||
|
||||
|
||||
class CharmUnitTestCase(testtools.TestCase):
    """TestCase that patches a set of attributes on one target object.

    Every name listed in ``patches`` is replaced on ``obj`` with a mock
    (restored automatically via addCleanup) and exposed as an attribute
    of the same name on the test case.
    """

    def setUp(self, obj, patches):
        super(CharmUnitTestCase, self).setUp()
        self.patches = patches
        self.obj = obj
        self.patch_all()

    def patch(self, method):
        """Patch ``self.obj.<method>`` and return the started mock."""
        patcher = patch.object(self.obj, method)
        started = patcher.start()
        self.addCleanup(patcher.stop)
        return started

    def patch_all(self):
        """Apply every patch named in self.patches onto this test case."""
        for name in self.patches:
            setattr(self, name, self.patch(name))
|
||||
|
||||
|
||||
class MockUnitData():
    """In-memory stand-in for the charmhelpers unitdata key/value store.

    Supports the subset of the kv() API the handlers use: set/unset/get
    plus a reset() helper for test teardown.
    """

    def __init__(self):
        # BUG FIX: the store used to be a class attribute (``data = {}``),
        # so every instance shared one dict and state leaked between
        # tests; keep it per-instance instead.
        self.data = {}

    def set(self, k, v):
        """Store value ``v`` under key ``k``."""
        self.data[k] = v

    def unset(self, k):
        """Remove key ``k`` if present; silently ignore missing keys."""
        if k in self.data:
            del self.data[k]

    def get(self, k):
        """Return the value for ``k``, or None when unset."""
        return self.data.get(k)

    def reset(self):
        """Drop all stored keys."""
        self.data = {}
|
||||
|
||||
|
||||
class TestOVSODL(CharmUnitTestCase):
    """Unit tests for the legacy reactive.main handlers."""

    def setUp(self):
        super(TestOVSODL, self).setUp(ovs_odl_main, TO_PATCH)
        # Back the mocked kv() with an in-memory store we control.
        self.unitdata = MockUnitData()
        self.unit_private_ip.return_value = LOCALHOST
        self.kv.return_value = self.unitdata

    def tearDown(self):
        super(TestOVSODL, self).tearDown()
        # Clear the kv store so no state bleeds into the next test.
        self.unitdata.reset()

    def test_configure_openvswitch_not_installed(self):
        # Without the 'installed' flag the handler must not touch OVS.
        self.unitdata.unset('installed')
        odl_ovsdb = MagicMock()
        ovs_odl_main.configure_openvswitch(odl_ovsdb)
        self.assertFalse(self.subprocess.check_call.called)

    def test_configure_openvswitch_installed(self):
        # Installed but the relation offers no connection string: no-op.
        self.unitdata.set('installed', True)
        odl_ovsdb = MagicMock()
        odl_ovsdb.connection_string.return_value = None
        ovs_odl_main.configure_openvswitch(odl_ovsdb)
        self.assertFalse(self.subprocess.check_call.called)

    def test_configure_openvswitch_installed_related(self):
        # Installed and fully related: manager and external_ids are set.
        self.unitdata.set('installed', True)
        self.gethostname.return_value = 'ovs-host'
        self.subprocess.check_output.return_value = 'local_uuid'
        self.config.return_value = None
        self.get_address_in_network.return_value = LOCALHOST
        odl_ovsdb = MagicMock()
        odl_ovsdb.connection_string.return_value = CONN_STRING
        odl_ovsdb.private_address.return_value = 'odl-controller'
        ovs_odl_main.configure_openvswitch(odl_ovsdb)
        self.ovs.set_manager.assert_called_with(CONN_STRING)
        self.ovs.set_config.assert_has_calls([
            call('local_ip', '10.1.1.1'),
            call('controller-ips', 'odl-controller',
                 table='external_ids'),
            call('host-id', 'ovs-host',
                 table='external_ids'),
        ])
        self.get_address_in_network.assert_called_with(None, '10.1.1.1')
        self.status_set.assert_called_with('active',
                                           'Open vSwitch configured and ready')

    def test_unconfigure_openvswitch_not_installed(self):
        # Without the 'installed' flag unconfigure must be a no-op too.
        self.unitdata.unset('installed')
        odl_ovsdb = MagicMock()
        ovs_odl_main.unconfigure_openvswitch(odl_ovsdb)
        self.assertFalse(self.subprocess.check_call.called)

    def test_unconfigure_openvswitch_installed(self):
        # Manager plus the controller of every listed bridge is removed.
        self.unitdata.set('installed', True)
        self.subprocess.check_output.return_value = 'br-int br-ex'
        odl_ovsdb = MagicMock()
        ovs_odl_main.unconfigure_openvswitch(odl_ovsdb)
        self.subprocess.check_call.assert_has_calls([
            call(['ovs-vsctl', 'del-manager']),
            call(['ovs-vsctl', 'del-controller', 'br-int']),
            call(['ovs-vsctl', 'del-controller', 'br-ex']),
        ])

    def test_configure_neutron_plugin(self):
        # Verify the plugin name and nova.conf payload handed to the
        # neutron-plugin relation.
        neutron_plugin = MagicMock()
        ovs_odl_main.configure_neutron_plugin(neutron_plugin)
        neutron_plugin.configure_plugin.assert_called_with(
            plugin='ovs-odl',
            config={
                "nova-compute": {
                    "/etc/nova/nova.conf": {
                        "sections": {
                            'DEFAULT': [
                                ('firewall_driver',
                                 'nova.virt.firewall.NoopFirewallDriver'),
                                ('libvirt_vif_driver',
                                 'nova.virt.libvirt.vif.'
                                 'LibvirtGenericVIFDriver'),
                                ('security_group_api', 'neutron'),
                            ],
                        }
                    }
                }
            }
        )
|
Loading…
Reference in New Issue