Rebase
This commit is contained in:
commit
01ff8508a4
9
Makefile
9
Makefile
|
@ -29,14 +29,15 @@ sync: bin/charm_helpers_sync.py
|
||||||
@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers.yaml
|
@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers.yaml
|
||||||
@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
|
@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
|
||||||
|
|
||||||
publish: lint
|
publish: lint test
|
||||||
bzr push lp:charms/rabbitmq-server
|
bzr push lp:charms/rabbitmq-server
|
||||||
bzr push lp:charms/trusty/rabbitmq-server
|
bzr push lp:charms/trusty/rabbitmq-server
|
||||||
|
|
||||||
unit_test: .venv
|
test: .venv
|
||||||
@echo Starting tests...
|
@echo Starting tests...
|
||||||
env CHARM_DIR=$(CHARM_DIR) $(TEST_PREFIX) .venv/bin/nosetests unit_tests/
|
env CHARM_DIR=$(CHARM_DIR) $(TEST_PREFIX) .venv/bin/nosetests \
|
||||||
|
--nologcapture --with-coverage unit_tests/
|
||||||
|
|
||||||
functional_test:
|
functional_test:
|
||||||
@echo Starting amulet tests...
|
@echo Starting amulet tests...
|
||||||
@juju test -v -p AMULET_HTTP_PROXY --timeout 900
|
@juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
destination: tests/charmhelpers
|
destination: tests/charmhelpers
|
||||||
branch: lp:charm-helpers
|
branch: lp:charm-helpers
|
||||||
include:
|
include:
|
||||||
- core
|
- contrib.amulet
|
||||||
- contrib.ssl
|
- contrib.openstack.amulet
|
||||||
|
|
|
@ -3,6 +3,7 @@ branch: lp:charm-helpers
|
||||||
include:
|
include:
|
||||||
- fetch
|
- fetch
|
||||||
- core
|
- core
|
||||||
|
- cli
|
||||||
- contrib.charmsupport
|
- contrib.charmsupport
|
||||||
- contrib.openstack|inc=*
|
- contrib.openstack|inc=*
|
||||||
- contrib.storage
|
- contrib.storage
|
||||||
|
|
21
config.yaml
21
config.yaml
|
@ -188,3 +188,24 @@ options:
|
||||||
order for this charm to function correctly, the privacy extension must be
|
order for this charm to function correctly, the privacy extension must be
|
||||||
disabled and a non-temporary address must be configured/available on
|
disabled and a non-temporary address must be configured/available on
|
||||||
your network interface.
|
your network interface.
|
||||||
|
min-cluster-size:
|
||||||
|
type: int
|
||||||
|
default:
|
||||||
|
description: |
|
||||||
|
Minimum number of units expected to exist before charm will attempt to
|
||||||
|
form a rabbitmq cluster.
|
||||||
|
stats_cron_schedule:
|
||||||
|
type: string
|
||||||
|
default: '*/5 * * * *'
|
||||||
|
description: |
|
||||||
|
Cron schedule used to generate rabbitmq stats. If unset
|
||||||
|
no stats will be generated
|
||||||
|
queue_thresholds:
|
||||||
|
type: string
|
||||||
|
default: "[['\\*', '\\*', 100, 200]]"
|
||||||
|
description: |
|
||||||
|
List of RabbitMQ queue size check thresholds. Interpreted as YAML
|
||||||
|
in format [<vhost>, <queue>, <warn>, <crit>]
|
||||||
|
- ['/', 'queue1', 10, 20]
|
||||||
|
- ['/', 'queue2', 200, 300]
|
||||||
|
Wildcards '*' are accepted to monitor all vhosts and/or queues
|
||||||
|
|
|
@ -0,0 +1,191 @@
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import inspect
|
||||||
|
import argparse
|
||||||
|
import sys
|
||||||
|
|
||||||
|
from six.moves import zip
|
||||||
|
|
||||||
|
from charmhelpers.core import unitdata
|
||||||
|
|
||||||
|
|
||||||
|
class OutputFormatter(object):
|
||||||
|
def __init__(self, outfile=sys.stdout):
|
||||||
|
self.formats = (
|
||||||
|
"raw",
|
||||||
|
"json",
|
||||||
|
"py",
|
||||||
|
"yaml",
|
||||||
|
"csv",
|
||||||
|
"tab",
|
||||||
|
)
|
||||||
|
self.outfile = outfile
|
||||||
|
|
||||||
|
def add_arguments(self, argument_parser):
|
||||||
|
formatgroup = argument_parser.add_mutually_exclusive_group()
|
||||||
|
choices = self.supported_formats
|
||||||
|
formatgroup.add_argument("--format", metavar='FMT',
|
||||||
|
help="Select output format for returned data, "
|
||||||
|
"where FMT is one of: {}".format(choices),
|
||||||
|
choices=choices, default='raw')
|
||||||
|
for fmt in self.formats:
|
||||||
|
fmtfunc = getattr(self, fmt)
|
||||||
|
formatgroup.add_argument("-{}".format(fmt[0]),
|
||||||
|
"--{}".format(fmt), action='store_const',
|
||||||
|
const=fmt, dest='format',
|
||||||
|
help=fmtfunc.__doc__)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def supported_formats(self):
|
||||||
|
return self.formats
|
||||||
|
|
||||||
|
def raw(self, output):
|
||||||
|
"""Output data as raw string (default)"""
|
||||||
|
if isinstance(output, (list, tuple)):
|
||||||
|
output = '\n'.join(map(str, output))
|
||||||
|
self.outfile.write(str(output))
|
||||||
|
|
||||||
|
def py(self, output):
|
||||||
|
"""Output data as a nicely-formatted python data structure"""
|
||||||
|
import pprint
|
||||||
|
pprint.pprint(output, stream=self.outfile)
|
||||||
|
|
||||||
|
def json(self, output):
|
||||||
|
"""Output data in JSON format"""
|
||||||
|
import json
|
||||||
|
json.dump(output, self.outfile)
|
||||||
|
|
||||||
|
def yaml(self, output):
|
||||||
|
"""Output data in YAML format"""
|
||||||
|
import yaml
|
||||||
|
yaml.safe_dump(output, self.outfile)
|
||||||
|
|
||||||
|
def csv(self, output):
|
||||||
|
"""Output data as excel-compatible CSV"""
|
||||||
|
import csv
|
||||||
|
csvwriter = csv.writer(self.outfile)
|
||||||
|
csvwriter.writerows(output)
|
||||||
|
|
||||||
|
def tab(self, output):
|
||||||
|
"""Output data in excel-compatible tab-delimited format"""
|
||||||
|
import csv
|
||||||
|
csvwriter = csv.writer(self.outfile, dialect=csv.excel_tab)
|
||||||
|
csvwriter.writerows(output)
|
||||||
|
|
||||||
|
def format_output(self, output, fmt='raw'):
|
||||||
|
fmtfunc = getattr(self, fmt)
|
||||||
|
fmtfunc(output)
|
||||||
|
|
||||||
|
|
||||||
|
class CommandLine(object):
|
||||||
|
argument_parser = None
|
||||||
|
subparsers = None
|
||||||
|
formatter = None
|
||||||
|
exit_code = 0
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
if not self.argument_parser:
|
||||||
|
self.argument_parser = argparse.ArgumentParser(description='Perform common charm tasks')
|
||||||
|
if not self.formatter:
|
||||||
|
self.formatter = OutputFormatter()
|
||||||
|
self.formatter.add_arguments(self.argument_parser)
|
||||||
|
if not self.subparsers:
|
||||||
|
self.subparsers = self.argument_parser.add_subparsers(help='Commands')
|
||||||
|
|
||||||
|
def subcommand(self, command_name=None):
|
||||||
|
"""
|
||||||
|
Decorate a function as a subcommand. Use its arguments as the
|
||||||
|
command-line arguments"""
|
||||||
|
def wrapper(decorated):
|
||||||
|
cmd_name = command_name or decorated.__name__
|
||||||
|
subparser = self.subparsers.add_parser(cmd_name,
|
||||||
|
description=decorated.__doc__)
|
||||||
|
for args, kwargs in describe_arguments(decorated):
|
||||||
|
subparser.add_argument(*args, **kwargs)
|
||||||
|
subparser.set_defaults(func=decorated)
|
||||||
|
return decorated
|
||||||
|
return wrapper
|
||||||
|
|
||||||
|
def test_command(self, decorated):
|
||||||
|
"""
|
||||||
|
Subcommand is a boolean test function, so bool return values should be
|
||||||
|
converted to a 0/1 exit code.
|
||||||
|
"""
|
||||||
|
decorated._cli_test_command = True
|
||||||
|
return decorated
|
||||||
|
|
||||||
|
def no_output(self, decorated):
|
||||||
|
"""
|
||||||
|
Subcommand is not expected to return a value, so don't print a spurious None.
|
||||||
|
"""
|
||||||
|
decorated._cli_no_output = True
|
||||||
|
return decorated
|
||||||
|
|
||||||
|
def subcommand_builder(self, command_name, description=None):
|
||||||
|
"""
|
||||||
|
Decorate a function that builds a subcommand. Builders should accept a
|
||||||
|
single argument (the subparser instance) and return the function to be
|
||||||
|
run as the command."""
|
||||||
|
def wrapper(decorated):
|
||||||
|
subparser = self.subparsers.add_parser(command_name)
|
||||||
|
func = decorated(subparser)
|
||||||
|
subparser.set_defaults(func=func)
|
||||||
|
subparser.description = description or func.__doc__
|
||||||
|
return wrapper
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
"Run cli, processing arguments and executing subcommands."
|
||||||
|
arguments = self.argument_parser.parse_args()
|
||||||
|
argspec = inspect.getargspec(arguments.func)
|
||||||
|
vargs = []
|
||||||
|
for arg in argspec.args:
|
||||||
|
vargs.append(getattr(arguments, arg))
|
||||||
|
if argspec.varargs:
|
||||||
|
vargs.extend(getattr(arguments, argspec.varargs))
|
||||||
|
output = arguments.func(*vargs)
|
||||||
|
if getattr(arguments.func, '_cli_test_command', False):
|
||||||
|
self.exit_code = 0 if output else 1
|
||||||
|
output = ''
|
||||||
|
if getattr(arguments.func, '_cli_no_output', False):
|
||||||
|
output = ''
|
||||||
|
self.formatter.format_output(output, arguments.format)
|
||||||
|
if unitdata._KV:
|
||||||
|
unitdata._KV.flush()
|
||||||
|
|
||||||
|
|
||||||
|
cmdline = CommandLine()
|
||||||
|
|
||||||
|
|
||||||
|
def describe_arguments(func):
|
||||||
|
"""
|
||||||
|
Analyze a function's signature and return a data structure suitable for
|
||||||
|
passing in as arguments to an argparse parser's add_argument() method."""
|
||||||
|
|
||||||
|
argspec = inspect.getargspec(func)
|
||||||
|
# we should probably raise an exception somewhere if func includes **kwargs
|
||||||
|
if argspec.defaults:
|
||||||
|
positional_args = argspec.args[:-len(argspec.defaults)]
|
||||||
|
keyword_names = argspec.args[-len(argspec.defaults):]
|
||||||
|
for arg, default in zip(keyword_names, argspec.defaults):
|
||||||
|
yield ('--{}'.format(arg),), {'default': default}
|
||||||
|
else:
|
||||||
|
positional_args = argspec.args
|
||||||
|
|
||||||
|
for arg in positional_args:
|
||||||
|
yield (arg,), {}
|
||||||
|
if argspec.varargs:
|
||||||
|
yield (argspec.varargs,), {'nargs': '*'}
|
|
@ -0,0 +1,36 @@
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
from . import cmdline
|
||||||
|
from charmhelpers.contrib.benchmark import Benchmark
|
||||||
|
|
||||||
|
|
||||||
|
@cmdline.subcommand(command_name='benchmark-start')
|
||||||
|
def start():
|
||||||
|
Benchmark.start()
|
||||||
|
|
||||||
|
|
||||||
|
@cmdline.subcommand(command_name='benchmark-finish')
|
||||||
|
def finish():
|
||||||
|
Benchmark.finish()
|
||||||
|
|
||||||
|
|
||||||
|
@cmdline.subcommand_builder('benchmark-composite', description="Set the benchmark composite score")
|
||||||
|
def service(subparser):
|
||||||
|
subparser.add_argument("value", help="The composite score.")
|
||||||
|
subparser.add_argument("units", help="The units the composite score represents, i.e., 'reads/sec'.")
|
||||||
|
subparser.add_argument("direction", help="'asc' if a lower score is better, 'desc' if a higher score is better.")
|
||||||
|
return Benchmark.set_composite_score
|
|
@ -0,0 +1,32 @@
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
"""
|
||||||
|
This module loads sub-modules into the python runtime so they can be
|
||||||
|
discovered via the inspect module. In order to prevent flake8 from (rightfully)
|
||||||
|
telling us these are unused modules, throw a ' # noqa' at the end of each import
|
||||||
|
so that the warning is suppressed.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from . import CommandLine # noqa
|
||||||
|
|
||||||
|
"""
|
||||||
|
Import the sub-modules which have decorated subcommands to register with chlp.
|
||||||
|
"""
|
||||||
|
from . import host # noqa
|
||||||
|
from . import benchmark # noqa
|
||||||
|
from . import unitdata # noqa
|
||||||
|
from . import hookenv # noqa
|
|
@ -0,0 +1,23 @@
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
from . import cmdline
|
||||||
|
from charmhelpers.core import hookenv
|
||||||
|
|
||||||
|
|
||||||
|
cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped)
|
||||||
|
cmdline.subcommand('service-name')(hookenv.service_name)
|
||||||
|
cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped)
|
|
@ -1,6 +1,3 @@
|
||||||
#!/usr/bin/env python
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
#
|
#
|
||||||
# This file is part of charm-helpers.
|
# This file is part of charm-helpers.
|
||||||
|
@ -17,26 +14,18 @@
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
import six
|
from . import cmdline
|
||||||
|
from charmhelpers.core import host
|
||||||
|
|
||||||
|
|
||||||
def bool_from_string(value):
|
@cmdline.subcommand()
|
||||||
"""Interpret string value as boolean.
|
def mounts():
|
||||||
|
"List mounts"
|
||||||
|
return host.mounts()
|
||||||
|
|
||||||
Returns True if value translates to True otherwise False.
|
|
||||||
"""
|
|
||||||
if isinstance(value, six.string_types):
|
|
||||||
value = six.text_type(value)
|
|
||||||
else:
|
|
||||||
msg = "Unable to interpret non-string value '%s' as boolean" % (value)
|
|
||||||
raise ValueError(msg)
|
|
||||||
|
|
||||||
value = value.strip().lower()
|
@cmdline.subcommand_builder('service', description="Control system services")
|
||||||
|
def service(subparser):
|
||||||
if value in ['y', 'yes', 'true', 't', 'on']:
|
subparser.add_argument("action", help="The action to perform (start, stop, etc...)")
|
||||||
return True
|
subparser.add_argument("service_name", help="Name of the service to control")
|
||||||
elif value in ['n', 'no', 'false', 'f', 'off']:
|
return host.service
|
||||||
return False
|
|
||||||
|
|
||||||
msg = "Unable to interpret string value '%s' as boolean" % (value)
|
|
||||||
raise ValueError(msg)
|
|
|
@ -0,0 +1,39 @@
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
from . import cmdline
|
||||||
|
from charmhelpers.core import unitdata
|
||||||
|
|
||||||
|
|
||||||
|
@cmdline.subcommand_builder('unitdata', description="Store and retrieve data")
|
||||||
|
def unitdata_cmd(subparser):
|
||||||
|
nested = subparser.add_subparsers()
|
||||||
|
get_cmd = nested.add_parser('get', help='Retrieve data')
|
||||||
|
get_cmd.add_argument('key', help='Key to retrieve the value of')
|
||||||
|
get_cmd.set_defaults(action='get', value=None)
|
||||||
|
set_cmd = nested.add_parser('set', help='Store data')
|
||||||
|
set_cmd.add_argument('key', help='Key to set')
|
||||||
|
set_cmd.add_argument('value', help='Value to store')
|
||||||
|
set_cmd.set_defaults(action='set')
|
||||||
|
|
||||||
|
def _unitdata_cmd(action, key, value):
|
||||||
|
if action == 'get':
|
||||||
|
return unitdata.kv().get(key)
|
||||||
|
elif action == 'set':
|
||||||
|
unitdata.kv().set(key, value)
|
||||||
|
unitdata.kv().flush()
|
||||||
|
return ''
|
||||||
|
return _unitdata_cmd
|
|
@ -247,7 +247,9 @@ class NRPE(object):
|
||||||
|
|
||||||
service('restart', 'nagios-nrpe-server')
|
service('restart', 'nagios-nrpe-server')
|
||||||
|
|
||||||
for rid in relation_ids("local-monitors"):
|
monitor_ids = relation_ids("local-monitors") + \
|
||||||
|
relation_ids("nrpe-external-master")
|
||||||
|
for rid in monitor_ids:
|
||||||
relation_set(relation_id=rid, monitors=yaml.dump(monitors))
|
relation_set(relation_id=rid, monitors=yaml.dump(monitors))
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -44,6 +44,7 @@ from charmhelpers.core.hookenv import (
|
||||||
ERROR,
|
ERROR,
|
||||||
WARNING,
|
WARNING,
|
||||||
unit_get,
|
unit_get,
|
||||||
|
is_leader as juju_is_leader
|
||||||
)
|
)
|
||||||
from charmhelpers.core.decorators import (
|
from charmhelpers.core.decorators import (
|
||||||
retry_on_exception,
|
retry_on_exception,
|
||||||
|
@ -52,6 +53,8 @@ from charmhelpers.core.strutils import (
|
||||||
bool_from_string,
|
bool_from_string,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
DC_RESOURCE_NAME = 'DC'
|
||||||
|
|
||||||
|
|
||||||
class HAIncompleteConfig(Exception):
|
class HAIncompleteConfig(Exception):
|
||||||
pass
|
pass
|
||||||
|
@ -61,17 +64,30 @@ class CRMResourceNotFound(Exception):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class CRMDCNotFound(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
def is_elected_leader(resource):
|
def is_elected_leader(resource):
|
||||||
"""
|
"""
|
||||||
Returns True if the charm executing this is the elected cluster leader.
|
Returns True if the charm executing this is the elected cluster leader.
|
||||||
|
|
||||||
It relies on two mechanisms to determine leadership:
|
It relies on two mechanisms to determine leadership:
|
||||||
1. If the charm is part of a corosync cluster, call corosync to
|
1. If juju is sufficiently new and leadership election is supported,
|
||||||
|
the is_leader command will be used.
|
||||||
|
2. If the charm is part of a corosync cluster, call corosync to
|
||||||
determine leadership.
|
determine leadership.
|
||||||
2. If the charm is not part of a corosync cluster, the leader is
|
3. If the charm is not part of a corosync cluster, the leader is
|
||||||
determined as being "the alive unit with the lowest unit numer". In
|
determined as being "the alive unit with the lowest unit numer". In
|
||||||
other words, the oldest surviving unit.
|
other words, the oldest surviving unit.
|
||||||
"""
|
"""
|
||||||
|
try:
|
||||||
|
return juju_is_leader()
|
||||||
|
except NotImplementedError:
|
||||||
|
log('Juju leadership election feature not enabled'
|
||||||
|
', using fallback support',
|
||||||
|
level=WARNING)
|
||||||
|
|
||||||
if is_clustered():
|
if is_clustered():
|
||||||
if not is_crm_leader(resource):
|
if not is_crm_leader(resource):
|
||||||
log('Deferring action to CRM leader.', level=INFO)
|
log('Deferring action to CRM leader.', level=INFO)
|
||||||
|
@ -95,7 +111,33 @@ def is_clustered():
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
@retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound)
|
def is_crm_dc():
|
||||||
|
"""
|
||||||
|
Determine leadership by querying the pacemaker Designated Controller
|
||||||
|
"""
|
||||||
|
cmd = ['crm', 'status']
|
||||||
|
try:
|
||||||
|
status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
|
||||||
|
if not isinstance(status, six.text_type):
|
||||||
|
status = six.text_type(status, "utf-8")
|
||||||
|
except subprocess.CalledProcessError as ex:
|
||||||
|
raise CRMDCNotFound(str(ex))
|
||||||
|
|
||||||
|
current_dc = ''
|
||||||
|
for line in status.split('\n'):
|
||||||
|
if line.startswith('Current DC'):
|
||||||
|
# Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
|
||||||
|
current_dc = line.split(':')[1].split()[0]
|
||||||
|
if current_dc == get_unit_hostname():
|
||||||
|
return True
|
||||||
|
elif current_dc == 'NONE':
|
||||||
|
raise CRMDCNotFound('Current DC: NONE')
|
||||||
|
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
@retry_on_exception(5, base_delay=2,
|
||||||
|
exc_type=(CRMResourceNotFound, CRMDCNotFound))
|
||||||
def is_crm_leader(resource, retry=False):
|
def is_crm_leader(resource, retry=False):
|
||||||
"""
|
"""
|
||||||
Returns True if the charm calling this is the elected corosync leader,
|
Returns True if the charm calling this is the elected corosync leader,
|
||||||
|
@ -104,6 +146,8 @@ def is_crm_leader(resource, retry=False):
|
||||||
We allow this operation to be retried to avoid the possibility of getting a
|
We allow this operation to be retried to avoid the possibility of getting a
|
||||||
false negative. See LP #1396246 for more info.
|
false negative. See LP #1396246 for more info.
|
||||||
"""
|
"""
|
||||||
|
if resource == DC_RESOURCE_NAME:
|
||||||
|
return is_crm_dc()
|
||||||
cmd = ['crm', 'resource', 'show', resource]
|
cmd = ['crm', 'resource', 'show', resource]
|
||||||
try:
|
try:
|
||||||
status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
|
status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
|
||||||
|
|
|
@ -435,8 +435,12 @@ def get_hostname(address, fqdn=True):
|
||||||
|
|
||||||
rev = dns.reversename.from_address(address)
|
rev = dns.reversename.from_address(address)
|
||||||
result = ns_query(rev)
|
result = ns_query(rev)
|
||||||
|
|
||||||
if not result:
|
if not result:
|
||||||
return None
|
try:
|
||||||
|
result = socket.gethostbyaddr(address)[0]
|
||||||
|
except:
|
||||||
|
return None
|
||||||
else:
|
else:
|
||||||
result = address
|
result = address
|
||||||
|
|
||||||
|
|
|
@ -44,17 +44,24 @@ class OpenStackAmuletDeployment(AmuletDeployment):
|
||||||
Determine if the local branch being tested is derived from its
|
Determine if the local branch being tested is derived from its
|
||||||
stable or next (dev) branch, and based on this, use the corresonding
|
stable or next (dev) branch, and based on this, use the corresonding
|
||||||
stable or next branches for the other_services."""
|
stable or next branches for the other_services."""
|
||||||
base_charms = ['mysql', 'mongodb']
|
base_charms = ['mysql', 'mongodb', 'nrpe']
|
||||||
|
|
||||||
|
if self.series in ['precise', 'trusty']:
|
||||||
|
base_series = self.series
|
||||||
|
else:
|
||||||
|
base_series = self.current_next
|
||||||
|
|
||||||
if self.stable:
|
if self.stable:
|
||||||
for svc in other_services:
|
for svc in other_services:
|
||||||
temp = 'lp:charms/{}'
|
temp = 'lp:charms/{}/{}'
|
||||||
svc['location'] = temp.format(svc['name'])
|
svc['location'] = temp.format(base_series,
|
||||||
|
svc['name'])
|
||||||
else:
|
else:
|
||||||
for svc in other_services:
|
for svc in other_services:
|
||||||
if svc['name'] in base_charms:
|
if svc['name'] in base_charms:
|
||||||
temp = 'lp:charms/{}'
|
temp = 'lp:charms/{}/{}'
|
||||||
svc['location'] = temp.format(svc['name'])
|
svc['location'] = temp.format(base_series,
|
||||||
|
svc['name'])
|
||||||
else:
|
else:
|
||||||
temp = 'lp:~openstack-charmers/charms/{}/{}/next'
|
temp = 'lp:~openstack-charmers/charms/{}/{}/next'
|
||||||
svc['location'] = temp.format(self.current_next,
|
svc['location'] = temp.format(self.current_next,
|
||||||
|
@ -72,9 +79,9 @@ class OpenStackAmuletDeployment(AmuletDeployment):
|
||||||
services.append(this_service)
|
services.append(this_service)
|
||||||
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
|
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
|
||||||
'ceph-osd', 'ceph-radosgw']
|
'ceph-osd', 'ceph-radosgw']
|
||||||
# Openstack subordinate charms do not expose an origin option as that
|
# Most OpenStack subordinate charms do not expose an origin option
|
||||||
# is controlled by the principle
|
# as that is controlled by the principle.
|
||||||
ignore = ['neutron-openvswitch']
|
ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe']
|
||||||
|
|
||||||
if self.openstack:
|
if self.openstack:
|
||||||
for svc in services:
|
for svc in services:
|
||||||
|
@ -99,9 +106,13 @@ class OpenStackAmuletDeployment(AmuletDeployment):
|
||||||
Return an integer representing the enum value of the openstack
|
Return an integer representing the enum value of the openstack
|
||||||
release.
|
release.
|
||||||
"""
|
"""
|
||||||
|
# Must be ordered by OpenStack release (not by Ubuntu release):
|
||||||
(self.precise_essex, self.precise_folsom, self.precise_grizzly,
|
(self.precise_essex, self.precise_folsom, self.precise_grizzly,
|
||||||
self.precise_havana, self.precise_icehouse,
|
self.precise_havana, self.precise_icehouse,
|
||||||
self.trusty_icehouse, self.trusty_juno, self.trusty_kilo) = range(8)
|
self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
|
||||||
|
self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
|
||||||
|
self.wily_liberty) = range(12)
|
||||||
|
|
||||||
releases = {
|
releases = {
|
||||||
('precise', None): self.precise_essex,
|
('precise', None): self.precise_essex,
|
||||||
('precise', 'cloud:precise-folsom'): self.precise_folsom,
|
('precise', 'cloud:precise-folsom'): self.precise_folsom,
|
||||||
|
@ -110,7 +121,11 @@ class OpenStackAmuletDeployment(AmuletDeployment):
|
||||||
('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
|
('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
|
||||||
('trusty', None): self.trusty_icehouse,
|
('trusty', None): self.trusty_icehouse,
|
||||||
('trusty', 'cloud:trusty-juno'): self.trusty_juno,
|
('trusty', 'cloud:trusty-juno'): self.trusty_juno,
|
||||||
('trusty', 'cloud:trusty-kilo'): self.trusty_kilo}
|
('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
|
||||||
|
('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
|
||||||
|
('utopic', None): self.utopic_juno,
|
||||||
|
('vivid', None): self.vivid_kilo,
|
||||||
|
('wily', None): self.wily_liberty}
|
||||||
return releases[(self.series, self.openstack)]
|
return releases[(self.series, self.openstack)]
|
||||||
|
|
||||||
def _get_openstack_release_string(self):
|
def _get_openstack_release_string(self):
|
||||||
|
@ -126,9 +141,43 @@ class OpenStackAmuletDeployment(AmuletDeployment):
|
||||||
('trusty', 'icehouse'),
|
('trusty', 'icehouse'),
|
||||||
('utopic', 'juno'),
|
('utopic', 'juno'),
|
||||||
('vivid', 'kilo'),
|
('vivid', 'kilo'),
|
||||||
|
('wily', 'liberty'),
|
||||||
])
|
])
|
||||||
if self.openstack:
|
if self.openstack:
|
||||||
os_origin = self.openstack.split(':')[1]
|
os_origin = self.openstack.split(':')[1]
|
||||||
return os_origin.split('%s-' % self.series)[1].split('/')[0]
|
return os_origin.split('%s-' % self.series)[1].split('/')[0]
|
||||||
else:
|
else:
|
||||||
return releases[self.series]
|
return releases[self.series]
|
||||||
|
|
||||||
|
def get_ceph_expected_pools(self, radosgw=False):
|
||||||
|
"""Return a list of expected ceph pools in a ceph + cinder + glance
|
||||||
|
test scenario, based on OpenStack release and whether ceph radosgw
|
||||||
|
is flagged as present or not."""
|
||||||
|
|
||||||
|
if self._get_openstack_release() >= self.trusty_kilo:
|
||||||
|
# Kilo or later
|
||||||
|
pools = [
|
||||||
|
'rbd',
|
||||||
|
'cinder',
|
||||||
|
'glance'
|
||||||
|
]
|
||||||
|
else:
|
||||||
|
# Juno or earlier
|
||||||
|
pools = [
|
||||||
|
'data',
|
||||||
|
'metadata',
|
||||||
|
'rbd',
|
||||||
|
'cinder',
|
||||||
|
'glance'
|
||||||
|
]
|
||||||
|
|
||||||
|
if radosgw:
|
||||||
|
pools.extend([
|
||||||
|
'.rgw.root',
|
||||||
|
'.rgw.control',
|
||||||
|
'.rgw',
|
||||||
|
'.rgw.gc',
|
||||||
|
'.users.uid'
|
||||||
|
])
|
||||||
|
|
||||||
|
return pools
|
||||||
|
|
|
@ -14,16 +14,20 @@
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import amulet
|
||||||
|
import json
|
||||||
import logging
|
import logging
|
||||||
import os
|
import os
|
||||||
|
import six
|
||||||
import time
|
import time
|
||||||
import urllib
|
import urllib
|
||||||
|
|
||||||
|
import cinderclient.v1.client as cinder_client
|
||||||
import glanceclient.v1.client as glance_client
|
import glanceclient.v1.client as glance_client
|
||||||
|
import heatclient.v1.client as heat_client
|
||||||
import keystoneclient.v2_0 as keystone_client
|
import keystoneclient.v2_0 as keystone_client
|
||||||
import novaclient.v1_1.client as nova_client
|
import novaclient.v1_1.client as nova_client
|
||||||
|
import swiftclient
|
||||||
import six
|
|
||||||
|
|
||||||
from charmhelpers.contrib.amulet.utils import (
|
from charmhelpers.contrib.amulet.utils import (
|
||||||
AmuletUtils
|
AmuletUtils
|
||||||
|
@ -37,7 +41,7 @@ class OpenStackAmuletUtils(AmuletUtils):
|
||||||
"""OpenStack amulet utilities.
|
"""OpenStack amulet utilities.
|
||||||
|
|
||||||
This class inherits from AmuletUtils and has additional support
|
This class inherits from AmuletUtils and has additional support
|
||||||
that is specifically for use by OpenStack charms.
|
that is specifically for use by OpenStack charm tests.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, log_level=ERROR):
|
def __init__(self, log_level=ERROR):
|
||||||
|
@ -51,6 +55,8 @@ class OpenStackAmuletUtils(AmuletUtils):
|
||||||
Validate actual endpoint data vs expected endpoint data. The ports
|
Validate actual endpoint data vs expected endpoint data. The ports
|
||||||
are used to find the matching endpoint.
|
are used to find the matching endpoint.
|
||||||
"""
|
"""
|
||||||
|
self.log.debug('Validating endpoint data...')
|
||||||
|
self.log.debug('actual: {}'.format(repr(endpoints)))
|
||||||
found = False
|
found = False
|
||||||
for ep in endpoints:
|
for ep in endpoints:
|
||||||
self.log.debug('endpoint: {}'.format(repr(ep)))
|
self.log.debug('endpoint: {}'.format(repr(ep)))
|
||||||
|
@ -77,6 +83,7 @@ class OpenStackAmuletUtils(AmuletUtils):
|
||||||
Validate a list of actual service catalog endpoints vs a list of
|
Validate a list of actual service catalog endpoints vs a list of
|
||||||
expected service catalog endpoints.
|
expected service catalog endpoints.
|
||||||
"""
|
"""
|
||||||
|
self.log.debug('Validating service catalog endpoint data...')
|
||||||
self.log.debug('actual: {}'.format(repr(actual)))
|
self.log.debug('actual: {}'.format(repr(actual)))
|
||||||
for k, v in six.iteritems(expected):
|
for k, v in six.iteritems(expected):
|
||||||
if k in actual:
|
if k in actual:
|
||||||
|
@ -93,6 +100,7 @@ class OpenStackAmuletUtils(AmuletUtils):
|
||||||
Validate a list of actual tenant data vs list of expected tenant
|
Validate a list of actual tenant data vs list of expected tenant
|
||||||
data.
|
data.
|
||||||
"""
|
"""
|
||||||
|
self.log.debug('Validating tenant data...')
|
||||||
self.log.debug('actual: {}'.format(repr(actual)))
|
self.log.debug('actual: {}'.format(repr(actual)))
|
||||||
for e in expected:
|
for e in expected:
|
||||||
found = False
|
found = False
|
||||||
|
@ -114,6 +122,7 @@ class OpenStackAmuletUtils(AmuletUtils):
|
||||||
Validate a list of actual role data vs a list of expected role
|
Validate a list of actual role data vs a list of expected role
|
||||||
data.
|
data.
|
||||||
"""
|
"""
|
||||||
|
self.log.debug('Validating role data...')
|
||||||
self.log.debug('actual: {}'.format(repr(actual)))
|
self.log.debug('actual: {}'.format(repr(actual)))
|
||||||
for e in expected:
|
for e in expected:
|
||||||
found = False
|
found = False
|
||||||
|
@ -134,6 +143,7 @@ class OpenStackAmuletUtils(AmuletUtils):
|
||||||
Validate a list of actual user data vs a list of expected user
|
Validate a list of actual user data vs a list of expected user
|
||||||
data.
|
data.
|
||||||
"""
|
"""
|
||||||
|
self.log.debug('Validating user data...')
|
||||||
self.log.debug('actual: {}'.format(repr(actual)))
|
self.log.debug('actual: {}'.format(repr(actual)))
|
||||||
for e in expected:
|
for e in expected:
|
||||||
found = False
|
found = False
|
||||||
|
@ -155,17 +165,30 @@ class OpenStackAmuletUtils(AmuletUtils):
|
||||||
|
|
||||||
Validate a list of actual flavors vs a list of expected flavors.
|
Validate a list of actual flavors vs a list of expected flavors.
|
||||||
"""
|
"""
|
||||||
|
self.log.debug('Validating flavor data...')
|
||||||
self.log.debug('actual: {}'.format(repr(actual)))
|
self.log.debug('actual: {}'.format(repr(actual)))
|
||||||
act = [a.name for a in actual]
|
act = [a.name for a in actual]
|
||||||
return self._validate_list_data(expected, act)
|
return self._validate_list_data(expected, act)
|
||||||
|
|
||||||
def tenant_exists(self, keystone, tenant):
|
def tenant_exists(self, keystone, tenant):
|
||||||
"""Return True if tenant exists."""
|
"""Return True if tenant exists."""
|
||||||
|
self.log.debug('Checking if tenant exists ({})...'.format(tenant))
|
||||||
return tenant in [t.name for t in keystone.tenants.list()]
|
return tenant in [t.name for t in keystone.tenants.list()]
|
||||||
|
|
||||||
|
def authenticate_cinder_admin(self, keystone_sentry, username,
|
||||||
|
password, tenant):
|
||||||
|
"""Authenticates admin user with cinder."""
|
||||||
|
# NOTE(beisner): cinder python client doesn't accept tokens.
|
||||||
|
service_ip = \
|
||||||
|
keystone_sentry.relation('shared-db',
|
||||||
|
'mysql:shared-db')['private-address']
|
||||||
|
ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
|
||||||
|
return cinder_client.Client(username, password, tenant, ept)
|
||||||
|
|
||||||
def authenticate_keystone_admin(self, keystone_sentry, user, password,
|
def authenticate_keystone_admin(self, keystone_sentry, user, password,
|
||||||
tenant):
|
tenant):
|
||||||
"""Authenticates admin user with the keystone admin endpoint."""
|
"""Authenticates admin user with the keystone admin endpoint."""
|
||||||
|
self.log.debug('Authenticating keystone admin...')
|
||||||
unit = keystone_sentry
|
unit = keystone_sentry
|
||||||
service_ip = unit.relation('shared-db',
|
service_ip = unit.relation('shared-db',
|
||||||
'mysql:shared-db')['private-address']
|
'mysql:shared-db')['private-address']
|
||||||
|
@ -175,6 +198,7 @@ class OpenStackAmuletUtils(AmuletUtils):
|
||||||
|
|
||||||
def authenticate_keystone_user(self, keystone, user, password, tenant):
|
def authenticate_keystone_user(self, keystone, user, password, tenant):
|
||||||
"""Authenticates a regular user with the keystone public endpoint."""
|
"""Authenticates a regular user with the keystone public endpoint."""
|
||||||
|
self.log.debug('Authenticating keystone user ({})...'.format(user))
|
||||||
ep = keystone.service_catalog.url_for(service_type='identity',
|
ep = keystone.service_catalog.url_for(service_type='identity',
|
||||||
endpoint_type='publicURL')
|
endpoint_type='publicURL')
|
||||||
return keystone_client.Client(username=user, password=password,
|
return keystone_client.Client(username=user, password=password,
|
||||||
|
@ -182,19 +206,49 @@ class OpenStackAmuletUtils(AmuletUtils):
|
||||||
|
|
||||||
def authenticate_glance_admin(self, keystone):
|
def authenticate_glance_admin(self, keystone):
|
||||||
"""Authenticates admin user with glance."""
|
"""Authenticates admin user with glance."""
|
||||||
|
self.log.debug('Authenticating glance admin...')
|
||||||
ep = keystone.service_catalog.url_for(service_type='image',
|
ep = keystone.service_catalog.url_for(service_type='image',
|
||||||
endpoint_type='adminURL')
|
endpoint_type='adminURL')
|
||||||
return glance_client.Client(ep, token=keystone.auth_token)
|
return glance_client.Client(ep, token=keystone.auth_token)
|
||||||
|
|
||||||
|
def authenticate_heat_admin(self, keystone):
|
||||||
|
"""Authenticates the admin user with heat."""
|
||||||
|
self.log.debug('Authenticating heat admin...')
|
||||||
|
ep = keystone.service_catalog.url_for(service_type='orchestration',
|
||||||
|
endpoint_type='publicURL')
|
||||||
|
return heat_client.Client(endpoint=ep, token=keystone.auth_token)
|
||||||
|
|
||||||
def authenticate_nova_user(self, keystone, user, password, tenant):
|
def authenticate_nova_user(self, keystone, user, password, tenant):
|
||||||
"""Authenticates a regular user with nova-api."""
|
"""Authenticates a regular user with nova-api."""
|
||||||
|
self.log.debug('Authenticating nova user ({})...'.format(user))
|
||||||
ep = keystone.service_catalog.url_for(service_type='identity',
|
ep = keystone.service_catalog.url_for(service_type='identity',
|
||||||
endpoint_type='publicURL')
|
endpoint_type='publicURL')
|
||||||
return nova_client.Client(username=user, api_key=password,
|
return nova_client.Client(username=user, api_key=password,
|
||||||
project_id=tenant, auth_url=ep)
|
project_id=tenant, auth_url=ep)
|
||||||
|
|
||||||
|
def authenticate_swift_user(self, keystone, user, password, tenant):
|
||||||
|
"""Authenticates a regular user with swift api."""
|
||||||
|
self.log.debug('Authenticating swift user ({})...'.format(user))
|
||||||
|
ep = keystone.service_catalog.url_for(service_type='identity',
|
||||||
|
endpoint_type='publicURL')
|
||||||
|
return swiftclient.Connection(authurl=ep,
|
||||||
|
user=user,
|
||||||
|
key=password,
|
||||||
|
tenant_name=tenant,
|
||||||
|
auth_version='2.0')
|
||||||
|
|
||||||
def create_cirros_image(self, glance, image_name):
|
def create_cirros_image(self, glance, image_name):
|
||||||
"""Download the latest cirros image and upload it to glance."""
|
"""Download the latest cirros image and upload it to glance,
|
||||||
|
validate and return a resource pointer.
|
||||||
|
|
||||||
|
:param glance: pointer to authenticated glance connection
|
||||||
|
:param image_name: display name for new image
|
||||||
|
:returns: glance image pointer
|
||||||
|
"""
|
||||||
|
self.log.debug('Creating glance cirros image '
|
||||||
|
'({})...'.format(image_name))
|
||||||
|
|
||||||
|
# Download cirros image
|
||||||
http_proxy = os.getenv('AMULET_HTTP_PROXY')
|
http_proxy = os.getenv('AMULET_HTTP_PROXY')
|
||||||
self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
|
self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
|
||||||
if http_proxy:
|
if http_proxy:
|
||||||
|
@ -203,57 +257,67 @@ class OpenStackAmuletUtils(AmuletUtils):
|
||||||
else:
|
else:
|
||||||
opener = urllib.FancyURLopener()
|
opener = urllib.FancyURLopener()
|
||||||
|
|
||||||
f = opener.open("http://download.cirros-cloud.net/version/released")
|
f = opener.open('http://download.cirros-cloud.net/version/released')
|
||||||
version = f.read().strip()
|
version = f.read().strip()
|
||||||
cirros_img = "cirros-{}-x86_64-disk.img".format(version)
|
cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
|
||||||
local_path = os.path.join('tests', cirros_img)
|
local_path = os.path.join('tests', cirros_img)
|
||||||
|
|
||||||
if not os.path.exists(local_path):
|
if not os.path.exists(local_path):
|
||||||
cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
|
cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
|
||||||
version, cirros_img)
|
version, cirros_img)
|
||||||
opener.retrieve(cirros_url, local_path)
|
opener.retrieve(cirros_url, local_path)
|
||||||
f.close()
|
f.close()
|
||||||
|
|
||||||
|
# Create glance image
|
||||||
with open(local_path) as f:
|
with open(local_path) as f:
|
||||||
image = glance.images.create(name=image_name, is_public=True,
|
image = glance.images.create(name=image_name, is_public=True,
|
||||||
disk_format='qcow2',
|
disk_format='qcow2',
|
||||||
container_format='bare', data=f)
|
container_format='bare', data=f)
|
||||||
count = 1
|
|
||||||
status = image.status
|
|
||||||
while status != 'active' and count < 10:
|
|
||||||
time.sleep(3)
|
|
||||||
image = glance.images.get(image.id)
|
|
||||||
status = image.status
|
|
||||||
self.log.debug('image status: {}'.format(status))
|
|
||||||
count += 1
|
|
||||||
|
|
||||||
if status != 'active':
|
# Wait for image to reach active status
|
||||||
self.log.error('image creation timed out')
|
img_id = image.id
|
||||||
return None
|
ret = self.resource_reaches_status(glance.images, img_id,
|
||||||
|
expected_stat='active',
|
||||||
|
msg='Image status wait')
|
||||||
|
if not ret:
|
||||||
|
msg = 'Glance image failed to reach expected state.'
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
|
# Re-validate new image
|
||||||
|
self.log.debug('Validating image attributes...')
|
||||||
|
val_img_name = glance.images.get(img_id).name
|
||||||
|
val_img_stat = glance.images.get(img_id).status
|
||||||
|
val_img_pub = glance.images.get(img_id).is_public
|
||||||
|
val_img_cfmt = glance.images.get(img_id).container_format
|
||||||
|
val_img_dfmt = glance.images.get(img_id).disk_format
|
||||||
|
msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
|
||||||
|
'container fmt:{} disk fmt:{}'.format(
|
||||||
|
val_img_name, val_img_pub, img_id,
|
||||||
|
val_img_stat, val_img_cfmt, val_img_dfmt))
|
||||||
|
|
||||||
|
if val_img_name == image_name and val_img_stat == 'active' \
|
||||||
|
and val_img_pub is True and val_img_cfmt == 'bare' \
|
||||||
|
and val_img_dfmt == 'qcow2':
|
||||||
|
self.log.debug(msg_attr)
|
||||||
|
else:
|
||||||
|
msg = ('Volume validation failed, {}'.format(msg_attr))
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
return image
|
return image
|
||||||
|
|
||||||
def delete_image(self, glance, image):
|
def delete_image(self, glance, image):
|
||||||
"""Delete the specified image."""
|
"""Delete the specified image."""
|
||||||
num_before = len(list(glance.images.list()))
|
|
||||||
glance.images.delete(image)
|
|
||||||
|
|
||||||
count = 1
|
# /!\ DEPRECATION WARNING
|
||||||
num_after = len(list(glance.images.list()))
|
self.log.warn('/!\\ DEPRECATION WARNING: use '
|
||||||
while num_after != (num_before - 1) and count < 10:
|
'delete_resource instead of delete_image.')
|
||||||
time.sleep(3)
|
self.log.debug('Deleting glance image ({})...'.format(image))
|
||||||
num_after = len(list(glance.images.list()))
|
return self.delete_resource(glance.images, image, msg='glance image')
|
||||||
self.log.debug('number of images: {}'.format(num_after))
|
|
||||||
count += 1
|
|
||||||
|
|
||||||
if num_after != (num_before - 1):
|
|
||||||
self.log.error('image deletion timed out')
|
|
||||||
return False
|
|
||||||
|
|
||||||
return True
|
|
||||||
|
|
||||||
def create_instance(self, nova, image_name, instance_name, flavor):
|
def create_instance(self, nova, image_name, instance_name, flavor):
|
||||||
"""Create the specified instance."""
|
"""Create the specified instance."""
|
||||||
|
self.log.debug('Creating instance '
|
||||||
|
'({}|{}|{})'.format(instance_name, image_name, flavor))
|
||||||
image = nova.images.find(name=image_name)
|
image = nova.images.find(name=image_name)
|
||||||
flavor = nova.flavors.find(name=flavor)
|
flavor = nova.flavors.find(name=flavor)
|
||||||
instance = nova.servers.create(name=instance_name, image=image,
|
instance = nova.servers.create(name=instance_name, image=image,
|
||||||
|
@ -276,19 +340,265 @@ class OpenStackAmuletUtils(AmuletUtils):
|
||||||
|
|
||||||
def delete_instance(self, nova, instance):
|
def delete_instance(self, nova, instance):
|
||||||
"""Delete the specified instance."""
|
"""Delete the specified instance."""
|
||||||
num_before = len(list(nova.servers.list()))
|
|
||||||
nova.servers.delete(instance)
|
|
||||||
|
|
||||||
count = 1
|
# /!\ DEPRECATION WARNING
|
||||||
num_after = len(list(nova.servers.list()))
|
self.log.warn('/!\\ DEPRECATION WARNING: use '
|
||||||
while num_after != (num_before - 1) and count < 10:
|
'delete_resource instead of delete_instance.')
|
||||||
time.sleep(3)
|
self.log.debug('Deleting instance ({})...'.format(instance))
|
||||||
num_after = len(list(nova.servers.list()))
|
return self.delete_resource(nova.servers, instance,
|
||||||
self.log.debug('number of instances: {}'.format(num_after))
|
msg='nova instance')
|
||||||
count += 1
|
|
||||||
|
|
||||||
if num_after != (num_before - 1):
|
def create_or_get_keypair(self, nova, keypair_name="testkey"):
|
||||||
self.log.error('instance deletion timed out')
|
"""Create a new keypair, or return pointer if it already exists."""
|
||||||
|
try:
|
||||||
|
_keypair = nova.keypairs.get(keypair_name)
|
||||||
|
self.log.debug('Keypair ({}) already exists, '
|
||||||
|
'using it.'.format(keypair_name))
|
||||||
|
return _keypair
|
||||||
|
except:
|
||||||
|
self.log.debug('Keypair ({}) does not exist, '
|
||||||
|
'creating it.'.format(keypair_name))
|
||||||
|
|
||||||
|
_keypair = nova.keypairs.create(name=keypair_name)
|
||||||
|
return _keypair
|
||||||
|
|
||||||
|
def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
|
||||||
|
img_id=None, src_vol_id=None, snap_id=None):
|
||||||
|
"""Create cinder volume, optionally from a glance image, OR
|
||||||
|
optionally as a clone of an existing volume, OR optionally
|
||||||
|
from a snapshot. Wait for the new volume status to reach
|
||||||
|
the expected status, validate and return a resource pointer.
|
||||||
|
|
||||||
|
:param vol_name: cinder volume display name
|
||||||
|
:param vol_size: size in gigabytes
|
||||||
|
:param img_id: optional glance image id
|
||||||
|
:param src_vol_id: optional source volume id to clone
|
||||||
|
:param snap_id: optional snapshot id to use
|
||||||
|
:returns: cinder volume pointer
|
||||||
|
"""
|
||||||
|
# Handle parameter input and avoid impossible combinations
|
||||||
|
if img_id and not src_vol_id and not snap_id:
|
||||||
|
# Create volume from image
|
||||||
|
self.log.debug('Creating cinder volume from glance image...')
|
||||||
|
bootable = 'true'
|
||||||
|
elif src_vol_id and not img_id and not snap_id:
|
||||||
|
# Clone an existing volume
|
||||||
|
self.log.debug('Cloning cinder volume...')
|
||||||
|
bootable = cinder.volumes.get(src_vol_id).bootable
|
||||||
|
elif snap_id and not src_vol_id and not img_id:
|
||||||
|
# Create volume from snapshot
|
||||||
|
self.log.debug('Creating cinder volume from snapshot...')
|
||||||
|
snap = cinder.volume_snapshots.find(id=snap_id)
|
||||||
|
vol_size = snap.size
|
||||||
|
snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
|
||||||
|
bootable = cinder.volumes.get(snap_vol_id).bootable
|
||||||
|
elif not img_id and not src_vol_id and not snap_id:
|
||||||
|
# Create volume
|
||||||
|
self.log.debug('Creating cinder volume...')
|
||||||
|
bootable = 'false'
|
||||||
|
else:
|
||||||
|
# Impossible combination of parameters
|
||||||
|
msg = ('Invalid method use - name:{} size:{} img_id:{} '
|
||||||
|
'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
|
||||||
|
img_id, src_vol_id,
|
||||||
|
snap_id))
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
|
# Create new volume
|
||||||
|
try:
|
||||||
|
vol_new = cinder.volumes.create(display_name=vol_name,
|
||||||
|
imageRef=img_id,
|
||||||
|
size=vol_size,
|
||||||
|
source_volid=src_vol_id,
|
||||||
|
snapshot_id=snap_id)
|
||||||
|
vol_id = vol_new.id
|
||||||
|
except Exception as e:
|
||||||
|
msg = 'Failed to create volume: {}'.format(e)
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
|
# Wait for volume to reach available status
|
||||||
|
ret = self.resource_reaches_status(cinder.volumes, vol_id,
|
||||||
|
expected_stat="available",
|
||||||
|
msg="Volume status wait")
|
||||||
|
if not ret:
|
||||||
|
msg = 'Cinder volume failed to reach expected state.'
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
|
# Re-validate new volume
|
||||||
|
self.log.debug('Validating volume attributes...')
|
||||||
|
val_vol_name = cinder.volumes.get(vol_id).display_name
|
||||||
|
val_vol_boot = cinder.volumes.get(vol_id).bootable
|
||||||
|
val_vol_stat = cinder.volumes.get(vol_id).status
|
||||||
|
val_vol_size = cinder.volumes.get(vol_id).size
|
||||||
|
msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
|
||||||
|
'{} size:{}'.format(val_vol_name, vol_id,
|
||||||
|
val_vol_stat, val_vol_boot,
|
||||||
|
val_vol_size))
|
||||||
|
|
||||||
|
if val_vol_boot == bootable and val_vol_stat == 'available' \
|
||||||
|
and val_vol_name == vol_name and val_vol_size == vol_size:
|
||||||
|
self.log.debug(msg_attr)
|
||||||
|
else:
|
||||||
|
msg = ('Volume validation failed, {}'.format(msg_attr))
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
|
return vol_new
|
||||||
|
|
||||||
|
def delete_resource(self, resource, resource_id,
|
||||||
|
msg="resource", max_wait=120):
|
||||||
|
"""Delete one openstack resource, such as one instance, keypair,
|
||||||
|
image, volume, stack, etc., and confirm deletion within max wait time.
|
||||||
|
|
||||||
|
:param resource: pointer to os resource type, ex:glance_client.images
|
||||||
|
:param resource_id: unique name or id for the openstack resource
|
||||||
|
:param msg: text to identify purpose in logging
|
||||||
|
:param max_wait: maximum wait time in seconds
|
||||||
|
:returns: True if successful, otherwise False
|
||||||
|
"""
|
||||||
|
self.log.debug('Deleting OpenStack resource '
|
||||||
|
'{} ({})'.format(resource_id, msg))
|
||||||
|
num_before = len(list(resource.list()))
|
||||||
|
resource.delete(resource_id)
|
||||||
|
|
||||||
|
tries = 0
|
||||||
|
num_after = len(list(resource.list()))
|
||||||
|
while num_after != (num_before - 1) and tries < (max_wait / 4):
|
||||||
|
self.log.debug('{} delete check: '
|
||||||
|
'{} [{}:{}] {}'.format(msg, tries,
|
||||||
|
num_before,
|
||||||
|
num_after,
|
||||||
|
resource_id))
|
||||||
|
time.sleep(4)
|
||||||
|
num_after = len(list(resource.list()))
|
||||||
|
tries += 1
|
||||||
|
|
||||||
|
self.log.debug('{}: expected, actual count = {}, '
|
||||||
|
'{}'.format(msg, num_before - 1, num_after))
|
||||||
|
|
||||||
|
if num_after == (num_before - 1):
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
self.log.error('{} delete timed out'.format(msg))
|
||||||
return False
|
return False
|
||||||
|
|
||||||
return True
|
def resource_reaches_status(self, resource, resource_id,
|
||||||
|
expected_stat='available',
|
||||||
|
msg='resource', max_wait=120):
|
||||||
|
"""Wait for an openstack resources status to reach an
|
||||||
|
expected status within a specified time. Useful to confirm that
|
||||||
|
nova instances, cinder vols, snapshots, glance images, heat stacks
|
||||||
|
and other resources eventually reach the expected status.
|
||||||
|
|
||||||
|
:param resource: pointer to os resource type, ex: heat_client.stacks
|
||||||
|
:param resource_id: unique id for the openstack resource
|
||||||
|
:param expected_stat: status to expect resource to reach
|
||||||
|
:param msg: text to identify purpose in logging
|
||||||
|
:param max_wait: maximum wait time in seconds
|
||||||
|
:returns: True if successful, False if status is not reached
|
||||||
|
"""
|
||||||
|
|
||||||
|
tries = 0
|
||||||
|
resource_stat = resource.get(resource_id).status
|
||||||
|
while resource_stat != expected_stat and tries < (max_wait / 4):
|
||||||
|
self.log.debug('{} status check: '
|
||||||
|
'{} [{}:{}] {}'.format(msg, tries,
|
||||||
|
resource_stat,
|
||||||
|
expected_stat,
|
||||||
|
resource_id))
|
||||||
|
time.sleep(4)
|
||||||
|
resource_stat = resource.get(resource_id).status
|
||||||
|
tries += 1
|
||||||
|
|
||||||
|
self.log.debug('{}: expected, actual status = {}, '
|
||||||
|
'{}'.format(msg, resource_stat, expected_stat))
|
||||||
|
|
||||||
|
if resource_stat == expected_stat:
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
self.log.debug('{} never reached expected status: '
|
||||||
|
'{}'.format(resource_id, expected_stat))
|
||||||
|
return False
|
||||||
|
|
||||||
|
def get_ceph_osd_id_cmd(self, index):
|
||||||
|
"""Produce a shell command that will return a ceph-osd id."""
|
||||||
|
return ("`initctl list | grep 'ceph-osd ' | "
|
||||||
|
"awk 'NR=={} {{ print $2 }}' | "
|
||||||
|
"grep -o '[0-9]*'`".format(index + 1))
|
||||||
|
|
||||||
|
def get_ceph_pools(self, sentry_unit):
|
||||||
|
"""Return a dict of ceph pools from a single ceph unit, with
|
||||||
|
pool name as keys, pool id as vals."""
|
||||||
|
pools = {}
|
||||||
|
cmd = 'sudo ceph osd lspools'
|
||||||
|
output, code = sentry_unit.run(cmd)
|
||||||
|
if code != 0:
|
||||||
|
msg = ('{} `{}` returned {} '
|
||||||
|
'{}'.format(sentry_unit.info['unit_name'],
|
||||||
|
cmd, code, output))
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
|
# Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
|
||||||
|
for pool in str(output).split(','):
|
||||||
|
pool_id_name = pool.split(' ')
|
||||||
|
if len(pool_id_name) == 2:
|
||||||
|
pool_id = pool_id_name[0]
|
||||||
|
pool_name = pool_id_name[1]
|
||||||
|
pools[pool_name] = int(pool_id)
|
||||||
|
|
||||||
|
self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
|
||||||
|
pools))
|
||||||
|
return pools
|
||||||
|
|
||||||
|
def get_ceph_df(self, sentry_unit):
|
||||||
|
"""Return dict of ceph df json output, including ceph pool state.
|
||||||
|
|
||||||
|
:param sentry_unit: Pointer to amulet sentry instance (juju unit)
|
||||||
|
:returns: Dict of ceph df output
|
||||||
|
"""
|
||||||
|
cmd = 'sudo ceph df --format=json'
|
||||||
|
output, code = sentry_unit.run(cmd)
|
||||||
|
if code != 0:
|
||||||
|
msg = ('{} `{}` returned {} '
|
||||||
|
'{}'.format(sentry_unit.info['unit_name'],
|
||||||
|
cmd, code, output))
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
return json.loads(output)
|
||||||
|
|
||||||
|
def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
|
||||||
|
"""Take a sample of attributes of a ceph pool, returning ceph
|
||||||
|
pool name, object count and disk space used for the specified
|
||||||
|
pool ID number.
|
||||||
|
|
||||||
|
:param sentry_unit: Pointer to amulet sentry instance (juju unit)
|
||||||
|
:param pool_id: Ceph pool ID
|
||||||
|
:returns: List of pool name, object count, kb disk space used
|
||||||
|
"""
|
||||||
|
df = self.get_ceph_df(sentry_unit)
|
||||||
|
pool_name = df['pools'][pool_id]['name']
|
||||||
|
obj_count = df['pools'][pool_id]['stats']['objects']
|
||||||
|
kb_used = df['pools'][pool_id]['stats']['kb_used']
|
||||||
|
self.log.debug('Ceph {} pool (ID {}): {} objects, '
|
||||||
|
'{} kb used'.format(pool_name, pool_id,
|
||||||
|
obj_count, kb_used))
|
||||||
|
return pool_name, obj_count, kb_used
|
||||||
|
|
||||||
|
def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
|
||||||
|
"""Validate ceph pool samples taken over time, such as pool
|
||||||
|
object counts or pool kb used, before adding, after adding, and
|
||||||
|
after deleting items which affect those pool attributes. The
|
||||||
|
2nd element is expected to be greater than the 1st; 3rd is expected
|
||||||
|
to be less than the 2nd.
|
||||||
|
|
||||||
|
:param samples: List containing 3 data samples
|
||||||
|
:param sample_type: String for logging and usage context
|
||||||
|
:returns: None if successful, Failure message otherwise
|
||||||
|
"""
|
||||||
|
original, created, deleted = range(3)
|
||||||
|
if samples[created] <= samples[original] or \
|
||||||
|
samples[deleted] >= samples[created]:
|
||||||
|
return ('Ceph {} samples ({}) '
|
||||||
|
'unexpected.'.format(sample_type, samples))
|
||||||
|
else:
|
||||||
|
self.log.debug('Ceph {} samples (OK): '
|
||||||
|
'{}'.format(sample_type, samples))
|
||||||
|
return None
|
||||||
|
|
|
@ -50,6 +50,8 @@ from charmhelpers.core.sysctl import create as sysctl_create
|
||||||
from charmhelpers.core.strutils import bool_from_string
|
from charmhelpers.core.strutils import bool_from_string
|
||||||
|
|
||||||
from charmhelpers.core.host import (
|
from charmhelpers.core.host import (
|
||||||
|
get_bond_master,
|
||||||
|
is_phy_iface,
|
||||||
list_nics,
|
list_nics,
|
||||||
get_nic_hwaddr,
|
get_nic_hwaddr,
|
||||||
mkdir,
|
mkdir,
|
||||||
|
@ -122,21 +124,24 @@ def config_flags_parser(config_flags):
|
||||||
of specifying multiple key value pairs within the same string. For
|
of specifying multiple key value pairs within the same string. For
|
||||||
example, a string in the format of 'key1=value1, key2=value2' will
|
example, a string in the format of 'key1=value1, key2=value2' will
|
||||||
return a dict of:
|
return a dict of:
|
||||||
{'key1': 'value1',
|
|
||||||
'key2': 'value2'}.
|
{'key1': 'value1',
|
||||||
|
'key2': 'value2'}.
|
||||||
|
|
||||||
2. A string in the above format, but supporting a comma-delimited list
|
2. A string in the above format, but supporting a comma-delimited list
|
||||||
of values for the same key. For example, a string in the format of
|
of values for the same key. For example, a string in the format of
|
||||||
'key1=value1, key2=value3,value4,value5' will return a dict of:
|
'key1=value1, key2=value3,value4,value5' will return a dict of:
|
||||||
{'key1', 'value1',
|
|
||||||
'key2', 'value2,value3,value4'}
|
{'key1', 'value1',
|
||||||
|
'key2', 'value2,value3,value4'}
|
||||||
|
|
||||||
3. A string containing a colon character (:) prior to an equal
|
3. A string containing a colon character (:) prior to an equal
|
||||||
character (=) will be treated as yaml and parsed as such. This can be
|
character (=) will be treated as yaml and parsed as such. This can be
|
||||||
used to specify more complex key value pairs. For example,
|
used to specify more complex key value pairs. For example,
|
||||||
a string in the format of 'key1: subkey1=value1, subkey2=value2' will
|
a string in the format of 'key1: subkey1=value1, subkey2=value2' will
|
||||||
return a dict of:
|
return a dict of:
|
||||||
{'key1', 'subkey1=value1, subkey2=value2'}
|
|
||||||
|
{'key1', 'subkey1=value1, subkey2=value2'}
|
||||||
|
|
||||||
The provided config_flags string may be a list of comma-separated values
|
The provided config_flags string may be a list of comma-separated values
|
||||||
which themselves may be comma-separated list of values.
|
which themselves may be comma-separated list of values.
|
||||||
|
@ -240,7 +245,7 @@ class SharedDBContext(OSContextGenerator):
|
||||||
if self.relation_prefix:
|
if self.relation_prefix:
|
||||||
password_setting = self.relation_prefix + '_password'
|
password_setting = self.relation_prefix + '_password'
|
||||||
|
|
||||||
for rid in relation_ids('shared-db'):
|
for rid in relation_ids(self.interfaces[0]):
|
||||||
for unit in related_units(rid):
|
for unit in related_units(rid):
|
||||||
rdata = relation_get(rid=rid, unit=unit)
|
rdata = relation_get(rid=rid, unit=unit)
|
||||||
host = rdata.get('db_host')
|
host = rdata.get('db_host')
|
||||||
|
@ -459,6 +464,11 @@ class AMQPContext(OSContextGenerator):
|
||||||
|
|
||||||
ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))
|
ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))
|
||||||
|
|
||||||
|
oslo_messaging_flags = conf.get('oslo-messaging-flags', None)
|
||||||
|
if oslo_messaging_flags:
|
||||||
|
ctxt['oslo_messaging_flags'] = config_flags_parser(
|
||||||
|
oslo_messaging_flags)
|
||||||
|
|
||||||
if not context_complete(ctxt):
|
if not context_complete(ctxt):
|
||||||
return {}
|
return {}
|
||||||
|
|
||||||
|
@ -885,9 +895,19 @@ class NeutronContext(OSContextGenerator):
|
||||||
'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
|
'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
|
||||||
return ctxt
|
return ctxt
|
||||||
|
|
||||||
def __call__(self):
|
def pg_ctxt(self):
|
||||||
self._ensure_packages()
|
driver = neutron_plugin_attribute(self.plugin, 'driver',
|
||||||
|
self.network_manager)
|
||||||
|
config = neutron_plugin_attribute(self.plugin, 'config',
|
||||||
|
self.network_manager)
|
||||||
|
ovs_ctxt = {'core_plugin': driver,
|
||||||
|
'neutron_plugin': 'plumgrid',
|
||||||
|
'neutron_security_groups': self.neutron_security_groups,
|
||||||
|
'local_ip': unit_private_ip(),
|
||||||
|
'config': config}
|
||||||
|
return ovs_ctxt
|
||||||
|
|
||||||
|
def __call__(self):
|
||||||
if self.network_manager not in ['quantum', 'neutron']:
|
if self.network_manager not in ['quantum', 'neutron']:
|
||||||
return {}
|
return {}
|
||||||
|
|
||||||
|
@ -906,6 +926,8 @@ class NeutronContext(OSContextGenerator):
|
||||||
ctxt.update(self.calico_ctxt())
|
ctxt.update(self.calico_ctxt())
|
||||||
elif self.plugin == 'vsp':
|
elif self.plugin == 'vsp':
|
||||||
ctxt.update(self.nuage_ctxt())
|
ctxt.update(self.nuage_ctxt())
|
||||||
|
elif self.plugin == 'plumgrid':
|
||||||
|
ctxt.update(self.pg_ctxt())
|
||||||
|
|
||||||
alchemy_flags = config('neutron-alchemy-flags')
|
alchemy_flags = config('neutron-alchemy-flags')
|
||||||
if alchemy_flags:
|
if alchemy_flags:
|
||||||
|
@ -917,7 +939,6 @@ class NeutronContext(OSContextGenerator):
|
||||||
|
|
||||||
|
|
||||||
class NeutronPortContext(OSContextGenerator):
|
class NeutronPortContext(OSContextGenerator):
|
||||||
NIC_PREFIXES = ['eth', 'bond']
|
|
||||||
|
|
||||||
def resolve_ports(self, ports):
|
def resolve_ports(self, ports):
|
||||||
"""Resolve NICs not yet bound to bridge(s)
|
"""Resolve NICs not yet bound to bridge(s)
|
||||||
|
@ -929,7 +950,18 @@ class NeutronPortContext(OSContextGenerator):
|
||||||
|
|
||||||
hwaddr_to_nic = {}
|
hwaddr_to_nic = {}
|
||||||
hwaddr_to_ip = {}
|
hwaddr_to_ip = {}
|
||||||
for nic in list_nics(self.NIC_PREFIXES):
|
for nic in list_nics():
|
||||||
|
# Ignore virtual interfaces (bond masters will be identified from
|
||||||
|
# their slaves)
|
||||||
|
if not is_phy_iface(nic):
|
||||||
|
continue
|
||||||
|
|
||||||
|
_nic = get_bond_master(nic)
|
||||||
|
if _nic:
|
||||||
|
log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
|
||||||
|
level=DEBUG)
|
||||||
|
nic = _nic
|
||||||
|
|
||||||
hwaddr = get_nic_hwaddr(nic)
|
hwaddr = get_nic_hwaddr(nic)
|
||||||
hwaddr_to_nic[hwaddr] = nic
|
hwaddr_to_nic[hwaddr] = nic
|
||||||
addresses = get_ipv4_addr(nic, fatal=False)
|
addresses = get_ipv4_addr(nic, fatal=False)
|
||||||
|
@ -955,7 +987,8 @@ class NeutronPortContext(OSContextGenerator):
|
||||||
# trust it to be the real external network).
|
# trust it to be the real external network).
|
||||||
resolved.append(entry)
|
resolved.append(entry)
|
||||||
|
|
||||||
return resolved
|
# Ensure no duplicates
|
||||||
|
return list(set(resolved))
|
||||||
|
|
||||||
|
|
||||||
class OSConfigFlagContext(OSContextGenerator):
|
class OSConfigFlagContext(OSContextGenerator):
|
||||||
|
@ -1045,13 +1078,22 @@ class SubordinateConfigContext(OSContextGenerator):
|
||||||
:param config_file : Service's config file to query sections
|
:param config_file : Service's config file to query sections
|
||||||
:param interface : Subordinate interface to inspect
|
:param interface : Subordinate interface to inspect
|
||||||
"""
|
"""
|
||||||
self.service = service
|
|
||||||
self.config_file = config_file
|
self.config_file = config_file
|
||||||
self.interface = interface
|
if isinstance(service, list):
|
||||||
|
self.services = service
|
||||||
|
else:
|
||||||
|
self.services = [service]
|
||||||
|
if isinstance(interface, list):
|
||||||
|
self.interfaces = interface
|
||||||
|
else:
|
||||||
|
self.interfaces = [interface]
|
||||||
|
|
||||||
def __call__(self):
|
def __call__(self):
|
||||||
ctxt = {'sections': {}}
|
ctxt = {'sections': {}}
|
||||||
for rid in relation_ids(self.interface):
|
rids = []
|
||||||
|
for interface in self.interfaces:
|
||||||
|
rids.extend(relation_ids(interface))
|
||||||
|
for rid in rids:
|
||||||
for unit in related_units(rid):
|
for unit in related_units(rid):
|
||||||
sub_config = relation_get('subordinate_configuration',
|
sub_config = relation_get('subordinate_configuration',
|
||||||
rid=rid, unit=unit)
|
rid=rid, unit=unit)
|
||||||
|
@ -1063,29 +1105,32 @@ class SubordinateConfigContext(OSContextGenerator):
|
||||||
'setting from %s' % rid, level=ERROR)
|
'setting from %s' % rid, level=ERROR)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if self.service not in sub_config:
|
for service in self.services:
|
||||||
log('Found subordinate_config on %s but it contained'
|
if service not in sub_config:
|
||||||
'nothing for %s service' % (rid, self.service),
|
log('Found subordinate_config on %s but it contained'
|
||||||
level=INFO)
|
'nothing for %s service' % (rid, service),
|
||||||
continue
|
level=INFO)
|
||||||
|
continue
|
||||||
|
|
||||||
sub_config = sub_config[self.service]
|
sub_config = sub_config[service]
|
||||||
if self.config_file not in sub_config:
|
if self.config_file not in sub_config:
|
||||||
log('Found subordinate_config on %s but it contained'
|
log('Found subordinate_config on %s but it contained'
|
||||||
'nothing for %s' % (rid, self.config_file),
|
'nothing for %s' % (rid, self.config_file),
|
||||||
level=INFO)
|
level=INFO)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
sub_config = sub_config[self.config_file]
|
|
||||||
for k, v in six.iteritems(sub_config):
|
|
||||||
if k == 'sections':
|
|
||||||
for section, config_dict in six.iteritems(v):
|
|
||||||
log("adding section '%s'" % (section),
|
|
||||||
level=DEBUG)
|
|
||||||
ctxt[k][section] = config_dict
|
|
||||||
else:
|
|
||||||
ctxt[k] = v
|
|
||||||
|
|
||||||
|
sub_config = sub_config[self.config_file]
|
||||||
|
for k, v in six.iteritems(sub_config):
|
||||||
|
if k == 'sections':
|
||||||
|
for section, config_list in six.iteritems(v):
|
||||||
|
log("adding section '%s'" % (section),
|
||||||
|
level=DEBUG)
|
||||||
|
if ctxt[k].get(section):
|
||||||
|
ctxt[k][section].extend(config_list)
|
||||||
|
else:
|
||||||
|
ctxt[k][section] = config_list
|
||||||
|
else:
|
||||||
|
ctxt[k] = v
|
||||||
log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
|
log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
|
||||||
return ctxt
|
return ctxt
|
||||||
|
|
||||||
|
@ -1262,15 +1307,19 @@ class DataPortContext(NeutronPortContext):
|
||||||
def __call__(self):
|
def __call__(self):
|
||||||
ports = config('data-port')
|
ports = config('data-port')
|
||||||
if ports:
|
if ports:
|
||||||
|
# Map of {port/mac:bridge}
|
||||||
portmap = parse_data_port_mappings(ports)
|
portmap = parse_data_port_mappings(ports)
|
||||||
ports = portmap.values()
|
ports = portmap.keys()
|
||||||
|
# Resolve provided ports or mac addresses and filter out those
|
||||||
|
# already attached to a bridge.
|
||||||
resolved = self.resolve_ports(ports)
|
resolved = self.resolve_ports(ports)
|
||||||
|
# FIXME: is this necessary?
|
||||||
normalized = {get_nic_hwaddr(port): port for port in resolved
|
normalized = {get_nic_hwaddr(port): port for port in resolved
|
||||||
if port not in ports}
|
if port not in ports}
|
||||||
normalized.update({port: port for port in resolved
|
normalized.update({port: port for port in resolved
|
||||||
if port in ports})
|
if port in ports})
|
||||||
if resolved:
|
if resolved:
|
||||||
return {bridge: normalized[port] for bridge, port in
|
return {bridge: normalized[port] for port, bridge in
|
||||||
six.iteritems(portmap) if port in normalized.keys()}
|
six.iteritems(portmap) if port in normalized.keys()}
|
||||||
|
|
||||||
return None
|
return None
|
||||||
|
|
|
@ -17,6 +17,7 @@
|
||||||
from charmhelpers.core.hookenv import (
|
from charmhelpers.core.hookenv import (
|
||||||
config,
|
config,
|
||||||
unit_get,
|
unit_get,
|
||||||
|
service_name,
|
||||||
)
|
)
|
||||||
from charmhelpers.contrib.network.ip import (
|
from charmhelpers.contrib.network.ip import (
|
||||||
get_address_in_network,
|
get_address_in_network,
|
||||||
|
@ -26,8 +27,6 @@ from charmhelpers.contrib.network.ip import (
|
||||||
)
|
)
|
||||||
from charmhelpers.contrib.hahelpers.cluster import is_clustered
|
from charmhelpers.contrib.hahelpers.cluster import is_clustered
|
||||||
|
|
||||||
from functools import partial
|
|
||||||
|
|
||||||
PUBLIC = 'public'
|
PUBLIC = 'public'
|
||||||
INTERNAL = 'int'
|
INTERNAL = 'int'
|
||||||
ADMIN = 'admin'
|
ADMIN = 'admin'
|
||||||
|
@ -35,15 +34,18 @@ ADMIN = 'admin'
|
||||||
ADDRESS_MAP = {
|
ADDRESS_MAP = {
|
||||||
PUBLIC: {
|
PUBLIC: {
|
||||||
'config': 'os-public-network',
|
'config': 'os-public-network',
|
||||||
'fallback': 'public-address'
|
'fallback': 'public-address',
|
||||||
|
'override': 'os-public-hostname',
|
||||||
},
|
},
|
||||||
INTERNAL: {
|
INTERNAL: {
|
||||||
'config': 'os-internal-network',
|
'config': 'os-internal-network',
|
||||||
'fallback': 'private-address'
|
'fallback': 'private-address',
|
||||||
|
'override': 'os-internal-hostname',
|
||||||
},
|
},
|
||||||
ADMIN: {
|
ADMIN: {
|
||||||
'config': 'os-admin-network',
|
'config': 'os-admin-network',
|
||||||
'fallback': 'private-address'
|
'fallback': 'private-address',
|
||||||
|
'override': 'os-admin-hostname',
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -57,15 +59,50 @@ def canonical_url(configs, endpoint_type=PUBLIC):
|
||||||
:param endpoint_type: str endpoint type to resolve.
|
:param endpoint_type: str endpoint type to resolve.
|
||||||
:param returns: str base URL for services on the current service unit.
|
:param returns: str base URL for services on the current service unit.
|
||||||
"""
|
"""
|
||||||
scheme = 'http'
|
scheme = _get_scheme(configs)
|
||||||
if 'https' in configs.complete_contexts():
|
|
||||||
scheme = 'https'
|
|
||||||
address = resolve_address(endpoint_type)
|
address = resolve_address(endpoint_type)
|
||||||
if is_ipv6(address):
|
if is_ipv6(address):
|
||||||
address = "[{}]".format(address)
|
address = "[{}]".format(address)
|
||||||
|
|
||||||
return '%s://%s' % (scheme, address)
|
return '%s://%s' % (scheme, address)
|
||||||
|
|
||||||
|
|
||||||
|
def _get_scheme(configs):
|
||||||
|
"""Returns the scheme to use for the url (either http or https)
|
||||||
|
depending upon whether https is in the configs value.
|
||||||
|
|
||||||
|
:param configs: OSTemplateRenderer config templating object to inspect
|
||||||
|
for a complete https context.
|
||||||
|
:returns: either 'http' or 'https' depending on whether https is
|
||||||
|
configured within the configs context.
|
||||||
|
"""
|
||||||
|
scheme = 'http'
|
||||||
|
if configs and 'https' in configs.complete_contexts():
|
||||||
|
scheme = 'https'
|
||||||
|
return scheme
|
||||||
|
|
||||||
|
|
||||||
|
def _get_address_override(endpoint_type=PUBLIC):
|
||||||
|
"""Returns any address overrides that the user has defined based on the
|
||||||
|
endpoint type.
|
||||||
|
|
||||||
|
Note: this function allows for the service name to be inserted into the
|
||||||
|
address if the user specifies {service_name}.somehost.org.
|
||||||
|
|
||||||
|
:param endpoint_type: the type of endpoint to retrieve the override
|
||||||
|
value for.
|
||||||
|
:returns: any endpoint address or hostname that the user has overridden
|
||||||
|
or None if an override is not present.
|
||||||
|
"""
|
||||||
|
override_key = ADDRESS_MAP[endpoint_type]['override']
|
||||||
|
addr_override = config(override_key)
|
||||||
|
if not addr_override:
|
||||||
|
return None
|
||||||
|
else:
|
||||||
|
return addr_override.format(service_name=service_name())
|
||||||
|
|
||||||
|
|
||||||
def resolve_address(endpoint_type=PUBLIC):
|
def resolve_address(endpoint_type=PUBLIC):
|
||||||
"""Return unit address depending on net config.
|
"""Return unit address depending on net config.
|
||||||
|
|
||||||
|
@ -77,7 +114,10 @@ def resolve_address(endpoint_type=PUBLIC):
|
||||||
|
|
||||||
:param endpoint_type: Network endpoing type
|
:param endpoint_type: Network endpoing type
|
||||||
"""
|
"""
|
||||||
resolved_address = None
|
resolved_address = _get_address_override(endpoint_type)
|
||||||
|
if resolved_address:
|
||||||
|
return resolved_address
|
||||||
|
|
||||||
vips = config('vip')
|
vips = config('vip')
|
||||||
if vips:
|
if vips:
|
||||||
vips = vips.split()
|
vips = vips.split()
|
||||||
|
@ -109,38 +149,3 @@ def resolve_address(endpoint_type=PUBLIC):
|
||||||
"clustered=%s)" % (net_type, clustered))
|
"clustered=%s)" % (net_type, clustered))
|
||||||
|
|
||||||
return resolved_address
|
return resolved_address
|
||||||
|
|
||||||
|
|
||||||
def endpoint_url(configs, url_template, port, endpoint_type=PUBLIC,
|
|
||||||
override=None):
|
|
||||||
"""Returns the correct endpoint URL to advertise to Keystone.
|
|
||||||
|
|
||||||
This method provides the correct endpoint URL which should be advertised to
|
|
||||||
the keystone charm for endpoint creation. This method allows for the url to
|
|
||||||
be overridden to force a keystone endpoint to have specific URL for any of
|
|
||||||
the defined scopes (admin, internal, public).
|
|
||||||
|
|
||||||
:param configs: OSTemplateRenderer config templating object to inspect
|
|
||||||
for a complete https context.
|
|
||||||
:param url_template: str format string for creating the url template. Only
|
|
||||||
two values will be passed - the scheme+hostname
|
|
||||||
returned by the canonical_url and the port.
|
|
||||||
:param endpoint_type: str endpoint type to resolve.
|
|
||||||
:param override: str the name of the config option which overrides the
|
|
||||||
endpoint URL defined by the charm itself. None will
|
|
||||||
disable any overrides (default).
|
|
||||||
"""
|
|
||||||
if override:
|
|
||||||
# Return any user-defined overrides for the keystone endpoint URL.
|
|
||||||
user_value = config(override)
|
|
||||||
if user_value:
|
|
||||||
return user_value.strip()
|
|
||||||
|
|
||||||
return url_template % (canonical_url(configs, endpoint_type), port)
|
|
||||||
|
|
||||||
|
|
||||||
public_endpoint = partial(endpoint_url, endpoint_type=PUBLIC)
|
|
||||||
|
|
||||||
internal_endpoint = partial(endpoint_url, endpoint_type=INTERNAL)
|
|
||||||
|
|
||||||
admin_endpoint = partial(endpoint_url, endpoint_type=ADMIN)
|
|
||||||
|
|
|
@ -172,14 +172,16 @@ def neutron_plugins():
|
||||||
'services': ['calico-felix',
|
'services': ['calico-felix',
|
||||||
'bird',
|
'bird',
|
||||||
'neutron-dhcp-agent',
|
'neutron-dhcp-agent',
|
||||||
'nova-api-metadata'],
|
'nova-api-metadata',
|
||||||
|
'etcd'],
|
||||||
'packages': [[headers_package()] + determine_dkms_package(),
|
'packages': [[headers_package()] + determine_dkms_package(),
|
||||||
['calico-compute',
|
['calico-compute',
|
||||||
'bird',
|
'bird',
|
||||||
'neutron-dhcp-agent',
|
'neutron-dhcp-agent',
|
||||||
'nova-api-metadata']],
|
'nova-api-metadata',
|
||||||
'server_packages': ['neutron-server', 'calico-control'],
|
'etcd']],
|
||||||
'server_services': ['neutron-server']
|
'server_packages': ['neutron-server', 'calico-control', 'etcd'],
|
||||||
|
'server_services': ['neutron-server', 'etcd']
|
||||||
},
|
},
|
||||||
'vsp': {
|
'vsp': {
|
||||||
'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
|
'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
|
||||||
|
@ -193,6 +195,20 @@ def neutron_plugins():
|
||||||
'packages': [],
|
'packages': [],
|
||||||
'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
|
'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
|
||||||
'server_services': ['neutron-server']
|
'server_services': ['neutron-server']
|
||||||
|
},
|
||||||
|
'plumgrid': {
|
||||||
|
'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
|
||||||
|
'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2',
|
||||||
|
'contexts': [
|
||||||
|
context.SharedDBContext(user=config('database-user'),
|
||||||
|
database=config('database'),
|
||||||
|
ssl_dir=NEUTRON_CONF_DIR)],
|
||||||
|
'services': [],
|
||||||
|
'packages': [['plumgrid-lxc'],
|
||||||
|
['iovisor-dkms']],
|
||||||
|
'server_packages': ['neutron-server',
|
||||||
|
'neutron-plugin-plumgrid'],
|
||||||
|
'server_services': ['neutron-server']
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if release >= 'icehouse':
|
if release >= 'icehouse':
|
||||||
|
@ -253,14 +269,30 @@ def network_manager():
|
||||||
return 'neutron'
|
return 'neutron'
|
||||||
|
|
||||||
|
|
||||||
def parse_mappings(mappings):
|
def parse_mappings(mappings, key_rvalue=False):
|
||||||
|
"""By default mappings are lvalue keyed.
|
||||||
|
|
||||||
|
If key_rvalue is True, the mapping will be reversed to allow multiple
|
||||||
|
configs for the same lvalue.
|
||||||
|
"""
|
||||||
parsed = {}
|
parsed = {}
|
||||||
if mappings:
|
if mappings:
|
||||||
mappings = mappings.split(' ')
|
mappings = mappings.split()
|
||||||
for m in mappings:
|
for m in mappings:
|
||||||
p = m.partition(':')
|
p = m.partition(':')
|
||||||
if p[1] == ':':
|
|
||||||
parsed[p[0].strip()] = p[2].strip()
|
if key_rvalue:
|
||||||
|
key_index = 2
|
||||||
|
val_index = 0
|
||||||
|
# if there is no rvalue skip to next
|
||||||
|
if not p[1]:
|
||||||
|
continue
|
||||||
|
else:
|
||||||
|
key_index = 0
|
||||||
|
val_index = 2
|
||||||
|
|
||||||
|
key = p[key_index].strip()
|
||||||
|
parsed[key] = p[val_index].strip()
|
||||||
|
|
||||||
return parsed
|
return parsed
|
||||||
|
|
||||||
|
@ -278,25 +310,25 @@ def parse_bridge_mappings(mappings):
|
||||||
def parse_data_port_mappings(mappings, default_bridge='br-data'):
|
def parse_data_port_mappings(mappings, default_bridge='br-data'):
|
||||||
"""Parse data port mappings.
|
"""Parse data port mappings.
|
||||||
|
|
||||||
Mappings must be a space-delimited list of bridge:port mappings.
|
Mappings must be a space-delimited list of port:bridge mappings.
|
||||||
|
|
||||||
Returns dict of the form {bridge:port}.
|
Returns dict of the form {port:bridge} where port may be an mac address or
|
||||||
|
interface name.
|
||||||
"""
|
"""
|
||||||
_mappings = parse_mappings(mappings)
|
|
||||||
if not _mappings:
|
# NOTE(dosaboy): we use rvalue for key to allow multiple values to be
|
||||||
|
# proposed for <port> since it may be a mac address which will differ
|
||||||
|
# across units this allowing first-known-good to be chosen.
|
||||||
|
_mappings = parse_mappings(mappings, key_rvalue=True)
|
||||||
|
if not _mappings or list(_mappings.values()) == ['']:
|
||||||
if not mappings:
|
if not mappings:
|
||||||
return {}
|
return {}
|
||||||
|
|
||||||
# For backwards-compatibility we need to support port-only provided in
|
# For backwards-compatibility we need to support port-only provided in
|
||||||
# config.
|
# config.
|
||||||
_mappings = {default_bridge: mappings.split(' ')[0]}
|
_mappings = {mappings.split()[0]: default_bridge}
|
||||||
|
|
||||||
bridges = _mappings.keys()
|
|
||||||
ports = _mappings.values()
|
|
||||||
if len(set(bridges)) != len(bridges):
|
|
||||||
raise Exception("It is not allowed to have more than one port "
|
|
||||||
"configured on the same bridge")
|
|
||||||
|
|
||||||
|
ports = _mappings.keys()
|
||||||
if len(set(ports)) != len(ports):
|
if len(set(ports)) != len(ports):
|
||||||
raise Exception("It is not allowed to have the same port configured "
|
raise Exception("It is not allowed to have the same port configured "
|
||||||
"on more than one bridge")
|
"on more than one bridge")
|
||||||
|
@ -309,6 +341,8 @@ def parse_vlan_range_mappings(mappings):
|
||||||
|
|
||||||
Mappings must be a space-delimited list of provider:start:end mappings.
|
Mappings must be a space-delimited list of provider:start:end mappings.
|
||||||
|
|
||||||
|
The start:end range is optional and may be omitted.
|
||||||
|
|
||||||
Returns dict of the form {provider: (start, end)}.
|
Returns dict of the form {provider: (start, end)}.
|
||||||
"""
|
"""
|
||||||
_mappings = parse_mappings(mappings)
|
_mappings = parse_mappings(mappings)
|
||||||
|
|
|
@ -5,11 +5,11 @@
|
||||||
###############################################################################
|
###############################################################################
|
||||||
[global]
|
[global]
|
||||||
{% if auth -%}
|
{% if auth -%}
|
||||||
auth_supported = {{ auth }}
|
auth_supported = {{ auth }}
|
||||||
keyring = /etc/ceph/$cluster.$name.keyring
|
keyring = /etc/ceph/$cluster.$name.keyring
|
||||||
mon host = {{ mon_hosts }}
|
mon host = {{ mon_hosts }}
|
||||||
{% endif -%}
|
{% endif -%}
|
||||||
log to syslog = {{ use_syslog }}
|
log to syslog = {{ use_syslog }}
|
||||||
err to syslog = {{ use_syslog }}
|
err to syslog = {{ use_syslog }}
|
||||||
clog to syslog = {{ use_syslog }}
|
clog to syslog = {{ use_syslog }}
|
||||||
|
|
||||||
|
|
|
@ -29,8 +29,8 @@ from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
|
||||||
try:
|
try:
|
||||||
from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
|
from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
|
||||||
except ImportError:
|
except ImportError:
|
||||||
# python-jinja2 may not be installed yet, or we're running unittests.
|
apt_install('python-jinja2', fatal=True)
|
||||||
FileSystemLoader = ChoiceLoader = Environment = exceptions = None
|
from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
|
||||||
|
|
||||||
|
|
||||||
class OSConfigException(Exception):
|
class OSConfigException(Exception):
|
||||||
|
|
|
@ -1,5 +1,3 @@
|
||||||
#!/usr/bin/python
|
|
||||||
|
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
#
|
#
|
||||||
# This file is part of charm-helpers.
|
# This file is part of charm-helpers.
|
||||||
|
@ -24,6 +22,7 @@ import subprocess
|
||||||
import json
|
import json
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
|
import re
|
||||||
|
|
||||||
import six
|
import six
|
||||||
import yaml
|
import yaml
|
||||||
|
@ -53,9 +52,13 @@ from charmhelpers.contrib.network.ip import (
|
||||||
get_ipv6_addr
|
get_ipv6_addr
|
||||||
)
|
)
|
||||||
|
|
||||||
|
from charmhelpers.contrib.python.packages import (
|
||||||
|
pip_create_virtualenv,
|
||||||
|
pip_install,
|
||||||
|
)
|
||||||
|
|
||||||
from charmhelpers.core.host import lsb_release, mounts, umount
|
from charmhelpers.core.host import lsb_release, mounts, umount
|
||||||
from charmhelpers.fetch import apt_install, apt_cache, install_remote
|
from charmhelpers.fetch import apt_install, apt_cache, install_remote
|
||||||
from charmhelpers.contrib.python.packages import pip_install
|
|
||||||
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
|
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
|
||||||
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
|
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
|
||||||
|
|
||||||
|
@ -65,7 +68,6 @@ CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
|
||||||
DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
|
DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
|
||||||
'restricted main multiverse universe')
|
'restricted main multiverse universe')
|
||||||
|
|
||||||
|
|
||||||
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
|
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
|
||||||
('oneiric', 'diablo'),
|
('oneiric', 'diablo'),
|
||||||
('precise', 'essex'),
|
('precise', 'essex'),
|
||||||
|
@ -75,6 +77,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
|
||||||
('trusty', 'icehouse'),
|
('trusty', 'icehouse'),
|
||||||
('utopic', 'juno'),
|
('utopic', 'juno'),
|
||||||
('vivid', 'kilo'),
|
('vivid', 'kilo'),
|
||||||
|
('wily', 'liberty'),
|
||||||
])
|
])
|
||||||
|
|
||||||
|
|
||||||
|
@ -87,6 +90,7 @@ OPENSTACK_CODENAMES = OrderedDict([
|
||||||
('2014.1', 'icehouse'),
|
('2014.1', 'icehouse'),
|
||||||
('2014.2', 'juno'),
|
('2014.2', 'juno'),
|
||||||
('2015.1', 'kilo'),
|
('2015.1', 'kilo'),
|
||||||
|
('2015.2', 'liberty'),
|
||||||
])
|
])
|
||||||
|
|
||||||
# The ugly duckling
|
# The ugly duckling
|
||||||
|
@ -109,8 +113,37 @@ SWIFT_CODENAMES = OrderedDict([
|
||||||
('2.2.0', 'juno'),
|
('2.2.0', 'juno'),
|
||||||
('2.2.1', 'kilo'),
|
('2.2.1', 'kilo'),
|
||||||
('2.2.2', 'kilo'),
|
('2.2.2', 'kilo'),
|
||||||
|
('2.3.0', 'liberty'),
|
||||||
])
|
])
|
||||||
|
|
||||||
|
# >= Liberty version->codename mapping
|
||||||
|
PACKAGE_CODENAMES = {
|
||||||
|
'nova-common': OrderedDict([
|
||||||
|
('12.0.0', 'liberty'),
|
||||||
|
]),
|
||||||
|
'neutron-common': OrderedDict([
|
||||||
|
('7.0.0', 'liberty'),
|
||||||
|
]),
|
||||||
|
'cinder-common': OrderedDict([
|
||||||
|
('7.0.0', 'liberty'),
|
||||||
|
]),
|
||||||
|
'keystone': OrderedDict([
|
||||||
|
('8.0.0', 'liberty'),
|
||||||
|
]),
|
||||||
|
'horizon-common': OrderedDict([
|
||||||
|
('8.0.0', 'liberty'),
|
||||||
|
]),
|
||||||
|
'ceilometer-common': OrderedDict([
|
||||||
|
('5.0.0', 'liberty'),
|
||||||
|
]),
|
||||||
|
'heat-common': OrderedDict([
|
||||||
|
('5.0.0', 'liberty'),
|
||||||
|
]),
|
||||||
|
'glance-common': OrderedDict([
|
||||||
|
('11.0.0', 'liberty'),
|
||||||
|
]),
|
||||||
|
}
|
||||||
|
|
||||||
DEFAULT_LOOPBACK_SIZE = '5G'
|
DEFAULT_LOOPBACK_SIZE = '5G'
|
||||||
|
|
||||||
|
|
||||||
|
@ -160,9 +193,9 @@ def get_os_codename_version(vers):
|
||||||
error_out(e)
|
error_out(e)
|
||||||
|
|
||||||
|
|
||||||
def get_os_version_codename(codename):
|
def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
|
||||||
'''Determine OpenStack version number from codename.'''
|
'''Determine OpenStack version number from codename.'''
|
||||||
for k, v in six.iteritems(OPENSTACK_CODENAMES):
|
for k, v in six.iteritems(version_map):
|
||||||
if v == codename:
|
if v == codename:
|
||||||
return k
|
return k
|
||||||
e = 'Could not derive OpenStack version for '\
|
e = 'Could not derive OpenStack version for '\
|
||||||
|
@ -194,20 +227,31 @@ def get_os_codename_package(package, fatal=True):
|
||||||
error_out(e)
|
error_out(e)
|
||||||
|
|
||||||
vers = apt.upstream_version(pkg.current_ver.ver_str)
|
vers = apt.upstream_version(pkg.current_ver.ver_str)
|
||||||
|
match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
|
||||||
|
if match:
|
||||||
|
vers = match.group(0)
|
||||||
|
|
||||||
try:
|
# >= Liberty independent project versions
|
||||||
if 'swift' in pkg.name:
|
if (package in PACKAGE_CODENAMES and
|
||||||
swift_vers = vers[:5]
|
vers in PACKAGE_CODENAMES[package]):
|
||||||
if swift_vers not in SWIFT_CODENAMES:
|
return PACKAGE_CODENAMES[package][vers]
|
||||||
# Deal with 1.10.0 upward
|
else:
|
||||||
swift_vers = vers[:6]
|
# < Liberty co-ordinated project versions
|
||||||
return SWIFT_CODENAMES[swift_vers]
|
try:
|
||||||
else:
|
if 'swift' in pkg.name:
|
||||||
vers = vers[:6]
|
swift_vers = vers[:5]
|
||||||
return OPENSTACK_CODENAMES[vers]
|
if swift_vers not in SWIFT_CODENAMES:
|
||||||
except KeyError:
|
# Deal with 1.10.0 upward
|
||||||
e = 'Could not determine OpenStack codename for version %s' % vers
|
swift_vers = vers[:6]
|
||||||
error_out(e)
|
return SWIFT_CODENAMES[swift_vers]
|
||||||
|
else:
|
||||||
|
vers = vers[:6]
|
||||||
|
return OPENSTACK_CODENAMES[vers]
|
||||||
|
except KeyError:
|
||||||
|
if not fatal:
|
||||||
|
return None
|
||||||
|
e = 'Could not determine OpenStack codename for version %s' % vers
|
||||||
|
error_out(e)
|
||||||
|
|
||||||
|
|
||||||
def get_os_version_package(pkg, fatal=True):
|
def get_os_version_package(pkg, fatal=True):
|
||||||
|
@ -317,6 +361,9 @@ def configure_installation_source(rel):
|
||||||
'kilo': 'trusty-updates/kilo',
|
'kilo': 'trusty-updates/kilo',
|
||||||
'kilo/updates': 'trusty-updates/kilo',
|
'kilo/updates': 'trusty-updates/kilo',
|
||||||
'kilo/proposed': 'trusty-proposed/kilo',
|
'kilo/proposed': 'trusty-proposed/kilo',
|
||||||
|
'liberty': 'trusty-updates/liberty',
|
||||||
|
'liberty/updates': 'trusty-updates/liberty',
|
||||||
|
'liberty/proposed': 'trusty-proposed/liberty',
|
||||||
}
|
}
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
@ -382,7 +429,11 @@ def openstack_upgrade_available(package):
|
||||||
import apt_pkg as apt
|
import apt_pkg as apt
|
||||||
src = config('openstack-origin')
|
src = config('openstack-origin')
|
||||||
cur_vers = get_os_version_package(package)
|
cur_vers = get_os_version_package(package)
|
||||||
available_vers = get_os_version_install_source(src)
|
if "swift" in package:
|
||||||
|
codename = get_os_codename_install_source(src)
|
||||||
|
available_vers = get_os_version_codename(codename, SWIFT_CODENAMES)
|
||||||
|
else:
|
||||||
|
available_vers = get_os_version_install_source(src)
|
||||||
apt.init()
|
apt.init()
|
||||||
return apt.version_compare(available_vers, cur_vers) == 1
|
return apt.version_compare(available_vers, cur_vers) == 1
|
||||||
|
|
||||||
|
@ -497,11 +548,22 @@ def git_install_requested():
|
||||||
requirements_dir = None
|
requirements_dir = None
|
||||||
|
|
||||||
|
|
||||||
def git_clone_and_install(projects_yaml, core_project):
|
def _git_yaml_load(projects_yaml):
|
||||||
|
"""
|
||||||
|
Load the specified yaml into a dictionary.
|
||||||
|
"""
|
||||||
|
if not projects_yaml:
|
||||||
|
return None
|
||||||
|
|
||||||
|
return yaml.load(projects_yaml)
|
||||||
|
|
||||||
|
|
||||||
|
def git_clone_and_install(projects_yaml, core_project, depth=1):
|
||||||
"""
|
"""
|
||||||
Clone/install all specified OpenStack repositories.
|
Clone/install all specified OpenStack repositories.
|
||||||
|
|
||||||
The expected format of projects_yaml is:
|
The expected format of projects_yaml is:
|
||||||
|
|
||||||
repositories:
|
repositories:
|
||||||
- {name: keystone,
|
- {name: keystone,
|
||||||
repository: 'git://git.openstack.org/openstack/keystone.git',
|
repository: 'git://git.openstack.org/openstack/keystone.git',
|
||||||
|
@ -509,41 +571,55 @@ def git_clone_and_install(projects_yaml, core_project):
|
||||||
- {name: requirements,
|
- {name: requirements,
|
||||||
repository: 'git://git.openstack.org/openstack/requirements.git',
|
repository: 'git://git.openstack.org/openstack/requirements.git',
|
||||||
branch: 'stable/icehouse'}
|
branch: 'stable/icehouse'}
|
||||||
directory: /mnt/openstack-git
|
|
||||||
http_proxy: http://squid.internal:3128
|
|
||||||
https_proxy: https://squid.internal:3128
|
|
||||||
|
|
||||||
The directory, http_proxy, and https_proxy keys are optional.
|
directory: /mnt/openstack-git
|
||||||
|
http_proxy: squid-proxy-url
|
||||||
|
https_proxy: squid-proxy-url
|
||||||
|
|
||||||
|
The directory, http_proxy, and https_proxy keys are optional.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
global requirements_dir
|
global requirements_dir
|
||||||
parent_dir = '/mnt/openstack-git'
|
parent_dir = '/mnt/openstack-git'
|
||||||
|
http_proxy = None
|
||||||
|
|
||||||
if not projects_yaml:
|
projects = _git_yaml_load(projects_yaml)
|
||||||
return
|
|
||||||
|
|
||||||
projects = yaml.load(projects_yaml)
|
|
||||||
_git_validate_projects_yaml(projects, core_project)
|
_git_validate_projects_yaml(projects, core_project)
|
||||||
|
|
||||||
if 'http_proxy' in projects.keys():
|
old_environ = dict(os.environ)
|
||||||
os.environ['http_proxy'] = projects['http_proxy']
|
|
||||||
|
|
||||||
|
if 'http_proxy' in projects.keys():
|
||||||
|
http_proxy = projects['http_proxy']
|
||||||
|
os.environ['http_proxy'] = projects['http_proxy']
|
||||||
if 'https_proxy' in projects.keys():
|
if 'https_proxy' in projects.keys():
|
||||||
os.environ['https_proxy'] = projects['https_proxy']
|
os.environ['https_proxy'] = projects['https_proxy']
|
||||||
|
|
||||||
if 'directory' in projects.keys():
|
if 'directory' in projects.keys():
|
||||||
parent_dir = projects['directory']
|
parent_dir = projects['directory']
|
||||||
|
|
||||||
|
pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
|
||||||
|
|
||||||
|
# Upgrade setuptools and pip from default virtualenv versions. The default
|
||||||
|
# versions in trusty break master OpenStack branch deployments.
|
||||||
|
for p in ['pip', 'setuptools']:
|
||||||
|
pip_install(p, upgrade=True, proxy=http_proxy,
|
||||||
|
venv=os.path.join(parent_dir, 'venv'))
|
||||||
|
|
||||||
for p in projects['repositories']:
|
for p in projects['repositories']:
|
||||||
repo = p['repository']
|
repo = p['repository']
|
||||||
branch = p['branch']
|
branch = p['branch']
|
||||||
if p['name'] == 'requirements':
|
if p['name'] == 'requirements':
|
||||||
repo_dir = _git_clone_and_install_single(repo, branch, parent_dir,
|
repo_dir = _git_clone_and_install_single(repo, branch, depth,
|
||||||
|
parent_dir, http_proxy,
|
||||||
update_requirements=False)
|
update_requirements=False)
|
||||||
requirements_dir = repo_dir
|
requirements_dir = repo_dir
|
||||||
else:
|
else:
|
||||||
repo_dir = _git_clone_and_install_single(repo, branch, parent_dir,
|
repo_dir = _git_clone_and_install_single(repo, branch, depth,
|
||||||
|
parent_dir, http_proxy,
|
||||||
update_requirements=True)
|
update_requirements=True)
|
||||||
|
|
||||||
|
os.environ = old_environ
|
||||||
|
|
||||||
|
|
||||||
def _git_validate_projects_yaml(projects, core_project):
|
def _git_validate_projects_yaml(projects, core_project):
|
||||||
"""
|
"""
|
||||||
|
@ -571,7 +647,8 @@ def _git_ensure_key_exists(key, keys):
|
||||||
error_out('openstack-origin-git key \'{}\' is missing'.format(key))
|
error_out('openstack-origin-git key \'{}\' is missing'.format(key))
|
||||||
|
|
||||||
|
|
||||||
def _git_clone_and_install_single(repo, branch, parent_dir, update_requirements):
|
def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
|
||||||
|
update_requirements):
|
||||||
"""
|
"""
|
||||||
Clone and install a single git repository.
|
Clone and install a single git repository.
|
||||||
"""
|
"""
|
||||||
|
@ -584,23 +661,29 @@ def _git_clone_and_install_single(repo, branch, parent_dir, update_requirements)
|
||||||
|
|
||||||
if not os.path.exists(dest_dir):
|
if not os.path.exists(dest_dir):
|
||||||
juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
|
juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
|
||||||
repo_dir = install_remote(repo, dest=parent_dir, branch=branch)
|
repo_dir = install_remote(repo, dest=parent_dir, branch=branch,
|
||||||
|
depth=depth)
|
||||||
else:
|
else:
|
||||||
repo_dir = dest_dir
|
repo_dir = dest_dir
|
||||||
|
|
||||||
|
venv = os.path.join(parent_dir, 'venv')
|
||||||
|
|
||||||
if update_requirements:
|
if update_requirements:
|
||||||
if not requirements_dir:
|
if not requirements_dir:
|
||||||
error_out('requirements repo must be cloned before '
|
error_out('requirements repo must be cloned before '
|
||||||
'updating from global requirements.')
|
'updating from global requirements.')
|
||||||
_git_update_requirements(repo_dir, requirements_dir)
|
_git_update_requirements(venv, repo_dir, requirements_dir)
|
||||||
|
|
||||||
juju_log('Installing git repo from dir: {}'.format(repo_dir))
|
juju_log('Installing git repo from dir: {}'.format(repo_dir))
|
||||||
pip_install(repo_dir)
|
if http_proxy:
|
||||||
|
pip_install(repo_dir, proxy=http_proxy, venv=venv)
|
||||||
|
else:
|
||||||
|
pip_install(repo_dir, venv=venv)
|
||||||
|
|
||||||
return repo_dir
|
return repo_dir
|
||||||
|
|
||||||
|
|
||||||
def _git_update_requirements(package_dir, reqs_dir):
|
def _git_update_requirements(venv, package_dir, reqs_dir):
|
||||||
"""
|
"""
|
||||||
Update from global requirements.
|
Update from global requirements.
|
||||||
|
|
||||||
|
@ -609,25 +692,38 @@ def _git_update_requirements(package_dir, reqs_dir):
|
||||||
"""
|
"""
|
||||||
orig_dir = os.getcwd()
|
orig_dir = os.getcwd()
|
||||||
os.chdir(reqs_dir)
|
os.chdir(reqs_dir)
|
||||||
cmd = ['python', 'update.py', package_dir]
|
python = os.path.join(venv, 'bin/python')
|
||||||
|
cmd = [python, 'update.py', package_dir]
|
||||||
try:
|
try:
|
||||||
subprocess.check_call(cmd)
|
subprocess.check_call(cmd)
|
||||||
except subprocess.CalledProcessError:
|
except subprocess.CalledProcessError:
|
||||||
package = os.path.basename(package_dir)
|
package = os.path.basename(package_dir)
|
||||||
error_out("Error updating {} from global-requirements.txt".format(package))
|
error_out("Error updating {} from "
|
||||||
|
"global-requirements.txt".format(package))
|
||||||
os.chdir(orig_dir)
|
os.chdir(orig_dir)
|
||||||
|
|
||||||
|
|
||||||
|
def git_pip_venv_dir(projects_yaml):
|
||||||
|
"""
|
||||||
|
Return the pip virtualenv path.
|
||||||
|
"""
|
||||||
|
parent_dir = '/mnt/openstack-git'
|
||||||
|
|
||||||
|
projects = _git_yaml_load(projects_yaml)
|
||||||
|
|
||||||
|
if 'directory' in projects.keys():
|
||||||
|
parent_dir = projects['directory']
|
||||||
|
|
||||||
|
return os.path.join(parent_dir, 'venv')
|
||||||
|
|
||||||
|
|
||||||
def git_src_dir(projects_yaml, project):
|
def git_src_dir(projects_yaml, project):
|
||||||
"""
|
"""
|
||||||
Return the directory where the specified project's source is located.
|
Return the directory where the specified project's source is located.
|
||||||
"""
|
"""
|
||||||
parent_dir = '/mnt/openstack-git'
|
parent_dir = '/mnt/openstack-git'
|
||||||
|
|
||||||
if not projects_yaml:
|
projects = _git_yaml_load(projects_yaml)
|
||||||
return
|
|
||||||
|
|
||||||
projects = yaml.load(projects_yaml)
|
|
||||||
|
|
||||||
if 'directory' in projects.keys():
|
if 'directory' in projects.keys():
|
||||||
parent_dir = projects['directory']
|
parent_dir = projects['directory']
|
||||||
|
@ -637,3 +733,15 @@ def git_src_dir(projects_yaml, project):
|
||||||
return os.path.join(parent_dir, os.path.basename(p['repository']))
|
return os.path.join(parent_dir, os.path.basename(p['repository']))
|
||||||
|
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def git_yaml_value(projects_yaml, key):
|
||||||
|
"""
|
||||||
|
Return the value in projects_yaml for the specified key.
|
||||||
|
"""
|
||||||
|
projects = _git_yaml_load(projects_yaml)
|
||||||
|
|
||||||
|
if key in projects.keys():
|
||||||
|
return projects[key]
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
|
@ -14,14 +14,19 @@
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import json
|
||||||
import six
|
import six
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import relation_id as current_relation_id
|
from charmhelpers.core.hookenv import relation_id as current_relation_id
|
||||||
from charmhelpers.core.hookenv import (
|
from charmhelpers.core.hookenv import (
|
||||||
is_relation_made,
|
is_relation_made,
|
||||||
relation_ids,
|
relation_ids,
|
||||||
relation_get,
|
relation_get as _relation_get,
|
||||||
local_unit,
|
local_unit,
|
||||||
relation_set,
|
relation_set as _relation_set,
|
||||||
|
leader_get as _leader_get,
|
||||||
|
leader_set,
|
||||||
|
is_leader,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@ -54,6 +59,106 @@ def some_hook():
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
def leader_get(attribute=None, rid=None):
|
||||||
|
"""Wrapper to ensure that settings are migrated from the peer relation.
|
||||||
|
|
||||||
|
This is to support upgrading an environment that does not support
|
||||||
|
Juju leadership election to one that does.
|
||||||
|
|
||||||
|
If a setting is not extant in the leader-get but is on the relation-get
|
||||||
|
peer rel, it is migrated and marked as such so that it is not re-migrated.
|
||||||
|
"""
|
||||||
|
migration_key = '__leader_get_migrated_settings__'
|
||||||
|
if not is_leader():
|
||||||
|
return _leader_get(attribute=attribute)
|
||||||
|
|
||||||
|
settings_migrated = False
|
||||||
|
leader_settings = _leader_get(attribute=attribute)
|
||||||
|
previously_migrated = _leader_get(attribute=migration_key)
|
||||||
|
|
||||||
|
if previously_migrated:
|
||||||
|
migrated = set(json.loads(previously_migrated))
|
||||||
|
else:
|
||||||
|
migrated = set([])
|
||||||
|
|
||||||
|
try:
|
||||||
|
if migration_key in leader_settings:
|
||||||
|
del leader_settings[migration_key]
|
||||||
|
except TypeError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
if attribute:
|
||||||
|
if attribute in migrated:
|
||||||
|
return leader_settings
|
||||||
|
|
||||||
|
# If attribute not present in leader db, check if this unit has set
|
||||||
|
# the attribute in the peer relation
|
||||||
|
if not leader_settings:
|
||||||
|
peer_setting = _relation_get(attribute=attribute, unit=local_unit(),
|
||||||
|
rid=rid)
|
||||||
|
if peer_setting:
|
||||||
|
leader_set(settings={attribute: peer_setting})
|
||||||
|
leader_settings = peer_setting
|
||||||
|
|
||||||
|
if leader_settings:
|
||||||
|
settings_migrated = True
|
||||||
|
migrated.add(attribute)
|
||||||
|
else:
|
||||||
|
r_settings = _relation_get(unit=local_unit(), rid=rid)
|
||||||
|
if r_settings:
|
||||||
|
for key in set(r_settings.keys()).difference(migrated):
|
||||||
|
# Leader setting wins
|
||||||
|
if not leader_settings.get(key):
|
||||||
|
leader_settings[key] = r_settings[key]
|
||||||
|
|
||||||
|
settings_migrated = True
|
||||||
|
migrated.add(key)
|
||||||
|
|
||||||
|
if settings_migrated:
|
||||||
|
leader_set(**leader_settings)
|
||||||
|
|
||||||
|
if migrated and settings_migrated:
|
||||||
|
migrated = json.dumps(list(migrated))
|
||||||
|
leader_set(settings={migration_key: migrated})
|
||||||
|
|
||||||
|
return leader_settings
|
||||||
|
|
||||||
|
|
||||||
|
def relation_set(relation_id=None, relation_settings=None, **kwargs):
|
||||||
|
"""Attempt to use leader-set if supported in the current version of Juju,
|
||||||
|
otherwise falls back on relation-set.
|
||||||
|
|
||||||
|
Note that we only attempt to use leader-set if the provided relation_id is
|
||||||
|
a peer relation id or no relation id is provided (in which case we assume
|
||||||
|
we are within the peer relation context).
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
if relation_id in relation_ids('cluster'):
|
||||||
|
return leader_set(settings=relation_settings, **kwargs)
|
||||||
|
else:
|
||||||
|
raise NotImplementedError
|
||||||
|
except NotImplementedError:
|
||||||
|
return _relation_set(relation_id=relation_id,
|
||||||
|
relation_settings=relation_settings, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
def relation_get(attribute=None, unit=None, rid=None):
|
||||||
|
"""Attempt to use leader-get if supported in the current version of Juju,
|
||||||
|
otherwise falls back on relation-get.
|
||||||
|
|
||||||
|
Note that we only attempt to use leader-get if the provided rid is a peer
|
||||||
|
relation id or no relation id is provided (in which case we assume we are
|
||||||
|
within the peer relation context).
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
if rid in relation_ids('cluster'):
|
||||||
|
return leader_get(attribute, rid)
|
||||||
|
else:
|
||||||
|
raise NotImplementedError
|
||||||
|
except NotImplementedError:
|
||||||
|
return _relation_get(attribute=attribute, rid=rid, unit=unit)
|
||||||
|
|
||||||
|
|
||||||
def peer_retrieve(key, relation_name='cluster'):
|
def peer_retrieve(key, relation_name='cluster'):
|
||||||
"""Retrieve a named key from peer relation `relation_name`."""
|
"""Retrieve a named key from peer relation `relation_name`."""
|
||||||
cluster_rels = relation_ids(relation_name)
|
cluster_rels = relation_ids(relation_name)
|
||||||
|
@ -73,6 +178,8 @@ def peer_retrieve_by_prefix(prefix, relation_name='cluster', delimiter='_',
|
||||||
exc_list = exc_list if exc_list else []
|
exc_list = exc_list if exc_list else []
|
||||||
peerdb_settings = peer_retrieve('-', relation_name=relation_name)
|
peerdb_settings = peer_retrieve('-', relation_name=relation_name)
|
||||||
matched = {}
|
matched = {}
|
||||||
|
if peerdb_settings is None:
|
||||||
|
return matched
|
||||||
for k, v in peerdb_settings.items():
|
for k, v in peerdb_settings.items():
|
||||||
full_prefix = prefix + delimiter
|
full_prefix = prefix + delimiter
|
||||||
if k.startswith(full_prefix):
|
if k.startswith(full_prefix):
|
||||||
|
@ -96,12 +203,26 @@ def peer_store(key, value, relation_name='cluster'):
|
||||||
'peer relation {}'.format(relation_name))
|
'peer relation {}'.format(relation_name))
|
||||||
|
|
||||||
|
|
||||||
def peer_echo(includes=None):
|
def peer_echo(includes=None, force=False):
|
||||||
"""Echo filtered attributes back onto the same relation for storage.
|
"""Echo filtered attributes back onto the same relation for storage.
|
||||||
|
|
||||||
This is a requirement to use the peerstorage module - it needs to be called
|
This is a requirement to use the peerstorage module - it needs to be called
|
||||||
from the peer relation's changed hook.
|
from the peer relation's changed hook.
|
||||||
|
|
||||||
|
If Juju leader support exists this will be a noop unless force is True.
|
||||||
"""
|
"""
|
||||||
|
try:
|
||||||
|
is_leader()
|
||||||
|
except NotImplementedError:
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
if not force:
|
||||||
|
return # NOOP if leader-election is supported
|
||||||
|
|
||||||
|
# Use original non-leader calls
|
||||||
|
relation_get = _relation_get
|
||||||
|
relation_set = _relation_set
|
||||||
|
|
||||||
rdata = relation_get()
|
rdata = relation_get()
|
||||||
echo_data = {}
|
echo_data = {}
|
||||||
if includes is None:
|
if includes is None:
|
||||||
|
|
|
@ -17,8 +17,11 @@
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import os
|
||||||
|
import subprocess
|
||||||
|
|
||||||
from charmhelpers.fetch import apt_install, apt_update
|
from charmhelpers.fetch import apt_install, apt_update
|
||||||
from charmhelpers.core.hookenv import log
|
from charmhelpers.core.hookenv import charm_dir, log
|
||||||
|
|
||||||
try:
|
try:
|
||||||
from pip import main as pip_execute
|
from pip import main as pip_execute
|
||||||
|
@ -33,6 +36,8 @@ __author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
|
||||||
def parse_options(given, available):
|
def parse_options(given, available):
|
||||||
"""Given a set of options, check if available"""
|
"""Given a set of options, check if available"""
|
||||||
for key, value in sorted(given.items()):
|
for key, value in sorted(given.items()):
|
||||||
|
if not value:
|
||||||
|
continue
|
||||||
if key in available:
|
if key in available:
|
||||||
yield "--{0}={1}".format(key, value)
|
yield "--{0}={1}".format(key, value)
|
||||||
|
|
||||||
|
@ -51,11 +56,15 @@ def pip_install_requirements(requirements, **options):
|
||||||
pip_execute(command)
|
pip_execute(command)
|
||||||
|
|
||||||
|
|
||||||
def pip_install(package, fatal=False, upgrade=False, **options):
|
def pip_install(package, fatal=False, upgrade=False, venv=None, **options):
|
||||||
"""Install a python package"""
|
"""Install a python package"""
|
||||||
command = ["install"]
|
if venv:
|
||||||
|
venv_python = os.path.join(venv, 'bin/pip')
|
||||||
|
command = [venv_python, "install"]
|
||||||
|
else:
|
||||||
|
command = ["install"]
|
||||||
|
|
||||||
available_options = ('proxy', 'src', 'log', "index-url", )
|
available_options = ('proxy', 'src', 'log', 'index-url', )
|
||||||
for option in parse_options(options, available_options):
|
for option in parse_options(options, available_options):
|
||||||
command.append(option)
|
command.append(option)
|
||||||
|
|
||||||
|
@ -69,7 +78,10 @@ def pip_install(package, fatal=False, upgrade=False, **options):
|
||||||
|
|
||||||
log("Installing {} package with options: {}".format(package,
|
log("Installing {} package with options: {}".format(package,
|
||||||
command))
|
command))
|
||||||
pip_execute(command)
|
if venv:
|
||||||
|
subprocess.check_call(command)
|
||||||
|
else:
|
||||||
|
pip_execute(command)
|
||||||
|
|
||||||
|
|
||||||
def pip_uninstall(package, **options):
|
def pip_uninstall(package, **options):
|
||||||
|
@ -94,3 +106,16 @@ def pip_list():
|
||||||
"""Returns the list of current python installed packages
|
"""Returns the list of current python installed packages
|
||||||
"""
|
"""
|
||||||
return pip_execute(["list"])
|
return pip_execute(["list"])
|
||||||
|
|
||||||
|
|
||||||
|
def pip_create_virtualenv(path=None):
|
||||||
|
"""Create an isolated Python environment."""
|
||||||
|
apt_install('python-virtualenv')
|
||||||
|
|
||||||
|
if path:
|
||||||
|
venv_path = path
|
||||||
|
else:
|
||||||
|
venv_path = os.path.join(charm_dir(), 'venv')
|
||||||
|
|
||||||
|
if not os.path.exists(venv_path):
|
||||||
|
subprocess.check_call(['virtualenv', venv_path])
|
||||||
|
|
|
@ -14,16 +14,12 @@
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
import logging
|
|
||||||
import os
|
import os
|
||||||
from os.path import join as path_join
|
from os.path import join as path_join
|
||||||
from os.path import exists
|
from os.path import exists
|
||||||
import subprocess
|
import subprocess
|
||||||
|
|
||||||
|
from charmhelpers.core.hookenv import log, DEBUG
|
||||||
log = logging.getLogger("service_ca")
|
|
||||||
|
|
||||||
logging.basicConfig(level=logging.DEBUG)
|
|
||||||
|
|
||||||
STD_CERT = "standard"
|
STD_CERT = "standard"
|
||||||
|
|
||||||
|
@ -62,7 +58,7 @@ class ServiceCA(object):
|
||||||
###############
|
###############
|
||||||
|
|
||||||
def init(self):
|
def init(self):
|
||||||
log.debug("initializing service ca")
|
log("initializing service ca", level=DEBUG)
|
||||||
if not exists(self.ca_dir):
|
if not exists(self.ca_dir):
|
||||||
self._init_ca_dir(self.ca_dir)
|
self._init_ca_dir(self.ca_dir)
|
||||||
self._init_ca()
|
self._init_ca()
|
||||||
|
@ -119,7 +115,7 @@ class ServiceCA(object):
|
||||||
'-keyout', self.ca_key, '-out', self.ca_cert,
|
'-keyout', self.ca_key, '-out', self.ca_cert,
|
||||||
'-outform', 'PEM']
|
'-outform', 'PEM']
|
||||||
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
|
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
|
||||||
log.debug("CA Init:\n %s", output)
|
log("CA Init:\n %s" % output, level=DEBUG)
|
||||||
|
|
||||||
def get_conf_variables(self):
|
def get_conf_variables(self):
|
||||||
return dict(
|
return dict(
|
||||||
|
@ -163,15 +159,15 @@ class ServiceCA(object):
|
||||||
subj = '/O=%(org_name)s/OU=%(org_unit_name)s/CN=%(common_name)s' % (
|
subj = '/O=%(org_name)s/OU=%(org_unit_name)s/CN=%(common_name)s' % (
|
||||||
template_vars)
|
template_vars)
|
||||||
|
|
||||||
log.debug("CA Create Cert %s", common_name)
|
log("CA Create Cert %s" % common_name, level=DEBUG)
|
||||||
cmd = ['openssl', 'req', '-sha1', '-newkey', 'rsa:2048',
|
cmd = ['openssl', 'req', '-sha1', '-newkey', 'rsa:2048',
|
||||||
'-nodes', '-days', self.default_expiry,
|
'-nodes', '-days', self.default_expiry,
|
||||||
'-keyout', key_p, '-out', csr_p, '-subj', subj]
|
'-keyout', key_p, '-out', csr_p, '-subj', subj]
|
||||||
subprocess.check_call(cmd)
|
subprocess.check_call(cmd, stderr=subprocess.PIPE)
|
||||||
cmd = ['openssl', 'rsa', '-in', key_p, '-out', key_p]
|
cmd = ['openssl', 'rsa', '-in', key_p, '-out', key_p]
|
||||||
subprocess.check_call(cmd)
|
subprocess.check_call(cmd, stderr=subprocess.PIPE)
|
||||||
|
|
||||||
log.debug("CA Sign Cert %s", common_name)
|
log("CA Sign Cert %s" % common_name, level=DEBUG)
|
||||||
if self.cert_type == MYSQL_CERT:
|
if self.cert_type == MYSQL_CERT:
|
||||||
cmd = ['openssl', 'x509', '-req',
|
cmd = ['openssl', 'x509', '-req',
|
||||||
'-in', csr_p, '-days', self.default_expiry,
|
'-in', csr_p, '-days', self.default_expiry,
|
||||||
|
@ -182,8 +178,8 @@ class ServiceCA(object):
|
||||||
'-extensions', 'req_extensions',
|
'-extensions', 'req_extensions',
|
||||||
'-days', self.default_expiry, '-notext',
|
'-days', self.default_expiry, '-notext',
|
||||||
'-in', csr_p, '-out', crt_p, '-subj', subj, '-batch']
|
'-in', csr_p, '-out', crt_p, '-subj', subj, '-batch']
|
||||||
log.debug("running %s", " ".join(cmd))
|
log("running %s" % " ".join(cmd), level=DEBUG)
|
||||||
subprocess.check_call(cmd)
|
subprocess.check_call(cmd, stderr=subprocess.PIPE)
|
||||||
|
|
||||||
def get_ca_bundle(self):
|
def get_ca_bundle(self):
|
||||||
with open(self.ca_cert) as fh:
|
with open(self.ca_cert) as fh:
|
||||||
|
|
|
@ -60,12 +60,12 @@ KEYRING = '/etc/ceph/ceph.client.{}.keyring'
|
||||||
KEYFILE = '/etc/ceph/ceph.client.{}.key'
|
KEYFILE = '/etc/ceph/ceph.client.{}.key'
|
||||||
|
|
||||||
CEPH_CONF = """[global]
|
CEPH_CONF = """[global]
|
||||||
auth supported = {auth}
|
auth supported = {auth}
|
||||||
keyring = {keyring}
|
keyring = {keyring}
|
||||||
mon host = {mon_hosts}
|
mon host = {mon_hosts}
|
||||||
log to syslog = {use_syslog}
|
log to syslog = {use_syslog}
|
||||||
err to syslog = {use_syslog}
|
err to syslog = {use_syslog}
|
||||||
clog to syslog = {use_syslog}
|
clog to syslog = {use_syslog}
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -43,9 +43,10 @@ def zap_disk(block_device):
|
||||||
|
|
||||||
:param block_device: str: Full path of block device to clean.
|
:param block_device: str: Full path of block device to clean.
|
||||||
'''
|
'''
|
||||||
|
# https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b
|
||||||
# sometimes sgdisk exits non-zero; this is OK, dd will clean up
|
# sometimes sgdisk exits non-zero; this is OK, dd will clean up
|
||||||
call(['sgdisk', '--zap-all', '--mbrtogpt',
|
call(['sgdisk', '--zap-all', '--', block_device])
|
||||||
'--clear', block_device])
|
call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device])
|
||||||
dev_end = check_output(['blockdev', '--getsz',
|
dev_end = check_output(['blockdev', '--getsz',
|
||||||
block_device]).decode('UTF-8')
|
block_device]).decode('UTF-8')
|
||||||
gpt_end = int(dev_end.split()[0]) - 100
|
gpt_end = int(dev_end.split()[0]) - 100
|
||||||
|
@ -67,4 +68,4 @@ def is_device_mounted(device):
|
||||||
out = check_output(['mount']).decode('UTF-8')
|
out = check_output(['mount']).decode('UTF-8')
|
||||||
if is_partition:
|
if is_partition:
|
||||||
return bool(re.search(device + r"\b", out))
|
return bool(re.search(device + r"\b", out))
|
||||||
return bool(re.search(device + r"[0-9]+\b", out))
|
return bool(re.search(device + r"[0-9]*\b", out))
|
||||||
|
|
|
@ -0,0 +1,45 @@
|
||||||
|
#!/usr/bin/env python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
|
||||||
|
|
||||||
|
import os
|
||||||
|
import subprocess
|
||||||
|
|
||||||
|
|
||||||
|
def sed(filename, before, after, flags='g'):
|
||||||
|
"""
|
||||||
|
Search and replaces the given pattern on filename.
|
||||||
|
|
||||||
|
:param filename: relative or absolute file path.
|
||||||
|
:param before: expression to be replaced (see 'man sed')
|
||||||
|
:param after: expression to replace with (see 'man sed')
|
||||||
|
:param flags: sed-compatible regex flags in example, to make
|
||||||
|
the search and replace case insensitive, specify ``flags="i"``.
|
||||||
|
The ``g`` flag is always specified regardless, so you do not
|
||||||
|
need to remember to include it when overriding this parameter.
|
||||||
|
:returns: If the sed command exit code was zero then return,
|
||||||
|
otherwise raise CalledProcessError.
|
||||||
|
"""
|
||||||
|
expression = r's/{0}/{1}/{2}'.format(before,
|
||||||
|
after, flags)
|
||||||
|
|
||||||
|
return subprocess.check_call(["sed", "-i", "-r", "-e",
|
||||||
|
expression,
|
||||||
|
os.path.expanduser(filename)])
|
|
@ -21,12 +21,17 @@
|
||||||
# Charm Helpers Developers <juju@lists.ubuntu.com>
|
# Charm Helpers Developers <juju@lists.ubuntu.com>
|
||||||
|
|
||||||
from __future__ import print_function
|
from __future__ import print_function
|
||||||
|
import copy
|
||||||
|
from distutils.version import LooseVersion
|
||||||
|
from functools import wraps
|
||||||
|
import glob
|
||||||
import os
|
import os
|
||||||
import json
|
import json
|
||||||
import yaml
|
import yaml
|
||||||
import subprocess
|
import subprocess
|
||||||
import sys
|
import sys
|
||||||
import errno
|
import errno
|
||||||
|
import tempfile
|
||||||
from subprocess import CalledProcessError
|
from subprocess import CalledProcessError
|
||||||
|
|
||||||
import six
|
import six
|
||||||
|
@ -58,15 +63,18 @@ def cached(func):
|
||||||
|
|
||||||
will cache the result of unit_get + 'test' for future calls.
|
will cache the result of unit_get + 'test' for future calls.
|
||||||
"""
|
"""
|
||||||
|
@wraps(func)
|
||||||
def wrapper(*args, **kwargs):
|
def wrapper(*args, **kwargs):
|
||||||
global cache
|
global cache
|
||||||
key = str((func, args, kwargs))
|
key = str((func, args, kwargs))
|
||||||
try:
|
try:
|
||||||
return cache[key]
|
return cache[key]
|
||||||
except KeyError:
|
except KeyError:
|
||||||
res = func(*args, **kwargs)
|
pass # Drop out of the exception handler scope.
|
||||||
cache[key] = res
|
res = func(*args, **kwargs)
|
||||||
return res
|
cache[key] = res
|
||||||
|
return res
|
||||||
|
wrapper._wrapped = func
|
||||||
return wrapper
|
return wrapper
|
||||||
|
|
||||||
|
|
||||||
|
@ -166,9 +174,19 @@ def relation_type():
|
||||||
return os.environ.get('JUJU_RELATION', None)
|
return os.environ.get('JUJU_RELATION', None)
|
||||||
|
|
||||||
|
|
||||||
def relation_id():
|
@cached
|
||||||
"""The relation ID for the current relation hook"""
|
def relation_id(relation_name=None, service_or_unit=None):
|
||||||
return os.environ.get('JUJU_RELATION_ID', None)
|
"""The relation ID for the current or a specified relation"""
|
||||||
|
if not relation_name and not service_or_unit:
|
||||||
|
return os.environ.get('JUJU_RELATION_ID', None)
|
||||||
|
elif relation_name and service_or_unit:
|
||||||
|
service_name = service_or_unit.split('/')[0]
|
||||||
|
for relid in relation_ids(relation_name):
|
||||||
|
remote_service = remote_service_name(relid)
|
||||||
|
if remote_service == service_name:
|
||||||
|
return relid
|
||||||
|
else:
|
||||||
|
raise ValueError('Must specify neither or both of relation_name and service_or_unit')
|
||||||
|
|
||||||
|
|
||||||
def local_unit():
|
def local_unit():
|
||||||
|
@ -178,7 +196,7 @@ def local_unit():
|
||||||
|
|
||||||
def remote_unit():
|
def remote_unit():
|
||||||
"""The remote unit for the current relation hook"""
|
"""The remote unit for the current relation hook"""
|
||||||
return os.environ['JUJU_REMOTE_UNIT']
|
return os.environ.get('JUJU_REMOTE_UNIT', None)
|
||||||
|
|
||||||
|
|
||||||
def service_name():
|
def service_name():
|
||||||
|
@ -186,9 +204,20 @@ def service_name():
|
||||||
return local_unit().split('/')[0]
|
return local_unit().split('/')[0]
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def remote_service_name(relid=None):
|
||||||
|
"""The remote service name for a given relation-id (or the current relation)"""
|
||||||
|
if relid is None:
|
||||||
|
unit = remote_unit()
|
||||||
|
else:
|
||||||
|
units = related_units(relid)
|
||||||
|
unit = units[0] if units else None
|
||||||
|
return unit.split('/')[0] if unit else None
|
||||||
|
|
||||||
|
|
||||||
def hook_name():
|
def hook_name():
|
||||||
"""The name of the currently executing hook"""
|
"""The name of the currently executing hook"""
|
||||||
return os.path.basename(sys.argv[0])
|
return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0]))
|
||||||
|
|
||||||
|
|
||||||
class Config(dict):
|
class Config(dict):
|
||||||
|
@ -238,23 +267,7 @@ class Config(dict):
|
||||||
self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
|
self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
|
||||||
if os.path.exists(self.path):
|
if os.path.exists(self.path):
|
||||||
self.load_previous()
|
self.load_previous()
|
||||||
|
atexit(self._implicit_save)
|
||||||
def __getitem__(self, key):
|
|
||||||
"""For regular dict lookups, check the current juju config first,
|
|
||||||
then the previous (saved) copy. This ensures that user-saved values
|
|
||||||
will be returned by a dict lookup.
|
|
||||||
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
return dict.__getitem__(self, key)
|
|
||||||
except KeyError:
|
|
||||||
return (self._prev_dict or {})[key]
|
|
||||||
|
|
||||||
def keys(self):
|
|
||||||
prev_keys = []
|
|
||||||
if self._prev_dict is not None:
|
|
||||||
prev_keys = self._prev_dict.keys()
|
|
||||||
return list(set(prev_keys + list(dict.keys(self))))
|
|
||||||
|
|
||||||
def load_previous(self, path=None):
|
def load_previous(self, path=None):
|
||||||
"""Load previous copy of config from disk.
|
"""Load previous copy of config from disk.
|
||||||
|
@ -273,6 +286,9 @@ class Config(dict):
|
||||||
self.path = path or self.path
|
self.path = path or self.path
|
||||||
with open(self.path) as f:
|
with open(self.path) as f:
|
||||||
self._prev_dict = json.load(f)
|
self._prev_dict = json.load(f)
|
||||||
|
for k, v in copy.deepcopy(self._prev_dict).items():
|
||||||
|
if k not in self:
|
||||||
|
self[k] = v
|
||||||
|
|
||||||
def changed(self, key):
|
def changed(self, key):
|
||||||
"""Return True if the current value for this key is different from
|
"""Return True if the current value for this key is different from
|
||||||
|
@ -304,13 +320,13 @@ class Config(dict):
|
||||||
instance.
|
instance.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
if self._prev_dict:
|
|
||||||
for k, v in six.iteritems(self._prev_dict):
|
|
||||||
if k not in self:
|
|
||||||
self[k] = v
|
|
||||||
with open(self.path, 'w') as f:
|
with open(self.path, 'w') as f:
|
||||||
json.dump(self, f)
|
json.dump(self, f)
|
||||||
|
|
||||||
|
def _implicit_save(self):
|
||||||
|
if self.implicit_save:
|
||||||
|
self.save()
|
||||||
|
|
||||||
|
|
||||||
@cached
|
@cached
|
||||||
def config(scope=None):
|
def config(scope=None):
|
||||||
|
@ -353,18 +369,49 @@ def relation_set(relation_id=None, relation_settings=None, **kwargs):
|
||||||
"""Set relation information for the current unit"""
|
"""Set relation information for the current unit"""
|
||||||
relation_settings = relation_settings if relation_settings else {}
|
relation_settings = relation_settings if relation_settings else {}
|
||||||
relation_cmd_line = ['relation-set']
|
relation_cmd_line = ['relation-set']
|
||||||
|
accepts_file = "--file" in subprocess.check_output(
|
||||||
|
relation_cmd_line + ["--help"], universal_newlines=True)
|
||||||
if relation_id is not None:
|
if relation_id is not None:
|
||||||
relation_cmd_line.extend(('-r', relation_id))
|
relation_cmd_line.extend(('-r', relation_id))
|
||||||
for k, v in (list(relation_settings.items()) + list(kwargs.items())):
|
settings = relation_settings.copy()
|
||||||
if v is None:
|
settings.update(kwargs)
|
||||||
relation_cmd_line.append('{}='.format(k))
|
for key, value in settings.items():
|
||||||
else:
|
# Force value to be a string: it always should, but some call
|
||||||
relation_cmd_line.append('{}={}'.format(k, v))
|
# sites pass in things like dicts or numbers.
|
||||||
subprocess.check_call(relation_cmd_line)
|
if value is not None:
|
||||||
|
settings[key] = "{}".format(value)
|
||||||
|
if accepts_file:
|
||||||
|
# --file was introduced in Juju 1.23.2. Use it by default if
|
||||||
|
# available, since otherwise we'll break if the relation data is
|
||||||
|
# too big. Ideally we should tell relation-set to read the data from
|
||||||
|
# stdin, but that feature is broken in 1.23.2: Bug #1454678.
|
||||||
|
with tempfile.NamedTemporaryFile(delete=False) as settings_file:
|
||||||
|
settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
|
||||||
|
subprocess.check_call(
|
||||||
|
relation_cmd_line + ["--file", settings_file.name])
|
||||||
|
os.remove(settings_file.name)
|
||||||
|
else:
|
||||||
|
for key, value in settings.items():
|
||||||
|
if value is None:
|
||||||
|
relation_cmd_line.append('{}='.format(key))
|
||||||
|
else:
|
||||||
|
relation_cmd_line.append('{}={}'.format(key, value))
|
||||||
|
subprocess.check_call(relation_cmd_line)
|
||||||
# Flush cache of any relation-gets for local unit
|
# Flush cache of any relation-gets for local unit
|
||||||
flush(local_unit())
|
flush(local_unit())
|
||||||
|
|
||||||
|
|
||||||
|
def relation_clear(r_id=None):
|
||||||
|
''' Clears any relation data already set on relation r_id '''
|
||||||
|
settings = relation_get(rid=r_id,
|
||||||
|
unit=local_unit())
|
||||||
|
for setting in settings:
|
||||||
|
if setting not in ['public-address', 'private-address']:
|
||||||
|
settings[setting] = None
|
||||||
|
relation_set(relation_id=r_id,
|
||||||
|
**settings)
|
||||||
|
|
||||||
|
|
||||||
@cached
|
@cached
|
||||||
def relation_ids(reltype=None):
|
def relation_ids(reltype=None):
|
||||||
"""A list of relation_ids"""
|
"""A list of relation_ids"""
|
||||||
|
@ -443,6 +490,63 @@ def relation_types():
|
||||||
return rel_types
|
return rel_types
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def relation_to_interface(relation_name):
|
||||||
|
"""
|
||||||
|
Given the name of a relation, return the interface that relation uses.
|
||||||
|
|
||||||
|
:returns: The interface name, or ``None``.
|
||||||
|
"""
|
||||||
|
return relation_to_role_and_interface(relation_name)[1]
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def relation_to_role_and_interface(relation_name):
|
||||||
|
"""
|
||||||
|
Given the name of a relation, return the role and the name of the interface
|
||||||
|
that relation uses (where role is one of ``provides``, ``requires``, or ``peer``).
|
||||||
|
|
||||||
|
:returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
|
||||||
|
"""
|
||||||
|
_metadata = metadata()
|
||||||
|
for role in ('provides', 'requires', 'peer'):
|
||||||
|
interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
|
||||||
|
if interface:
|
||||||
|
return role, interface
|
||||||
|
return None, None
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def role_and_interface_to_relations(role, interface_name):
|
||||||
|
"""
|
||||||
|
Given a role and interface name, return a list of relation names for the
|
||||||
|
current charm that use that interface under that role (where role is one
|
||||||
|
of ``provides``, ``requires``, or ``peer``).
|
||||||
|
|
||||||
|
:returns: A list of relation names.
|
||||||
|
"""
|
||||||
|
_metadata = metadata()
|
||||||
|
results = []
|
||||||
|
for relation_name, relation in _metadata.get(role, {}).items():
|
||||||
|
if relation['interface'] == interface_name:
|
||||||
|
results.append(relation_name)
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def interface_to_relations(interface_name):
|
||||||
|
"""
|
||||||
|
Given an interface, return a list of relation names for the current
|
||||||
|
charm that use that interface.
|
||||||
|
|
||||||
|
:returns: A list of relation names.
|
||||||
|
"""
|
||||||
|
results = []
|
||||||
|
for role in ('provides', 'requires', 'peer'):
|
||||||
|
results.extend(role_and_interface_to_relations(role, interface_name))
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
@cached
|
@cached
|
||||||
def charm_name():
|
def charm_name():
|
||||||
"""Get the name of the current charm as is specified on metadata.yaml"""
|
"""Get the name of the current charm as is specified on metadata.yaml"""
|
||||||
|
@ -509,6 +613,11 @@ def unit_get(attribute):
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def unit_public_ip():
|
||||||
|
"""Get this unit's public IP address"""
|
||||||
|
return unit_get('public-address')
|
||||||
|
|
||||||
|
|
||||||
def unit_private_ip():
|
def unit_private_ip():
|
||||||
"""Get this unit's private IP address"""
|
"""Get this unit's private IP address"""
|
||||||
return unit_get('private-address')
|
return unit_get('private-address')
|
||||||
|
@ -541,10 +650,14 @@ class Hooks(object):
|
||||||
hooks.execute(sys.argv)
|
hooks.execute(sys.argv)
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, config_save=True):
|
def __init__(self, config_save=None):
|
||||||
super(Hooks, self).__init__()
|
super(Hooks, self).__init__()
|
||||||
self._hooks = {}
|
self._hooks = {}
|
||||||
self._config_save = config_save
|
|
||||||
|
# For unknown reasons, we allow the Hooks constructor to override
|
||||||
|
# config().implicit_save.
|
||||||
|
if config_save is not None:
|
||||||
|
config().implicit_save = config_save
|
||||||
|
|
||||||
def register(self, name, function):
|
def register(self, name, function):
|
||||||
"""Register a hook"""
|
"""Register a hook"""
|
||||||
|
@ -552,13 +665,16 @@ class Hooks(object):
|
||||||
|
|
||||||
def execute(self, args):
|
def execute(self, args):
|
||||||
"""Execute a registered hook based on args[0]"""
|
"""Execute a registered hook based on args[0]"""
|
||||||
|
_run_atstart()
|
||||||
hook_name = os.path.basename(args[0])
|
hook_name = os.path.basename(args[0])
|
||||||
if hook_name in self._hooks:
|
if hook_name in self._hooks:
|
||||||
self._hooks[hook_name]()
|
try:
|
||||||
if self._config_save:
|
self._hooks[hook_name]()
|
||||||
cfg = config()
|
except SystemExit as x:
|
||||||
if cfg.implicit_save:
|
if x.code is None or x.code == 0:
|
||||||
cfg.save()
|
_run_atexit()
|
||||||
|
raise
|
||||||
|
_run_atexit()
|
||||||
else:
|
else:
|
||||||
raise UnregisteredHookError(hook_name)
|
raise UnregisteredHookError(hook_name)
|
||||||
|
|
||||||
|
@ -605,3 +721,178 @@ def action_fail(message):
|
||||||
|
|
||||||
The results set by action_set are preserved."""
|
The results set by action_set are preserved."""
|
||||||
subprocess.check_call(['action-fail', message])
|
subprocess.check_call(['action-fail', message])
|
||||||
|
|
||||||
|
|
||||||
|
def action_name():
|
||||||
|
"""Get the name of the currently executing action."""
|
||||||
|
return os.environ.get('JUJU_ACTION_NAME')
|
||||||
|
|
||||||
|
|
||||||
|
def action_uuid():
|
||||||
|
"""Get the UUID of the currently executing action."""
|
||||||
|
return os.environ.get('JUJU_ACTION_UUID')
|
||||||
|
|
||||||
|
|
||||||
|
def action_tag():
|
||||||
|
"""Get the tag for the currently executing action."""
|
||||||
|
return os.environ.get('JUJU_ACTION_TAG')
|
||||||
|
|
||||||
|
|
||||||
|
def status_set(workload_state, message):
|
||||||
|
"""Set the workload state with a message
|
||||||
|
|
||||||
|
Use status-set to set the workload state with a message which is visible
|
||||||
|
to the user via juju status. If the status-set command is not found then
|
||||||
|
assume this is juju < 1.23 and juju-log the message unstead.
|
||||||
|
|
||||||
|
workload_state -- valid juju workload state.
|
||||||
|
message -- status update message
|
||||||
|
"""
|
||||||
|
valid_states = ['maintenance', 'blocked', 'waiting', 'active']
|
||||||
|
if workload_state not in valid_states:
|
||||||
|
raise ValueError(
|
||||||
|
'{!r} is not a valid workload state'.format(workload_state)
|
||||||
|
)
|
||||||
|
cmd = ['status-set', workload_state, message]
|
||||||
|
try:
|
||||||
|
ret = subprocess.call(cmd)
|
||||||
|
if ret == 0:
|
||||||
|
return
|
||||||
|
except OSError as e:
|
||||||
|
if e.errno != errno.ENOENT:
|
||||||
|
raise
|
||||||
|
log_message = 'status-set failed: {} {}'.format(workload_state,
|
||||||
|
message)
|
||||||
|
log(log_message, level='INFO')
|
||||||
|
|
||||||
|
|
||||||
|
def status_get():
|
||||||
|
"""Retrieve the previously set juju workload state and message
|
||||||
|
|
||||||
|
If the status-get command is not found then assume this is juju < 1.23 and
|
||||||
|
return 'unknown', ""
|
||||||
|
|
||||||
|
"""
|
||||||
|
cmd = ['status-get', "--format=json", "--include-data"]
|
||||||
|
try:
|
||||||
|
raw_status = subprocess.check_output(cmd)
|
||||||
|
except OSError as e:
|
||||||
|
if e.errno == errno.ENOENT:
|
||||||
|
return ('unknown', "")
|
||||||
|
else:
|
||||||
|
raise
|
||||||
|
else:
|
||||||
|
status = json.loads(raw_status.decode("UTF-8"))
|
||||||
|
return (status["status"], status["message"])
|
||||||
|
|
||||||
|
|
||||||
|
def translate_exc(from_exc, to_exc):
|
||||||
|
def inner_translate_exc1(f):
|
||||||
|
def inner_translate_exc2(*args, **kwargs):
|
||||||
|
try:
|
||||||
|
return f(*args, **kwargs)
|
||||||
|
except from_exc:
|
||||||
|
raise to_exc
|
||||||
|
|
||||||
|
return inner_translate_exc2
|
||||||
|
|
||||||
|
return inner_translate_exc1
|
||||||
|
|
||||||
|
|
||||||
|
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
|
||||||
|
def is_leader():
|
||||||
|
"""Does the current unit hold the juju leadership
|
||||||
|
|
||||||
|
Uses juju to determine whether the current unit is the leader of its peers
|
||||||
|
"""
|
||||||
|
cmd = ['is-leader', '--format=json']
|
||||||
|
return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
|
||||||
|
|
||||||
|
|
||||||
|
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
|
||||||
|
def leader_get(attribute=None):
|
||||||
|
"""Juju leader get value(s)"""
|
||||||
|
cmd = ['leader-get', '--format=json'] + [attribute or '-']
|
||||||
|
return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
|
||||||
|
|
||||||
|
|
||||||
|
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
|
||||||
|
def leader_set(settings=None, **kwargs):
|
||||||
|
"""Juju leader set value(s)"""
|
||||||
|
# Don't log secrets.
|
||||||
|
# log("Juju leader-set '%s'" % (settings), level=DEBUG)
|
||||||
|
cmd = ['leader-set']
|
||||||
|
settings = settings or {}
|
||||||
|
settings.update(kwargs)
|
||||||
|
for k, v in settings.items():
|
||||||
|
if v is None:
|
||||||
|
cmd.append('{}='.format(k))
|
||||||
|
else:
|
||||||
|
cmd.append('{}={}'.format(k, v))
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def juju_version():
|
||||||
|
"""Full version string (eg. '1.23.3.1-trusty-amd64')"""
|
||||||
|
# Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
|
||||||
|
jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
|
||||||
|
return subprocess.check_output([jujud, 'version'],
|
||||||
|
universal_newlines=True).strip()
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def has_juju_version(minimum_version):
|
||||||
|
"""Return True if the Juju version is at least the provided version"""
|
||||||
|
return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
|
||||||
|
|
||||||
|
|
||||||
|
_atexit = []
|
||||||
|
_atstart = []
|
||||||
|
|
||||||
|
|
||||||
|
def atstart(callback, *args, **kwargs):
|
||||||
|
'''Schedule a callback to run before the main hook.
|
||||||
|
|
||||||
|
Callbacks are run in the order they were added.
|
||||||
|
|
||||||
|
This is useful for modules and classes to perform initialization
|
||||||
|
and inject behavior. In particular:
|
||||||
|
|
||||||
|
- Run common code before all of your hooks, such as logging
|
||||||
|
the hook name or interesting relation data.
|
||||||
|
- Defer object or module initialization that requires a hook
|
||||||
|
context until we know there actually is a hook context,
|
||||||
|
making testing easier.
|
||||||
|
- Rather than requiring charm authors to include boilerplate to
|
||||||
|
invoke your helper's behavior, have it run automatically if
|
||||||
|
your object is instantiated or module imported.
|
||||||
|
|
||||||
|
This is not at all useful after your hook framework as been launched.
|
||||||
|
'''
|
||||||
|
global _atstart
|
||||||
|
_atstart.append((callback, args, kwargs))
|
||||||
|
|
||||||
|
|
||||||
|
def atexit(callback, *args, **kwargs):
|
||||||
|
'''Schedule a callback to run on successful hook completion.
|
||||||
|
|
||||||
|
Callbacks are run in the reverse order that they were added.'''
|
||||||
|
_atexit.append((callback, args, kwargs))
|
||||||
|
|
||||||
|
|
||||||
|
def _run_atstart():
|
||||||
|
'''Hook frameworks must invoke this before running the main hook body.'''
|
||||||
|
global _atstart
|
||||||
|
for callback, args, kwargs in _atstart:
|
||||||
|
callback(*args, **kwargs)
|
||||||
|
del _atstart[:]
|
||||||
|
|
||||||
|
|
||||||
|
def _run_atexit():
|
||||||
|
'''Hook frameworks must invoke this after the main hook body has
|
||||||
|
successfully completed. Do not invoke it if the hook fails.'''
|
||||||
|
global _atexit
|
||||||
|
for callback, args, kwargs in reversed(_atexit):
|
||||||
|
callback(*args, **kwargs)
|
||||||
|
del _atexit[:]
|
||||||
|
|
|
@ -24,6 +24,7 @@
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
import pwd
|
import pwd
|
||||||
|
import glob
|
||||||
import grp
|
import grp
|
||||||
import random
|
import random
|
||||||
import string
|
import string
|
||||||
|
@ -62,6 +63,36 @@ def service_reload(service_name, restart_on_failure=False):
|
||||||
return service_result
|
return service_result
|
||||||
|
|
||||||
|
|
||||||
|
def service_pause(service_name, init_dir=None):
|
||||||
|
"""Pause a system service.
|
||||||
|
|
||||||
|
Stop it, and prevent it from starting again at boot."""
|
||||||
|
if init_dir is None:
|
||||||
|
init_dir = "/etc/init"
|
||||||
|
stopped = service_stop(service_name)
|
||||||
|
# XXX: Support systemd too
|
||||||
|
override_path = os.path.join(
|
||||||
|
init_dir, '{}.override'.format(service_name))
|
||||||
|
with open(override_path, 'w') as fh:
|
||||||
|
fh.write("manual\n")
|
||||||
|
return stopped
|
||||||
|
|
||||||
|
|
||||||
|
def service_resume(service_name, init_dir=None):
|
||||||
|
"""Resume a system service.
|
||||||
|
|
||||||
|
Reenable starting again at boot. Start the service"""
|
||||||
|
# XXX: Support systemd too
|
||||||
|
if init_dir is None:
|
||||||
|
init_dir = "/etc/init"
|
||||||
|
override_path = os.path.join(
|
||||||
|
init_dir, '{}.override'.format(service_name))
|
||||||
|
if os.path.exists(override_path):
|
||||||
|
os.unlink(override_path)
|
||||||
|
started = service_start(service_name)
|
||||||
|
return started
|
||||||
|
|
||||||
|
|
||||||
def service(action, service_name):
|
def service(action, service_name):
|
||||||
"""Control a system service"""
|
"""Control a system service"""
|
||||||
cmd = ['service', service_name, action]
|
cmd = ['service', service_name, action]
|
||||||
|
@ -90,7 +121,7 @@ def service_available(service_name):
|
||||||
['service', service_name, 'status'],
|
['service', service_name, 'status'],
|
||||||
stderr=subprocess.STDOUT).decode('UTF-8')
|
stderr=subprocess.STDOUT).decode('UTF-8')
|
||||||
except subprocess.CalledProcessError as e:
|
except subprocess.CalledProcessError as e:
|
||||||
return 'unrecognized service' not in e.output
|
return b'unrecognized service' not in e.output
|
||||||
else:
|
else:
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
@ -117,6 +148,16 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False):
|
||||||
return user_info
|
return user_info
|
||||||
|
|
||||||
|
|
||||||
|
def user_exists(username):
|
||||||
|
"""Check if a user exists"""
|
||||||
|
try:
|
||||||
|
pwd.getpwnam(username)
|
||||||
|
user_exists = True
|
||||||
|
except KeyError:
|
||||||
|
user_exists = False
|
||||||
|
return user_exists
|
||||||
|
|
||||||
|
|
||||||
def add_group(group_name, system_group=False):
|
def add_group(group_name, system_group=False):
|
||||||
"""Add a group to the system"""
|
"""Add a group to the system"""
|
||||||
try:
|
try:
|
||||||
|
@ -139,11 +180,7 @@ def add_group(group_name, system_group=False):
|
||||||
|
|
||||||
def add_user_to_group(username, group):
|
def add_user_to_group(username, group):
|
||||||
"""Add a user to a group"""
|
"""Add a user to a group"""
|
||||||
cmd = [
|
cmd = ['gpasswd', '-a', username, group]
|
||||||
'gpasswd', '-a',
|
|
||||||
username,
|
|
||||||
group
|
|
||||||
]
|
|
||||||
log("Adding user {} to group {}".format(username, group))
|
log("Adding user {} to group {}".format(username, group))
|
||||||
subprocess.check_call(cmd)
|
subprocess.check_call(cmd)
|
||||||
|
|
||||||
|
@ -253,6 +290,17 @@ def mounts():
|
||||||
return system_mounts
|
return system_mounts
|
||||||
|
|
||||||
|
|
||||||
|
def fstab_mount(mountpoint):
|
||||||
|
"""Mount filesystem using fstab"""
|
||||||
|
cmd_args = ['mount', mountpoint]
|
||||||
|
try:
|
||||||
|
subprocess.check_output(cmd_args)
|
||||||
|
except subprocess.CalledProcessError as e:
|
||||||
|
log('Error unmounting {}\n{}'.format(mountpoint, e.output))
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
def file_hash(path, hash_type='md5'):
|
def file_hash(path, hash_type='md5'):
|
||||||
"""
|
"""
|
||||||
Generate a hash checksum of the contents of 'path' or None if not found.
|
Generate a hash checksum of the contents of 'path' or None if not found.
|
||||||
|
@ -269,6 +317,21 @@ def file_hash(path, hash_type='md5'):
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def path_hash(path):
|
||||||
|
"""
|
||||||
|
Generate a hash checksum of all files matching 'path'. Standard wildcards
|
||||||
|
like '*' and '?' are supported, see documentation for the 'glob' module for
|
||||||
|
more information.
|
||||||
|
|
||||||
|
:return: dict: A { filename: hash } dictionary for all matched files.
|
||||||
|
Empty if none found.
|
||||||
|
"""
|
||||||
|
return {
|
||||||
|
filename: file_hash(filename)
|
||||||
|
for filename in glob.iglob(path)
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
def check_hash(path, checksum, hash_type='md5'):
|
def check_hash(path, checksum, hash_type='md5'):
|
||||||
"""
|
"""
|
||||||
Validate a file using a cryptographic checksum.
|
Validate a file using a cryptographic checksum.
|
||||||
|
@ -296,23 +359,25 @@ def restart_on_change(restart_map, stopstart=False):
|
||||||
|
|
||||||
@restart_on_change({
|
@restart_on_change({
|
||||||
'/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
|
'/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
|
||||||
|
'/etc/apache/sites-enabled/*': [ 'apache2' ]
|
||||||
})
|
})
|
||||||
def ceph_client_changed():
|
def config_changed():
|
||||||
pass # your code here
|
pass # your code here
|
||||||
|
|
||||||
In this example, the cinder-api and cinder-volume services
|
In this example, the cinder-api and cinder-volume services
|
||||||
would be restarted if /etc/ceph/ceph.conf is changed by the
|
would be restarted if /etc/ceph/ceph.conf is changed by the
|
||||||
ceph_client_changed function.
|
ceph_client_changed function. The apache2 service would be
|
||||||
|
restarted if any file matching the pattern got changed, created
|
||||||
|
or removed. Standard wildcards are supported, see documentation
|
||||||
|
for the 'glob' module for more information.
|
||||||
"""
|
"""
|
||||||
def wrap(f):
|
def wrap(f):
|
||||||
def wrapped_f(*args, **kwargs):
|
def wrapped_f(*args, **kwargs):
|
||||||
checksums = {}
|
checksums = {path: path_hash(path) for path in restart_map}
|
||||||
for path in restart_map:
|
|
||||||
checksums[path] = file_hash(path)
|
|
||||||
f(*args, **kwargs)
|
f(*args, **kwargs)
|
||||||
restarts = []
|
restarts = []
|
||||||
for path in restart_map:
|
for path in restart_map:
|
||||||
if checksums[path] != file_hash(path):
|
if path_hash(path) != checksums[path]:
|
||||||
restarts += restart_map[path]
|
restarts += restart_map[path]
|
||||||
services_list = list(OrderedDict.fromkeys(restarts))
|
services_list = list(OrderedDict.fromkeys(restarts))
|
||||||
if not stopstart:
|
if not stopstart:
|
||||||
|
@ -352,25 +417,80 @@ def pwgen(length=None):
|
||||||
return(''.join(random_chars))
|
return(''.join(random_chars))
|
||||||
|
|
||||||
|
|
||||||
def list_nics(nic_type):
|
def is_phy_iface(interface):
|
||||||
|
"""Returns True if interface is not virtual, otherwise False."""
|
||||||
|
if interface:
|
||||||
|
sys_net = '/sys/class/net'
|
||||||
|
if os.path.isdir(sys_net):
|
||||||
|
for iface in glob.glob(os.path.join(sys_net, '*')):
|
||||||
|
if '/virtual/' in os.path.realpath(iface):
|
||||||
|
continue
|
||||||
|
|
||||||
|
if interface == os.path.basename(iface):
|
||||||
|
return True
|
||||||
|
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def get_bond_master(interface):
|
||||||
|
"""Returns bond master if interface is bond slave otherwise None.
|
||||||
|
|
||||||
|
NOTE: the provided interface is expected to be physical
|
||||||
|
"""
|
||||||
|
if interface:
|
||||||
|
iface_path = '/sys/class/net/%s' % (interface)
|
||||||
|
if os.path.exists(iface_path):
|
||||||
|
if '/virtual/' in os.path.realpath(iface_path):
|
||||||
|
return None
|
||||||
|
|
||||||
|
master = os.path.join(iface_path, 'master')
|
||||||
|
if os.path.exists(master):
|
||||||
|
master = os.path.realpath(master)
|
||||||
|
# make sure it is a bond master
|
||||||
|
if os.path.exists(os.path.join(master, 'bonding')):
|
||||||
|
return os.path.basename(master)
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def list_nics(nic_type=None):
|
||||||
'''Return a list of nics of given type(s)'''
|
'''Return a list of nics of given type(s)'''
|
||||||
if isinstance(nic_type, six.string_types):
|
if isinstance(nic_type, six.string_types):
|
||||||
int_types = [nic_type]
|
int_types = [nic_type]
|
||||||
else:
|
else:
|
||||||
int_types = nic_type
|
int_types = nic_type
|
||||||
|
|
||||||
interfaces = []
|
interfaces = []
|
||||||
for int_type in int_types:
|
if nic_type:
|
||||||
cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
|
for int_type in int_types:
|
||||||
|
cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
|
||||||
|
ip_output = subprocess.check_output(cmd).decode('UTF-8')
|
||||||
|
ip_output = ip_output.split('\n')
|
||||||
|
ip_output = (line for line in ip_output if line)
|
||||||
|
for line in ip_output:
|
||||||
|
if line.split()[1].startswith(int_type):
|
||||||
|
matched = re.search('.*: (' + int_type +
|
||||||
|
r'[0-9]+\.[0-9]+)@.*', line)
|
||||||
|
if matched:
|
||||||
|
iface = matched.groups()[0]
|
||||||
|
else:
|
||||||
|
iface = line.split()[1].replace(":", "")
|
||||||
|
|
||||||
|
if iface not in interfaces:
|
||||||
|
interfaces.append(iface)
|
||||||
|
else:
|
||||||
|
cmd = ['ip', 'a']
|
||||||
ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
|
ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
|
||||||
ip_output = (line for line in ip_output if line)
|
ip_output = (line.strip() for line in ip_output if line)
|
||||||
|
|
||||||
|
key = re.compile('^[0-9]+:\s+(.+):')
|
||||||
for line in ip_output:
|
for line in ip_output:
|
||||||
if line.split()[1].startswith(int_type):
|
matched = re.search(key, line)
|
||||||
matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line)
|
if matched:
|
||||||
if matched:
|
iface = matched.group(1)
|
||||||
interface = matched.groups()[0]
|
iface = iface.partition("@")[0]
|
||||||
else:
|
if iface not in interfaces:
|
||||||
interface = line.split()[1].replace(":", "")
|
interfaces.append(iface)
|
||||||
interfaces.append(interface)
|
|
||||||
|
|
||||||
return interfaces
|
return interfaces
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,62 @@
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import yaml
|
||||||
|
from charmhelpers.core import fstab
|
||||||
|
from charmhelpers.core import sysctl
|
||||||
|
from charmhelpers.core.host import (
|
||||||
|
add_group,
|
||||||
|
add_user_to_group,
|
||||||
|
fstab_mount,
|
||||||
|
mkdir,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def hugepage_support(user, group='hugetlb', nr_hugepages=256,
|
||||||
|
max_map_count=65536, mnt_point='/run/hugepages/kvm',
|
||||||
|
pagesize='2MB', mount=True):
|
||||||
|
"""Enable hugepages on system.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
user (str) -- Username to allow access to hugepages to
|
||||||
|
group (str) -- Group name to own hugepages
|
||||||
|
nr_hugepages (int) -- Number of pages to reserve
|
||||||
|
max_map_count (int) -- Number of Virtual Memory Areas a process can own
|
||||||
|
mnt_point (str) -- Directory to mount hugepages on
|
||||||
|
pagesize (str) -- Size of hugepages
|
||||||
|
mount (bool) -- Whether to Mount hugepages
|
||||||
|
"""
|
||||||
|
group_info = add_group(group)
|
||||||
|
gid = group_info.gr_gid
|
||||||
|
add_user_to_group(user, group)
|
||||||
|
sysctl_settings = {
|
||||||
|
'vm.nr_hugepages': nr_hugepages,
|
||||||
|
'vm.max_map_count': max_map_count,
|
||||||
|
'vm.hugetlb_shm_group': gid,
|
||||||
|
}
|
||||||
|
sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
|
||||||
|
mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
|
||||||
|
lfstab = fstab.Fstab()
|
||||||
|
fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
|
||||||
|
if fstab_entry:
|
||||||
|
lfstab.remove_entry(fstab_entry)
|
||||||
|
entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
|
||||||
|
'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
|
||||||
|
lfstab.add_entry(entry)
|
||||||
|
if mount:
|
||||||
|
fstab_mount(mnt_point)
|
|
@ -15,9 +15,9 @@
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import re
|
|
||||||
import json
|
import json
|
||||||
from collections import Iterable
|
from inspect import getargspec
|
||||||
|
from collections import Iterable, OrderedDict
|
||||||
|
|
||||||
from charmhelpers.core import host
|
from charmhelpers.core import host
|
||||||
from charmhelpers.core import hookenv
|
from charmhelpers.core import hookenv
|
||||||
|
@ -119,7 +119,7 @@ class ServiceManager(object):
|
||||||
"""
|
"""
|
||||||
self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
|
self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
|
||||||
self._ready = None
|
self._ready = None
|
||||||
self.services = {}
|
self.services = OrderedDict()
|
||||||
for service in services or []:
|
for service in services or []:
|
||||||
service_name = service['service']
|
service_name = service['service']
|
||||||
self.services[service_name] = service
|
self.services[service_name] = service
|
||||||
|
@ -128,15 +128,18 @@ class ServiceManager(object):
|
||||||
"""
|
"""
|
||||||
Handle the current hook by doing The Right Thing with the registered services.
|
Handle the current hook by doing The Right Thing with the registered services.
|
||||||
"""
|
"""
|
||||||
hook_name = hookenv.hook_name()
|
hookenv._run_atstart()
|
||||||
if hook_name == 'stop':
|
try:
|
||||||
self.stop_services()
|
hook_name = hookenv.hook_name()
|
||||||
else:
|
if hook_name == 'stop':
|
||||||
self.provide_data()
|
self.stop_services()
|
||||||
self.reconfigure_services()
|
else:
|
||||||
cfg = hookenv.config()
|
self.reconfigure_services()
|
||||||
if cfg.implicit_save:
|
self.provide_data()
|
||||||
cfg.save()
|
except SystemExit as x:
|
||||||
|
if x.code is None or x.code == 0:
|
||||||
|
hookenv._run_atexit()
|
||||||
|
hookenv._run_atexit()
|
||||||
|
|
||||||
def provide_data(self):
|
def provide_data(self):
|
||||||
"""
|
"""
|
||||||
|
@ -145,15 +148,36 @@ class ServiceManager(object):
|
||||||
A provider must have a `name` attribute, which indicates which relation
|
A provider must have a `name` attribute, which indicates which relation
|
||||||
to set data on, and a `provide_data()` method, which returns a dict of
|
to set data on, and a `provide_data()` method, which returns a dict of
|
||||||
data to set.
|
data to set.
|
||||||
|
|
||||||
|
The `provide_data()` method can optionally accept two parameters:
|
||||||
|
|
||||||
|
* ``remote_service`` The name of the remote service that the data will
|
||||||
|
be provided to. The `provide_data()` method will be called once
|
||||||
|
for each connected service (not unit). This allows the method to
|
||||||
|
tailor its data to the given service.
|
||||||
|
* ``service_ready`` Whether or not the service definition had all of
|
||||||
|
its requirements met, and thus the ``data_ready`` callbacks run.
|
||||||
|
|
||||||
|
Note that the ``provided_data`` methods are now called **after** the
|
||||||
|
``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks
|
||||||
|
a chance to generate any data necessary for the providing to the remote
|
||||||
|
services.
|
||||||
"""
|
"""
|
||||||
hook_name = hookenv.hook_name()
|
for service_name, service in self.services.items():
|
||||||
for service in self.services.values():
|
service_ready = self.is_ready(service_name)
|
||||||
for provider in service.get('provided_data', []):
|
for provider in service.get('provided_data', []):
|
||||||
if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
|
for relid in hookenv.relation_ids(provider.name):
|
||||||
data = provider.provide_data()
|
units = hookenv.related_units(relid)
|
||||||
_ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data
|
if not units:
|
||||||
if _ready:
|
continue
|
||||||
hookenv.relation_set(None, data)
|
remote_service = units[0].split('/')[0]
|
||||||
|
argspec = getargspec(provider.provide_data)
|
||||||
|
if len(argspec.args) > 1:
|
||||||
|
data = provider.provide_data(remote_service, service_ready)
|
||||||
|
else:
|
||||||
|
data = provider.provide_data()
|
||||||
|
if data:
|
||||||
|
hookenv.relation_set(relid, data)
|
||||||
|
|
||||||
def reconfigure_services(self, *service_names):
|
def reconfigure_services(self, *service_names):
|
||||||
"""
|
"""
|
||||||
|
|
|
@ -16,7 +16,9 @@
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import yaml
|
import yaml
|
||||||
|
|
||||||
from charmhelpers.core import hookenv
|
from charmhelpers.core import hookenv
|
||||||
|
from charmhelpers.core import host
|
||||||
from charmhelpers.core import templating
|
from charmhelpers.core import templating
|
||||||
|
|
||||||
from charmhelpers.core.services.base import ManagerCallback
|
from charmhelpers.core.services.base import ManagerCallback
|
||||||
|
@ -239,28 +241,42 @@ class TemplateCallback(ManagerCallback):
|
||||||
action.
|
action.
|
||||||
|
|
||||||
:param str source: The template source file, relative to
|
:param str source: The template source file, relative to
|
||||||
`$CHARM_DIR/templates`
|
`$CHARM_DIR/templates`
|
||||||
|
|
||||||
:param str target: The target to write the rendered template to
|
:param str target: The target to write the rendered template to
|
||||||
:param str owner: The owner of the rendered file
|
:param str owner: The owner of the rendered file
|
||||||
:param str group: The group of the rendered file
|
:param str group: The group of the rendered file
|
||||||
:param int perms: The permissions of the rendered file
|
:param int perms: The permissions of the rendered file
|
||||||
|
:param partial on_change_action: functools partial to be executed when
|
||||||
|
rendered file changes
|
||||||
"""
|
"""
|
||||||
def __init__(self, source, target,
|
def __init__(self, source, target,
|
||||||
owner='root', group='root', perms=0o444):
|
owner='root', group='root', perms=0o444,
|
||||||
|
on_change_action=None):
|
||||||
self.source = source
|
self.source = source
|
||||||
self.target = target
|
self.target = target
|
||||||
self.owner = owner
|
self.owner = owner
|
||||||
self.group = group
|
self.group = group
|
||||||
self.perms = perms
|
self.perms = perms
|
||||||
|
self.on_change_action = on_change_action
|
||||||
|
|
||||||
def __call__(self, manager, service_name, event_name):
|
def __call__(self, manager, service_name, event_name):
|
||||||
|
pre_checksum = ''
|
||||||
|
if self.on_change_action and os.path.isfile(self.target):
|
||||||
|
pre_checksum = host.file_hash(self.target)
|
||||||
service = manager.get_service(service_name)
|
service = manager.get_service(service_name)
|
||||||
context = {}
|
context = {}
|
||||||
for ctx in service.get('required_data', []):
|
for ctx in service.get('required_data', []):
|
||||||
context.update(ctx)
|
context.update(ctx)
|
||||||
templating.render(self.source, self.target, context,
|
templating.render(self.source, self.target, context,
|
||||||
self.owner, self.group, self.perms)
|
self.owner, self.group, self.perms)
|
||||||
|
if self.on_change_action:
|
||||||
|
if pre_checksum == host.file_hash(self.target):
|
||||||
|
hookenv.log(
|
||||||
|
'No change detected: {}'.format(self.target),
|
||||||
|
hookenv.DEBUG)
|
||||||
|
else:
|
||||||
|
self.on_change_action()
|
||||||
|
|
||||||
|
|
||||||
# Convenience aliases for templates
|
# Convenience aliases for templates
|
||||||
|
|
|
@ -152,6 +152,7 @@ associated to the hookname.
|
||||||
import collections
|
import collections
|
||||||
import contextlib
|
import contextlib
|
||||||
import datetime
|
import datetime
|
||||||
|
import itertools
|
||||||
import json
|
import json
|
||||||
import os
|
import os
|
||||||
import pprint
|
import pprint
|
||||||
|
@ -164,8 +165,7 @@ __author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
|
||||||
class Storage(object):
|
class Storage(object):
|
||||||
"""Simple key value database for local unit state within charms.
|
"""Simple key value database for local unit state within charms.
|
||||||
|
|
||||||
Modifications are automatically committed at hook exit. That's
|
Modifications are not persisted unless :meth:`flush` is called.
|
||||||
currently regardless of exit code.
|
|
||||||
|
|
||||||
To support dicts, lists, integer, floats, and booleans values
|
To support dicts, lists, integer, floats, and booleans values
|
||||||
are automatically json encoded/decoded.
|
are automatically json encoded/decoded.
|
||||||
|
@ -173,8 +173,11 @@ class Storage(object):
|
||||||
def __init__(self, path=None):
|
def __init__(self, path=None):
|
||||||
self.db_path = path
|
self.db_path = path
|
||||||
if path is None:
|
if path is None:
|
||||||
self.db_path = os.path.join(
|
if 'UNIT_STATE_DB' in os.environ:
|
||||||
os.environ.get('CHARM_DIR', ''), '.unit-state.db')
|
self.db_path = os.environ['UNIT_STATE_DB']
|
||||||
|
else:
|
||||||
|
self.db_path = os.path.join(
|
||||||
|
os.environ.get('CHARM_DIR', ''), '.unit-state.db')
|
||||||
self.conn = sqlite3.connect('%s' % self.db_path)
|
self.conn = sqlite3.connect('%s' % self.db_path)
|
||||||
self.cursor = self.conn.cursor()
|
self.cursor = self.conn.cursor()
|
||||||
self.revision = None
|
self.revision = None
|
||||||
|
@ -189,15 +192,8 @@ class Storage(object):
|
||||||
self.conn.close()
|
self.conn.close()
|
||||||
self._closed = True
|
self._closed = True
|
||||||
|
|
||||||
def _scoped_query(self, stmt, params=None):
|
|
||||||
if params is None:
|
|
||||||
params = []
|
|
||||||
return stmt, params
|
|
||||||
|
|
||||||
def get(self, key, default=None, record=False):
|
def get(self, key, default=None, record=False):
|
||||||
self.cursor.execute(
|
self.cursor.execute('select data from kv where key=?', [key])
|
||||||
*self._scoped_query(
|
|
||||||
'select data from kv where key=?', [key]))
|
|
||||||
result = self.cursor.fetchone()
|
result = self.cursor.fetchone()
|
||||||
if not result:
|
if not result:
|
||||||
return default
|
return default
|
||||||
|
@ -206,33 +202,81 @@ class Storage(object):
|
||||||
return json.loads(result[0])
|
return json.loads(result[0])
|
||||||
|
|
||||||
def getrange(self, key_prefix, strip=False):
|
def getrange(self, key_prefix, strip=False):
|
||||||
stmt = "select key, data from kv where key like '%s%%'" % key_prefix
|
"""
|
||||||
self.cursor.execute(*self._scoped_query(stmt))
|
Get a range of keys starting with a common prefix as a mapping of
|
||||||
|
keys to values.
|
||||||
|
|
||||||
|
:param str key_prefix: Common prefix among all keys
|
||||||
|
:param bool strip: Optionally strip the common prefix from the key
|
||||||
|
names in the returned dict
|
||||||
|
:return dict: A (possibly empty) dict of key-value mappings
|
||||||
|
"""
|
||||||
|
self.cursor.execute("select key, data from kv where key like ?",
|
||||||
|
['%s%%' % key_prefix])
|
||||||
result = self.cursor.fetchall()
|
result = self.cursor.fetchall()
|
||||||
|
|
||||||
if not result:
|
if not result:
|
||||||
return None
|
return {}
|
||||||
if not strip:
|
if not strip:
|
||||||
key_prefix = ''
|
key_prefix = ''
|
||||||
return dict([
|
return dict([
|
||||||
(k[len(key_prefix):], json.loads(v)) for k, v in result])
|
(k[len(key_prefix):], json.loads(v)) for k, v in result])
|
||||||
|
|
||||||
def update(self, mapping, prefix=""):
|
def update(self, mapping, prefix=""):
|
||||||
|
"""
|
||||||
|
Set the values of multiple keys at once.
|
||||||
|
|
||||||
|
:param dict mapping: Mapping of keys to values
|
||||||
|
:param str prefix: Optional prefix to apply to all keys in `mapping`
|
||||||
|
before setting
|
||||||
|
"""
|
||||||
for k, v in mapping.items():
|
for k, v in mapping.items():
|
||||||
self.set("%s%s" % (prefix, k), v)
|
self.set("%s%s" % (prefix, k), v)
|
||||||
|
|
||||||
def unset(self, key):
|
def unset(self, key):
|
||||||
|
"""
|
||||||
|
Remove a key from the database entirely.
|
||||||
|
"""
|
||||||
self.cursor.execute('delete from kv where key=?', [key])
|
self.cursor.execute('delete from kv where key=?', [key])
|
||||||
if self.revision and self.cursor.rowcount:
|
if self.revision and self.cursor.rowcount:
|
||||||
self.cursor.execute(
|
self.cursor.execute(
|
||||||
'insert into kv_revisions values (?, ?, ?)',
|
'insert into kv_revisions values (?, ?, ?)',
|
||||||
[key, self.revision, json.dumps('DELETED')])
|
[key, self.revision, json.dumps('DELETED')])
|
||||||
|
|
||||||
|
def unsetrange(self, keys=None, prefix=""):
|
||||||
|
"""
|
||||||
|
Remove a range of keys starting with a common prefix, from the database
|
||||||
|
entirely.
|
||||||
|
|
||||||
|
:param list keys: List of keys to remove.
|
||||||
|
:param str prefix: Optional prefix to apply to all keys in ``keys``
|
||||||
|
before removing.
|
||||||
|
"""
|
||||||
|
if keys is not None:
|
||||||
|
keys = ['%s%s' % (prefix, key) for key in keys]
|
||||||
|
self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
|
||||||
|
if self.revision and self.cursor.rowcount:
|
||||||
|
self.cursor.execute(
|
||||||
|
'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
|
||||||
|
list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
|
||||||
|
else:
|
||||||
|
self.cursor.execute('delete from kv where key like ?',
|
||||||
|
['%s%%' % prefix])
|
||||||
|
if self.revision and self.cursor.rowcount:
|
||||||
|
self.cursor.execute(
|
||||||
|
'insert into kv_revisions values (?, ?, ?)',
|
||||||
|
['%s%%' % prefix, self.revision, json.dumps('DELETED')])
|
||||||
|
|
||||||
def set(self, key, value):
|
def set(self, key, value):
|
||||||
|
"""
|
||||||
|
Set a value in the database.
|
||||||
|
|
||||||
|
:param str key: Key to set the value for
|
||||||
|
:param value: Any JSON-serializable value to be set
|
||||||
|
"""
|
||||||
serialized = json.dumps(value)
|
serialized = json.dumps(value)
|
||||||
|
|
||||||
self.cursor.execute(
|
self.cursor.execute('select data from kv where key=?', [key])
|
||||||
'select data from kv where key=?', [key])
|
|
||||||
exists = self.cursor.fetchone()
|
exists = self.cursor.fetchone()
|
||||||
|
|
||||||
# Skip mutations to the same value
|
# Skip mutations to the same value
|
||||||
|
|
|
@ -90,6 +90,14 @@ CLOUD_ARCHIVE_POCKETS = {
|
||||||
'kilo/proposed': 'trusty-proposed/kilo',
|
'kilo/proposed': 'trusty-proposed/kilo',
|
||||||
'trusty-kilo/proposed': 'trusty-proposed/kilo',
|
'trusty-kilo/proposed': 'trusty-proposed/kilo',
|
||||||
'trusty-proposed/kilo': 'trusty-proposed/kilo',
|
'trusty-proposed/kilo': 'trusty-proposed/kilo',
|
||||||
|
# Liberty
|
||||||
|
'liberty': 'trusty-updates/liberty',
|
||||||
|
'trusty-liberty': 'trusty-updates/liberty',
|
||||||
|
'trusty-liberty/updates': 'trusty-updates/liberty',
|
||||||
|
'trusty-updates/liberty': 'trusty-updates/liberty',
|
||||||
|
'liberty/proposed': 'trusty-proposed/liberty',
|
||||||
|
'trusty-liberty/proposed': 'trusty-proposed/liberty',
|
||||||
|
'trusty-proposed/liberty': 'trusty-proposed/liberty',
|
||||||
}
|
}
|
||||||
|
|
||||||
# The order of this list is very important. Handlers should be listed in from
|
# The order of this list is very important. Handlers should be listed in from
|
||||||
|
@ -158,7 +166,7 @@ def filter_installed_packages(packages):
|
||||||
|
|
||||||
def apt_cache(in_memory=True):
|
def apt_cache(in_memory=True):
|
||||||
"""Build and return an apt cache"""
|
"""Build and return an apt cache"""
|
||||||
import apt_pkg
|
from apt import apt_pkg
|
||||||
apt_pkg.init()
|
apt_pkg.init()
|
||||||
if in_memory:
|
if in_memory:
|
||||||
apt_pkg.config.set("Dir::Cache::pkgcache", "")
|
apt_pkg.config.set("Dir::Cache::pkgcache", "")
|
||||||
|
@ -215,9 +223,9 @@ def apt_purge(packages, fatal=False):
|
||||||
_run_apt_command(cmd, fatal)
|
_run_apt_command(cmd, fatal)
|
||||||
|
|
||||||
|
|
||||||
def apt_hold(packages, fatal=False):
|
def apt_mark(packages, mark, fatal=False):
|
||||||
"""Hold one or more packages"""
|
"""Flag one or more packages using apt-mark"""
|
||||||
cmd = ['apt-mark', 'hold']
|
cmd = ['apt-mark', mark]
|
||||||
if isinstance(packages, six.string_types):
|
if isinstance(packages, six.string_types):
|
||||||
cmd.append(packages)
|
cmd.append(packages)
|
||||||
else:
|
else:
|
||||||
|
@ -225,9 +233,17 @@ def apt_hold(packages, fatal=False):
|
||||||
log("Holding {}".format(packages))
|
log("Holding {}".format(packages))
|
||||||
|
|
||||||
if fatal:
|
if fatal:
|
||||||
subprocess.check_call(cmd)
|
subprocess.check_call(cmd, universal_newlines=True)
|
||||||
else:
|
else:
|
||||||
subprocess.call(cmd)
|
subprocess.call(cmd, universal_newlines=True)
|
||||||
|
|
||||||
|
|
||||||
|
def apt_hold(packages, fatal=False):
|
||||||
|
return apt_mark(packages, 'hold', fatal=fatal)
|
||||||
|
|
||||||
|
|
||||||
|
def apt_unhold(packages, fatal=False):
|
||||||
|
return apt_mark(packages, 'unhold', fatal=fatal)
|
||||||
|
|
||||||
|
|
||||||
def add_source(source, key=None):
|
def add_source(source, key=None):
|
||||||
|
@ -370,8 +386,9 @@ def install_remote(source, *args, **kwargs):
|
||||||
for handler in handlers:
|
for handler in handlers:
|
||||||
try:
|
try:
|
||||||
installed_to = handler.install(source, *args, **kwargs)
|
installed_to = handler.install(source, *args, **kwargs)
|
||||||
except UnhandledSource:
|
except UnhandledSource as e:
|
||||||
pass
|
log('Install source attempt unsuccessful: {}'.format(e),
|
||||||
|
level='WARNING')
|
||||||
if not installed_to:
|
if not installed_to:
|
||||||
raise UnhandledSource("No handler found for source {}".format(source))
|
raise UnhandledSource("No handler found for source {}".format(source))
|
||||||
return installed_to
|
return installed_to
|
||||||
|
|
|
@ -77,6 +77,8 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
|
||||||
def can_handle(self, source):
|
def can_handle(self, source):
|
||||||
url_parts = self.parse_url(source)
|
url_parts = self.parse_url(source)
|
||||||
if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
|
if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
|
||||||
|
# XXX: Why is this returning a boolean and a string? It's
|
||||||
|
# doomed to fail since "bool(can_handle('foo://'))" will be True.
|
||||||
return "Wrong source type"
|
return "Wrong source type"
|
||||||
if get_archive_handler(self.base_url(source)):
|
if get_archive_handler(self.base_url(source)):
|
||||||
return True
|
return True
|
||||||
|
@ -155,7 +157,11 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
|
||||||
else:
|
else:
|
||||||
algorithms = hashlib.algorithms_available
|
algorithms = hashlib.algorithms_available
|
||||||
if key in algorithms:
|
if key in algorithms:
|
||||||
check_hash(dld_file, value, key)
|
if len(value) != 1:
|
||||||
|
raise TypeError(
|
||||||
|
"Expected 1 hash value, not %d" % len(value))
|
||||||
|
expected = value[0]
|
||||||
|
check_hash(dld_file, expected, key)
|
||||||
if checksum:
|
if checksum:
|
||||||
check_hash(dld_file, checksum, hash_type)
|
check_hash(dld_file, checksum, hash_type)
|
||||||
return extract(dld_file, dest)
|
return extract(dld_file, dest)
|
||||||
|
|
|
@ -45,14 +45,16 @@ class GitUrlFetchHandler(BaseFetchHandler):
|
||||||
else:
|
else:
|
||||||
return True
|
return True
|
||||||
|
|
||||||
def clone(self, source, dest, branch):
|
def clone(self, source, dest, branch, depth=None):
|
||||||
if not self.can_handle(source):
|
if not self.can_handle(source):
|
||||||
raise UnhandledSource("Cannot handle {}".format(source))
|
raise UnhandledSource("Cannot handle {}".format(source))
|
||||||
|
|
||||||
repo = Repo.clone_from(source, dest)
|
if depth:
|
||||||
repo.git.checkout(branch)
|
Repo.clone_from(source, dest, branch=branch, depth=depth)
|
||||||
|
else:
|
||||||
|
Repo.clone_from(source, dest, branch=branch)
|
||||||
|
|
||||||
def install(self, source, branch="master", dest=None):
|
def install(self, source, branch="master", dest=None, depth=None):
|
||||||
url_parts = self.parse_url(source)
|
url_parts = self.parse_url(source)
|
||||||
branch_name = url_parts.path.strip("/").split("/")[-1]
|
branch_name = url_parts.path.strip("/").split("/")[-1]
|
||||||
if dest:
|
if dest:
|
||||||
|
@ -63,9 +65,9 @@ class GitUrlFetchHandler(BaseFetchHandler):
|
||||||
if not os.path.exists(dest_dir):
|
if not os.path.exists(dest_dir):
|
||||||
mkdir(dest_dir, perms=0o755)
|
mkdir(dest_dir, perms=0o755)
|
||||||
try:
|
try:
|
||||||
self.clone(source, dest_dir, branch)
|
self.clone(source, dest_dir, branch, depth)
|
||||||
except GitCommandError as e:
|
except GitCommandError as e:
|
||||||
raise UnhandledSource(e.message)
|
raise UnhandledSource(e)
|
||||||
except OSError as e:
|
except OSError as e:
|
||||||
raise UnhandledSource(e.strerror)
|
raise UnhandledSource(e.strerror)
|
||||||
return dest_dir
|
return dest_dir
|
||||||
|
|
|
@ -1 +0,0 @@
|
||||||
rabbitmq_server_relations.py
|
|
|
@ -0,0 +1,20 @@
|
||||||
|
#!/bin/bash
|
||||||
|
# Wrapper to deal with newer Ubuntu versions that don't have py2 installed
|
||||||
|
# by default.
|
||||||
|
|
||||||
|
declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml')
|
||||||
|
|
||||||
|
check_and_install() {
|
||||||
|
pkg="${1}-${2}"
|
||||||
|
if ! dpkg -s ${pkg} 2>&1 > /dev/null; then
|
||||||
|
apt-get -y install ${pkg}
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
PYTHON="python"
|
||||||
|
|
||||||
|
for dep in ${DEPS[@]}; do
|
||||||
|
check_and_install ${PYTHON} ${dep}
|
||||||
|
done
|
||||||
|
|
||||||
|
exec ./hooks/install.real
|
|
@ -0,0 +1 @@
|
||||||
|
rabbitmq_server_relations.py
|
|
@ -0,0 +1 @@
|
||||||
|
rabbitmq_server_relations.py
|
|
@ -0,0 +1 @@
|
||||||
|
rabbitmq_server_relations.py
|
|
@ -0,0 +1 @@
|
||||||
|
rabbitmq_server_relations.py
|
|
@ -1,6 +1,5 @@
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
import socket
|
|
||||||
import sys
|
import sys
|
||||||
import subprocess
|
import subprocess
|
||||||
import glob
|
import glob
|
||||||
|
@ -54,6 +53,8 @@ LIB_PATH = '/var/lib/rabbitmq/'
|
||||||
HOSTS_FILE = '/etc/hosts'
|
HOSTS_FILE = '/etc/hosts'
|
||||||
|
|
||||||
_named_passwd = '/var/lib/charm/{}/{}.passwd'
|
_named_passwd = '/var/lib/charm/{}/{}.passwd'
|
||||||
|
_local_named_passwd = '/var/lib/charm/{}/{}.local_passwd'
|
||||||
|
|
||||||
|
|
||||||
# hook_contexts are used as a convenient mechanism to render templates
|
# hook_contexts are used as a convenient mechanism to render templates
|
||||||
# logically, consider building a hook_context for template rendering so
|
# logically, consider building a hook_context for template rendering so
|
||||||
|
@ -300,18 +301,13 @@ def cluster_with():
|
||||||
address = relation_get('private-address',
|
address = relation_get('private-address',
|
||||||
rid=r_id, unit=unit)
|
rid=r_id, unit=unit)
|
||||||
if address is not None:
|
if address is not None:
|
||||||
try:
|
node = get_hostname(address, fqdn=False)
|
||||||
node = get_hostname(address, fqdn=False)
|
if node:
|
||||||
except:
|
available_nodes.append(node)
|
||||||
|
else:
|
||||||
log('Cannot resolve hostname for {} '
|
log('Cannot resolve hostname for {} '
|
||||||
'using DNS servers'.format(address), level='WARNING')
|
'using DNS servers'.format(address),
|
||||||
log('Falling back to use socket.gethostname()',
|
|
||||||
level='WARNING')
|
level='WARNING')
|
||||||
# If the private-address is not resolvable using DNS
|
|
||||||
# then use the current hostname
|
|
||||||
node = socket.gethostname()
|
|
||||||
|
|
||||||
available_nodes.append(node)
|
|
||||||
|
|
||||||
if len(available_nodes) == 0:
|
if len(available_nodes) == 0:
|
||||||
log('No nodes available to cluster with')
|
log('No nodes available to cluster with')
|
||||||
|
@ -329,7 +325,12 @@ def cluster_with():
|
||||||
cmd = [RABBITMQ_CTL, 'stop_app']
|
cmd = [RABBITMQ_CTL, 'stop_app']
|
||||||
subprocess.check_call(cmd)
|
subprocess.check_call(cmd)
|
||||||
cmd = [RABBITMQ_CTL, cluster_cmd, 'rabbit@%s' % node]
|
cmd = [RABBITMQ_CTL, cluster_cmd, 'rabbit@%s' % node]
|
||||||
subprocess.check_call(cmd)
|
try:
|
||||||
|
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
|
||||||
|
except subprocess.CalledProcessError as e:
|
||||||
|
if not e.returncode == 2 or \
|
||||||
|
"{ok,already_member}" not in e.output:
|
||||||
|
raise e
|
||||||
cmd = [RABBITMQ_CTL, 'start_app']
|
cmd = [RABBITMQ_CTL, 'start_app']
|
||||||
subprocess.check_call(cmd)
|
subprocess.check_call(cmd)
|
||||||
log('Host clustered with %s.' % node)
|
log('Host clustered with %s.' % node)
|
||||||
|
@ -458,10 +459,14 @@ def execute(cmd, die=False, echo=False):
|
||||||
return (stdout, stderr, rc)
|
return (stdout, stderr, rc)
|
||||||
|
|
||||||
|
|
||||||
def get_rabbit_password_on_disk(username, password=None):
|
def get_rabbit_password_on_disk(username, password=None, local=False):
|
||||||
''' Retrieve, generate or store a rabbit password for
|
''' Retrieve, generate or store a rabbit password for
|
||||||
the provided username on disk'''
|
the provided username on disk'''
|
||||||
_passwd_file = _named_passwd.format(service_name(), username)
|
if local:
|
||||||
|
_passwd_file = _local_named_passwd.format(service_name(), username)
|
||||||
|
else:
|
||||||
|
_passwd_file = _named_passwd.format(service_name(), username)
|
||||||
|
|
||||||
_password = None
|
_password = None
|
||||||
if os.path.exists(_passwd_file):
|
if os.path.exists(_passwd_file):
|
||||||
with open(_passwd_file, 'r') as passwd:
|
with open(_passwd_file, 'r') as passwd:
|
||||||
|
@ -473,6 +478,7 @@ def get_rabbit_password_on_disk(username, password=None):
|
||||||
_password = password or pwgen(length=64)
|
_password = password or pwgen(length=64)
|
||||||
write_file(_passwd_file, _password, owner=RABBIT_USER,
|
write_file(_passwd_file, _password, owner=RABBIT_USER,
|
||||||
group=RABBIT_USER, perms=0o660)
|
group=RABBIT_USER, perms=0o660)
|
||||||
|
|
||||||
return _password
|
return _password
|
||||||
|
|
||||||
|
|
||||||
|
@ -490,20 +496,23 @@ def migrate_passwords_to_peer_relation():
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
def get_rabbit_password(username, password=None):
|
def get_rabbit_password(username, password=None, local=False):
|
||||||
''' Retrieve, generate or store a rabbit password for
|
''' Retrieve, generate or store a rabbit password for
|
||||||
the provided username using peer relation cluster'''
|
the provided username using peer relation cluster'''
|
||||||
migrate_passwords_to_peer_relation()
|
if local:
|
||||||
_key = '{}.passwd'.format(username)
|
return get_rabbit_password_on_disk(username, password, local)
|
||||||
try:
|
else:
|
||||||
_password = peer_retrieve(_key)
|
migrate_passwords_to_peer_relation()
|
||||||
if _password is None:
|
_key = '{}.passwd'.format(username)
|
||||||
_password = password or pwgen(length=64)
|
try:
|
||||||
peer_store(_key, _password)
|
_password = peer_retrieve(_key)
|
||||||
except ValueError:
|
if _password is None:
|
||||||
# cluster relation is not yet started, use on-disk
|
_password = password or pwgen(length=64)
|
||||||
_password = get_rabbit_password_on_disk(username, password)
|
peer_store(_key, _password)
|
||||||
return _password
|
except ValueError:
|
||||||
|
# cluster relation is not yet started, use on-disk
|
||||||
|
_password = get_rabbit_password_on_disk(username, password)
|
||||||
|
return _password
|
||||||
|
|
||||||
|
|
||||||
def bind_ipv6_interface():
|
def bind_ipv6_interface():
|
||||||
|
|
|
@ -7,6 +7,15 @@ import subprocess
|
||||||
import glob
|
import glob
|
||||||
import socket
|
import socket
|
||||||
|
|
||||||
|
try:
|
||||||
|
import yaml # flake8: noqa
|
||||||
|
except ImportError:
|
||||||
|
if sys.version_info.major == 2:
|
||||||
|
subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
|
||||||
|
else:
|
||||||
|
subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
|
||||||
|
import yaml # flake8: noqa
|
||||||
|
|
||||||
import rabbit_utils as rabbit
|
import rabbit_utils as rabbit
|
||||||
import ssl_utils
|
import ssl_utils
|
||||||
|
|
||||||
|
@ -43,6 +52,7 @@ from charmhelpers.core.hookenv import (
|
||||||
ERROR,
|
ERROR,
|
||||||
INFO,
|
INFO,
|
||||||
relation_get,
|
relation_get,
|
||||||
|
relation_clear,
|
||||||
relation_set,
|
relation_set,
|
||||||
relation_ids,
|
relation_ids,
|
||||||
related_units,
|
related_units,
|
||||||
|
@ -52,7 +62,9 @@ from charmhelpers.core.hookenv import (
|
||||||
unit_get,
|
unit_get,
|
||||||
is_relation_made,
|
is_relation_made,
|
||||||
Hooks,
|
Hooks,
|
||||||
UnregisteredHookError
|
UnregisteredHookError,
|
||||||
|
is_leader,
|
||||||
|
charm_dir,
|
||||||
)
|
)
|
||||||
from charmhelpers.core.host import (
|
from charmhelpers.core.host import (
|
||||||
cmp_pkgrevno,
|
cmp_pkgrevno,
|
||||||
|
@ -60,6 +72,7 @@ from charmhelpers.core.host import (
|
||||||
rsync,
|
rsync,
|
||||||
service_stop,
|
service_stop,
|
||||||
service_restart,
|
service_restart,
|
||||||
|
write_file,
|
||||||
)
|
)
|
||||||
from charmhelpers.contrib.charmsupport import nrpe
|
from charmhelpers.contrib.charmsupport import nrpe
|
||||||
|
|
||||||
|
@ -69,6 +82,7 @@ from charmhelpers.contrib.peerstorage import (
|
||||||
peer_store,
|
peer_store,
|
||||||
peer_store_and_set,
|
peer_store_and_set,
|
||||||
peer_retrieve_by_prefix,
|
peer_retrieve_by_prefix,
|
||||||
|
leader_get,
|
||||||
)
|
)
|
||||||
|
|
||||||
from charmhelpers.contrib.network.ip import get_address_in_network
|
from charmhelpers.contrib.network.ip import get_address_in_network
|
||||||
|
@ -81,14 +95,54 @@ RABBIT_DIR = '/var/lib/rabbitmq'
|
||||||
RABBIT_USER = 'rabbitmq'
|
RABBIT_USER = 'rabbitmq'
|
||||||
RABBIT_GROUP = 'rabbitmq'
|
RABBIT_GROUP = 'rabbitmq'
|
||||||
NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
|
NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
|
||||||
|
SCRIPTS_DIR = '/usr/local/bin'
|
||||||
|
STATS_CRONFILE = '/etc/cron.d/rabbitmq-stats'
|
||||||
|
STATS_DATAFILE = os.path.join(RABBIT_DIR, 'data',
|
||||||
|
'{}_queue_stats.dat'
|
||||||
|
''.format(socket.gethostname()))
|
||||||
|
|
||||||
|
|
||||||
@hooks.hook('install')
|
@hooks.hook('install.real')
|
||||||
def install():
|
def install():
|
||||||
pre_install_hooks()
|
pre_install_hooks()
|
||||||
# NOTE(jamespage) install actually happens in config_changed hook
|
# NOTE(jamespage) install actually happens in config_changed hook
|
||||||
|
|
||||||
|
|
||||||
|
def get_local_nodename():
|
||||||
|
'''Resolve local nodename into something that's universally addressable'''
|
||||||
|
ip_addr = get_host_ip(unit_get('private-address'))
|
||||||
|
log('getting local nodename for ip address: %s' % ip_addr, level=INFO)
|
||||||
|
try:
|
||||||
|
nodename = get_hostname(ip_addr, fqdn=False)
|
||||||
|
except:
|
||||||
|
log('Cannot resolve hostname for %s using DNS servers' % ip_addr,
|
||||||
|
level='WARNING')
|
||||||
|
log('Falling back to use socket.gethostname()',
|
||||||
|
level='WARNING')
|
||||||
|
# If the private-address is not resolvable using DNS
|
||||||
|
# then use the current hostname
|
||||||
|
nodename = socket.gethostname()
|
||||||
|
log('local nodename: %s' % nodename, level=INFO)
|
||||||
|
return nodename
|
||||||
|
|
||||||
|
|
||||||
|
def configure_nodename():
|
||||||
|
'''Set RABBITMQ_NODENAME to something that's resolvable by my peers'''
|
||||||
|
nodename = get_local_nodename()
|
||||||
|
log('configuring nodename', level=INFO)
|
||||||
|
if (nodename and
|
||||||
|
rabbit.get_node_name() != 'rabbit@%s' % nodename):
|
||||||
|
log('forcing nodename=%s' % nodename, level=INFO)
|
||||||
|
# would like to have used the restart_on_change decorator, but
|
||||||
|
# need to stop it under current nodename prior to updating env
|
||||||
|
log('Stopping rabbitmq-server.')
|
||||||
|
service_stop('rabbitmq-server')
|
||||||
|
rabbit.update_rmq_env_conf(hostname='rabbit@%s' % nodename,
|
||||||
|
ipv6=config('prefer-ipv6'))
|
||||||
|
log('Starting rabbitmq-server.')
|
||||||
|
service_restart('rabbitmq-server')
|
||||||
|
|
||||||
|
|
||||||
def configure_amqp(username, vhost, admin=False):
|
def configure_amqp(username, vhost, admin=False):
|
||||||
# get and update service password
|
# get and update service password
|
||||||
password = rabbit.get_rabbit_password(username)
|
password = rabbit.get_rabbit_password(username)
|
||||||
|
@ -115,6 +169,9 @@ def amqp_changed(relation_id=None, remote_unit=None):
|
||||||
host_addr = unit_get('private-address')
|
host_addr = unit_get('private-address')
|
||||||
|
|
||||||
if not is_elected_leader('res_rabbitmq_vip'):
|
if not is_elected_leader('res_rabbitmq_vip'):
|
||||||
|
# NOTE(jamespage) clear relation to deal with data being
|
||||||
|
# removed from peer storage
|
||||||
|
relation_clear(relation_id)
|
||||||
# Each unit needs to set the db information otherwise if the unit
|
# Each unit needs to set the db information otherwise if the unit
|
||||||
# with the info dies the settings die with it Bug# 1355848
|
# with the info dies the settings die with it Bug# 1355848
|
||||||
exc_list = ['hostname', 'private-address']
|
exc_list = ['hostname', 'private-address']
|
||||||
|
@ -195,13 +252,40 @@ def amqp_changed(relation_id=None, remote_unit=None):
|
||||||
relation_settings=relation_settings)
|
relation_settings=relation_settings)
|
||||||
|
|
||||||
|
|
||||||
|
def is_sufficient_peers():
|
||||||
|
"""If min-cluster-size has been provided, check that we have sufficient
|
||||||
|
number of peers to proceed with creating rabbitmq cluster.
|
||||||
|
"""
|
||||||
|
min_size = config('min-cluster-size')
|
||||||
|
if min_size:
|
||||||
|
size = 0
|
||||||
|
for rid in relation_ids('cluster'):
|
||||||
|
size = len(related_units(rid))
|
||||||
|
|
||||||
|
# Include this unit
|
||||||
|
size += 1
|
||||||
|
if min_size > size:
|
||||||
|
log("Insufficient number of peer units to form cluster "
|
||||||
|
"(expected=%s, got=%s)" % (min_size, size), level=INFO)
|
||||||
|
return False
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
@hooks.hook('cluster-relation-joined')
|
@hooks.hook('cluster-relation-joined')
|
||||||
def cluster_joined(relation_id=None):
|
def cluster_joined(relation_id=None):
|
||||||
|
relation_settings = {
|
||||||
|
'hostname': get_local_nodename(),
|
||||||
|
}
|
||||||
|
|
||||||
if config('prefer-ipv6'):
|
if config('prefer-ipv6'):
|
||||||
relation_settings = {'hostname': socket.gethostname(),
|
relation_settings['private-address'] = get_ipv6_addr()[0]
|
||||||
'private-address': get_ipv6_addr()[0]}
|
else:
|
||||||
relation_set(relation_id=relation_id,
|
relation_settings['private-address'] = get_host_ip(
|
||||||
relation_settings=relation_settings)
|
unit_get('private-address'))
|
||||||
|
|
||||||
|
relation_set(relation_id=relation_id,
|
||||||
|
relation_settings=relation_settings)
|
||||||
|
|
||||||
if is_relation_made('ha') and \
|
if is_relation_made('ha') and \
|
||||||
config('ha-vip-only') is False:
|
config('ha-vip-only') is False:
|
||||||
|
@ -209,60 +293,60 @@ def cluster_joined(relation_id=None):
|
||||||
'rabbitmq cluster config.')
|
'rabbitmq cluster config.')
|
||||||
return
|
return
|
||||||
|
|
||||||
# Set RABBITMQ_NODENAME to something that's resolvable by my peers
|
configure_nodename()
|
||||||
# get_host_ip() is called to sanitize private-address in case it
|
|
||||||
# doesn't return an IP address
|
|
||||||
ip_addr = get_host_ip(unit_get('private-address'))
|
|
||||||
try:
|
try:
|
||||||
nodename = get_hostname(ip_addr, fqdn=False)
|
if not is_leader():
|
||||||
except:
|
log('Not the leader, deferring cookie propagation to leader')
|
||||||
log('Cannot resolve hostname for %s using DNS servers' % ip_addr,
|
return
|
||||||
level='WARNING')
|
except NotImplementedError:
|
||||||
log('Falling back to use socket.gethostname()',
|
if is_newer():
|
||||||
level='WARNING')
|
log('cluster_joined: Relation greater.')
|
||||||
# If the private-address is not resolvable using DNS
|
return
|
||||||
# then use the current hostname
|
|
||||||
nodename = socket.gethostname()
|
|
||||||
|
|
||||||
if nodename and rabbit.get_node_name() != nodename:
|
|
||||||
log('forcing nodename=%s' % nodename)
|
|
||||||
# would like to have used the restart_on_change decorator, but
|
|
||||||
# need to stop it under current nodename prior to updating env
|
|
||||||
service_stop('rabbitmq-server')
|
|
||||||
rabbit.update_rmq_env_conf(hostname='rabbit@%s' % nodename,
|
|
||||||
ipv6=config('prefer-ipv6'))
|
|
||||||
service_restart('rabbitmq-server')
|
|
||||||
|
|
||||||
if is_newer():
|
|
||||||
log('cluster_joined: Relation greater.')
|
|
||||||
return
|
|
||||||
|
|
||||||
if not os.path.isfile(rabbit.COOKIE_PATH):
|
if not os.path.isfile(rabbit.COOKIE_PATH):
|
||||||
log('erlang cookie missing from %s' % rabbit.COOKIE_PATH,
|
log('erlang cookie missing from %s' % rabbit.COOKIE_PATH,
|
||||||
level=ERROR)
|
level=ERROR)
|
||||||
return
|
return
|
||||||
cookie = open(rabbit.COOKIE_PATH, 'r').read().strip()
|
|
||||||
peer_store('cookie', cookie)
|
if not is_sufficient_peers():
|
||||||
|
return
|
||||||
|
|
||||||
|
if is_elected_leader('res_rabbitmq_vip'):
|
||||||
|
log('Leader peer_storing cookie', level=INFO)
|
||||||
|
cookie = open(rabbit.COOKIE_PATH, 'r').read().strip()
|
||||||
|
peer_store('cookie', cookie)
|
||||||
|
|
||||||
|
|
||||||
@hooks.hook('cluster-relation-changed')
|
@hooks.hook('cluster-relation-changed')
|
||||||
def cluster_changed():
|
def cluster_changed():
|
||||||
|
# Future travelers beware ordering is significant
|
||||||
rdata = relation_get()
|
rdata = relation_get()
|
||||||
if 'cookie' not in rdata:
|
|
||||||
log('cluster_joined: cookie not yet set.', level=INFO)
|
|
||||||
return
|
|
||||||
|
|
||||||
if config('prefer-ipv6') and rdata.get('hostname'):
|
|
||||||
private_address = rdata['private-address']
|
|
||||||
hostname = rdata['hostname']
|
|
||||||
if hostname:
|
|
||||||
rabbit.update_hosts_file({private_address: hostname})
|
|
||||||
|
|
||||||
# sync passwords
|
# sync passwords
|
||||||
blacklist = ['hostname', 'private-address', 'public-address']
|
blacklist = ['hostname', 'private-address', 'public-address']
|
||||||
whitelist = [a for a in rdata.keys() if a not in blacklist]
|
whitelist = [a for a in rdata.keys() if a not in blacklist]
|
||||||
peer_echo(includes=whitelist)
|
peer_echo(includes=whitelist)
|
||||||
|
|
||||||
|
cookie = peer_retrieve('cookie')
|
||||||
|
if not cookie:
|
||||||
|
log('cluster_joined: cookie not yet set.', level=INFO)
|
||||||
|
return
|
||||||
|
|
||||||
|
rdata = relation_get()
|
||||||
|
if rdata:
|
||||||
|
hostname = rdata.get('hostname', None)
|
||||||
|
private_address = rdata.get('private-address', None)
|
||||||
|
|
||||||
|
if hostname and private_address:
|
||||||
|
rabbit.update_hosts_file({private_address: hostname})
|
||||||
|
|
||||||
|
if not is_sufficient_peers():
|
||||||
|
# Stop rabbit until leader has finished configuring
|
||||||
|
log('Not enough peers, stopping until leader is configured',
|
||||||
|
level=INFO)
|
||||||
|
service_stop('rabbitmq-server')
|
||||||
|
return
|
||||||
|
|
||||||
# sync the cookie with peers if necessary
|
# sync the cookie with peers if necessary
|
||||||
update_cookie()
|
update_cookie()
|
||||||
|
|
||||||
|
@ -272,11 +356,16 @@ def cluster_changed():
|
||||||
'rabbitmq cluster config.', level=INFO)
|
'rabbitmq cluster config.', level=INFO)
|
||||||
return
|
return
|
||||||
|
|
||||||
# cluster with node
|
# cluster with node?
|
||||||
if is_newer():
|
try:
|
||||||
if rabbit.cluster_with():
|
if not is_leader():
|
||||||
# resync nrpe user after clustering
|
rabbit.cluster_with()
|
||||||
update_nrpe_checks()
|
update_nrpe_checks()
|
||||||
|
except NotImplementedError:
|
||||||
|
if is_newer():
|
||||||
|
rabbit.cluster_with()
|
||||||
|
update_nrpe_checks()
|
||||||
|
|
||||||
# If cluster has changed peer db may have changed so run amqp_changed
|
# If cluster has changed peer db may have changed so run amqp_changed
|
||||||
# to sync any changes
|
# to sync any changes
|
||||||
for rid in relation_ids('amqp'):
|
for rid in relation_ids('amqp'):
|
||||||
|
@ -284,9 +373,12 @@ def cluster_changed():
|
||||||
amqp_changed(relation_id=rid, remote_unit=unit)
|
amqp_changed(relation_id=rid, remote_unit=unit)
|
||||||
|
|
||||||
|
|
||||||
def update_cookie():
|
def update_cookie(leaders_cookie=None):
|
||||||
# sync cookie
|
# sync cookie
|
||||||
cookie = peer_retrieve('cookie')
|
if leaders_cookie:
|
||||||
|
cookie = leaders_cookie
|
||||||
|
else:
|
||||||
|
cookie = peer_retrieve('cookie')
|
||||||
cookie_local = None
|
cookie_local = None
|
||||||
with open(rabbit.COOKIE_PATH, 'r') as f:
|
with open(rabbit.COOKIE_PATH, 'r') as f:
|
||||||
cookie_local = f.read().strip()
|
cookie_local = f.read().strip()
|
||||||
|
@ -459,6 +551,17 @@ def update_nrpe_checks():
|
||||||
rsync(os.path.join(os.getenv('CHARM_DIR'), 'scripts',
|
rsync(os.path.join(os.getenv('CHARM_DIR'), 'scripts',
|
||||||
'check_rabbitmq.py'),
|
'check_rabbitmq.py'),
|
||||||
os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq.py'))
|
os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq.py'))
|
||||||
|
rsync(os.path.join(os.getenv('CHARM_DIR'), 'scripts',
|
||||||
|
'check_rabbitmq_queues.py'),
|
||||||
|
os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq_queues.py'))
|
||||||
|
if config('stats_cron_schedule'):
|
||||||
|
script = os.path.join(SCRIPTS_DIR, 'collect_rabbitmq_stats.sh')
|
||||||
|
cronjob = "{} root {}\n".format(config('stats_cron_schedule'), script)
|
||||||
|
rsync(os.path.join(charm_dir(), 'scripts',
|
||||||
|
'collect_rabbitmq_stats.sh'), script)
|
||||||
|
write_file(STATS_CRONFILE, cronjob)
|
||||||
|
elif os.path.isfile(STATS_CRONFILE):
|
||||||
|
os.remove(STATS_CRONFILE)
|
||||||
|
|
||||||
# Find out if nrpe set nagios_hostname
|
# Find out if nrpe set nagios_hostname
|
||||||
hostname = nrpe.get_nagios_hostname()
|
hostname = nrpe.get_nagios_hostname()
|
||||||
|
@ -468,7 +571,7 @@ def update_nrpe_checks():
|
||||||
current_unit = local_unit().replace('/', '-')
|
current_unit = local_unit().replace('/', '-')
|
||||||
user = 'nagios-%s' % current_unit
|
user = 'nagios-%s' % current_unit
|
||||||
vhost = 'nagios-%s' % current_unit
|
vhost = 'nagios-%s' % current_unit
|
||||||
password = rabbit.get_rabbit_password(user)
|
password = rabbit.get_rabbit_password(user, local=True)
|
||||||
|
|
||||||
rabbit.create_vhost(vhost)
|
rabbit.create_vhost(vhost)
|
||||||
rabbit.create_user(user, password)
|
rabbit.create_user(user, password)
|
||||||
|
@ -481,6 +584,17 @@ def update_nrpe_checks():
|
||||||
check_cmd='{}/check_rabbitmq.py --user {} --password {} --vhost {}'
|
check_cmd='{}/check_rabbitmq.py --user {} --password {} --vhost {}'
|
||||||
''.format(NAGIOS_PLUGINS, user, password, vhost)
|
''.format(NAGIOS_PLUGINS, user, password, vhost)
|
||||||
)
|
)
|
||||||
|
if config('queue_thresholds'):
|
||||||
|
cmd = ""
|
||||||
|
# If value of queue_thresholds is incorrect we want the hook to fail
|
||||||
|
for item in yaml.safe_load(config('queue_thresholds')):
|
||||||
|
cmd += ' -c "{}" "{}" {} {}'.format(*item)
|
||||||
|
nrpe_compat.add_check(
|
||||||
|
shortname=rabbit.RABBIT_USER + '_queue',
|
||||||
|
description='Check RabbitMQ Queues',
|
||||||
|
check_cmd='{}/check_rabbitmq_queues.py{} {}'.format(
|
||||||
|
NAGIOS_PLUGINS, cmd, STATS_DATAFILE)
|
||||||
|
)
|
||||||
nrpe_compat.write()
|
nrpe_compat.write()
|
||||||
|
|
||||||
|
|
||||||
|
@ -501,7 +615,8 @@ def upgrade_charm():
|
||||||
log('upgrade_charm: Migrating stored passwd'
|
log('upgrade_charm: Migrating stored passwd'
|
||||||
' from %s to %s.' % (s, d))
|
' from %s to %s.' % (s, d))
|
||||||
shutil.move(s, d)
|
shutil.move(s, d)
|
||||||
rabbit.migrate_passwords_to_peer_relation()
|
if is_elected_leader('res_rabbitmq_vip'):
|
||||||
|
rabbit.migrate_passwords_to_peer_relation()
|
||||||
|
|
||||||
# explicitly update buggy file name naigos.passwd
|
# explicitly update buggy file name naigos.passwd
|
||||||
old = os.path.join('var/lib/rabbitmq', 'naigos.passwd')
|
old = os.path.join('var/lib/rabbitmq', 'naigos.passwd')
|
||||||
|
@ -536,8 +651,7 @@ def config_changed():
|
||||||
chown(RABBIT_DIR, rabbit.RABBIT_USER, rabbit.RABBIT_USER)
|
chown(RABBIT_DIR, rabbit.RABBIT_USER, rabbit.RABBIT_USER)
|
||||||
chmod(RABBIT_DIR, 0o775)
|
chmod(RABBIT_DIR, 0o775)
|
||||||
|
|
||||||
if config('prefer-ipv6'):
|
configure_nodename()
|
||||||
rabbit.update_rmq_env_conf(ipv6=config('prefer-ipv6'))
|
|
||||||
|
|
||||||
if config('management_plugin') is True:
|
if config('management_plugin') is True:
|
||||||
rabbit.enable_plugin(MAN_PLUGIN)
|
rabbit.enable_plugin(MAN_PLUGIN)
|
||||||
|
@ -572,6 +686,27 @@ def config_changed():
|
||||||
amqp_changed(relation_id=rid, remote_unit=unit)
|
amqp_changed(relation_id=rid, remote_unit=unit)
|
||||||
|
|
||||||
|
|
||||||
|
@hooks.hook('leader-settings-changed')
|
||||||
|
def leader_settings_changed():
|
||||||
|
# Get cookie from leader, update cookie locally and
|
||||||
|
# force cluster-relation-changed hooks to run on peers
|
||||||
|
cookie = leader_get(attribute='cookie')
|
||||||
|
if cookie:
|
||||||
|
update_cookie(leaders_cookie=cookie)
|
||||||
|
# Force cluster-relation-changed hooks to run on peers
|
||||||
|
# This will precipitate peer clustering
|
||||||
|
# Without this a chicken and egg scenario prevails when
|
||||||
|
# using LE and peerstorage
|
||||||
|
for rid in relation_ids('cluster'):
|
||||||
|
relation_set(relation_id=rid, relation_settings={'cookie': cookie})
|
||||||
|
|
||||||
|
# If leader has changed and access credentials, ripple these
|
||||||
|
# out from all units
|
||||||
|
for rid in relation_ids('amqp'):
|
||||||
|
for unit in related_units(rid):
|
||||||
|
amqp_changed(relation_id=rid, remote_unit=unit)
|
||||||
|
|
||||||
|
|
||||||
def pre_install_hooks():
|
def pre_install_hooks():
|
||||||
for f in glob.glob('exec.d/*/charm-pre-install'):
|
for f in glob.glob('exec.d/*/charm-pre-install'):
|
||||||
if os.path.isfile(f) and os.access(f, os.X_OK):
|
if os.path.isfile(f) and os.access(f, os.X_OK):
|
||||||
|
|
|
@ -5,7 +5,10 @@ description: |
|
||||||
RabbitMQ is an implementation of AMQP, the emerging standard for high
|
RabbitMQ is an implementation of AMQP, the emerging standard for high
|
||||||
performance enterprise messaging. The RabbitMQ server is a robust and
|
performance enterprise messaging. The RabbitMQ server is a robust and
|
||||||
scalable implementation of an AMQP broker.
|
scalable implementation of an AMQP broker.
|
||||||
categories: ["misc"]
|
tags:
|
||||||
|
- openstack
|
||||||
|
- amqp
|
||||||
|
- misc
|
||||||
provides:
|
provides:
|
||||||
amqp:
|
amqp:
|
||||||
interface: rabbitmq
|
interface: rabbitmq
|
||||||
|
|
|
@ -0,0 +1,99 @@
|
||||||
|
#!/usr/bin/python
|
||||||
|
|
||||||
|
# Copyright (C) 2011, 2012, 2014 Canonical
|
||||||
|
# All Rights Reserved
|
||||||
|
# Author: Liam Young, Jacek Nykis
|
||||||
|
|
||||||
|
from collections import defaultdict
|
||||||
|
from fnmatch import fnmatchcase
|
||||||
|
from itertools import chain
|
||||||
|
import argparse
|
||||||
|
import sys
|
||||||
|
|
||||||
|
def gen_data_lines(filename):
|
||||||
|
with open(filename, "rb") as fin:
|
||||||
|
for line in fin:
|
||||||
|
if not line.startswith("#"):
|
||||||
|
yield line
|
||||||
|
|
||||||
|
|
||||||
|
def gen_stats(data_lines):
|
||||||
|
for line in data_lines:
|
||||||
|
try:
|
||||||
|
vhost, queue, _, _, m_all, _ = line.split(None, 5)
|
||||||
|
except ValueError:
|
||||||
|
print "ERROR: problem parsing the stats file"
|
||||||
|
sys.exit(2)
|
||||||
|
assert m_all.isdigit(), "Message count is not a number: %r" % m_all
|
||||||
|
yield vhost, queue, int(m_all)
|
||||||
|
|
||||||
|
|
||||||
|
def collate_stats(stats, limits):
|
||||||
|
# Create a dict with stats collated according to the definitions in the
|
||||||
|
# limits file. If none of the definitions in the limits file is matched,
|
||||||
|
# store the stat without collating.
|
||||||
|
collated = defaultdict(lambda: 0)
|
||||||
|
for vhost, queue, m_all in stats:
|
||||||
|
for l_vhost, l_queue, _, _ in limits:
|
||||||
|
if fnmatchcase(vhost, l_vhost) and fnmatchcase(queue, l_queue):
|
||||||
|
collated[l_vhost, l_queue] += m_all
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
collated[vhost, queue] += m_all
|
||||||
|
return collated
|
||||||
|
|
||||||
|
|
||||||
|
def check_stats(stats_collated, limits):
|
||||||
|
# Create a limits lookup dict with keys of the form (vhost, queue).
|
||||||
|
limits_lookup = dict(
|
||||||
|
((l_vhost, l_queue), (int(t_warning), int(t_critical)))
|
||||||
|
for l_vhost, l_queue, t_warning, t_critical in limits)
|
||||||
|
if not (stats_collated):
|
||||||
|
yield 'No Queues Found', 'No Vhosts Found', None, "CRIT"
|
||||||
|
# Go through the stats and compare again limits, if any.
|
||||||
|
for l_vhost, l_queue in sorted(stats_collated):
|
||||||
|
m_all = stats_collated[l_vhost, l_queue]
|
||||||
|
try:
|
||||||
|
t_warning, t_critical = limits_lookup[l_vhost, l_queue]
|
||||||
|
except KeyError:
|
||||||
|
yield l_queue, l_vhost, m_all, "UNKNOWN"
|
||||||
|
else:
|
||||||
|
if m_all >= t_critical:
|
||||||
|
yield l_queue, l_vhost, m_all, "CRIT"
|
||||||
|
elif m_all >= t_warning:
|
||||||
|
yield l_queue, l_vhost, m_all, "WARN"
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
parser = argparse.ArgumentParser(description='RabbitMQ queue size nagios check.')
|
||||||
|
parser.add_argument('-c', nargs=4, action='append', required=True,
|
||||||
|
metavar=('vhost', 'queue', 'warn', 'crit'),
|
||||||
|
help=('Vhost and queue to check. Can be used multiple times'))
|
||||||
|
parser.add_argument('stats_file', nargs='*', type=str, help='file containing queue stats')
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
# Start generating stats from all files given on the command line.
|
||||||
|
stats = gen_stats(
|
||||||
|
chain.from_iterable(
|
||||||
|
gen_data_lines(filename) for filename in args.stats_file))
|
||||||
|
# Collate stats according to limit definitions and check.
|
||||||
|
stats_collated = collate_stats(stats, args.c)
|
||||||
|
stats_checked = check_stats(stats_collated, args.c)
|
||||||
|
criticals, warnings = [], []
|
||||||
|
for queue, vhost, message_no, status in stats_checked:
|
||||||
|
if status == "CRIT":
|
||||||
|
criticals.append(
|
||||||
|
"%s in %s has %s messages" % (queue, vhost, message_no))
|
||||||
|
elif status == "WARN":
|
||||||
|
warnings.append(
|
||||||
|
"%s in %s has %s messages" % (queue, vhost, message_no))
|
||||||
|
if len(criticals) > 0:
|
||||||
|
print "CRITICALS: %s" % ", ".join(criticals)
|
||||||
|
sys.exit(2)
|
||||||
|
# XXX: No warnings if there are criticals?
|
||||||
|
elif len(warnings) > 0:
|
||||||
|
print "WARNINGS: %s" % ", ".join(warnings)
|
||||||
|
sys.exit(1)
|
||||||
|
else:
|
||||||
|
print "OK"
|
||||||
|
sys.exit(0)
|
|
@ -0,0 +1,52 @@
|
||||||
|
#!/bin/bash
|
||||||
|
# Copyright (C) 2011, 2014 Canonical
|
||||||
|
# All Rights Reserved
|
||||||
|
# Author: Liam Young, Jacek Nykis
|
||||||
|
|
||||||
|
# Produce a queue data for a given vhost. Useful for graphing and Nagios checks
|
||||||
|
LOCK=/var/lock/rabbitmq-gather-metrics.lock
|
||||||
|
# Check for a lock file and if not, create one
|
||||||
|
lockfile-create -r2 --lock-name $LOCK > /dev/null 2>&1
|
||||||
|
if [ $? -ne 0 ]; then
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
trap "rm -f $LOCK > /dev/null 2>&1" exit
|
||||||
|
|
||||||
|
# Required to fix the bug about start-stop-daemon not being found in
|
||||||
|
# rabbitmq-server 2.7.1-0ubuntu4.
|
||||||
|
# '/usr/sbin/rabbitmqctl: 33: /usr/sbin/rabbitmqctl: start-stop-daemon: not found'
|
||||||
|
export PATH=${PATH}:/sbin/
|
||||||
|
|
||||||
|
if [ -f /var/lib/rabbitmq/pids ]; then
|
||||||
|
RABBIT_PID=$(grep "{rabbit\@${HOSTNAME}," /var/lib/rabbitmq/pids | sed -e 's!^.*,\([0-9]*\).*!\1!')
|
||||||
|
elif [ -f /var/run/rabbitmq/pid ]; then
|
||||||
|
RABBIT_PID=$(cat /var/run/rabbitmq/pid)
|
||||||
|
elif [ -f /var/lib/rabbitmq/mnesia/rabbit\@${HOSTNAME}.pid ]; then
|
||||||
|
# Vivid and later
|
||||||
|
RABBIT_PID=$(cat /var/lib/rabbitmq/mnesia/rabbit\@${HOSTNAME}.pid)
|
||||||
|
else
|
||||||
|
echo "No PID file found"
|
||||||
|
exit 3
|
||||||
|
fi
|
||||||
|
DATA_DIR="/var/lib/rabbitmq/data"
|
||||||
|
DATA_FILE="${DATA_DIR}/$(hostname -s)_queue_stats.dat"
|
||||||
|
LOG_DIR="/var/lib/rabbitmq/logs"
|
||||||
|
RABBIT_STATS_DATA_FILE="${DATA_DIR}/$(hostname -s)_general_stats.dat"
|
||||||
|
NOW=$(date +'%s')
|
||||||
|
HOSTNAME=$(hostname -s)
|
||||||
|
MNESIA_DB_SIZE=$(du -sm /var/lib/rabbitmq/mnesia | cut -f1)
|
||||||
|
RABBIT_RSS=$(ps -p $RABBIT_PID -o rss=)
|
||||||
|
if [ ! -d $DATA_DIR ]; then
|
||||||
|
mkdir -p $DATA_DIR
|
||||||
|
fi
|
||||||
|
if [ ! -d $LOG_DIR ]; then
|
||||||
|
mkdir -p $LOG_DIR
|
||||||
|
fi
|
||||||
|
echo "#Vhost Name Messages_ready Messages_unacknowledged Messages Consumers Memory Time" > $DATA_FILE
|
||||||
|
/usr/sbin/rabbitmqctl -q list_vhosts | \
|
||||||
|
while read VHOST; do
|
||||||
|
/usr/sbin/rabbitmqctl -q list_queues -p $VHOST name messages_ready messages_unacknowledged messages consumers memory | \
|
||||||
|
awk "{print \"$VHOST \" \$0 \" $(date +'%s') \"}" >> $DATA_FILE 2>${LOG_DIR}/list_queues.log
|
||||||
|
done
|
||||||
|
echo "mnesia_size: ${MNESIA_DB_SIZE}@$NOW" > $RABBIT_STATS_DATA_FILE
|
||||||
|
echo "rss_size: ${RABBIT_RSS}@$NOW" >> $RABBIT_STATS_DATA_FILE
|
|
@ -0,0 +1,16 @@
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -ex
|
||||||
|
|
||||||
|
sudo add-apt-repository --yes ppa:juju/stable
|
||||||
|
sudo apt-get update --yes
|
||||||
|
sudo apt-get install --yes python-amulet \
|
||||||
|
python-cinderclient \
|
||||||
|
python-distro-info \
|
||||||
|
python-glanceclient \
|
||||||
|
python-heatclient \
|
||||||
|
python-keystoneclient \
|
||||||
|
python-neutronclient \
|
||||||
|
python-novaclient \
|
||||||
|
python-pika \
|
||||||
|
python-swiftclient
|
|
@ -0,0 +1,11 @@
|
||||||
|
#!/usr/bin/python
|
||||||
|
|
||||||
|
"""Amulet tests on a basic rabbitmq-server deployment on precise-icehouse."""
|
||||||
|
|
||||||
|
from basic_deployment import RmqBasicDeployment
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
deployment = RmqBasicDeployment(series='precise',
|
||||||
|
openstack='cloud:precise-icehouse',
|
||||||
|
source='cloud:precise-updates/icehouse')
|
||||||
|
deployment.run_tests()
|
|
@ -0,0 +1,9 @@
|
||||||
|
#!/usr/bin/python
|
||||||
|
|
||||||
|
"""Amulet tests on a basic rabbitmq-server deployment on trusty-icehouse."""
|
||||||
|
|
||||||
|
from basic_deployment import RmqBasicDeployment
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
deployment = RmqBasicDeployment(series='trusty')
|
||||||
|
deployment.run_tests()
|
|
@ -0,0 +1,11 @@
|
||||||
|
#!/usr/bin/python
|
||||||
|
|
||||||
|
"""Amulet tests on a basic rabbitmq-server deployment on trusty-juno."""
|
||||||
|
|
||||||
|
from basic_deployment import RmqBasicDeployment
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
deployment = RmqBasicDeployment(series='trusty',
|
||||||
|
openstack='cloud:trusty-juno',
|
||||||
|
source='cloud:trusty-updates/juno')
|
||||||
|
deployment.run_tests()
|
|
@ -0,0 +1,11 @@
|
||||||
|
#!/usr/bin/python
|
||||||
|
|
||||||
|
"""Amulet tests on a basic rabbitmq-server deployment on trusty-kilo."""
|
||||||
|
|
||||||
|
from basic_deployment import RmqBasicDeployment
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
deployment = RmqBasicDeployment(series='trusty',
|
||||||
|
openstack='cloud:trusty-kilo',
|
||||||
|
source='cloud:trusty-updates/kilo')
|
||||||
|
deployment.run_tests()
|
|
@ -0,0 +1,9 @@
|
||||||
|
#!/usr/bin/python
|
||||||
|
|
||||||
|
"""Amulet tests on a basic rabbitmq-server deployment on vivid-kilo."""
|
||||||
|
|
||||||
|
from basic_deployment import RmqBasicDeployment
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
deployment = RmqBasicDeployment(series='vivid')
|
||||||
|
deployment.run_tests()
|
|
@ -0,0 +1,11 @@
|
||||||
|
#!/usr/bin/python
|
||||||
|
|
||||||
|
"""Amulet tests on a basic rabbitmq-server deployment on trusty-liberty."""
|
||||||
|
|
||||||
|
from basic_deployment import RmqBasicDeployment
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
deployment = RmqBasicDeployment(series='trusty',
|
||||||
|
openstack='cloud:trusty-liberty',
|
||||||
|
source='cloud:trusty-updates/liberty')
|
||||||
|
deployment.run_tests()
|
|
@ -0,0 +1,9 @@
|
||||||
|
#!/usr/bin/python
|
||||||
|
|
||||||
|
"""Amulet tests on a basic rabbitmq-server deployment on wily-liberty."""
|
||||||
|
|
||||||
|
from basic_deployment import RmqBasicDeployment
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
deployment = RmqBasicDeployment(series='wily')
|
||||||
|
deployment.run_tests()
|
|
@ -0,0 +1,492 @@
|
||||||
|
#!/usr/bin/python
|
||||||
|
"""
|
||||||
|
Basic 3-node rabbitmq-server native cluster + nrpe functional tests
|
||||||
|
|
||||||
|
Cinder is present to exercise and inspect amqp relation functionality.
|
||||||
|
|
||||||
|
Each individual test is idempotent, in that it creates/deletes
|
||||||
|
a rmq test user, enables or disables ssl as needed.
|
||||||
|
|
||||||
|
Test order is not required, however tests are numbered to keep
|
||||||
|
relevant tests grouped together in run order.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import amulet
|
||||||
|
import time
|
||||||
|
|
||||||
|
from charmhelpers.contrib.openstack.amulet.deployment import (
|
||||||
|
OpenStackAmuletDeployment
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.contrib.openstack.amulet.utils import (
|
||||||
|
OpenStackAmuletUtils,
|
||||||
|
DEBUG,
|
||||||
|
# ERROR
|
||||||
|
)
|
||||||
|
|
||||||
|
# Use DEBUG to turn on debug logging
|
||||||
|
u = OpenStackAmuletUtils(DEBUG)
|
||||||
|
|
||||||
|
|
||||||
|
class RmqBasicDeployment(OpenStackAmuletDeployment):
|
||||||
|
"""Amulet tests on a basic rabbitmq cluster deployment. Verify
|
||||||
|
relations, service status, users and endpoint service catalog."""
|
||||||
|
|
||||||
|
def __init__(self, series=None, openstack=None, source=None, stable=False):
|
||||||
|
"""Deploy the entire test environment."""
|
||||||
|
super(RmqBasicDeployment, self).__init__(series, openstack, source,
|
||||||
|
stable)
|
||||||
|
self._add_services()
|
||||||
|
self._add_relations()
|
||||||
|
self._configure_services()
|
||||||
|
self._deploy()
|
||||||
|
self._initialize_tests()
|
||||||
|
|
||||||
|
def _add_services(self):
|
||||||
|
"""Add services
|
||||||
|
|
||||||
|
Add the services that we're testing, where rmq is local,
|
||||||
|
and the rest of the service are from lp branches that are
|
||||||
|
compatible with the local charm (e.g. stable or next).
|
||||||
|
"""
|
||||||
|
this_service = {
|
||||||
|
'name': 'rabbitmq-server',
|
||||||
|
'units': 3
|
||||||
|
}
|
||||||
|
other_services = [{'name': 'cinder'},
|
||||||
|
{'name': 'nrpe'}]
|
||||||
|
|
||||||
|
super(RmqBasicDeployment, self)._add_services(this_service,
|
||||||
|
other_services)
|
||||||
|
|
||||||
|
def _add_relations(self):
|
||||||
|
"""Add relations for the services."""
|
||||||
|
relations = {'cinder:amqp': 'rabbitmq-server:amqp',
|
||||||
|
'nrpe:nrpe-external-master':
|
||||||
|
'rabbitmq-server:nrpe-external-master'}
|
||||||
|
|
||||||
|
super(RmqBasicDeployment, self)._add_relations(relations)
|
||||||
|
|
||||||
|
def _configure_services(self):
|
||||||
|
"""Configure all of the services."""
|
||||||
|
rmq_config = {
|
||||||
|
'min-cluster-size': '3',
|
||||||
|
'max-cluster-tries': '6',
|
||||||
|
'ssl': 'off',
|
||||||
|
'management_plugin': 'False',
|
||||||
|
'stats_cron_schedule': '*/1 * * * *'
|
||||||
|
}
|
||||||
|
cinder_config = {}
|
||||||
|
configs = {'rabbitmq-server': rmq_config,
|
||||||
|
'cinder': cinder_config}
|
||||||
|
super(RmqBasicDeployment, self)._configure_services(configs)
|
||||||
|
|
||||||
|
def _initialize_tests(self):
|
||||||
|
"""Perform final initialization before tests get run."""
|
||||||
|
# Access the sentries for inspecting service units
|
||||||
|
self.rmq0_sentry = self.d.sentry.unit['rabbitmq-server/0']
|
||||||
|
self.rmq1_sentry = self.d.sentry.unit['rabbitmq-server/1']
|
||||||
|
self.rmq2_sentry = self.d.sentry.unit['rabbitmq-server/2']
|
||||||
|
self.cinder_sentry = self.d.sentry.unit['cinder/0']
|
||||||
|
self.nrpe_sentry = self.d.sentry.unit['nrpe/0']
|
||||||
|
u.log.debug('openstack release val: {}'.format(
|
||||||
|
self._get_openstack_release()))
|
||||||
|
u.log.debug('openstack release str: {}'.format(
|
||||||
|
self._get_openstack_release_string()))
|
||||||
|
|
||||||
|
# Let things settle a bit before moving forward
|
||||||
|
time.sleep(30)
|
||||||
|
|
||||||
|
def _get_rmq_sentry_units(self):
|
||||||
|
"""Local helper specific to this 3-node rmq series of tests."""
|
||||||
|
return [self.rmq0_sentry,
|
||||||
|
self.rmq1_sentry,
|
||||||
|
self.rmq2_sentry]
|
||||||
|
|
||||||
|
def _test_rmq_amqp_messages_all_units(self, sentry_units,
|
||||||
|
ssl=False, port=None):
|
||||||
|
"""Reusable test to send amqp messages to every listed rmq unit
|
||||||
|
and check every listed rmq unit for messages.
|
||||||
|
|
||||||
|
:param sentry_units: list of sentry units
|
||||||
|
:returns: None if successful. Raise on error.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Add test user if it does not already exist
|
||||||
|
u.add_rmq_test_user(sentry_units)
|
||||||
|
|
||||||
|
# Handle ssl
|
||||||
|
if ssl:
|
||||||
|
u.configure_rmq_ssl_on(sentry_units, self.d, port=port)
|
||||||
|
else:
|
||||||
|
u.configure_rmq_ssl_off(sentry_units, self.d)
|
||||||
|
|
||||||
|
# Publish and get amqp messages in all possible unit combinations.
|
||||||
|
# Qty of checks == (qty of units) ^ 2
|
||||||
|
amqp_msg_counter = 1
|
||||||
|
host_names = u.get_unit_hostnames(sentry_units)
|
||||||
|
|
||||||
|
for dest_unit in sentry_units:
|
||||||
|
dest_unit_name = dest_unit.info['unit_name']
|
||||||
|
dest_unit_host = dest_unit.info['public-address']
|
||||||
|
dest_unit_host_name = host_names[dest_unit_name]
|
||||||
|
|
||||||
|
for check_unit in sentry_units:
|
||||||
|
check_unit_name = check_unit.info['unit_name']
|
||||||
|
check_unit_host = check_unit.info['public-address']
|
||||||
|
check_unit_host_name = host_names[check_unit_name]
|
||||||
|
|
||||||
|
amqp_msg_stamp = u.get_uuid_epoch_stamp()
|
||||||
|
amqp_msg = ('Message {}@{} {}'.format(amqp_msg_counter,
|
||||||
|
dest_unit_host,
|
||||||
|
amqp_msg_stamp)).upper()
|
||||||
|
# Publish amqp message
|
||||||
|
u.log.debug('Publish message to: {} '
|
||||||
|
'({} {})'.format(dest_unit_host,
|
||||||
|
dest_unit_name,
|
||||||
|
dest_unit_host_name))
|
||||||
|
|
||||||
|
u.publish_amqp_message_by_unit(dest_unit,
|
||||||
|
amqp_msg, ssl=ssl,
|
||||||
|
port=port)
|
||||||
|
|
||||||
|
# Wait a bit before checking for message
|
||||||
|
time.sleep(2)
|
||||||
|
|
||||||
|
# Get amqp message
|
||||||
|
u.log.debug('Get message from: {} '
|
||||||
|
'({} {})'.format(check_unit_host,
|
||||||
|
check_unit_name,
|
||||||
|
check_unit_host_name))
|
||||||
|
|
||||||
|
amqp_msg_rcvd = u.get_amqp_message_by_unit(check_unit,
|
||||||
|
ssl=ssl,
|
||||||
|
port=port)
|
||||||
|
|
||||||
|
# Validate amqp message content
|
||||||
|
if amqp_msg == amqp_msg_rcvd:
|
||||||
|
u.log.debug('Message {} received '
|
||||||
|
'OK.'.format(amqp_msg_counter))
|
||||||
|
else:
|
||||||
|
u.log.error('Expected: {}'.format(amqp_msg))
|
||||||
|
u.log.error('Actual: {}'.format(amqp_msg_rcvd))
|
||||||
|
msg = 'Message {} mismatch.'.format(amqp_msg_counter)
|
||||||
|
amulet.raise_status(amulet.FAIL, msg)
|
||||||
|
|
||||||
|
amqp_msg_counter += 1
|
||||||
|
|
||||||
|
# Delete the test user
|
||||||
|
u.delete_rmq_test_user(sentry_units)
|
||||||
|
|
||||||
|
def test_100_rmq_processes(self):
|
||||||
|
"""Verify that the expected service processes are running
|
||||||
|
on each rabbitmq-server unit."""
|
||||||
|
|
||||||
|
# Beam and epmd sometimes briefly have more than one PID,
|
||||||
|
# True checks for at least 1.
|
||||||
|
rmq_processes = {
|
||||||
|
'beam': True,
|
||||||
|
'epmd': True,
|
||||||
|
}
|
||||||
|
|
||||||
|
# Units with process names and PID quantities expected
|
||||||
|
expected_processes = {
|
||||||
|
self.rmq0_sentry: rmq_processes,
|
||||||
|
self.rmq1_sentry: rmq_processes,
|
||||||
|
self.rmq2_sentry: rmq_processes
|
||||||
|
}
|
||||||
|
|
||||||
|
actual_pids = u.get_unit_process_ids(expected_processes)
|
||||||
|
ret = u.validate_unit_process_ids(expected_processes, actual_pids)
|
||||||
|
if ret:
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=ret)
|
||||||
|
|
||||||
|
u.log.info('OK\n')
|
||||||
|
|
||||||
|
def test_102_services(self):
|
||||||
|
"""Verify that the expected services are running on the
|
||||||
|
corresponding service units."""
|
||||||
|
services = {
|
||||||
|
self.rmq0_sentry: ['rabbitmq-server'],
|
||||||
|
self.rmq1_sentry: ['rabbitmq-server'],
|
||||||
|
self.rmq2_sentry: ['rabbitmq-server'],
|
||||||
|
self.cinder_sentry: ['cinder-api',
|
||||||
|
'cinder-scheduler',
|
||||||
|
'cinder-volume'],
|
||||||
|
}
|
||||||
|
ret = u.validate_services_by_name(services)
|
||||||
|
if ret:
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=ret)
|
||||||
|
|
||||||
|
u.log.info('OK\n')
|
||||||
|
|
||||||
|
def test_200_rmq_cinder_amqp_relation(self):
|
||||||
|
"""Verify the rabbitmq-server:cinder amqp relation data"""
|
||||||
|
u.log.debug('Checking rmq:cinder amqp relation data...')
|
||||||
|
unit = self.rmq0_sentry
|
||||||
|
relation = ['amqp', 'cinder:amqp']
|
||||||
|
expected = {
|
||||||
|
'private-address': u.valid_ip,
|
||||||
|
'password': u.not_null,
|
||||||
|
'hostname': u.valid_ip
|
||||||
|
}
|
||||||
|
ret = u.validate_relation_data(unit, relation, expected)
|
||||||
|
if ret:
|
||||||
|
msg = u.relation_error('amqp cinder', ret)
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
|
u.log.info('OK\n')
|
||||||
|
|
||||||
|
def test_201_cinder_rmq_amqp_relation(self):
|
||||||
|
"""Verify the cinder:rabbitmq-server amqp relation data"""
|
||||||
|
u.log.debug('Checking cinder:rmq amqp relation data...')
|
||||||
|
unit = self.cinder_sentry
|
||||||
|
relation = ['amqp', 'rabbitmq-server:amqp']
|
||||||
|
expected = {
|
||||||
|
'private-address': u.valid_ip,
|
||||||
|
'vhost': 'openstack',
|
||||||
|
'username': u.not_null
|
||||||
|
}
|
||||||
|
ret = u.validate_relation_data(unit, relation, expected)
|
||||||
|
if ret:
|
||||||
|
msg = u.relation_error('cinder amqp', ret)
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
|
u.log.info('OK\n')
|
||||||
|
|
||||||
|
def test_202_rmq_nrpe_ext_master_relation(self):
|
||||||
|
"""Verify rabbitmq-server:nrpe nrpe-external-master relation data"""
|
||||||
|
u.log.debug('Checking rmq:nrpe external master relation data...')
|
||||||
|
unit = self.rmq0_sentry
|
||||||
|
relation = ['nrpe-external-master',
|
||||||
|
'nrpe:nrpe-external-master']
|
||||||
|
|
||||||
|
mon_sub = ('monitors:\n remote:\n nrpe:\n rabbitmq: '
|
||||||
|
'{command: check_rabbitmq}\n rabbitmq_queue: '
|
||||||
|
'{command: check_rabbitmq_queue}\n')
|
||||||
|
|
||||||
|
expected = {
|
||||||
|
'private-address': u.valid_ip,
|
||||||
|
'monitors': mon_sub
|
||||||
|
}
|
||||||
|
|
||||||
|
ret = u.validate_relation_data(unit, relation, expected)
|
||||||
|
if ret:
|
||||||
|
msg = u.relation_error('amqp nrpe', ret)
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
|
u.log.info('OK\n')
|
||||||
|
|
||||||
|
def test_203_nrpe_rmq_ext_master_relation(self):
|
||||||
|
"""Verify nrpe:rabbitmq-server nrpe-external-master relation data"""
|
||||||
|
u.log.debug('Checking nrpe:rmq external master relation data...')
|
||||||
|
unit = self.nrpe_sentry
|
||||||
|
relation = ['nrpe-external-master',
|
||||||
|
'rabbitmq-server:nrpe-external-master']
|
||||||
|
|
||||||
|
expected = {
|
||||||
|
'private-address': u.valid_ip
|
||||||
|
}
|
||||||
|
|
||||||
|
ret = u.validate_relation_data(unit, relation, expected)
|
||||||
|
if ret:
|
||||||
|
msg = u.relation_error('nrpe amqp', ret)
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
|
u.log.info('OK\n')
|
||||||
|
|
||||||
|
def test_300_rmq_config(self):
|
||||||
|
"""Verify the data in the rabbitmq conf file."""
|
||||||
|
conf = '/etc/rabbitmq/rabbitmq-env.conf'
|
||||||
|
sentry_units = self._get_rmq_sentry_units()
|
||||||
|
for unit in sentry_units:
|
||||||
|
host_name = unit.file_contents('/etc/hostname').strip()
|
||||||
|
u.log.debug('Checking rabbitmq config file data on '
|
||||||
|
'{} ({})...'.format(unit.info['unit_name'],
|
||||||
|
host_name))
|
||||||
|
expected = {
|
||||||
|
'RABBITMQ_NODENAME': 'rabbit@{}'.format(host_name)
|
||||||
|
}
|
||||||
|
|
||||||
|
file_contents = unit.file_contents(conf)
|
||||||
|
u.validate_sectionless_conf(file_contents, expected)
|
||||||
|
|
||||||
|
u.log.info('OK\n')
|
||||||
|
|
||||||
|
def test_400_rmq_cluster_running_nodes(self):
    """Verify that cluster status from each rmq juju unit shows
    every cluster node as a running member in that cluster."""
    u.log.debug('Checking that all units are in cluster_status '
                'running nodes...')

    sentry_units = self._get_rmq_sentry_units()

    # Helper returns None on success, a failure message otherwise.
    ret = u.validate_rmq_cluster_running_nodes(sentry_units)
    if ret:
        amulet.raise_status(amulet.FAIL, msg=ret)

    u.log.info('OK\n')
|
||||||
|
|
||||||
|
def test_402_rmq_connect_with_ssl_off(self):
    """Verify successful non-ssl amqp connection to all units when
    charm config option for ssl is set False."""
    u.log.debug('Confirming that non-ssl connection succeeds when '
                'ssl config is off...')
    sentry_units = self._get_rmq_sentry_units()
    u.add_rmq_test_user(sentry_units)
    u.configure_rmq_ssl_off(sentry_units, self.d)

    # Check amqp connection for all units, expect connections to succeed
    for unit in sentry_units:
        connection = u.connect_amqp_by_unit(unit, ssl=False, fatal=False)
        if not connection:
            # With fatal=False a failed connect returns a falsy value;
            # fail the test explicitly instead of letting the close()
            # below raise AttributeError on None.
            msg = ('amqp connection unexpectedly failed with ssl=off '
                   'on {}'.format(unit.info['unit_name']))
            amulet.raise_status(amulet.FAIL, msg=msg)
        connection.close()

    u.delete_rmq_test_user(sentry_units)
    u.log.info('OK\n')
|
||||||
|
|
||||||
|
def test_404_rmq_ssl_connect_with_ssl_off(self):
    """Verify unsuccessful ssl amqp connection to all units when
    charm config option for ssl is set False."""
    u.log.debug('Confirming that ssl connection fails when ssl '
                'config is off...')
    sentry_units = self._get_rmq_sentry_units()
    u.add_rmq_test_user(sentry_units)
    u.configure_rmq_ssl_off(sentry_units, self.d)

    # Check ssl amqp connection for all units, expect connections to fail
    # NOTE(review): port 5971 is not the conventional amqps port (5671) --
    # presumably deliberate for this negative test, but confirm.
    for unit in sentry_units:
        connection = u.connect_amqp_by_unit(unit, ssl=True,
                                            port=5971, fatal=False)
        if connection:
            connection.close()
            msg = 'SSL connection unexpectedly succeeded with ssl=off'
            amulet.raise_status(amulet.FAIL, msg)

    u.delete_rmq_test_user(sentry_units)
    u.log.info('OK - Confirmed that ssl connection attempt fails '
               'when ssl config is off.')
|
||||||
|
|
||||||
|
def test_406_rmq_amqp_messages_all_units_ssl_off(self):
    """Send amqp messages to every rmq unit and check every rmq unit
    for messages. Standard amqp tcp port, no ssl."""
    u.log.debug('Checking amqp message publish/get on all units '
                '(ssl off)...')

    sentry_units = self._get_rmq_sentry_units()
    # Delegates the publish/get round-trip to the shared helper.
    self._test_rmq_amqp_messages_all_units(sentry_units, ssl=False)
    u.log.info('OK\n')
|
||||||
|
|
||||||
|
def test_408_rmq_amqp_messages_all_units_ssl_on(self):
    """Send amqp messages with ssl enabled, to every rmq unit and
    check every rmq unit for messages. Standard ssl tcp port."""
    u.log.debug('Checking amqp message publish/get on all units '
                '(ssl on)...')

    sentry_units = self._get_rmq_sentry_units()
    # 5671 is the conventional amqps (ssl) port.
    self._test_rmq_amqp_messages_all_units(sentry_units,
                                           ssl=True, port=5671)
    u.log.info('OK\n')
|
||||||
|
|
||||||
|
def test_410_rmq_amqp_messages_all_units_ssl_alt_port(self):
    """Send amqp messages with ssl on, to every rmq unit and check
    every rmq unit for messages. Custom ssl tcp port."""
    u.log.debug('Checking amqp message publish/get on all units '
                '(ssl on)...')

    sentry_units = self._get_rmq_sentry_units()
    # Same round-trip as the ssl-on test, but on a non-standard port.
    self._test_rmq_amqp_messages_all_units(sentry_units,
                                           ssl=True, port=5999)
    u.log.info('OK\n')
|
||||||
|
|
||||||
|
def test_412_rmq_management_plugin(self):
    """Enable and check management plugin."""
    u.log.debug('Checking tcp socket connect to management plugin '
                'port on all rmq units...')

    sentry_units = self._get_rmq_sentry_units()
    mgmt_port = 15672

    # Enable management plugin
    u.log.debug('Enabling management_plugin charm config option...')
    config = {'management_plugin': 'True'}
    self.d.configure('rabbitmq-server', config)

    # Check tcp connect to management plugin port.  Poll because the
    # config change is applied asynchronously by the units.
    max_wait = 120
    tries = 0
    ret = u.port_knock_units(sentry_units, mgmt_port)
    while ret and tries < (max_wait / 12):
        time.sleep(12)
        u.log.debug('Attempt {}: {}'.format(tries, ret))
        ret = u.port_knock_units(sentry_units, mgmt_port)
        tries += 1

    if ret:
        amulet.raise_status(amulet.FAIL, ret)
    else:
        u.log.debug('Connect to all units (OK)\n')

    # Disable management plugin
    u.log.debug('Disabling management_plugin charm config option...')
    config = {'management_plugin': 'False'}
    self.d.configure('rabbitmq-server', config)

    # Negative check - tcp connect to management plugin port; the same
    # polling loop, but now expecting the port to be closed everywhere.
    u.log.info('Expect tcp connect fail since charm config '
               'option is disabled.')
    tries = 0
    ret = u.port_knock_units(sentry_units, mgmt_port, expect_success=False)
    while ret and tries < (max_wait / 12):
        time.sleep(12)
        u.log.debug('Attempt {}: {}'.format(tries, ret))
        ret = u.port_knock_units(sentry_units, mgmt_port,
                                 expect_success=False)
        tries += 1

    if ret:
        amulet.raise_status(amulet.FAIL, ret)
    else:
        u.log.info('Confirm mgmt port closed on all units (OK)\n')
|
||||||
|
|
||||||
|
def test_414_rmq_nrpe_monitors(self):
    """Check rabbimq-server nrpe monitor basic functionality."""
    sentry_units = self._get_rmq_sentry_units()
    host_names = u.get_unit_hostnames(sentry_units)

    # check_rabbitmq monitor: run the nagios check command extracted from
    # the nrpe config on every unit, expecting exit code 0.
    u.log.debug('Checking nrpe check_rabbitmq on units...')
    cmds = ['egrep -oh /usr/local.* /etc/nagios/nrpe.d/'
            'check_rabbitmq.cfg']
    ret = u.check_commands_on_units(cmds, sentry_units)
    if ret:
        amulet.raise_status(amulet.FAIL, msg=ret)

    # The queue-stats collection cron job runs once a minute; wait for
    # at least one pass before checking its output.
    u.log.debug('Sleeping 70s for 1m cron job to run...')
    time.sleep(70)

    # check_rabbitmq_queue monitor
    u.log.debug('Checking nrpe check_rabbitmq_queue on units...')
    cmds = ['egrep -oh /usr/local.* /etc/nagios/nrpe.d/'
            'check_rabbitmq_queue.cfg']
    ret = u.check_commands_on_units(cmds, sentry_units)
    if ret:
        amulet.raise_status(amulet.FAIL, msg=ret)

    # check dat file existence: the cron job writes per-host stats files.
    u.log.debug('Checking nrpe dat file existence on units...')
    for sentry_unit in sentry_units:
        unit_name = sentry_unit.info['unit_name']
        unit_host_name = host_names[unit_name]

        cmds = [
            'stat /var/lib/rabbitmq/data/{}_general_stats.dat'.format(
                unit_host_name),
            'stat /var/lib/rabbitmq/data/{}_queue_stats.dat'.format(
                unit_host_name)
        ]

        ret = u.check_commands_on_units(cmds, [sentry_unit])
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

    u.log.info('OK\n')
|
|
@ -0,0 +1,93 @@
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import amulet
|
||||||
|
import os
|
||||||
|
import six
|
||||||
|
|
||||||
|
|
||||||
|
class AmuletDeployment(object):
    """Amulet deployment.

    This class provides generic Amulet deployment and test runner
    methods.
    """

    def __init__(self, series=None):
        """Initialize the deployment environment.

        :param series: optional Ubuntu series (e.g. 'trusty'); when given
            it is passed through to amulet.Deployment.
        """
        self.series = None

        if series:
            self.series = series
            self.d = amulet.Deployment(series=self.series)
        else:
            self.d = amulet.Deployment()

    def _add_services(self, this_service, other_services):
        """Add services.

        Add services to the deployment where this_service is the local charm
        that we're testing and other_services are the other services that
        are being used in the local amulet tests.

        :param this_service: dict with at least a 'name' key; optional
            'units' (defaults to 1).
        :param other_services: list of dicts with 'name' and optional
            'location' and 'units' keys.
        """
        # Amulet deploys the local charm from the cwd, so the directory
        # name must match the charm name.
        if this_service['name'] != os.path.basename(os.getcwd()):
            s = this_service['name']
            msg = "The charm's root directory name needs to be {}".format(s)
            amulet.raise_status(amulet.FAIL, msg=msg)

        if 'units' not in this_service:
            this_service['units'] = 1

        self.d.add(this_service['name'], units=this_service['units'])

        for svc in other_services:
            if 'location' in svc:
                branch_location = svc['location']
            elif self.series:
                # Bug fix: a stray trailing comma previously made this a
                # one-element tuple instead of the charm-store URL string.
                branch_location = 'cs:{}/{}'.format(self.series, svc['name'])
            else:
                branch_location = None

            if 'units' not in svc:
                svc['units'] = 1

            self.d.add(svc['name'], charm=branch_location, units=svc['units'])

    def _add_relations(self, relations):
        """Add all of the relations for the services.

        :param relations: dict mapping 'service:endpoint' to
            'service:endpoint'.
        """
        for k, v in six.iteritems(relations):
            self.d.relate(k, v)

    def _configure_services(self, configs):
        """Configure all of the services.

        :param configs: dict mapping service name to a config dict.
        """
        for service, config in six.iteritems(configs):
            self.d.configure(service, config)

    def _deploy(self):
        """Deploy environment and wait for all hooks to finish executing."""
        try:
            self.d.setup(timeout=900)
            self.d.sentry.wait(timeout=900)
        except amulet.helpers.TimeoutError:
            amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
        except Exception:
            raise

    def run_tests(self):
        """Run all of the methods that are prefixed with 'test_'."""
        for test in dir(self):
            if test.startswith('test_'):
                getattr(self, test)()
|
|
@ -0,0 +1,778 @@
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import io
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import socket
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
import uuid
|
||||||
|
|
||||||
|
import amulet
|
||||||
|
import distro_info
|
||||||
|
import six
|
||||||
|
from six.moves import configparser
|
||||||
|
if six.PY3:
|
||||||
|
from urllib import parse as urlparse
|
||||||
|
else:
|
||||||
|
import urlparse
|
||||||
|
|
||||||
|
|
||||||
|
class AmuletUtils(object):
|
||||||
|
"""Amulet utilities.
|
||||||
|
|
||||||
|
This class provides common utility functions that are used by Amulet
|
||||||
|
tests.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, log_level=logging.ERROR):
    """Create the utility object and cache the Ubuntu release list.

    :param log_level: logging level for the internal stdout logger.
    """
    self.log = self.get_logger(level=log_level)
    self.ubuntu_releases = self.get_ubuntu_releases()
|
||||||
|
|
||||||
|
def get_logger(self, name="amulet-logger", level=logging.DEBUG):
    """Get a logger object that will log to stdout.

    :param name: logger name
    :param level: level applied to both the handler and the logger
    :returns: configured logging.Logger instance

    NOTE(review): calling this repeatedly with the same name adds a
    duplicate StreamHandler each time -- confirm callers only call once.
    """
    log = logging
    logger = log.getLogger(name)
    fmt = log.Formatter("%(asctime)s %(funcName)s "
                        "%(levelname)s: %(message)s")

    handler = log.StreamHandler(stream=sys.stdout)
    handler.setLevel(level)
    handler.setFormatter(fmt)

    logger.addHandler(handler)
    logger.setLevel(level)

    return logger
|
||||||
|
|
||||||
|
def valid_ip(self, ip):
    """Return True when *ip* matches a dotted-quad IPv4 pattern."""
    ipv4_pattern = r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$"
    return re.match(ipv4_pattern, ip) is not None
|
||||||
|
|
||||||
|
def valid_url(self, url):
    """Return True when *url* matches a basic http(s)/ftp(s) URL shape."""
    url_pattern = re.compile(
        r'^(?:http|ftp)s?://'
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa
        r'localhost|'
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
        r'(?::\d+)?'
        r'(?:/?|[/?]\S+)$',
        re.IGNORECASE)
    return url_pattern.match(url) is not None
|
||||||
|
|
||||||
|
def get_ubuntu_release_from_sentry(self, sentry_unit):
    """Get Ubuntu release codename from sentry unit.

    :param sentry_unit: amulet sentry/service unit pointer
    :returns: list of strings - release codename, failure message
    """
    msg = None
    cmd = 'lsb_release -cs'
    release, code = sentry_unit.run(cmd)
    if code == 0:
        self.log.debug('{} lsb_release: {}'.format(
            sentry_unit.info['unit_name'], release))
    else:
        msg = ('{} `{}` returned {} '
               '{}'.format(sentry_unit.info['unit_name'],
                           cmd, release, code))
    # NOTE(review): when the command above failed, this check can
    # overwrite that failure message with the not-in-release-list one --
    # confirm losing the first message is acceptable.
    if release not in self.ubuntu_releases:
        msg = ("Release ({}) not found in Ubuntu releases "
               "({})".format(release, self.ubuntu_releases))
    return release, msg
|
||||||
|
|
||||||
|
def validate_services(self, commands):
    """Validate that lists of commands succeed on service units. Can be
    used to verify system services are running on the corresponding
    service units.

    :param commands: dict with sentry keys and arbitrary command list vals
    :returns: None if successful, Failure string message otherwise
    """
    self.log.debug('Checking status of system services...')

    # /!\ DEPRECATION WARNING (beisner):
    # New and existing tests should be rewritten to use
    # validate_services_by_name() as it is aware of init systems.
    self.log.warn('DEPRECATION WARNING: use '
                  'validate_services_by_name instead of validate_services '
                  'due to init system differences.')

    for k, v in six.iteritems(commands):
        for cmd in v:
            output, code = k.run(cmd)
            self.log.debug('{} `{}` returned '
                           '{}'.format(k.info['unit_name'],
                                       cmd, code))
            # Any non-zero exit is treated as a failed service check.
            if code != 0:
                return "command `{}` returned {}".format(cmd, str(code))
    return None
|
||||||
|
|
||||||
|
def validate_services_by_name(self, sentry_services):
    """Validate system service status by service name, automatically
    detecting init system based on Ubuntu release codename.

    :param sentry_services: dict with sentry keys and svc list values
    :returns: None if successful, Failure string message otherwise
    """
    self.log.debug('Checking status of system services...')

    # Point at which systemd became a thing
    systemd_switch = self.ubuntu_releases.index('vivid')

    for sentry_unit, services_list in six.iteritems(sentry_services):
        # Get lsb_release codename from unit
        release, ret = self.get_ubuntu_release_from_sentry(sentry_unit)
        if ret:
            return ret

        for service_name in services_list:
            # rabbitmq-server and apache2 ship sysv scripts even on
            # upstart releases, so `service status` works for them.
            if (self.ubuntu_releases.index(release) >= systemd_switch or
                    service_name in ['rabbitmq-server', 'apache2']):
                # init is systemd (or regular sysv)
                cmd = 'sudo service {} status'.format(service_name)
                output, code = sentry_unit.run(cmd)
                service_running = code == 0
            elif self.ubuntu_releases.index(release) < systemd_switch:
                # init is upstart
                cmd = 'sudo status {}'.format(service_name)
                output, code = sentry_unit.run(cmd)
                service_running = code == 0 and "start/running" in output

            self.log.debug('{} `{}` returned '
                           '{}'.format(sentry_unit.info['unit_name'],
                                       cmd, code))
            if not service_running:
                return u"command `{}` returned {} {}".format(
                    cmd, output, str(code))
    return None
|
||||||
|
|
||||||
|
def _get_config(self, unit, filename):
    """Get a ConfigParser object for parsing a unit's config file.

    :param unit: amulet sentry/service unit pointer
    :param filename: path of the config file on the unit
    :returns: configparser.ConfigParser with the file's contents loaded
    """
    file_contents = unit.file_contents(filename)

    # NOTE(beisner): by default, ConfigParser does not handle options
    # with no value, such as the flags used in the mysql my.cnf file.
    # https://bugs.python.org/issue7005
    config = configparser.ConfigParser(allow_no_value=True)
    config.readfp(io.StringIO(file_contents))
    return config
|
||||||
|
|
||||||
|
def validate_config_data(self, sentry_unit, config_file, section,
                         expected):
    """Validate config file data.

    Verify that the specified section of the config file contains
    the expected option key:value pairs.

    Compare expected dictionary data vs actual dictionary data.
    The values in the 'expected' dictionary can be strings, bools, ints,
    longs, or can be a function that evaluates a variable and returns a
    bool.

    :param sentry_unit: amulet sentry/service unit pointer
    :param config_file: path of the ini-style config file on the unit
    :param section: section name to inspect ('DEFAULT' for the defaults)
    :param expected: dict of option -> literal value or predicate
    :returns: None if successful, failure string otherwise
    """
    self.log.debug('Validating config file data ({} in {} on {})'
                   '...'.format(section, config_file,
                                sentry_unit.info['unit_name']))
    config = self._get_config(sentry_unit, config_file)

    # 'DEFAULT' is implicit in ConfigParser and never a real section.
    if section != 'DEFAULT' and not config.has_section(section):
        return "section [{}] does not exist".format(section)

    for k in expected.keys():
        if not config.has_option(section, k):
            return "section [{}] is missing option {}".format(section, k)

        actual = config.get(section, k)
        v = expected[k]
        if (isinstance(v, six.string_types) or
                isinstance(v, bool) or
                isinstance(v, six.integer_types)):
            # handle explicit values
            if actual != v:
                return "section [{}] {}:{} != expected {}:{}".format(
                    section, k, actual, k, expected[k])
        # handle function pointers, such as not_null or valid_ip
        elif not v(actual):
            return "section [{}] {}:{} != expected {}:{}".format(
                section, k, actual, k, expected[k])
    return None
|
||||||
|
|
||||||
|
def _validate_dict_data(self, expected, actual):
    """Validate dictionary data.

    Compare expected dictionary data vs actual dictionary data.
    The values in the 'expected' dictionary can be strings, bools, ints,
    longs, or can be a function that evaluates a variable and returns a
    bool.

    :param expected: dict of key -> literal value or predicate function
    :param actual: dict of observed key/value pairs
    :returns: None if all expectations hold, failure string otherwise
    """
    self.log.debug('actual: {}'.format(repr(actual)))
    self.log.debug('expected: {}'.format(repr(expected)))

    for k, v in six.iteritems(expected):
        if k in actual:
            if (isinstance(v, six.string_types) or
                    isinstance(v, bool) or
                    isinstance(v, six.integer_types)):
                # handle explicit values
                if v != actual[k]:
                    return "{}:{}".format(k, actual[k])
            # handle function pointers, such as not_null or valid_ip
            elif not v(actual[k]):
                return "{}:{}".format(k, actual[k])
        else:
            return "key '{}' does not exist".format(k)
    return None
|
||||||
|
|
||||||
|
def validate_relation_data(self, sentry_unit, relation, expected):
    """Validate actual relation data based on expected relation data.

    :param sentry_unit: amulet sentry/service unit pointer
    :param relation: two-element list [local endpoint, remote endpoint]
    :param expected: dict of expected keys -> literal values or predicates
    :returns: None if successful, failure string otherwise
    """
    actual = sentry_unit.relation(relation[0], relation[1])
    return self._validate_dict_data(expected, actual)
|
||||||
|
|
||||||
|
def _validate_list_data(self, expected, actual):
    """Compare expected list vs actual list data."""
    missing = [item for item in expected if item not in actual]
    if missing:
        # Report the first missing item, matching historical behavior.
        return "expected item {} not found in actual list".format(missing[0])
    return None
|
||||||
|
|
||||||
|
def not_null(self, string):
    """Return True when *string* is not None."""
    return string is not None
|
||||||
|
|
||||||
|
def _get_file_mtime(self, sentry_unit, filename):
    """Get last modification time of file.

    :param sentry_unit: amulet sentry/service unit pointer
    :param filename: path of the file on the unit
    :returns: mtime as reported by the sentry's file_stat
    """
    return sentry_unit.file_stat(filename)['mtime']
|
||||||
|
|
||||||
|
def _get_dir_mtime(self, sentry_unit, directory):
    """Get last modification time of directory.

    :param sentry_unit: amulet sentry/service unit pointer
    :param directory: path of the directory on the unit
    :returns: mtime as reported by the sentry's directory_stat
    """
    return sentry_unit.directory_stat(directory)['mtime']
|
||||||
|
|
||||||
|
def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None):
    """Get start time of a process based on the last modification time
    of the /proc/pid directory.

    :param sentry_unit: The sentry unit to check for the service on
    :param service: service name to look for in process table
    :param pgrep_full: [Deprecated] Use full command line search mode
        with pgrep
    :returns: epoch time of service process start
    """
    if pgrep_full is not None:
        # /!\ DEPRECATION WARNING (beisner):
        # No longer implemented, as pidof is now used instead of pgrep.
        # https://bugs.launchpad.net/charm-helpers/+bug/1474030
        self.log.warn('DEPRECATION WARNING: pgrep_full bool is no '
                      'longer implemented re: lp 1474030.')

    # Only the first pid is used when the service has multiple processes.
    pid_list = self.get_process_id_list(sentry_unit, service)
    pid = pid_list[0]
    proc_dir = '/proc/{}'.format(pid)
    self.log.debug('Pid for {} on {}: {}'.format(
        service, sentry_unit.info['unit_name'], pid))

    return self._get_dir_mtime(sentry_unit, proc_dir)
|
||||||
|
|
||||||
|
def service_restarted(self, sentry_unit, service, filename,
                      pgrep_full=None, sleep_time=20):
    """Check if service was restarted.

    Compare a service's start time vs a file's last modification time
    (such as a config file for that service) to determine if the service
    has been restarted.

    :param sentry_unit: amulet sentry/service unit pointer
    :param service: service name to look for in process table
    :param filename: file whose mtime is compared against the proc start
    :param pgrep_full: [Deprecated] no longer implemented
    :param sleep_time: seconds to wait before sampling
    :returns: True if the service started at or after the file's mtime
    """
    # /!\ DEPRECATION WARNING (beisner):
    # This method is prone to races in that no before-time is known.
    # Use validate_service_config_changed instead.

    # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
    # used instead of pgrep. pgrep_full is still passed through to ensure
    # deprecation WARNS. lp1474030
    self.log.warn('DEPRECATION WARNING: use '
                  'validate_service_config_changed instead of '
                  'service_restarted due to known races.')

    time.sleep(sleep_time)
    if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
            self._get_file_mtime(sentry_unit, filename)):
        return True
    else:
        return False
|
||||||
|
|
||||||
|
def service_restarted_since(self, sentry_unit, mtime, service,
                            pgrep_full=None, sleep_time=20,
                            retry_count=2, retry_sleep_time=30):
    """Check if service was been started after a given time.

    Args:
      sentry_unit (sentry): The sentry unit to check for the service on
      mtime (float): The epoch time to check against
      service (string): service name to look for in process table
      pgrep_full: [Deprecated] Use full command line search mode with pgrep
      sleep_time (int): Seconds to sleep before looking for process
      retry_count (int): If service is not found, how many times to retry
      retry_sleep_time (int): Seconds to wait between retries

    Returns:
      bool: True if service found and its start time it newer than mtime,
            False if service is older than mtime or if service was
            not found.
    """
    # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
    # used instead of pgrep. pgrep_full is still passed through to ensure
    # deprecation WARNS. lp1474030

    unit_name = sentry_unit.info['unit_name']
    self.log.debug('Checking that %s service restarted since %s on '
                   '%s' % (service, mtime, unit_name))
    time.sleep(sleep_time)
    proc_start_time = None
    tries = 0
    # Retry because the process may not exist yet right after a restart.
    while tries <= retry_count and not proc_start_time:
        try:
            proc_start_time = self._get_proc_start_time(sentry_unit,
                                                        service,
                                                        pgrep_full)
            self.log.debug('Attempt {} to get {} proc start time on {} '
                           'OK'.format(tries, service, unit_name))
        except IOError:
            # NOTE(beisner) - race avoidance, proc may not exist yet.
            # https://bugs.launchpad.net/charm-helpers/+bug/1474030
            self.log.debug('Attempt {} to get {} proc start time on {} '
                           'failed'.format(tries, service, unit_name))
            time.sleep(retry_sleep_time)
        tries += 1

    if not proc_start_time:
        self.log.warn('No proc start time found, assuming service did '
                      'not start')
        return False
    if proc_start_time >= mtime:
        self.log.debug('Proc start time is newer than provided mtime'
                       '(%s >= %s) on %s (OK)' % (proc_start_time,
                                                  mtime, unit_name))
        return True
    else:
        self.log.warn('Proc start time (%s) is older than provided mtime '
                      '(%s) on %s, service did not '
                      'restart' % (proc_start_time, mtime, unit_name))
        return False
|
||||||
|
|
||||||
|
def config_updated_since(self, sentry_unit, filename, mtime,
                         sleep_time=20):
    """Check if file was modified after a given time.

    Args:
      sentry_unit (sentry): The sentry unit to check the file mtime on
      filename (string): The file to check mtime of
      mtime (float): The epoch time to check against
      sleep_time (int): Seconds to sleep before looking for process

    Returns:
      bool: True if file was modified more recently than mtime, False if
            file was modified before mtime,
    """
    self.log.debug('Checking %s updated since %s' % (filename, mtime))
    time.sleep(sleep_time)
    file_mtime = self._get_file_mtime(sentry_unit, filename)
    if file_mtime >= mtime:
        self.log.debug('File mtime is newer than provided mtime '
                       '(%s >= %s)' % (file_mtime, mtime))
        return True
    else:
        self.log.warn('File mtime %s is older than provided mtime %s'
                      % (file_mtime, mtime))
        return False
|
||||||
|
|
||||||
|
def validate_service_config_changed(self, sentry_unit, mtime, service,
                                    filename, pgrep_full=None,
                                    sleep_time=20, retry_count=2,
                                    retry_sleep_time=30):
    """Check service and file were updated after mtime

    Args:
      sentry_unit (sentry): The sentry unit to check for the service on
      mtime (float): The epoch time to check against
      service (string): service name to look for in process table
      filename (string): The file to check mtime of
      pgrep_full: [Deprecated] Use full command line search mode with pgrep
      sleep_time (int): Initial sleep in seconds to pass to test helpers
      retry_count (int): If service is not found, how many times to retry
      retry_sleep_time (int): Time in seconds to wait between retries

    Typical Usage:
        u = OpenStackAmuletUtils(ERROR)
        ...
        mtime = u.get_sentry_time(self.cinder_sentry)
        self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'})
        if not u.validate_service_config_changed(self.cinder_sentry,
                                                 mtime,
                                                 'cinder-api',
                                                 '/etc/cinder/cinder.conf')
            amulet.raise_status(amulet.FAIL, msg='update failed')
    Returns:
      bool: True if both service and file where updated/restarted after
            mtime, False if service is older than mtime or if service was
            not found or if filename was modified before mtime.
    """

    # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
    # used instead of pgrep. pgrep_full is still passed through to ensure
    # deprecation WARNS. lp1474030

    service_restart = self.service_restarted_since(
        sentry_unit, mtime,
        service,
        pgrep_full=pgrep_full,
        sleep_time=sleep_time,
        retry_count=retry_count,
        retry_sleep_time=retry_sleep_time)

    # The restart check already slept, so skip the second sleep here.
    config_update = self.config_updated_since(
        sentry_unit,
        filename,
        mtime,
        sleep_time=0)

    return service_restart and config_update
|
||||||
|
|
||||||
|
def get_sentry_time(self, sentry_unit):
    """Return current epoch time on a sentry.

    :param sentry_unit: amulet sentry/service unit pointer
    :returns: float epoch seconds as reported by `date +%s` on the unit
    """
    cmd = "date +'%s'"
    return float(sentry_unit.run(cmd)[0])
|
||||||
|
|
||||||
|
def relation_error(self, name, data):
    """Format a failure message for unexpected relation data."""
    return 'unexpected relation data in {} - {}'.format(name, data)
|
||||||
|
|
||||||
|
def endpoint_error(self, name, data):
    """Format a failure message for unexpected endpoint data."""
    return 'unexpected endpoint data in {} - {}'.format(name, data)
|
||||||
|
|
||||||
|
def get_ubuntu_releases(self):
    """Return a list of all Ubuntu releases in order of release."""
    return distro_info.UbuntuDistroInfo().all
|
||||||
|
|
||||||
|
def file_to_url(self, file_rel_path):
|
||||||
|
"""Convert a relative file path to a file URL."""
|
||||||
|
_abs_path = os.path.abspath(file_rel_path)
|
||||||
|
return urlparse.urlparse(_abs_path, scheme='file').geturl()
|
||||||
|
|
||||||
|
def check_commands_on_units(self, commands, sentry_units):
|
||||||
|
"""Check that all commands in a list exit zero on all
|
||||||
|
sentry units in a list.
|
||||||
|
|
||||||
|
:param commands: list of bash commands
|
||||||
|
:param sentry_units: list of sentry unit pointers
|
||||||
|
:returns: None if successful; Failure message otherwise
|
||||||
|
"""
|
||||||
|
self.log.debug('Checking exit codes for {} commands on {} '
|
||||||
|
'sentry units...'.format(len(commands),
|
||||||
|
len(sentry_units)))
|
||||||
|
for sentry_unit in sentry_units:
|
||||||
|
for cmd in commands:
|
||||||
|
output, code = sentry_unit.run(cmd)
|
||||||
|
if code == 0:
|
||||||
|
self.log.debug('{} `{}` returned {} '
|
||||||
|
'(OK)'.format(sentry_unit.info['unit_name'],
|
||||||
|
cmd, code))
|
||||||
|
else:
|
||||||
|
return ('{} `{}` returned {} '
|
||||||
|
'{}'.format(sentry_unit.info['unit_name'],
|
||||||
|
cmd, code, output))
|
||||||
|
return None
|
||||||
|
|
||||||
|
def get_process_id_list(self, sentry_unit, process_name,
|
||||||
|
expect_success=True):
|
||||||
|
"""Get a list of process ID(s) from a single sentry juju unit
|
||||||
|
for a single process name.
|
||||||
|
|
||||||
|
:param sentry_unit: Amulet sentry instance (juju unit)
|
||||||
|
:param process_name: Process name
|
||||||
|
:param expect_success: If False, expect the PID to be missing,
|
||||||
|
raise if it is present.
|
||||||
|
:returns: List of process IDs
|
||||||
|
"""
|
||||||
|
cmd = 'pidof -x {}'.format(process_name)
|
||||||
|
if not expect_success:
|
||||||
|
cmd += " || exit 0 && exit 1"
|
||||||
|
output, code = sentry_unit.run(cmd)
|
||||||
|
if code != 0:
|
||||||
|
msg = ('{} `{}` returned {} '
|
||||||
|
'{}'.format(sentry_unit.info['unit_name'],
|
||||||
|
cmd, code, output))
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
return str(output).split()
|
||||||
|
|
||||||
|
def get_unit_process_ids(self, unit_processes, expect_success=True):
|
||||||
|
"""Construct a dict containing unit sentries, process names, and
|
||||||
|
process IDs.
|
||||||
|
|
||||||
|
:param unit_processes: A dictionary of Amulet sentry instance
|
||||||
|
to list of process names.
|
||||||
|
:param expect_success: if False expect the processes to not be
|
||||||
|
running, raise if they are.
|
||||||
|
:returns: Dictionary of Amulet sentry instance to dictionary
|
||||||
|
of process names to PIDs.
|
||||||
|
"""
|
||||||
|
pid_dict = {}
|
||||||
|
for sentry_unit, process_list in six.iteritems(unit_processes):
|
||||||
|
pid_dict[sentry_unit] = {}
|
||||||
|
for process in process_list:
|
||||||
|
pids = self.get_process_id_list(
|
||||||
|
sentry_unit, process, expect_success=expect_success)
|
||||||
|
pid_dict[sentry_unit].update({process: pids})
|
||||||
|
return pid_dict
|
||||||
|
|
||||||
|
def validate_unit_process_ids(self, expected, actual):
|
||||||
|
"""Validate process id quantities for services on units."""
|
||||||
|
self.log.debug('Checking units for running processes...')
|
||||||
|
self.log.debug('Expected PIDs: {}'.format(expected))
|
||||||
|
self.log.debug('Actual PIDs: {}'.format(actual))
|
||||||
|
|
||||||
|
if len(actual) != len(expected):
|
||||||
|
return ('Unit count mismatch. expected, actual: {}, '
|
||||||
|
'{} '.format(len(expected), len(actual)))
|
||||||
|
|
||||||
|
for (e_sentry, e_proc_names) in six.iteritems(expected):
|
||||||
|
e_sentry_name = e_sentry.info['unit_name']
|
||||||
|
if e_sentry in actual.keys():
|
||||||
|
a_proc_names = actual[e_sentry]
|
||||||
|
else:
|
||||||
|
return ('Expected sentry ({}) not found in actual dict data.'
|
||||||
|
'{}'.format(e_sentry_name, e_sentry))
|
||||||
|
|
||||||
|
if len(e_proc_names.keys()) != len(a_proc_names.keys()):
|
||||||
|
return ('Process name count mismatch. expected, actual: {}, '
|
||||||
|
'{}'.format(len(expected), len(actual)))
|
||||||
|
|
||||||
|
for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \
|
||||||
|
zip(e_proc_names.items(), a_proc_names.items()):
|
||||||
|
if e_proc_name != a_proc_name:
|
||||||
|
return ('Process name mismatch. expected, actual: {}, '
|
||||||
|
'{}'.format(e_proc_name, a_proc_name))
|
||||||
|
|
||||||
|
a_pids_length = len(a_pids)
|
||||||
|
fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
|
||||||
|
'{}, {} ({})'.format(e_sentry_name, e_proc_name,
|
||||||
|
e_pids_length, a_pids_length,
|
||||||
|
a_pids))
|
||||||
|
|
||||||
|
# If expected is not bool, ensure PID quantities match
|
||||||
|
if not isinstance(e_pids_length, bool) and \
|
||||||
|
a_pids_length != e_pids_length:
|
||||||
|
return fail_msg
|
||||||
|
# If expected is bool True, ensure 1 or more PIDs exist
|
||||||
|
elif isinstance(e_pids_length, bool) and \
|
||||||
|
e_pids_length is True and a_pids_length < 1:
|
||||||
|
return fail_msg
|
||||||
|
# If expected is bool False, ensure 0 PIDs exist
|
||||||
|
elif isinstance(e_pids_length, bool) and \
|
||||||
|
e_pids_length is False and a_pids_length != 0:
|
||||||
|
return fail_msg
|
||||||
|
else:
|
||||||
|
self.log.debug('PID check OK: {} {} {}: '
|
||||||
|
'{}'.format(e_sentry_name, e_proc_name,
|
||||||
|
e_pids_length, a_pids))
|
||||||
|
return None
|
||||||
|
|
||||||
|
def validate_list_of_identical_dicts(self, list_of_dicts):
|
||||||
|
"""Check that all dicts within a list are identical."""
|
||||||
|
hashes = []
|
||||||
|
for _dict in list_of_dicts:
|
||||||
|
hashes.append(hash(frozenset(_dict.items())))
|
||||||
|
|
||||||
|
self.log.debug('Hashes: {}'.format(hashes))
|
||||||
|
if len(set(hashes)) == 1:
|
||||||
|
self.log.debug('Dicts within list are identical')
|
||||||
|
else:
|
||||||
|
return 'Dicts within list are not identical'
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
||||||
|
def validate_sectionless_conf(self, file_contents, expected):
|
||||||
|
"""A crude conf parser. Useful to inspect configuration files which
|
||||||
|
do not have section headers (as would be necessary in order to use
|
||||||
|
the configparser). Such as openstack-dashboard or rabbitmq confs."""
|
||||||
|
for line in file_contents.split('\n'):
|
||||||
|
if '=' in line:
|
||||||
|
args = line.split('=')
|
||||||
|
if len(args) <= 1:
|
||||||
|
continue
|
||||||
|
key = args[0].strip()
|
||||||
|
value = args[1].strip()
|
||||||
|
if key in expected.keys():
|
||||||
|
if expected[key] != value:
|
||||||
|
msg = ('Config mismatch. Expected, actual: {}, '
|
||||||
|
'{}'.format(expected[key], value))
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
|
def get_unit_hostnames(self, units):
|
||||||
|
"""Return a dict of juju unit names to hostnames."""
|
||||||
|
host_names = {}
|
||||||
|
for unit in units:
|
||||||
|
host_names[unit.info['unit_name']] = \
|
||||||
|
str(unit.file_contents('/etc/hostname').strip())
|
||||||
|
self.log.debug('Unit host names: {}'.format(host_names))
|
||||||
|
return host_names
|
||||||
|
|
||||||
|
def run_cmd_unit(self, sentry_unit, cmd):
|
||||||
|
"""Run a command on a unit, return the output and exit code."""
|
||||||
|
output, code = sentry_unit.run(cmd)
|
||||||
|
if code == 0:
|
||||||
|
self.log.debug('{} `{}` command returned {} '
|
||||||
|
'(OK)'.format(sentry_unit.info['unit_name'],
|
||||||
|
cmd, code))
|
||||||
|
else:
|
||||||
|
msg = ('{} `{}` command returned {} '
|
||||||
|
'{}'.format(sentry_unit.info['unit_name'],
|
||||||
|
cmd, code, output))
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
return str(output), code
|
||||||
|
|
||||||
|
def file_exists_on_unit(self, sentry_unit, file_name):
|
||||||
|
"""Check if a file exists on a unit."""
|
||||||
|
try:
|
||||||
|
sentry_unit.file_stat(file_name)
|
||||||
|
return True
|
||||||
|
except IOError:
|
||||||
|
return False
|
||||||
|
except Exception as e:
|
||||||
|
msg = 'Error checking file {}: {}'.format(file_name, e)
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
|
def file_contents_safe(self, sentry_unit, file_name,
|
||||||
|
max_wait=60, fatal=False):
|
||||||
|
"""Get file contents from a sentry unit. Wrap amulet file_contents
|
||||||
|
with retry logic to address races where a file checks as existing,
|
||||||
|
but no longer exists by the time file_contents is called.
|
||||||
|
Return None if file not found. Optionally raise if fatal is True."""
|
||||||
|
unit_name = sentry_unit.info['unit_name']
|
||||||
|
file_contents = False
|
||||||
|
tries = 0
|
||||||
|
while not file_contents and tries < (max_wait / 4):
|
||||||
|
try:
|
||||||
|
file_contents = sentry_unit.file_contents(file_name)
|
||||||
|
except IOError:
|
||||||
|
self.log.debug('Attempt {} to open file {} from {} '
|
||||||
|
'failed'.format(tries, file_name,
|
||||||
|
unit_name))
|
||||||
|
time.sleep(4)
|
||||||
|
tries += 1
|
||||||
|
|
||||||
|
if file_contents:
|
||||||
|
return file_contents
|
||||||
|
elif not fatal:
|
||||||
|
return None
|
||||||
|
elif fatal:
|
||||||
|
msg = 'Failed to get file contents from unit.'
|
||||||
|
amulet.raise_status(amulet.FAIL, msg)
|
||||||
|
|
||||||
|
def port_knock_tcp(self, host="localhost", port=22, timeout=15):
|
||||||
|
"""Open a TCP socket to check for a listening sevice on a host.
|
||||||
|
|
||||||
|
:param host: host name or IP address, default to localhost
|
||||||
|
:param port: TCP port number, default to 22
|
||||||
|
:param timeout: Connect timeout, default to 15 seconds
|
||||||
|
:returns: True if successful, False if connect failed
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Resolve host name if possible
|
||||||
|
try:
|
||||||
|
connect_host = socket.gethostbyname(host)
|
||||||
|
host_human = "{} ({})".format(connect_host, host)
|
||||||
|
except socket.error as e:
|
||||||
|
self.log.warn('Unable to resolve address: '
|
||||||
|
'{} ({}) Trying anyway!'.format(host, e))
|
||||||
|
connect_host = host
|
||||||
|
host_human = connect_host
|
||||||
|
|
||||||
|
# Attempt socket connection
|
||||||
|
try:
|
||||||
|
knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||||
|
knock.settimeout(timeout)
|
||||||
|
knock.connect((connect_host, port))
|
||||||
|
knock.close()
|
||||||
|
self.log.debug('Socket connect OK for host '
|
||||||
|
'{} on port {}.'.format(host_human, port))
|
||||||
|
return True
|
||||||
|
except socket.error as e:
|
||||||
|
self.log.debug('Socket connect FAIL for'
|
||||||
|
' {} port {} ({})'.format(host_human, port, e))
|
||||||
|
return False
|
||||||
|
|
||||||
|
def port_knock_units(self, sentry_units, port=22,
|
||||||
|
timeout=15, expect_success=True):
|
||||||
|
"""Open a TCP socket to check for a listening sevice on each
|
||||||
|
listed juju unit.
|
||||||
|
|
||||||
|
:param sentry_units: list of sentry unit pointers
|
||||||
|
:param port: TCP port number, default to 22
|
||||||
|
:param timeout: Connect timeout, default to 15 seconds
|
||||||
|
:expect_success: True by default, set False to invert logic
|
||||||
|
:returns: None if successful, Failure message otherwise
|
||||||
|
"""
|
||||||
|
for unit in sentry_units:
|
||||||
|
host = unit.info['public-address']
|
||||||
|
connected = self.port_knock_tcp(host, port, timeout)
|
||||||
|
if not connected and expect_success:
|
||||||
|
return 'Socket connect failed.'
|
||||||
|
elif connected and not expect_success:
|
||||||
|
return 'Socket connected unexpectedly.'
|
||||||
|
|
||||||
|
def get_uuid_epoch_stamp(self):
|
||||||
|
"""Returns a stamp string based on uuid4 and epoch time. Useful in
|
||||||
|
generating test messages which need to be unique-ish."""
|
||||||
|
return '[{}-{}]'.format(uuid.uuid4(), time.time())
|
||||||
|
|
||||||
|
# amulet juju action helpers:
|
||||||
|
def run_action(self, unit_sentry, action,
|
||||||
|
_check_output=subprocess.check_output):
|
||||||
|
"""Run the named action on a given unit sentry.
|
||||||
|
|
||||||
|
_check_output parameter is used for dependency injection.
|
||||||
|
|
||||||
|
@return action_id.
|
||||||
|
"""
|
||||||
|
unit_id = unit_sentry.info["unit_name"]
|
||||||
|
command = ["juju", "action", "do", "--format=json", unit_id, action]
|
||||||
|
self.log.info("Running command: %s\n" % " ".join(command))
|
||||||
|
output = _check_output(command, universal_newlines=True)
|
||||||
|
data = json.loads(output)
|
||||||
|
action_id = data[u'Action queued with id']
|
||||||
|
return action_id
|
||||||
|
|
||||||
|
def wait_on_action(self, action_id, _check_output=subprocess.check_output):
|
||||||
|
"""Wait for a given action, returning if it completed or not.
|
||||||
|
|
||||||
|
_check_output parameter is used for dependency injection.
|
||||||
|
"""
|
||||||
|
command = ["juju", "action", "fetch", "--format=json", "--wait=0",
|
||||||
|
action_id]
|
||||||
|
output = _check_output(command, universal_newlines=True)
|
||||||
|
data = json.loads(output)
|
||||||
|
return data.get(u"status") == "completed"
|
|
@ -13,6 +13,3 @@
|
||||||
#
|
#
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
from .base import * # NOQA
|
|
||||||
from .helpers import * # NOQA
|
|
|
@ -0,0 +1,15 @@
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
@ -0,0 +1,198 @@
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import six
|
||||||
|
from collections import OrderedDict
|
||||||
|
from charmhelpers.contrib.amulet.deployment import (
|
||||||
|
AmuletDeployment
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class OpenStackAmuletDeployment(AmuletDeployment):
|
||||||
|
"""OpenStack amulet deployment.
|
||||||
|
|
||||||
|
This class inherits from AmuletDeployment and has additional support
|
||||||
|
that is specifically for use by OpenStack charms.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, series=None, openstack=None, source=None, stable=True):
|
||||||
|
"""Initialize the deployment environment."""
|
||||||
|
super(OpenStackAmuletDeployment, self).__init__(series)
|
||||||
|
self.openstack = openstack
|
||||||
|
self.source = source
|
||||||
|
self.stable = stable
|
||||||
|
# Note(coreycb): this needs to be changed when new next branches come
|
||||||
|
# out.
|
||||||
|
self.current_next = "trusty"
|
||||||
|
|
||||||
|
def _determine_branch_locations(self, other_services):
|
||||||
|
"""Determine the branch locations for the other services.
|
||||||
|
|
||||||
|
Determine if the local branch being tested is derived from its
|
||||||
|
stable or next (dev) branch, and based on this, use the corresonding
|
||||||
|
stable or next branches for the other_services."""
|
||||||
|
|
||||||
|
# Charms outside the lp:~openstack-charmers namespace
|
||||||
|
base_charms = ['mysql', 'mongodb', 'nrpe']
|
||||||
|
|
||||||
|
# Force these charms to current series even when using an older series.
|
||||||
|
# ie. Use trusty/nrpe even when series is precise, as the P charm
|
||||||
|
# does not possess the necessary external master config and hooks.
|
||||||
|
force_series_current = ['nrpe']
|
||||||
|
|
||||||
|
if self.series in ['precise', 'trusty']:
|
||||||
|
base_series = self.series
|
||||||
|
else:
|
||||||
|
base_series = self.current_next
|
||||||
|
|
||||||
|
if self.stable:
|
||||||
|
for svc in other_services:
|
||||||
|
if svc['name'] in force_series_current:
|
||||||
|
base_series = self.current_next
|
||||||
|
|
||||||
|
temp = 'lp:charms/{}/{}'
|
||||||
|
svc['location'] = temp.format(base_series,
|
||||||
|
svc['name'])
|
||||||
|
else:
|
||||||
|
for svc in other_services:
|
||||||
|
if svc['name'] in force_series_current:
|
||||||
|
base_series = self.current_next
|
||||||
|
|
||||||
|
if svc['name'] in base_charms:
|
||||||
|
temp = 'lp:charms/{}/{}'
|
||||||
|
svc['location'] = temp.format(base_series,
|
||||||
|
svc['name'])
|
||||||
|
else:
|
||||||
|
temp = 'lp:~openstack-charmers/charms/{}/{}/next'
|
||||||
|
svc['location'] = temp.format(self.current_next,
|
||||||
|
svc['name'])
|
||||||
|
return other_services
|
||||||
|
|
||||||
|
def _add_services(self, this_service, other_services):
|
||||||
|
"""Add services to the deployment and set openstack-origin/source."""
|
||||||
|
other_services = self._determine_branch_locations(other_services)
|
||||||
|
|
||||||
|
super(OpenStackAmuletDeployment, self)._add_services(this_service,
|
||||||
|
other_services)
|
||||||
|
|
||||||
|
services = other_services
|
||||||
|
services.append(this_service)
|
||||||
|
|
||||||
|
# Charms which should use the source config option
|
||||||
|
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
|
||||||
|
'ceph-osd', 'ceph-radosgw']
|
||||||
|
|
||||||
|
# Charms which can not use openstack-origin, ie. many subordinates
|
||||||
|
no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe']
|
||||||
|
|
||||||
|
if self.openstack:
|
||||||
|
for svc in services:
|
||||||
|
if svc['name'] not in use_source + no_origin:
|
||||||
|
config = {'openstack-origin': self.openstack}
|
||||||
|
self.d.configure(svc['name'], config)
|
||||||
|
|
||||||
|
if self.source:
|
||||||
|
for svc in services:
|
||||||
|
if svc['name'] in use_source and svc['name'] not in no_origin:
|
||||||
|
config = {'source': self.source}
|
||||||
|
self.d.configure(svc['name'], config)
|
||||||
|
|
||||||
|
def _configure_services(self, configs):
|
||||||
|
"""Configure all of the services."""
|
||||||
|
for service, config in six.iteritems(configs):
|
||||||
|
self.d.configure(service, config)
|
||||||
|
|
||||||
|
def _get_openstack_release(self):
|
||||||
|
"""Get openstack release.
|
||||||
|
|
||||||
|
Return an integer representing the enum value of the openstack
|
||||||
|
release.
|
||||||
|
"""
|
||||||
|
# Must be ordered by OpenStack release (not by Ubuntu release):
|
||||||
|
(self.precise_essex, self.precise_folsom, self.precise_grizzly,
|
||||||
|
self.precise_havana, self.precise_icehouse,
|
||||||
|
self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
|
||||||
|
self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
|
||||||
|
self.wily_liberty) = range(12)
|
||||||
|
|
||||||
|
releases = {
|
||||||
|
('precise', None): self.precise_essex,
|
||||||
|
('precise', 'cloud:precise-folsom'): self.precise_folsom,
|
||||||
|
('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
|
||||||
|
('precise', 'cloud:precise-havana'): self.precise_havana,
|
||||||
|
('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
|
||||||
|
('trusty', None): self.trusty_icehouse,
|
||||||
|
('trusty', 'cloud:trusty-juno'): self.trusty_juno,
|
||||||
|
('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
|
||||||
|
('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
|
||||||
|
('utopic', None): self.utopic_juno,
|
||||||
|
('vivid', None): self.vivid_kilo,
|
||||||
|
('wily', None): self.wily_liberty}
|
||||||
|
return releases[(self.series, self.openstack)]
|
||||||
|
|
||||||
|
def _get_openstack_release_string(self):
|
||||||
|
"""Get openstack release string.
|
||||||
|
|
||||||
|
Return a string representing the openstack release.
|
||||||
|
"""
|
||||||
|
releases = OrderedDict([
|
||||||
|
('precise', 'essex'),
|
||||||
|
('quantal', 'folsom'),
|
||||||
|
('raring', 'grizzly'),
|
||||||
|
('saucy', 'havana'),
|
||||||
|
('trusty', 'icehouse'),
|
||||||
|
('utopic', 'juno'),
|
||||||
|
('vivid', 'kilo'),
|
||||||
|
('wily', 'liberty'),
|
||||||
|
])
|
||||||
|
if self.openstack:
|
||||||
|
os_origin = self.openstack.split(':')[1]
|
||||||
|
return os_origin.split('%s-' % self.series)[1].split('/')[0]
|
||||||
|
else:
|
||||||
|
return releases[self.series]
|
||||||
|
|
||||||
|
def get_ceph_expected_pools(self, radosgw=False):
|
||||||
|
"""Return a list of expected ceph pools in a ceph + cinder + glance
|
||||||
|
test scenario, based on OpenStack release and whether ceph radosgw
|
||||||
|
is flagged as present or not."""
|
||||||
|
|
||||||
|
if self._get_openstack_release() >= self.trusty_kilo:
|
||||||
|
# Kilo or later
|
||||||
|
pools = [
|
||||||
|
'rbd',
|
||||||
|
'cinder',
|
||||||
|
'glance'
|
||||||
|
]
|
||||||
|
else:
|
||||||
|
# Juno or earlier
|
||||||
|
pools = [
|
||||||
|
'data',
|
||||||
|
'metadata',
|
||||||
|
'rbd',
|
||||||
|
'cinder',
|
||||||
|
'glance'
|
||||||
|
]
|
||||||
|
|
||||||
|
if radosgw:
|
||||||
|
pools.extend([
|
||||||
|
'.rgw.root',
|
||||||
|
'.rgw.control',
|
||||||
|
'.rgw',
|
||||||
|
'.rgw.gc',
|
||||||
|
'.users.uid'
|
||||||
|
])
|
||||||
|
|
||||||
|
return pools
|
|
@ -0,0 +1,963 @@
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import amulet
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import six
|
||||||
|
import time
|
||||||
|
import urllib
|
||||||
|
|
||||||
|
import cinderclient.v1.client as cinder_client
|
||||||
|
import glanceclient.v1.client as glance_client
|
||||||
|
import heatclient.v1.client as heat_client
|
||||||
|
import keystoneclient.v2_0 as keystone_client
|
||||||
|
import novaclient.v1_1.client as nova_client
|
||||||
|
import pika
|
||||||
|
import swiftclient
|
||||||
|
|
||||||
|
from charmhelpers.contrib.amulet.utils import (
|
||||||
|
AmuletUtils
|
||||||
|
)
|
||||||
|
|
||||||
|
DEBUG = logging.DEBUG
|
||||||
|
ERROR = logging.ERROR
|
||||||
|
|
||||||
|
|
||||||
|
class OpenStackAmuletUtils(AmuletUtils):
|
||||||
|
"""OpenStack amulet utilities.
|
||||||
|
|
||||||
|
This class inherits from AmuletUtils and has additional support
|
||||||
|
that is specifically for use by OpenStack charm tests.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, log_level=ERROR):
|
||||||
|
"""Initialize the deployment environment."""
|
||||||
|
super(OpenStackAmuletUtils, self).__init__(log_level)
|
||||||
|
|
||||||
|
def validate_endpoint_data(self, endpoints, admin_port, internal_port,
|
||||||
|
public_port, expected):
|
||||||
|
"""Validate endpoint data.
|
||||||
|
|
||||||
|
Validate actual endpoint data vs expected endpoint data. The ports
|
||||||
|
are used to find the matching endpoint.
|
||||||
|
"""
|
||||||
|
self.log.debug('Validating endpoint data...')
|
||||||
|
self.log.debug('actual: {}'.format(repr(endpoints)))
|
||||||
|
found = False
|
||||||
|
for ep in endpoints:
|
||||||
|
self.log.debug('endpoint: {}'.format(repr(ep)))
|
||||||
|
if (admin_port in ep.adminurl and
|
||||||
|
internal_port in ep.internalurl and
|
||||||
|
public_port in ep.publicurl):
|
||||||
|
found = True
|
||||||
|
actual = {'id': ep.id,
|
||||||
|
'region': ep.region,
|
||||||
|
'adminurl': ep.adminurl,
|
||||||
|
'internalurl': ep.internalurl,
|
||||||
|
'publicurl': ep.publicurl,
|
||||||
|
'service_id': ep.service_id}
|
||||||
|
ret = self._validate_dict_data(expected, actual)
|
||||||
|
if ret:
|
||||||
|
return 'unexpected endpoint data - {}'.format(ret)
|
||||||
|
|
||||||
|
if not found:
|
||||||
|
return 'endpoint not found'
|
||||||
|
|
||||||
|
def validate_svc_catalog_endpoint_data(self, expected, actual):
|
||||||
|
"""Validate service catalog endpoint data.
|
||||||
|
|
||||||
|
Validate a list of actual service catalog endpoints vs a list of
|
||||||
|
expected service catalog endpoints.
|
||||||
|
"""
|
||||||
|
self.log.debug('Validating service catalog endpoint data...')
|
||||||
|
self.log.debug('actual: {}'.format(repr(actual)))
|
||||||
|
for k, v in six.iteritems(expected):
|
||||||
|
if k in actual:
|
||||||
|
ret = self._validate_dict_data(expected[k][0], actual[k][0])
|
||||||
|
if ret:
|
||||||
|
return self.endpoint_error(k, ret)
|
||||||
|
else:
|
||||||
|
return "endpoint {} does not exist".format(k)
|
||||||
|
return ret
|
||||||
|
|
||||||
|
def validate_tenant_data(self, expected, actual):
|
||||||
|
"""Validate tenant data.
|
||||||
|
|
||||||
|
Validate a list of actual tenant data vs list of expected tenant
|
||||||
|
data.
|
||||||
|
"""
|
||||||
|
self.log.debug('Validating tenant data...')
|
||||||
|
self.log.debug('actual: {}'.format(repr(actual)))
|
||||||
|
for e in expected:
|
||||||
|
found = False
|
||||||
|
for act in actual:
|
||||||
|
a = {'enabled': act.enabled, 'description': act.description,
|
||||||
|
'name': act.name, 'id': act.id}
|
||||||
|
if e['name'] == a['name']:
|
||||||
|
found = True
|
||||||
|
ret = self._validate_dict_data(e, a)
|
||||||
|
if ret:
|
||||||
|
return "unexpected tenant data - {}".format(ret)
|
||||||
|
if not found:
|
||||||
|
return "tenant {} does not exist".format(e['name'])
|
||||||
|
return ret
|
||||||
|
|
||||||
|
def validate_role_data(self, expected, actual):
|
||||||
|
"""Validate role data.
|
||||||
|
|
||||||
|
Validate a list of actual role data vs a list of expected role
|
||||||
|
data.
|
||||||
|
"""
|
||||||
|
self.log.debug('Validating role data...')
|
||||||
|
self.log.debug('actual: {}'.format(repr(actual)))
|
||||||
|
for e in expected:
|
||||||
|
found = False
|
||||||
|
for act in actual:
|
||||||
|
a = {'name': act.name, 'id': act.id}
|
||||||
|
if e['name'] == a['name']:
|
||||||
|
found = True
|
||||||
|
ret = self._validate_dict_data(e, a)
|
||||||
|
if ret:
|
||||||
|
return "unexpected role data - {}".format(ret)
|
||||||
|
if not found:
|
||||||
|
return "role {} does not exist".format(e['name'])
|
||||||
|
return ret
|
||||||
|
|
||||||
|
def validate_user_data(self, expected, actual):
|
||||||
|
"""Validate user data.
|
||||||
|
|
||||||
|
Validate a list of actual user data vs a list of expected user
|
||||||
|
data.
|
||||||
|
"""
|
||||||
|
self.log.debug('Validating user data...')
|
||||||
|
self.log.debug('actual: {}'.format(repr(actual)))
|
||||||
|
for e in expected:
|
||||||
|
found = False
|
||||||
|
for act in actual:
|
||||||
|
a = {'enabled': act.enabled, 'name': act.name,
|
||||||
|
'email': act.email, 'tenantId': act.tenantId,
|
||||||
|
'id': act.id}
|
||||||
|
if e['name'] == a['name']:
|
||||||
|
found = True
|
||||||
|
ret = self._validate_dict_data(e, a)
|
||||||
|
if ret:
|
||||||
|
return "unexpected user data - {}".format(ret)
|
||||||
|
if not found:
|
||||||
|
return "user {} does not exist".format(e['name'])
|
||||||
|
return ret
|
||||||
|
|
||||||
|
def validate_flavor_data(self, expected, actual):
|
||||||
|
"""Validate flavor data.
|
||||||
|
|
||||||
|
Validate a list of actual flavors vs a list of expected flavors.
|
||||||
|
"""
|
||||||
|
self.log.debug('Validating flavor data...')
|
||||||
|
self.log.debug('actual: {}'.format(repr(actual)))
|
||||||
|
act = [a.name for a in actual]
|
||||||
|
return self._validate_list_data(expected, act)
|
||||||
|
|
||||||
|
def tenant_exists(self, keystone, tenant):
|
||||||
|
"""Return True if tenant exists."""
|
||||||
|
self.log.debug('Checking if tenant exists ({})...'.format(tenant))
|
||||||
|
return tenant in [t.name for t in keystone.tenants.list()]
|
||||||
|
|
||||||
|
def authenticate_cinder_admin(self, keystone_sentry, username,
|
||||||
|
password, tenant):
|
||||||
|
"""Authenticates admin user with cinder."""
|
||||||
|
# NOTE(beisner): cinder python client doesn't accept tokens.
|
||||||
|
service_ip = \
|
||||||
|
keystone_sentry.relation('shared-db',
|
||||||
|
'mysql:shared-db')['private-address']
|
||||||
|
ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
|
||||||
|
return cinder_client.Client(username, password, tenant, ept)
|
||||||
|
|
||||||
|
def authenticate_keystone_admin(self, keystone_sentry, user, password,
|
||||||
|
tenant):
|
||||||
|
"""Authenticates admin user with the keystone admin endpoint."""
|
||||||
|
self.log.debug('Authenticating keystone admin...')
|
||||||
|
unit = keystone_sentry
|
||||||
|
service_ip = unit.relation('shared-db',
|
||||||
|
'mysql:shared-db')['private-address']
|
||||||
|
ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
|
||||||
|
return keystone_client.Client(username=user, password=password,
|
||||||
|
tenant_name=tenant, auth_url=ep)
|
||||||
|
|
||||||
|
def authenticate_keystone_user(self, keystone, user, password, tenant):
|
||||||
|
"""Authenticates a regular user with the keystone public endpoint."""
|
||||||
|
self.log.debug('Authenticating keystone user ({})...'.format(user))
|
||||||
|
ep = keystone.service_catalog.url_for(service_type='identity',
|
||||||
|
endpoint_type='publicURL')
|
||||||
|
return keystone_client.Client(username=user, password=password,
|
||||||
|
tenant_name=tenant, auth_url=ep)
|
||||||
|
|
||||||
|
def authenticate_glance_admin(self, keystone):
|
||||||
|
"""Authenticates admin user with glance."""
|
||||||
|
self.log.debug('Authenticating glance admin...')
|
||||||
|
ep = keystone.service_catalog.url_for(service_type='image',
|
||||||
|
endpoint_type='adminURL')
|
||||||
|
return glance_client.Client(ep, token=keystone.auth_token)
|
||||||
|
|
||||||
|
def authenticate_heat_admin(self, keystone):
|
||||||
|
"""Authenticates the admin user with heat."""
|
||||||
|
self.log.debug('Authenticating heat admin...')
|
||||||
|
ep = keystone.service_catalog.url_for(service_type='orchestration',
|
||||||
|
endpoint_type='publicURL')
|
||||||
|
return heat_client.Client(endpoint=ep, token=keystone.auth_token)
|
||||||
|
|
||||||
|
def authenticate_nova_user(self, keystone, user, password, tenant):
|
||||||
|
"""Authenticates a regular user with nova-api."""
|
||||||
|
self.log.debug('Authenticating nova user ({})...'.format(user))
|
||||||
|
ep = keystone.service_catalog.url_for(service_type='identity',
|
||||||
|
endpoint_type='publicURL')
|
||||||
|
return nova_client.Client(username=user, api_key=password,
|
||||||
|
project_id=tenant, auth_url=ep)
|
||||||
|
|
||||||
|
def authenticate_swift_user(self, keystone, user, password, tenant):
|
||||||
|
"""Authenticates a regular user with swift api."""
|
||||||
|
self.log.debug('Authenticating swift user ({})...'.format(user))
|
||||||
|
ep = keystone.service_catalog.url_for(service_type='identity',
|
||||||
|
endpoint_type='publicURL')
|
||||||
|
return swiftclient.Connection(authurl=ep,
|
||||||
|
user=user,
|
||||||
|
key=password,
|
||||||
|
tenant_name=tenant,
|
||||||
|
auth_version='2.0')
|
||||||
|
|
||||||
|
def create_cirros_image(self, glance, image_name):
|
||||||
|
"""Download the latest cirros image and upload it to glance,
|
||||||
|
validate and return a resource pointer.
|
||||||
|
|
||||||
|
:param glance: pointer to authenticated glance connection
|
||||||
|
:param image_name: display name for new image
|
||||||
|
:returns: glance image pointer
|
||||||
|
"""
|
||||||
|
self.log.debug('Creating glance cirros image '
|
||||||
|
'({})...'.format(image_name))
|
||||||
|
|
||||||
|
# Download cirros image
|
||||||
|
http_proxy = os.getenv('AMULET_HTTP_PROXY')
|
||||||
|
self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
|
||||||
|
if http_proxy:
|
||||||
|
proxies = {'http': http_proxy}
|
||||||
|
opener = urllib.FancyURLopener(proxies)
|
||||||
|
else:
|
||||||
|
opener = urllib.FancyURLopener()
|
||||||
|
|
||||||
|
f = opener.open('http://download.cirros-cloud.net/version/released')
|
||||||
|
version = f.read().strip()
|
||||||
|
cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
|
||||||
|
local_path = os.path.join('tests', cirros_img)
|
||||||
|
|
||||||
|
if not os.path.exists(local_path):
|
||||||
|
cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
|
||||||
|
version, cirros_img)
|
||||||
|
opener.retrieve(cirros_url, local_path)
|
||||||
|
f.close()
|
||||||
|
|
||||||
|
# Create glance image
|
||||||
|
with open(local_path) as f:
|
||||||
|
image = glance.images.create(name=image_name, is_public=True,
|
||||||
|
disk_format='qcow2',
|
||||||
|
container_format='bare', data=f)
|
||||||
|
|
||||||
|
# Wait for image to reach active status
|
||||||
|
img_id = image.id
|
||||||
|
ret = self.resource_reaches_status(glance.images, img_id,
|
||||||
|
expected_stat='active',
|
||||||
|
msg='Image status wait')
|
||||||
|
if not ret:
|
||||||
|
msg = 'Glance image failed to reach expected state.'
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
|
# Re-validate new image
|
||||||
|
self.log.debug('Validating image attributes...')
|
||||||
|
val_img_name = glance.images.get(img_id).name
|
||||||
|
val_img_stat = glance.images.get(img_id).status
|
||||||
|
val_img_pub = glance.images.get(img_id).is_public
|
||||||
|
val_img_cfmt = glance.images.get(img_id).container_format
|
||||||
|
val_img_dfmt = glance.images.get(img_id).disk_format
|
||||||
|
msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
|
||||||
|
'container fmt:{} disk fmt:{}'.format(
|
||||||
|
val_img_name, val_img_pub, img_id,
|
||||||
|
val_img_stat, val_img_cfmt, val_img_dfmt))
|
||||||
|
|
||||||
|
if val_img_name == image_name and val_img_stat == 'active' \
|
||||||
|
and val_img_pub is True and val_img_cfmt == 'bare' \
|
||||||
|
and val_img_dfmt == 'qcow2':
|
||||||
|
self.log.debug(msg_attr)
|
||||||
|
else:
|
||||||
|
msg = ('Volume validation failed, {}'.format(msg_attr))
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
|
return image
|
||||||
|
|
||||||
|
def delete_image(self, glance, image):
|
||||||
|
"""Delete the specified image."""
|
||||||
|
|
||||||
|
# /!\ DEPRECATION WARNING
|
||||||
|
self.log.warn('/!\\ DEPRECATION WARNING: use '
|
||||||
|
'delete_resource instead of delete_image.')
|
||||||
|
self.log.debug('Deleting glance image ({})...'.format(image))
|
||||||
|
return self.delete_resource(glance.images, image, msg='glance image')
|
||||||
|
|
||||||
|
def create_instance(self, nova, image_name, instance_name, flavor):
|
||||||
|
"""Create the specified instance."""
|
||||||
|
self.log.debug('Creating instance '
|
||||||
|
'({}|{}|{})'.format(instance_name, image_name, flavor))
|
||||||
|
image = nova.images.find(name=image_name)
|
||||||
|
flavor = nova.flavors.find(name=flavor)
|
||||||
|
instance = nova.servers.create(name=instance_name, image=image,
|
||||||
|
flavor=flavor)
|
||||||
|
|
||||||
|
count = 1
|
||||||
|
status = instance.status
|
||||||
|
while status != 'ACTIVE' and count < 60:
|
||||||
|
time.sleep(3)
|
||||||
|
instance = nova.servers.get(instance.id)
|
||||||
|
status = instance.status
|
||||||
|
self.log.debug('instance status: {}'.format(status))
|
||||||
|
count += 1
|
||||||
|
|
||||||
|
if status != 'ACTIVE':
|
||||||
|
self.log.error('instance creation timed out')
|
||||||
|
return None
|
||||||
|
|
||||||
|
return instance
|
||||||
|
|
||||||
|
def delete_instance(self, nova, instance):
|
||||||
|
"""Delete the specified instance."""
|
||||||
|
|
||||||
|
# /!\ DEPRECATION WARNING
|
||||||
|
self.log.warn('/!\\ DEPRECATION WARNING: use '
|
||||||
|
'delete_resource instead of delete_instance.')
|
||||||
|
self.log.debug('Deleting instance ({})...'.format(instance))
|
||||||
|
return self.delete_resource(nova.servers, instance,
|
||||||
|
msg='nova instance')
|
||||||
|
|
||||||
|
def create_or_get_keypair(self, nova, keypair_name="testkey"):
|
||||||
|
"""Create a new keypair, or return pointer if it already exists."""
|
||||||
|
try:
|
||||||
|
_keypair = nova.keypairs.get(keypair_name)
|
||||||
|
self.log.debug('Keypair ({}) already exists, '
|
||||||
|
'using it.'.format(keypair_name))
|
||||||
|
return _keypair
|
||||||
|
except:
|
||||||
|
self.log.debug('Keypair ({}) does not exist, '
|
||||||
|
'creating it.'.format(keypair_name))
|
||||||
|
|
||||||
|
_keypair = nova.keypairs.create(name=keypair_name)
|
||||||
|
return _keypair
|
||||||
|
|
||||||
|
def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
|
||||||
|
img_id=None, src_vol_id=None, snap_id=None):
|
||||||
|
"""Create cinder volume, optionally from a glance image, OR
|
||||||
|
optionally as a clone of an existing volume, OR optionally
|
||||||
|
from a snapshot. Wait for the new volume status to reach
|
||||||
|
the expected status, validate and return a resource pointer.
|
||||||
|
|
||||||
|
:param vol_name: cinder volume display name
|
||||||
|
:param vol_size: size in gigabytes
|
||||||
|
:param img_id: optional glance image id
|
||||||
|
:param src_vol_id: optional source volume id to clone
|
||||||
|
:param snap_id: optional snapshot id to use
|
||||||
|
:returns: cinder volume pointer
|
||||||
|
"""
|
||||||
|
# Handle parameter input and avoid impossible combinations
|
||||||
|
if img_id and not src_vol_id and not snap_id:
|
||||||
|
# Create volume from image
|
||||||
|
self.log.debug('Creating cinder volume from glance image...')
|
||||||
|
bootable = 'true'
|
||||||
|
elif src_vol_id and not img_id and not snap_id:
|
||||||
|
# Clone an existing volume
|
||||||
|
self.log.debug('Cloning cinder volume...')
|
||||||
|
bootable = cinder.volumes.get(src_vol_id).bootable
|
||||||
|
elif snap_id and not src_vol_id and not img_id:
|
||||||
|
# Create volume from snapshot
|
||||||
|
self.log.debug('Creating cinder volume from snapshot...')
|
||||||
|
snap = cinder.volume_snapshots.find(id=snap_id)
|
||||||
|
vol_size = snap.size
|
||||||
|
snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
|
||||||
|
bootable = cinder.volumes.get(snap_vol_id).bootable
|
||||||
|
elif not img_id and not src_vol_id and not snap_id:
|
||||||
|
# Create volume
|
||||||
|
self.log.debug('Creating cinder volume...')
|
||||||
|
bootable = 'false'
|
||||||
|
else:
|
||||||
|
# Impossible combination of parameters
|
||||||
|
msg = ('Invalid method use - name:{} size:{} img_id:{} '
|
||||||
|
'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
|
||||||
|
img_id, src_vol_id,
|
||||||
|
snap_id))
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
|
# Create new volume
|
||||||
|
try:
|
||||||
|
vol_new = cinder.volumes.create(display_name=vol_name,
|
||||||
|
imageRef=img_id,
|
||||||
|
size=vol_size,
|
||||||
|
source_volid=src_vol_id,
|
||||||
|
snapshot_id=snap_id)
|
||||||
|
vol_id = vol_new.id
|
||||||
|
except Exception as e:
|
||||||
|
msg = 'Failed to create volume: {}'.format(e)
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
|
# Wait for volume to reach available status
|
||||||
|
ret = self.resource_reaches_status(cinder.volumes, vol_id,
|
||||||
|
expected_stat="available",
|
||||||
|
msg="Volume status wait")
|
||||||
|
if not ret:
|
||||||
|
msg = 'Cinder volume failed to reach expected state.'
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
|
# Re-validate new volume
|
||||||
|
self.log.debug('Validating volume attributes...')
|
||||||
|
val_vol_name = cinder.volumes.get(vol_id).display_name
|
||||||
|
val_vol_boot = cinder.volumes.get(vol_id).bootable
|
||||||
|
val_vol_stat = cinder.volumes.get(vol_id).status
|
||||||
|
val_vol_size = cinder.volumes.get(vol_id).size
|
||||||
|
msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
|
||||||
|
'{} size:{}'.format(val_vol_name, vol_id,
|
||||||
|
val_vol_stat, val_vol_boot,
|
||||||
|
val_vol_size))
|
||||||
|
|
||||||
|
if val_vol_boot == bootable and val_vol_stat == 'available' \
|
||||||
|
and val_vol_name == vol_name and val_vol_size == vol_size:
|
||||||
|
self.log.debug(msg_attr)
|
||||||
|
else:
|
||||||
|
msg = ('Volume validation failed, {}'.format(msg_attr))
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
|
return vol_new
|
||||||
|
|
||||||
|
def delete_resource(self, resource, resource_id,
|
||||||
|
msg="resource", max_wait=120):
|
||||||
|
"""Delete one openstack resource, such as one instance, keypair,
|
||||||
|
image, volume, stack, etc., and confirm deletion within max wait time.
|
||||||
|
|
||||||
|
:param resource: pointer to os resource type, ex:glance_client.images
|
||||||
|
:param resource_id: unique name or id for the openstack resource
|
||||||
|
:param msg: text to identify purpose in logging
|
||||||
|
:param max_wait: maximum wait time in seconds
|
||||||
|
:returns: True if successful, otherwise False
|
||||||
|
"""
|
||||||
|
self.log.debug('Deleting OpenStack resource '
|
||||||
|
'{} ({})'.format(resource_id, msg))
|
||||||
|
num_before = len(list(resource.list()))
|
||||||
|
resource.delete(resource_id)
|
||||||
|
|
||||||
|
tries = 0
|
||||||
|
num_after = len(list(resource.list()))
|
||||||
|
while num_after != (num_before - 1) and tries < (max_wait / 4):
|
||||||
|
self.log.debug('{} delete check: '
|
||||||
|
'{} [{}:{}] {}'.format(msg, tries,
|
||||||
|
num_before,
|
||||||
|
num_after,
|
||||||
|
resource_id))
|
||||||
|
time.sleep(4)
|
||||||
|
num_after = len(list(resource.list()))
|
||||||
|
tries += 1
|
||||||
|
|
||||||
|
self.log.debug('{}: expected, actual count = {}, '
|
||||||
|
'{}'.format(msg, num_before - 1, num_after))
|
||||||
|
|
||||||
|
if num_after == (num_before - 1):
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
self.log.error('{} delete timed out'.format(msg))
|
||||||
|
return False
|
||||||
|
|
||||||
|
def resource_reaches_status(self, resource, resource_id,
|
||||||
|
expected_stat='available',
|
||||||
|
msg='resource', max_wait=120):
|
||||||
|
"""Wait for an openstack resources status to reach an
|
||||||
|
expected status within a specified time. Useful to confirm that
|
||||||
|
nova instances, cinder vols, snapshots, glance images, heat stacks
|
||||||
|
and other resources eventually reach the expected status.
|
||||||
|
|
||||||
|
:param resource: pointer to os resource type, ex: heat_client.stacks
|
||||||
|
:param resource_id: unique id for the openstack resource
|
||||||
|
:param expected_stat: status to expect resource to reach
|
||||||
|
:param msg: text to identify purpose in logging
|
||||||
|
:param max_wait: maximum wait time in seconds
|
||||||
|
:returns: True if successful, False if status is not reached
|
||||||
|
"""
|
||||||
|
|
||||||
|
tries = 0
|
||||||
|
resource_stat = resource.get(resource_id).status
|
||||||
|
while resource_stat != expected_stat and tries < (max_wait / 4):
|
||||||
|
self.log.debug('{} status check: '
|
||||||
|
'{} [{}:{}] {}'.format(msg, tries,
|
||||||
|
resource_stat,
|
||||||
|
expected_stat,
|
||||||
|
resource_id))
|
||||||
|
time.sleep(4)
|
||||||
|
resource_stat = resource.get(resource_id).status
|
||||||
|
tries += 1
|
||||||
|
|
||||||
|
self.log.debug('{}: expected, actual status = {}, '
|
||||||
|
'{}'.format(msg, resource_stat, expected_stat))
|
||||||
|
|
||||||
|
if resource_stat == expected_stat:
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
self.log.debug('{} never reached expected status: '
|
||||||
|
'{}'.format(resource_id, expected_stat))
|
||||||
|
return False
|
||||||
|
|
||||||
|
def get_ceph_osd_id_cmd(self, index):
|
||||||
|
"""Produce a shell command that will return a ceph-osd id."""
|
||||||
|
return ("`initctl list | grep 'ceph-osd ' | "
|
||||||
|
"awk 'NR=={} {{ print $2 }}' | "
|
||||||
|
"grep -o '[0-9]*'`".format(index + 1))
|
||||||
|
|
||||||
|
def get_ceph_pools(self, sentry_unit):
|
||||||
|
"""Return a dict of ceph pools from a single ceph unit, with
|
||||||
|
pool name as keys, pool id as vals."""
|
||||||
|
pools = {}
|
||||||
|
cmd = 'sudo ceph osd lspools'
|
||||||
|
output, code = sentry_unit.run(cmd)
|
||||||
|
if code != 0:
|
||||||
|
msg = ('{} `{}` returned {} '
|
||||||
|
'{}'.format(sentry_unit.info['unit_name'],
|
||||||
|
cmd, code, output))
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
|
# Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
|
||||||
|
for pool in str(output).split(','):
|
||||||
|
pool_id_name = pool.split(' ')
|
||||||
|
if len(pool_id_name) == 2:
|
||||||
|
pool_id = pool_id_name[0]
|
||||||
|
pool_name = pool_id_name[1]
|
||||||
|
pools[pool_name] = int(pool_id)
|
||||||
|
|
||||||
|
self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
|
||||||
|
pools))
|
||||||
|
return pools
|
||||||
|
|
||||||
|
def get_ceph_df(self, sentry_unit):
|
||||||
|
"""Return dict of ceph df json output, including ceph pool state.
|
||||||
|
|
||||||
|
:param sentry_unit: Pointer to amulet sentry instance (juju unit)
|
||||||
|
:returns: Dict of ceph df output
|
||||||
|
"""
|
||||||
|
cmd = 'sudo ceph df --format=json'
|
||||||
|
output, code = sentry_unit.run(cmd)
|
||||||
|
if code != 0:
|
||||||
|
msg = ('{} `{}` returned {} '
|
||||||
|
'{}'.format(sentry_unit.info['unit_name'],
|
||||||
|
cmd, code, output))
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
return json.loads(output)
|
||||||
|
|
||||||
|
def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
|
||||||
|
"""Take a sample of attributes of a ceph pool, returning ceph
|
||||||
|
pool name, object count and disk space used for the specified
|
||||||
|
pool ID number.
|
||||||
|
|
||||||
|
:param sentry_unit: Pointer to amulet sentry instance (juju unit)
|
||||||
|
:param pool_id: Ceph pool ID
|
||||||
|
:returns: List of pool name, object count, kb disk space used
|
||||||
|
"""
|
||||||
|
df = self.get_ceph_df(sentry_unit)
|
||||||
|
pool_name = df['pools'][pool_id]['name']
|
||||||
|
obj_count = df['pools'][pool_id]['stats']['objects']
|
||||||
|
kb_used = df['pools'][pool_id]['stats']['kb_used']
|
||||||
|
self.log.debug('Ceph {} pool (ID {}): {} objects, '
|
||||||
|
'{} kb used'.format(pool_name, pool_id,
|
||||||
|
obj_count, kb_used))
|
||||||
|
return pool_name, obj_count, kb_used
|
||||||
|
|
||||||
|
def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
|
||||||
|
"""Validate ceph pool samples taken over time, such as pool
|
||||||
|
object counts or pool kb used, before adding, after adding, and
|
||||||
|
after deleting items which affect those pool attributes. The
|
||||||
|
2nd element is expected to be greater than the 1st; 3rd is expected
|
||||||
|
to be less than the 2nd.
|
||||||
|
|
||||||
|
:param samples: List containing 3 data samples
|
||||||
|
:param sample_type: String for logging and usage context
|
||||||
|
:returns: None if successful, Failure message otherwise
|
||||||
|
"""
|
||||||
|
original, created, deleted = range(3)
|
||||||
|
if samples[created] <= samples[original] or \
|
||||||
|
samples[deleted] >= samples[created]:
|
||||||
|
return ('Ceph {} samples ({}) '
|
||||||
|
'unexpected.'.format(sample_type, samples))
|
||||||
|
else:
|
||||||
|
self.log.debug('Ceph {} samples (OK): '
|
||||||
|
'{}'.format(sample_type, samples))
|
||||||
|
return None
|
||||||
|
|
||||||
|
# rabbitmq/amqp specific helpers:
|
||||||
|
def add_rmq_test_user(self, sentry_units,
|
||||||
|
username="testuser1", password="changeme"):
|
||||||
|
"""Add a test user via the first rmq juju unit, check connection as
|
||||||
|
the new user against all sentry units.
|
||||||
|
|
||||||
|
:param sentry_units: list of sentry unit pointers
|
||||||
|
:param username: amqp user name, default to testuser1
|
||||||
|
:param password: amqp user password
|
||||||
|
:returns: None if successful. Raise on error.
|
||||||
|
"""
|
||||||
|
self.log.debug('Adding rmq user ({})...'.format(username))
|
||||||
|
|
||||||
|
# Check that user does not already exist
|
||||||
|
cmd_user_list = 'rabbitmqctl list_users'
|
||||||
|
output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
|
||||||
|
if username in output:
|
||||||
|
self.log.warning('User ({}) already exists, returning '
|
||||||
|
'gracefully.'.format(username))
|
||||||
|
return
|
||||||
|
|
||||||
|
perms = '".*" ".*" ".*"'
|
||||||
|
cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
|
||||||
|
'rabbitmqctl set_permissions {} {}'.format(username, perms)]
|
||||||
|
|
||||||
|
# Add user via first unit
|
||||||
|
for cmd in cmds:
|
||||||
|
output, _ = self.run_cmd_unit(sentry_units[0], cmd)
|
||||||
|
|
||||||
|
# Check connection against the other sentry_units
|
||||||
|
self.log.debug('Checking user connect against units...')
|
||||||
|
for sentry_unit in sentry_units:
|
||||||
|
connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
|
||||||
|
username=username,
|
||||||
|
password=password)
|
||||||
|
connection.close()
|
||||||
|
|
||||||
|
def delete_rmq_test_user(self, sentry_units, username="testuser1"):
|
||||||
|
"""Delete a rabbitmq user via the first rmq juju unit.
|
||||||
|
|
||||||
|
:param sentry_units: list of sentry unit pointers
|
||||||
|
:param username: amqp user name, default to testuser1
|
||||||
|
:param password: amqp user password
|
||||||
|
:returns: None if successful or no such user.
|
||||||
|
"""
|
||||||
|
self.log.debug('Deleting rmq user ({})...'.format(username))
|
||||||
|
|
||||||
|
# Check that the user exists
|
||||||
|
cmd_user_list = 'rabbitmqctl list_users'
|
||||||
|
output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
|
||||||
|
|
||||||
|
if username not in output:
|
||||||
|
self.log.warning('User ({}) does not exist, returning '
|
||||||
|
'gracefully.'.format(username))
|
||||||
|
return
|
||||||
|
|
||||||
|
# Delete the user
|
||||||
|
cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
|
||||||
|
output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
|
||||||
|
|
||||||
|
def get_rmq_cluster_status(self, sentry_unit):
|
||||||
|
"""Execute rabbitmq cluster status command on a unit and return
|
||||||
|
the full output.
|
||||||
|
|
||||||
|
:param unit: sentry unit
|
||||||
|
:returns: String containing console output of cluster status command
|
||||||
|
"""
|
||||||
|
cmd = 'rabbitmqctl cluster_status'
|
||||||
|
output, _ = self.run_cmd_unit(sentry_unit, cmd)
|
||||||
|
self.log.debug('{} cluster_status:\n{}'.format(
|
||||||
|
sentry_unit.info['unit_name'], output))
|
||||||
|
return str(output)
|
||||||
|
|
||||||
|
def get_rmq_cluster_running_nodes(self, sentry_unit):
|
||||||
|
"""Parse rabbitmqctl cluster_status output string, return list of
|
||||||
|
running rabbitmq cluster nodes.
|
||||||
|
|
||||||
|
:param unit: sentry unit
|
||||||
|
:returns: List containing node names of running nodes
|
||||||
|
"""
|
||||||
|
# NOTE(beisner): rabbitmqctl cluster_status output is not
|
||||||
|
# json-parsable, do string chop foo, then json.loads that.
|
||||||
|
str_stat = self.get_rmq_cluster_status(sentry_unit)
|
||||||
|
if 'running_nodes' in str_stat:
|
||||||
|
pos_start = str_stat.find("{running_nodes,") + 15
|
||||||
|
pos_end = str_stat.find("]},", pos_start) + 1
|
||||||
|
str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
|
||||||
|
run_nodes = json.loads(str_run_nodes)
|
||||||
|
return run_nodes
|
||||||
|
else:
|
||||||
|
return []
|
||||||
|
|
||||||
|
def validate_rmq_cluster_running_nodes(self, sentry_units):
|
||||||
|
"""Check that all rmq unit hostnames are represented in the
|
||||||
|
cluster_status output of all units.
|
||||||
|
|
||||||
|
:param host_names: dict of juju unit names to host names
|
||||||
|
:param units: list of sentry unit pointers (all rmq units)
|
||||||
|
:returns: None if successful, otherwise return error message
|
||||||
|
"""
|
||||||
|
host_names = self.get_unit_hostnames(sentry_units)
|
||||||
|
errors = []
|
||||||
|
|
||||||
|
# Query every unit for cluster_status running nodes
|
||||||
|
for query_unit in sentry_units:
|
||||||
|
query_unit_name = query_unit.info['unit_name']
|
||||||
|
running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
|
||||||
|
|
||||||
|
# Confirm that every unit is represented in the queried unit's
|
||||||
|
# cluster_status running nodes output.
|
||||||
|
for validate_unit in sentry_units:
|
||||||
|
val_host_name = host_names[validate_unit.info['unit_name']]
|
||||||
|
val_node_name = 'rabbit@{}'.format(val_host_name)
|
||||||
|
|
||||||
|
if val_node_name not in running_nodes:
|
||||||
|
errors.append('Cluster member check failed on {}: {} not '
|
||||||
|
'in {}\n'.format(query_unit_name,
|
||||||
|
val_node_name,
|
||||||
|
running_nodes))
|
||||||
|
if errors:
|
||||||
|
return ''.join(errors)
|
||||||
|
|
||||||
|
def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
|
||||||
|
"""Check a single juju rmq unit for ssl and port in the config file."""
|
||||||
|
host = sentry_unit.info['public-address']
|
||||||
|
unit_name = sentry_unit.info['unit_name']
|
||||||
|
|
||||||
|
conf_file = '/etc/rabbitmq/rabbitmq.config'
|
||||||
|
conf_contents = str(self.file_contents_safe(sentry_unit,
|
||||||
|
conf_file, max_wait=16))
|
||||||
|
# Checks
|
||||||
|
conf_ssl = 'ssl' in conf_contents
|
||||||
|
conf_port = str(port) in conf_contents
|
||||||
|
|
||||||
|
# Port explicitly checked in config
|
||||||
|
if port and conf_port and conf_ssl:
|
||||||
|
self.log.debug('SSL is enabled @{}:{} '
|
||||||
|
'({})'.format(host, port, unit_name))
|
||||||
|
return True
|
||||||
|
elif port and not conf_port and conf_ssl:
|
||||||
|
self.log.debug('SSL is enabled @{} but not on port {} '
|
||||||
|
'({})'.format(host, port, unit_name))
|
||||||
|
return False
|
||||||
|
# Port not checked (useful when checking that ssl is disabled)
|
||||||
|
elif not port and conf_ssl:
|
||||||
|
self.log.debug('SSL is enabled @{}:{} '
|
||||||
|
'({})'.format(host, port, unit_name))
|
||||||
|
return True
|
||||||
|
elif not port and not conf_ssl:
|
||||||
|
self.log.debug('SSL not enabled @{}:{} '
|
||||||
|
'({})'.format(host, port, unit_name))
|
||||||
|
return False
|
||||||
|
else:
|
||||||
|
msg = ('Unknown condition when checking SSL status @{}:{} '
|
||||||
|
'({})'.format(host, port, unit_name))
|
||||||
|
amulet.raise_status(amulet.FAIL, msg)
|
||||||
|
|
||||||
|
def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
|
||||||
|
"""Check that ssl is enabled on rmq juju sentry units.
|
||||||
|
|
||||||
|
:param sentry_units: list of all rmq sentry units
|
||||||
|
:param port: optional ssl port override to validate
|
||||||
|
:returns: None if successful, otherwise return error message
|
||||||
|
"""
|
||||||
|
for sentry_unit in sentry_units:
|
||||||
|
if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
|
||||||
|
return ('Unexpected condition: ssl is disabled on unit '
|
||||||
|
'({})'.format(sentry_unit.info['unit_name']))
|
||||||
|
return None
|
||||||
|
|
||||||
|
def validate_rmq_ssl_disabled_units(self, sentry_units):
|
||||||
|
"""Check that ssl is enabled on listed rmq juju sentry units.
|
||||||
|
|
||||||
|
:param sentry_units: list of all rmq sentry units
|
||||||
|
:returns: True if successful. Raise on error.
|
||||||
|
"""
|
||||||
|
for sentry_unit in sentry_units:
|
||||||
|
if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
|
||||||
|
return ('Unexpected condition: ssl is enabled on unit '
|
||||||
|
'({})'.format(sentry_unit.info['unit_name']))
|
||||||
|
return None
|
||||||
|
|
||||||
|
def configure_rmq_ssl_on(self, sentry_units, deployment,
|
||||||
|
port=None, max_wait=60):
|
||||||
|
"""Turn ssl charm config option on, with optional non-default
|
||||||
|
ssl port specification. Confirm that it is enabled on every
|
||||||
|
unit.
|
||||||
|
|
||||||
|
:param sentry_units: list of sentry units
|
||||||
|
:param deployment: amulet deployment object pointer
|
||||||
|
:param port: amqp port, use defaults if None
|
||||||
|
:param max_wait: maximum time to wait in seconds to confirm
|
||||||
|
:returns: None if successful. Raise on error.
|
||||||
|
"""
|
||||||
|
self.log.debug('Setting ssl charm config option: on')
|
||||||
|
|
||||||
|
# Enable RMQ SSL
|
||||||
|
config = {'ssl': 'on'}
|
||||||
|
if port:
|
||||||
|
config['ssl_port'] = port
|
||||||
|
|
||||||
|
deployment.configure('rabbitmq-server', config)
|
||||||
|
|
||||||
|
# Confirm
|
||||||
|
tries = 0
|
||||||
|
ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
|
||||||
|
while ret and tries < (max_wait / 4):
|
||||||
|
time.sleep(4)
|
||||||
|
self.log.debug('Attempt {}: {}'.format(tries, ret))
|
||||||
|
ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
|
||||||
|
tries += 1
|
||||||
|
|
||||||
|
if ret:
|
||||||
|
amulet.raise_status(amulet.FAIL, ret)
|
||||||
|
|
||||||
|
def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
|
||||||
|
"""Turn ssl charm config option off, confirm that it is disabled
|
||||||
|
on every unit.
|
||||||
|
|
||||||
|
:param sentry_units: list of sentry units
|
||||||
|
:param deployment: amulet deployment object pointer
|
||||||
|
:param max_wait: maximum time to wait in seconds to confirm
|
||||||
|
:returns: None if successful. Raise on error.
|
||||||
|
"""
|
||||||
|
self.log.debug('Setting ssl charm config option: off')
|
||||||
|
|
||||||
|
# Disable RMQ SSL
|
||||||
|
config = {'ssl': 'off'}
|
||||||
|
deployment.configure('rabbitmq-server', config)
|
||||||
|
|
||||||
|
# Confirm
|
||||||
|
tries = 0
|
||||||
|
ret = self.validate_rmq_ssl_disabled_units(sentry_units)
|
||||||
|
while ret and tries < (max_wait / 4):
|
||||||
|
time.sleep(4)
|
||||||
|
self.log.debug('Attempt {}: {}'.format(tries, ret))
|
||||||
|
ret = self.validate_rmq_ssl_disabled_units(sentry_units)
|
||||||
|
tries += 1
|
||||||
|
|
||||||
|
if ret:
|
||||||
|
amulet.raise_status(amulet.FAIL, ret)
|
||||||
|
|
||||||
|
def connect_amqp_by_unit(self, sentry_unit, ssl=False,
|
||||||
|
port=None, fatal=True,
|
||||||
|
username="testuser1", password="changeme"):
|
||||||
|
"""Establish and return a pika amqp connection to the rabbitmq service
|
||||||
|
running on a rmq juju unit.
|
||||||
|
|
||||||
|
:param sentry_unit: sentry unit pointer
|
||||||
|
:param ssl: boolean, default to False
|
||||||
|
:param port: amqp port, use defaults if None
|
||||||
|
:param fatal: boolean, default to True (raises on connect error)
|
||||||
|
:param username: amqp user name, default to testuser1
|
||||||
|
:param password: amqp user password
|
||||||
|
:returns: pika amqp connection pointer or None if failed and non-fatal
|
||||||
|
"""
|
||||||
|
host = sentry_unit.info['public-address']
|
||||||
|
unit_name = sentry_unit.info['unit_name']
|
||||||
|
|
||||||
|
# Default port logic if port is not specified
|
||||||
|
if ssl and not port:
|
||||||
|
port = 5671
|
||||||
|
elif not ssl and not port:
|
||||||
|
port = 5672
|
||||||
|
|
||||||
|
self.log.debug('Connecting to amqp on {}:{} ({}) as '
|
||||||
|
'{}...'.format(host, port, unit_name, username))
|
||||||
|
|
||||||
|
try:
|
||||||
|
credentials = pika.PlainCredentials(username, password)
|
||||||
|
parameters = pika.ConnectionParameters(host=host, port=port,
|
||||||
|
credentials=credentials,
|
||||||
|
ssl=ssl,
|
||||||
|
connection_attempts=3,
|
||||||
|
retry_delay=5,
|
||||||
|
socket_timeout=1)
|
||||||
|
connection = pika.BlockingConnection(parameters)
|
||||||
|
assert connection.server_properties['product'] == 'RabbitMQ'
|
||||||
|
self.log.debug('Connect OK')
|
||||||
|
return connection
|
||||||
|
except Exception as e:
|
||||||
|
msg = ('amqp connection failed to {}:{} as '
|
||||||
|
'{} ({})'.format(host, port, username, str(e)))
|
||||||
|
if fatal:
|
||||||
|
amulet.raise_status(amulet.FAIL, msg)
|
||||||
|
else:
|
||||||
|
self.log.warn(msg)
|
||||||
|
return None
|
||||||
|
|
||||||
|
def publish_amqp_message_by_unit(self, sentry_unit, message,
|
||||||
|
queue="test", ssl=False,
|
||||||
|
username="testuser1",
|
||||||
|
password="changeme",
|
||||||
|
port=None):
|
||||||
|
"""Publish an amqp message to a rmq juju unit.
|
||||||
|
|
||||||
|
:param sentry_unit: sentry unit pointer
|
||||||
|
:param message: amqp message string
|
||||||
|
:param queue: message queue, default to test
|
||||||
|
:param username: amqp user name, default to testuser1
|
||||||
|
:param password: amqp user password
|
||||||
|
:param ssl: boolean, default to False
|
||||||
|
:param port: amqp port, use defaults if None
|
||||||
|
:returns: None. Raises exception if publish failed.
|
||||||
|
"""
|
||||||
|
self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
|
||||||
|
message))
|
||||||
|
connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
|
||||||
|
port=port,
|
||||||
|
username=username,
|
||||||
|
password=password)
|
||||||
|
|
||||||
|
# NOTE(beisner): extra debug here re: pika hang potential:
|
||||||
|
# https://github.com/pika/pika/issues/297
|
||||||
|
# https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
|
||||||
|
self.log.debug('Defining channel...')
|
||||||
|
channel = connection.channel()
|
||||||
|
self.log.debug('Declaring queue...')
|
||||||
|
channel.queue_declare(queue=queue, auto_delete=False, durable=True)
|
||||||
|
self.log.debug('Publishing message...')
|
||||||
|
channel.basic_publish(exchange='', routing_key=queue, body=message)
|
||||||
|
self.log.debug('Closing channel...')
|
||||||
|
channel.close()
|
||||||
|
self.log.debug('Closing connection...')
|
||||||
|
connection.close()
|
||||||
|
|
||||||
|
def get_amqp_message_by_unit(self, sentry_unit, queue="test",
|
||||||
|
username="testuser1",
|
||||||
|
password="changeme",
|
||||||
|
ssl=False, port=None):
|
||||||
|
"""Get an amqp message from a rmq juju unit.
|
||||||
|
|
||||||
|
:param sentry_unit: sentry unit pointer
|
||||||
|
:param queue: message queue, default to test
|
||||||
|
:param username: amqp user name, default to testuser1
|
||||||
|
:param password: amqp user password
|
||||||
|
:param ssl: boolean, default to False
|
||||||
|
:param port: amqp port, use defaults if None
|
||||||
|
:returns: amqp message body as string. Raise if get fails.
|
||||||
|
"""
|
||||||
|
connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
|
||||||
|
port=port,
|
||||||
|
username=username,
|
||||||
|
password=password)
|
||||||
|
channel = connection.channel()
|
||||||
|
method_frame, _, body = channel.basic_get(queue)
|
||||||
|
|
||||||
|
if method_frame:
|
||||||
|
self.log.debug('Retreived message from {} queue:\n{}'.format(queue,
|
||||||
|
body))
|
||||||
|
channel.basic_ack(method_frame.delivery_tag)
|
||||||
|
channel.close()
|
||||||
|
connection.close()
|
||||||
|
return body
|
||||||
|
else:
|
||||||
|
msg = 'No message retrieved.'
|
||||||
|
amulet.raise_status(amulet.FAIL, msg)
|
|
@ -1,94 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import subprocess
|
|
||||||
from charmhelpers.core import hookenv
|
|
||||||
|
|
||||||
|
|
||||||
def generate_selfsigned(keyfile, certfile, keysize="1024", config=None, subject=None, cn=None):
|
|
||||||
"""Generate selfsigned SSL keypair
|
|
||||||
|
|
||||||
You must provide one of the 3 optional arguments:
|
|
||||||
config, subject or cn
|
|
||||||
If more than one is provided the leftmost will be used
|
|
||||||
|
|
||||||
Arguments:
|
|
||||||
keyfile -- (required) full path to the keyfile to be created
|
|
||||||
certfile -- (required) full path to the certfile to be created
|
|
||||||
keysize -- (optional) SSL key length
|
|
||||||
config -- (optional) openssl configuration file
|
|
||||||
subject -- (optional) dictionary with SSL subject variables
|
|
||||||
cn -- (optional) cerfificate common name
|
|
||||||
|
|
||||||
Required keys in subject dict:
|
|
||||||
cn -- Common name (eq. FQDN)
|
|
||||||
|
|
||||||
Optional keys in subject dict
|
|
||||||
country -- Country Name (2 letter code)
|
|
||||||
state -- State or Province Name (full name)
|
|
||||||
locality -- Locality Name (eg, city)
|
|
||||||
organization -- Organization Name (eg, company)
|
|
||||||
organizational_unit -- Organizational Unit Name (eg, section)
|
|
||||||
email -- Email Address
|
|
||||||
"""
|
|
||||||
|
|
||||||
cmd = []
|
|
||||||
if config:
|
|
||||||
cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
|
|
||||||
"rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
|
|
||||||
"-keyout", keyfile,
|
|
||||||
"-out", certfile, "-config", config]
|
|
||||||
elif subject:
|
|
||||||
ssl_subject = ""
|
|
||||||
if "country" in subject:
|
|
||||||
ssl_subject = ssl_subject + "/C={}".format(subject["country"])
|
|
||||||
if "state" in subject:
|
|
||||||
ssl_subject = ssl_subject + "/ST={}".format(subject["state"])
|
|
||||||
if "locality" in subject:
|
|
||||||
ssl_subject = ssl_subject + "/L={}".format(subject["locality"])
|
|
||||||
if "organization" in subject:
|
|
||||||
ssl_subject = ssl_subject + "/O={}".format(subject["organization"])
|
|
||||||
if "organizational_unit" in subject:
|
|
||||||
ssl_subject = ssl_subject + "/OU={}".format(subject["organizational_unit"])
|
|
||||||
if "cn" in subject:
|
|
||||||
ssl_subject = ssl_subject + "/CN={}".format(subject["cn"])
|
|
||||||
else:
|
|
||||||
hookenv.log("When using \"subject\" argument you must "
|
|
||||||
"provide \"cn\" field at very least")
|
|
||||||
return False
|
|
||||||
if "email" in subject:
|
|
||||||
ssl_subject = ssl_subject + "/emailAddress={}".format(subject["email"])
|
|
||||||
|
|
||||||
cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
|
|
||||||
"rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
|
|
||||||
"-keyout", keyfile,
|
|
||||||
"-out", certfile, "-subj", ssl_subject]
|
|
||||||
elif cn:
|
|
||||||
cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
|
|
||||||
"rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
|
|
||||||
"-keyout", keyfile,
|
|
||||||
"-out", certfile, "-subj", "/CN={}".format(cn)]
|
|
||||||
|
|
||||||
if not cmd:
|
|
||||||
hookenv.log("No config, subject or cn provided,"
|
|
||||||
"unable to generate self signed SSL certificates")
|
|
||||||
return False
|
|
||||||
try:
|
|
||||||
subprocess.check_call(cmd)
|
|
||||||
return True
|
|
||||||
except Exception as e:
|
|
||||||
print("Execution of openssl command failed:\n{}".format(e))
|
|
||||||
return False
|
|
|
@ -1,283 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import logging
|
|
||||||
import os
|
|
||||||
from os.path import join as path_join
|
|
||||||
from os.path import exists
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
|
|
||||||
log = logging.getLogger("service_ca")
|
|
||||||
|
|
||||||
logging.basicConfig(level=logging.DEBUG)
|
|
||||||
|
|
||||||
STD_CERT = "standard"
|
|
||||||
|
|
||||||
# Mysql server is fairly picky about cert creation
|
|
||||||
# and types, spec its creation separately for now.
|
|
||||||
MYSQL_CERT = "mysql"
|
|
||||||
|
|
||||||
|
|
||||||
class ServiceCA(object):
|
|
||||||
|
|
||||||
default_expiry = str(365 * 2)
|
|
||||||
default_ca_expiry = str(365 * 6)
|
|
||||||
|
|
||||||
def __init__(self, name, ca_dir, cert_type=STD_CERT):
|
|
||||||
self.name = name
|
|
||||||
self.ca_dir = ca_dir
|
|
||||||
self.cert_type = cert_type
|
|
||||||
|
|
||||||
###############
|
|
||||||
# Hook Helper API
|
|
||||||
@staticmethod
|
|
||||||
def get_ca(type=STD_CERT):
|
|
||||||
service_name = os.environ['JUJU_UNIT_NAME'].split('/')[0]
|
|
||||||
ca_path = os.path.join(os.environ['CHARM_DIR'], 'ca')
|
|
||||||
ca = ServiceCA(service_name, ca_path, type)
|
|
||||||
ca.init()
|
|
||||||
return ca
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def get_service_cert(cls, type=STD_CERT):
|
|
||||||
service_name = os.environ['JUJU_UNIT_NAME'].split('/')[0]
|
|
||||||
ca = cls.get_ca()
|
|
||||||
crt, key = ca.get_or_create_cert(service_name)
|
|
||||||
return crt, key, ca.get_ca_bundle()
|
|
||||||
|
|
||||||
###############
|
|
||||||
|
|
||||||
def init(self):
|
|
||||||
log.debug("initializing service ca")
|
|
||||||
if not exists(self.ca_dir):
|
|
||||||
self._init_ca_dir(self.ca_dir)
|
|
||||||
self._init_ca()
|
|
||||||
|
|
||||||
@property
|
|
||||||
def ca_key(self):
|
|
||||||
return path_join(self.ca_dir, 'private', 'cacert.key')
|
|
||||||
|
|
||||||
@property
|
|
||||||
def ca_cert(self):
|
|
||||||
return path_join(self.ca_dir, 'cacert.pem')
|
|
||||||
|
|
||||||
@property
|
|
||||||
def ca_conf(self):
|
|
||||||
return path_join(self.ca_dir, 'ca.cnf')
|
|
||||||
|
|
||||||
@property
|
|
||||||
def signing_conf(self):
|
|
||||||
return path_join(self.ca_dir, 'signing.cnf')
|
|
||||||
|
|
||||||
def _init_ca_dir(self, ca_dir):
|
|
||||||
os.mkdir(ca_dir)
|
|
||||||
for i in ['certs', 'crl', 'newcerts', 'private']:
|
|
||||||
sd = path_join(ca_dir, i)
|
|
||||||
if not exists(sd):
|
|
||||||
os.mkdir(sd)
|
|
||||||
|
|
||||||
if not exists(path_join(ca_dir, 'serial')):
|
|
||||||
with open(path_join(ca_dir, 'serial'), 'w') as fh:
|
|
||||||
fh.write('02\n')
|
|
||||||
|
|
||||||
if not exists(path_join(ca_dir, 'index.txt')):
|
|
||||||
with open(path_join(ca_dir, 'index.txt'), 'w') as fh:
|
|
||||||
fh.write('')
|
|
||||||
|
|
||||||
def _init_ca(self):
|
|
||||||
"""Generate the root ca's cert and key.
|
|
||||||
"""
|
|
||||||
if not exists(path_join(self.ca_dir, 'ca.cnf')):
|
|
||||||
with open(path_join(self.ca_dir, 'ca.cnf'), 'w') as fh:
|
|
||||||
fh.write(
|
|
||||||
CA_CONF_TEMPLATE % (self.get_conf_variables()))
|
|
||||||
|
|
||||||
if not exists(path_join(self.ca_dir, 'signing.cnf')):
|
|
||||||
with open(path_join(self.ca_dir, 'signing.cnf'), 'w') as fh:
|
|
||||||
fh.write(
|
|
||||||
SIGNING_CONF_TEMPLATE % (self.get_conf_variables()))
|
|
||||||
|
|
||||||
if exists(self.ca_cert) or exists(self.ca_key):
|
|
||||||
raise RuntimeError("Initialized called when CA already exists")
|
|
||||||
cmd = ['openssl', 'req', '-config', self.ca_conf,
|
|
||||||
'-x509', '-nodes', '-newkey', 'rsa',
|
|
||||||
'-days', self.default_ca_expiry,
|
|
||||||
'-keyout', self.ca_key, '-out', self.ca_cert,
|
|
||||||
'-outform', 'PEM']
|
|
||||||
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
|
|
||||||
log.debug("CA Init:\n %s", output)
|
|
||||||
|
|
||||||
def get_conf_variables(self):
|
|
||||||
return dict(
|
|
||||||
org_name="juju",
|
|
||||||
org_unit_name="%s service" % self.name,
|
|
||||||
common_name=self.name,
|
|
||||||
ca_dir=self.ca_dir)
|
|
||||||
|
|
||||||
def get_or_create_cert(self, common_name):
|
|
||||||
if common_name in self:
|
|
||||||
return self.get_certificate(common_name)
|
|
||||||
return self.create_certificate(common_name)
|
|
||||||
|
|
||||||
def create_certificate(self, common_name):
|
|
||||||
if common_name in self:
|
|
||||||
return self.get_certificate(common_name)
|
|
||||||
key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name)
|
|
||||||
crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name)
|
|
||||||
csr_p = path_join(self.ca_dir, "certs", "%s.csr" % common_name)
|
|
||||||
self._create_certificate(common_name, key_p, csr_p, crt_p)
|
|
||||||
return self.get_certificate(common_name)
|
|
||||||
|
|
||||||
def get_certificate(self, common_name):
|
|
||||||
if common_name not in self:
|
|
||||||
raise ValueError("No certificate for %s" % common_name)
|
|
||||||
key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name)
|
|
||||||
crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name)
|
|
||||||
with open(crt_p) as fh:
|
|
||||||
crt = fh.read()
|
|
||||||
with open(key_p) as fh:
|
|
||||||
key = fh.read()
|
|
||||||
return crt, key
|
|
||||||
|
|
||||||
def __contains__(self, common_name):
|
|
||||||
crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name)
|
|
||||||
return exists(crt_p)
|
|
||||||
|
|
||||||
def _create_certificate(self, common_name, key_p, csr_p, crt_p):
|
|
||||||
template_vars = self.get_conf_variables()
|
|
||||||
template_vars['common_name'] = common_name
|
|
||||||
subj = '/O=%(org_name)s/OU=%(org_unit_name)s/CN=%(common_name)s' % (
|
|
||||||
template_vars)
|
|
||||||
|
|
||||||
log.debug("CA Create Cert %s", common_name)
|
|
||||||
cmd = ['openssl', 'req', '-sha1', '-newkey', 'rsa:2048',
|
|
||||||
'-nodes', '-days', self.default_expiry,
|
|
||||||
'-keyout', key_p, '-out', csr_p, '-subj', subj]
|
|
||||||
subprocess.check_call(cmd)
|
|
||||||
cmd = ['openssl', 'rsa', '-in', key_p, '-out', key_p]
|
|
||||||
subprocess.check_call(cmd)
|
|
||||||
|
|
||||||
log.debug("CA Sign Cert %s", common_name)
|
|
||||||
if self.cert_type == MYSQL_CERT:
|
|
||||||
cmd = ['openssl', 'x509', '-req',
|
|
||||||
'-in', csr_p, '-days', self.default_expiry,
|
|
||||||
'-CA', self.ca_cert, '-CAkey', self.ca_key,
|
|
||||||
'-set_serial', '01', '-out', crt_p]
|
|
||||||
else:
|
|
||||||
cmd = ['openssl', 'ca', '-config', self.signing_conf,
|
|
||||||
'-extensions', 'req_extensions',
|
|
||||||
'-days', self.default_expiry, '-notext',
|
|
||||||
'-in', csr_p, '-out', crt_p, '-subj', subj, '-batch']
|
|
||||||
log.debug("running %s", " ".join(cmd))
|
|
||||||
subprocess.check_call(cmd)
|
|
||||||
|
|
||||||
def get_ca_bundle(self):
|
|
||||||
with open(self.ca_cert) as fh:
|
|
||||||
return fh.read()
|
|
||||||
|
|
||||||
|
|
||||||
CA_CONF_TEMPLATE = """
|
|
||||||
[ ca ]
|
|
||||||
default_ca = CA_default
|
|
||||||
|
|
||||||
[ CA_default ]
|
|
||||||
dir = %(ca_dir)s
|
|
||||||
policy = policy_match
|
|
||||||
database = $dir/index.txt
|
|
||||||
serial = $dir/serial
|
|
||||||
certs = $dir/certs
|
|
||||||
crl_dir = $dir/crl
|
|
||||||
new_certs_dir = $dir/newcerts
|
|
||||||
certificate = $dir/cacert.pem
|
|
||||||
private_key = $dir/private/cacert.key
|
|
||||||
RANDFILE = $dir/private/.rand
|
|
||||||
default_md = default
|
|
||||||
|
|
||||||
[ req ]
|
|
||||||
default_bits = 1024
|
|
||||||
default_md = sha1
|
|
||||||
|
|
||||||
prompt = no
|
|
||||||
distinguished_name = ca_distinguished_name
|
|
||||||
|
|
||||||
x509_extensions = ca_extensions
|
|
||||||
|
|
||||||
[ ca_distinguished_name ]
|
|
||||||
organizationName = %(org_name)s
|
|
||||||
organizationalUnitName = %(org_unit_name)s Certificate Authority
|
|
||||||
|
|
||||||
|
|
||||||
[ policy_match ]
|
|
||||||
countryName = optional
|
|
||||||
stateOrProvinceName = optional
|
|
||||||
organizationName = match
|
|
||||||
organizationalUnitName = optional
|
|
||||||
commonName = supplied
|
|
||||||
|
|
||||||
[ ca_extensions ]
|
|
||||||
basicConstraints = critical,CA:true
|
|
||||||
subjectKeyIdentifier = hash
|
|
||||||
authorityKeyIdentifier = keyid:always, issuer
|
|
||||||
keyUsage = cRLSign, keyCertSign
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
|
||||||
SIGNING_CONF_TEMPLATE = """
|
|
||||||
[ ca ]
|
|
||||||
default_ca = CA_default
|
|
||||||
|
|
||||||
[ CA_default ]
|
|
||||||
dir = %(ca_dir)s
|
|
||||||
policy = policy_match
|
|
||||||
database = $dir/index.txt
|
|
||||||
serial = $dir/serial
|
|
||||||
certs = $dir/certs
|
|
||||||
crl_dir = $dir/crl
|
|
||||||
new_certs_dir = $dir/newcerts
|
|
||||||
certificate = $dir/cacert.pem
|
|
||||||
private_key = $dir/private/cacert.key
|
|
||||||
RANDFILE = $dir/private/.rand
|
|
||||||
default_md = default
|
|
||||||
|
|
||||||
[ req ]
|
|
||||||
default_bits = 1024
|
|
||||||
default_md = sha1
|
|
||||||
|
|
||||||
prompt = no
|
|
||||||
distinguished_name = req_distinguished_name
|
|
||||||
|
|
||||||
x509_extensions = req_extensions
|
|
||||||
|
|
||||||
[ req_distinguished_name ]
|
|
||||||
organizationName = %(org_name)s
|
|
||||||
organizationalUnitName = %(org_unit_name)s machine resources
|
|
||||||
commonName = %(common_name)s
|
|
||||||
|
|
||||||
[ policy_match ]
|
|
||||||
countryName = optional
|
|
||||||
stateOrProvinceName = optional
|
|
||||||
organizationName = match
|
|
||||||
organizationalUnitName = optional
|
|
||||||
commonName = supplied
|
|
||||||
|
|
||||||
[ req_extensions ]
|
|
||||||
basicConstraints = CA:false
|
|
||||||
subjectKeyIdentifier = hash
|
|
||||||
authorityKeyIdentifier = keyid:always, issuer
|
|
||||||
keyUsage = digitalSignature, keyEncipherment, keyAgreement
|
|
||||||
extendedKeyUsage = serverAuth, clientAuth
|
|
||||||
"""
|
|
|
@ -1,57 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
#
|
|
||||||
# Copyright 2014 Canonical Ltd.
|
|
||||||
#
|
|
||||||
# Authors:
|
|
||||||
# Edward Hope-Morley <opentastic@gmail.com>
|
|
||||||
#
|
|
||||||
|
|
||||||
import time
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
INFO,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
|
|
||||||
"""If the decorated function raises exception exc_type, allow num_retries
|
|
||||||
retry attempts before raise the exception.
|
|
||||||
"""
|
|
||||||
def _retry_on_exception_inner_1(f):
|
|
||||||
def _retry_on_exception_inner_2(*args, **kwargs):
|
|
||||||
retries = num_retries
|
|
||||||
multiplier = 1
|
|
||||||
while True:
|
|
||||||
try:
|
|
||||||
return f(*args, **kwargs)
|
|
||||||
except exc_type:
|
|
||||||
if not retries:
|
|
||||||
raise
|
|
||||||
|
|
||||||
delay = base_delay * multiplier
|
|
||||||
multiplier += 1
|
|
||||||
log("Retrying '%s' %d more times (delay=%s)" %
|
|
||||||
(f.__name__, retries, delay), level=INFO)
|
|
||||||
retries -= 1
|
|
||||||
if delay:
|
|
||||||
time.sleep(delay)
|
|
||||||
|
|
||||||
return _retry_on_exception_inner_2
|
|
||||||
|
|
||||||
return _retry_on_exception_inner_1
|
|
|
@ -1,134 +0,0 @@
|
||||||
#!/usr/bin/env python
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import io
|
|
||||||
import os
|
|
||||||
|
|
||||||
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
|
|
||||||
|
|
||||||
|
|
||||||
class Fstab(io.FileIO):
|
|
||||||
"""This class extends file in order to implement a file reader/writer
|
|
||||||
for file `/etc/fstab`
|
|
||||||
"""
|
|
||||||
|
|
||||||
class Entry(object):
|
|
||||||
"""Entry class represents a non-comment line on the `/etc/fstab` file
|
|
||||||
"""
|
|
||||||
def __init__(self, device, mountpoint, filesystem,
|
|
||||||
options, d=0, p=0):
|
|
||||||
self.device = device
|
|
||||||
self.mountpoint = mountpoint
|
|
||||||
self.filesystem = filesystem
|
|
||||||
|
|
||||||
if not options:
|
|
||||||
options = "defaults"
|
|
||||||
|
|
||||||
self.options = options
|
|
||||||
self.d = int(d)
|
|
||||||
self.p = int(p)
|
|
||||||
|
|
||||||
def __eq__(self, o):
|
|
||||||
return str(self) == str(o)
|
|
||||||
|
|
||||||
def __str__(self):
|
|
||||||
return "{} {} {} {} {} {}".format(self.device,
|
|
||||||
self.mountpoint,
|
|
||||||
self.filesystem,
|
|
||||||
self.options,
|
|
||||||
self.d,
|
|
||||||
self.p)
|
|
||||||
|
|
||||||
DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
|
|
||||||
|
|
||||||
def __init__(self, path=None):
|
|
||||||
if path:
|
|
||||||
self._path = path
|
|
||||||
else:
|
|
||||||
self._path = self.DEFAULT_PATH
|
|
||||||
super(Fstab, self).__init__(self._path, 'rb+')
|
|
||||||
|
|
||||||
def _hydrate_entry(self, line):
|
|
||||||
# NOTE: use split with no arguments to split on any
|
|
||||||
# whitespace including tabs
|
|
||||||
return Fstab.Entry(*filter(
|
|
||||||
lambda x: x not in ('', None),
|
|
||||||
line.strip("\n").split()))
|
|
||||||
|
|
||||||
@property
|
|
||||||
def entries(self):
|
|
||||||
self.seek(0)
|
|
||||||
for line in self.readlines():
|
|
||||||
line = line.decode('us-ascii')
|
|
||||||
try:
|
|
||||||
if line.strip() and not line.strip().startswith("#"):
|
|
||||||
yield self._hydrate_entry(line)
|
|
||||||
except ValueError:
|
|
||||||
pass
|
|
||||||
|
|
||||||
def get_entry_by_attr(self, attr, value):
|
|
||||||
for entry in self.entries:
|
|
||||||
e_attr = getattr(entry, attr)
|
|
||||||
if e_attr == value:
|
|
||||||
return entry
|
|
||||||
return None
|
|
||||||
|
|
||||||
def add_entry(self, entry):
|
|
||||||
if self.get_entry_by_attr('device', entry.device):
|
|
||||||
return False
|
|
||||||
|
|
||||||
self.write((str(entry) + '\n').encode('us-ascii'))
|
|
||||||
self.truncate()
|
|
||||||
return entry
|
|
||||||
|
|
||||||
def remove_entry(self, entry):
|
|
||||||
self.seek(0)
|
|
||||||
|
|
||||||
lines = [l.decode('us-ascii') for l in self.readlines()]
|
|
||||||
|
|
||||||
found = False
|
|
||||||
for index, line in enumerate(lines):
|
|
||||||
if line.strip() and not line.strip().startswith("#"):
|
|
||||||
if self._hydrate_entry(line) == entry:
|
|
||||||
found = True
|
|
||||||
break
|
|
||||||
|
|
||||||
if not found:
|
|
||||||
return False
|
|
||||||
|
|
||||||
lines.remove(line)
|
|
||||||
|
|
||||||
self.seek(0)
|
|
||||||
self.write(''.join(lines).encode('us-ascii'))
|
|
||||||
self.truncate()
|
|
||||||
return True
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def remove_by_mountpoint(cls, mountpoint, path=None):
|
|
||||||
fstab = cls(path=path)
|
|
||||||
entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
|
|
||||||
if entry:
|
|
||||||
return fstab.remove_entry(entry)
|
|
||||||
return False
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def add(cls, device, mountpoint, filesystem, options=None, path=None):
|
|
||||||
return cls(path=path).add_entry(Fstab.Entry(device,
|
|
||||||
mountpoint, filesystem,
|
|
||||||
options=options))
|
|
|
@ -1,607 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
"Interactions with the Juju environment"
|
|
||||||
# Copyright 2013 Canonical Ltd.
|
|
||||||
#
|
|
||||||
# Authors:
|
|
||||||
# Charm Helpers Developers <juju@lists.ubuntu.com>
|
|
||||||
|
|
||||||
from __future__ import print_function
|
|
||||||
import os
|
|
||||||
import json
|
|
||||||
import yaml
|
|
||||||
import subprocess
|
|
||||||
import sys
|
|
||||||
import errno
|
|
||||||
from subprocess import CalledProcessError
|
|
||||||
|
|
||||||
import six
|
|
||||||
if not six.PY3:
|
|
||||||
from UserDict import UserDict
|
|
||||||
else:
|
|
||||||
from collections import UserDict
|
|
||||||
|
|
||||||
CRITICAL = "CRITICAL"
|
|
||||||
ERROR = "ERROR"
|
|
||||||
WARNING = "WARNING"
|
|
||||||
INFO = "INFO"
|
|
||||||
DEBUG = "DEBUG"
|
|
||||||
MARKER = object()
|
|
||||||
|
|
||||||
cache = {}
|
|
||||||
|
|
||||||
|
|
||||||
def cached(func):
|
|
||||||
"""Cache return values for multiple executions of func + args
|
|
||||||
|
|
||||||
For example::
|
|
||||||
|
|
||||||
@cached
|
|
||||||
def unit_get(attribute):
|
|
||||||
pass
|
|
||||||
|
|
||||||
unit_get('test')
|
|
||||||
|
|
||||||
will cache the result of unit_get + 'test' for future calls.
|
|
||||||
"""
|
|
||||||
def wrapper(*args, **kwargs):
|
|
||||||
global cache
|
|
||||||
key = str((func, args, kwargs))
|
|
||||||
try:
|
|
||||||
return cache[key]
|
|
||||||
except KeyError:
|
|
||||||
res = func(*args, **kwargs)
|
|
||||||
cache[key] = res
|
|
||||||
return res
|
|
||||||
return wrapper
|
|
||||||
|
|
||||||
|
|
||||||
def flush(key):
|
|
||||||
"""Flushes any entries from function cache where the
|
|
||||||
key is found in the function+args """
|
|
||||||
flush_list = []
|
|
||||||
for item in cache:
|
|
||||||
if key in item:
|
|
||||||
flush_list.append(item)
|
|
||||||
for item in flush_list:
|
|
||||||
del cache[item]
|
|
||||||
|
|
||||||
|
|
||||||
def log(message, level=None):
|
|
||||||
"""Write a message to the juju log"""
|
|
||||||
command = ['juju-log']
|
|
||||||
if level:
|
|
||||||
command += ['-l', level]
|
|
||||||
if not isinstance(message, six.string_types):
|
|
||||||
message = repr(message)
|
|
||||||
command += [message]
|
|
||||||
# Missing juju-log should not cause failures in unit tests
|
|
||||||
# Send log output to stderr
|
|
||||||
try:
|
|
||||||
subprocess.call(command)
|
|
||||||
except OSError as e:
|
|
||||||
if e.errno == errno.ENOENT:
|
|
||||||
if level:
|
|
||||||
message = "{}: {}".format(level, message)
|
|
||||||
message = "juju-log: {}".format(message)
|
|
||||||
print(message, file=sys.stderr)
|
|
||||||
else:
|
|
||||||
raise
|
|
||||||
|
|
||||||
|
|
||||||
class Serializable(UserDict):
|
|
||||||
"""Wrapper, an object that can be serialized to yaml or json"""
|
|
||||||
|
|
||||||
def __init__(self, obj):
|
|
||||||
# wrap the object
|
|
||||||
UserDict.__init__(self)
|
|
||||||
self.data = obj
|
|
||||||
|
|
||||||
def __getattr__(self, attr):
|
|
||||||
# See if this object has attribute.
|
|
||||||
if attr in ("json", "yaml", "data"):
|
|
||||||
return self.__dict__[attr]
|
|
||||||
# Check for attribute in wrapped object.
|
|
||||||
got = getattr(self.data, attr, MARKER)
|
|
||||||
if got is not MARKER:
|
|
||||||
return got
|
|
||||||
# Proxy to the wrapped object via dict interface.
|
|
||||||
try:
|
|
||||||
return self.data[attr]
|
|
||||||
except KeyError:
|
|
||||||
raise AttributeError(attr)
|
|
||||||
|
|
||||||
def __getstate__(self):
|
|
||||||
# Pickle as a standard dictionary.
|
|
||||||
return self.data
|
|
||||||
|
|
||||||
def __setstate__(self, state):
|
|
||||||
# Unpickle into our wrapper.
|
|
||||||
self.data = state
|
|
||||||
|
|
||||||
def json(self):
|
|
||||||
"""Serialize the object to json"""
|
|
||||||
return json.dumps(self.data)
|
|
||||||
|
|
||||||
def yaml(self):
|
|
||||||
"""Serialize the object to yaml"""
|
|
||||||
return yaml.dump(self.data)
|
|
||||||
|
|
||||||
|
|
||||||
def execution_environment():
|
|
||||||
"""A convenient bundling of the current execution context"""
|
|
||||||
context = {}
|
|
||||||
context['conf'] = config()
|
|
||||||
if relation_id():
|
|
||||||
context['reltype'] = relation_type()
|
|
||||||
context['relid'] = relation_id()
|
|
||||||
context['rel'] = relation_get()
|
|
||||||
context['unit'] = local_unit()
|
|
||||||
context['rels'] = relations()
|
|
||||||
context['env'] = os.environ
|
|
||||||
return context
|
|
||||||
|
|
||||||
|
|
||||||
def in_relation_hook():
|
|
||||||
"""Determine whether we're running in a relation hook"""
|
|
||||||
return 'JUJU_RELATION' in os.environ
|
|
||||||
|
|
||||||
|
|
||||||
def relation_type():
|
|
||||||
"""The scope for the current relation hook"""
|
|
||||||
return os.environ.get('JUJU_RELATION', None)
|
|
||||||
|
|
||||||
|
|
||||||
def relation_id():
|
|
||||||
"""The relation ID for the current relation hook"""
|
|
||||||
return os.environ.get('JUJU_RELATION_ID', None)
|
|
||||||
|
|
||||||
|
|
||||||
def local_unit():
|
|
||||||
"""Local unit ID"""
|
|
||||||
return os.environ['JUJU_UNIT_NAME']
|
|
||||||
|
|
||||||
|
|
||||||
def remote_unit():
|
|
||||||
"""The remote unit for the current relation hook"""
|
|
||||||
return os.environ['JUJU_REMOTE_UNIT']
|
|
||||||
|
|
||||||
|
|
||||||
def service_name():
|
|
||||||
"""The name service group this unit belongs to"""
|
|
||||||
return local_unit().split('/')[0]
|
|
||||||
|
|
||||||
|
|
||||||
def hook_name():
|
|
||||||
"""The name of the currently executing hook"""
|
|
||||||
return os.path.basename(sys.argv[0])
|
|
||||||
|
|
||||||
|
|
||||||
class Config(dict):
|
|
||||||
"""A dictionary representation of the charm's config.yaml, with some
|
|
||||||
extra features:
|
|
||||||
|
|
||||||
- See which values in the dictionary have changed since the previous hook.
|
|
||||||
- For values that have changed, see what the previous value was.
|
|
||||||
- Store arbitrary data for use in a later hook.
|
|
||||||
|
|
||||||
NOTE: Do not instantiate this object directly - instead call
|
|
||||||
``hookenv.config()``, which will return an instance of :class:`Config`.
|
|
||||||
|
|
||||||
Example usage::
|
|
||||||
|
|
||||||
>>> # inside a hook
|
|
||||||
>>> from charmhelpers.core import hookenv
|
|
||||||
>>> config = hookenv.config()
|
|
||||||
>>> config['foo']
|
|
||||||
'bar'
|
|
||||||
>>> # store a new key/value for later use
|
|
||||||
>>> config['mykey'] = 'myval'
|
|
||||||
|
|
||||||
|
|
||||||
>>> # user runs `juju set mycharm foo=baz`
|
|
||||||
>>> # now we're inside subsequent config-changed hook
|
|
||||||
>>> config = hookenv.config()
|
|
||||||
>>> config['foo']
|
|
||||||
'baz'
|
|
||||||
>>> # test to see if this val has changed since last hook
|
|
||||||
>>> config.changed('foo')
|
|
||||||
True
|
|
||||||
>>> # what was the previous value?
|
|
||||||
>>> config.previous('foo')
|
|
||||||
'bar'
|
|
||||||
>>> # keys/values that we add are preserved across hooks
|
|
||||||
>>> config['mykey']
|
|
||||||
'myval'
|
|
||||||
|
|
||||||
"""
|
|
||||||
CONFIG_FILE_NAME = '.juju-persistent-config'
|
|
||||||
|
|
||||||
def __init__(self, *args, **kw):
|
|
||||||
super(Config, self).__init__(*args, **kw)
|
|
||||||
self.implicit_save = True
|
|
||||||
self._prev_dict = None
|
|
||||||
self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
|
|
||||||
if os.path.exists(self.path):
|
|
||||||
self.load_previous()
|
|
||||||
|
|
||||||
def __getitem__(self, key):
|
|
||||||
"""For regular dict lookups, check the current juju config first,
|
|
||||||
then the previous (saved) copy. This ensures that user-saved values
|
|
||||||
will be returned by a dict lookup.
|
|
||||||
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
return dict.__getitem__(self, key)
|
|
||||||
except KeyError:
|
|
||||||
return (self._prev_dict or {})[key]
|
|
||||||
|
|
||||||
def keys(self):
|
|
||||||
prev_keys = []
|
|
||||||
if self._prev_dict is not None:
|
|
||||||
prev_keys = self._prev_dict.keys()
|
|
||||||
return list(set(prev_keys + list(dict.keys(self))))
|
|
||||||
|
|
||||||
def load_previous(self, path=None):
    """Load previous copy of config from disk.

    In normal usage you don't need to call this method directly - it
    is called automatically at object initialization.

    :param path:

        File path from which to load the previous config. If `None`,
        config is loaded from the default location. If `path` is
        specified, subsequent `save()` calls will write to the same
        path.

    """
    # Remember the path so save() writes back to the same file.
    self.path = path or self.path
    with open(self.path) as f:
        self._prev_dict = json.load(f)
|
|
||||||
|
|
||||||
def changed(self, key):
    """Return True if *key*'s current value differs from the value it
    had during the previous hook run (always True when there is no
    previous config on disk).
    """
    if self._prev_dict is None:
        return True
    return self.get(key) != self.previous(key)
|
|
||||||
|
|
||||||
def previous(self, key):
    """Return the value *key* had during the previous hook run, or
    None when there is no previous value.
    """
    if not self._prev_dict:
        return None
    return self._prev_dict.get(key)
|
|
||||||
|
|
||||||
def save(self):
    """Save this config to disk.

    If the charm is using the :mod:`Services Framework <services.base>`
    or :meth:`@hook <Hooks.hook>` decorator, this is called
    automatically at the end of successful hook execution.
    Otherwise, it should be called directly by user code.

    To disable automatic saves, set ``implicit_save=False`` on this
    instance.
    """
    # Carry forward previously-saved keys that are absent from the
    # current config, so user-stored values survive across hooks.
    # Plain .items() replaces six.iteritems(): behavior is identical
    # on both Python 2 and 3 and drops the third-party dependency for
    # this small dict.
    if self._prev_dict:
        for k, v in self._prev_dict.items():
            if k not in self:
                self[k] = v
    with open(self.path, 'w') as f:
        json.dump(self, f)
|
|
||||||
|
|
||||||
|
|
||||||
@cached
def config(scope=None):
    """Juju charm configuration.

    Runs the ``config-get`` hook tool and parses its JSON output.

    :param scope: a single config key; when given, only that value is
        returned.  When None, the full configuration is returned.
    :returns: the raw value for *scope*, a :class:`Config` wrapping the
        whole configuration, or None if the output was not valid JSON.
    """
    config_cmd_line = ['config-get']
    if scope is not None:
        config_cmd_line.append(scope)
    config_cmd_line.append('--format=json')
    try:
        config_data = json.loads(
            subprocess.check_output(config_cmd_line).decode('UTF-8'))
        if scope is not None:
            # A scoped lookup returns the bare value, not a Config.
            return config_data
        return Config(config_data)
    except ValueError:
        return None
|
|
||||||
|
|
||||||
|
|
||||||
@cached
def relation_get(attribute=None, unit=None, rid=None):
    """Get relation information via the ``relation-get`` hook tool.

    :param attribute: attribute name to fetch; '-' (all) when None.
    :param unit: remote unit to read from; defaults to the hook's unit.
    :param rid: relation id; defaults to the current relation.
    :returns: parsed JSON value, or None when the output is not JSON or
        when the relation id/unit no longer exists (exit code 2).
    """
    _args = ['relation-get', '--format=json']
    if rid:
        _args.append('-r')
        _args.append(rid)
    _args.append(attribute or '-')
    if unit:
        _args.append(unit)
    try:
        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
    except ValueError:
        return None
    except CalledProcessError as e:
        # Exit code 2 means the relation is gone; treat as "no data".
        if e.returncode == 2:
            return None
        raise
|
|
||||||
|
|
||||||
|
|
||||||
def relation_set(relation_id=None, relation_settings=None, **kwargs):
    """Set relation information for the current unit.

    :param relation_id: relation id to set on; defaults to the current
        relation.
    :param relation_settings: dict of key/value pairs to publish.
    :param kwargs: additional key/value pairs; a value of None clears
        the key on the relation.
    """
    relation_settings = relation_settings if relation_settings else {}
    relation_cmd_line = ['relation-set']
    if relation_id is not None:
        relation_cmd_line.extend(('-r', relation_id))
    for k, v in (list(relation_settings.items()) + list(kwargs.items())):
        if v is None:
            # "key=" with no value unsets the key.
            relation_cmd_line.append('{}='.format(k))
        else:
            relation_cmd_line.append('{}={}'.format(k, v))
    subprocess.check_call(relation_cmd_line)
    # Flush cache of any relation-gets for local unit
    flush(local_unit())
|
|
||||||
|
|
||||||
|
|
||||||
@cached
def relation_ids(reltype=None):
    """A list of relation_ids.

    :param reltype: relation type (e.g. 'shared-db'); defaults to the
        type of the relation the current hook is running for.
    :returns: list of relation id strings; [] when no relation type
        could be determined.
    """
    reltype = reltype or relation_type()
    relid_cmd_line = ['relation-ids', '--format=json']
    if reltype is not None:
        relid_cmd_line.append(reltype)
        return json.loads(
            subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
    return []
|
|
||||||
|
|
||||||
|
|
||||||
@cached
def related_units(relid=None):
    """A list of related units.

    :param relid: relation id; defaults to the current relation.
    :returns: list of unit names (possibly empty).
    """
    relid = relid or relation_id()
    units_cmd_line = ['relation-list', '--format=json']
    if relid is not None:
        units_cmd_line.extend(('-r', relid))
    return json.loads(
        subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
|
|
||||||
|
|
||||||
|
|
||||||
@cached
def relation_for_unit(unit=None, rid=None):
    """Get the json representation of a unit's relation.

    Keys ending in '-list' are split on whitespace into Python lists.
    The unit name is recorded under the '__unit__' key.

    :param unit: remote unit; defaults to the hook's remote unit.
    :param rid: relation id; defaults to the current relation.
    """
    unit = unit or remote_unit()
    relation = relation_get(unit=unit, rid=rid)
    for key in relation:
        if key.endswith('-list'):
            relation[key] = relation[key].split()
    relation['__unit__'] = unit
    return relation
|
|
||||||
|
|
||||||
|
|
||||||
@cached
def relations_for_id(relid=None):
    """Get relations of a specific relation ID.

    :param relid: relation id string.
    :returns: list of per-unit relation dicts, each tagged with
        '__relid__'.
    """
    relation_data = []
    # NOTE(review): relation_ids() returns a *list*, so when relid is
    # None this passes a list where related_units() expects a single
    # relation id string — confirm callers always supply relid.
    relid = relid or relation_ids()
    for unit in related_units(relid):
        unit_data = relation_for_unit(unit, relid)
        unit_data['__relid__'] = relid
        relation_data.append(unit_data)
    return relation_data
|
|
||||||
|
|
||||||
|
|
||||||
@cached
def relations_of_type(reltype=None):
    """Get relations of a specific type.

    :param reltype: relation type; defaults to the current hook's type.
    :returns: flat list of per-unit relation dicts across all relation
        ids of that type, each tagged with '__relid__'.
    """
    relation_data = []
    reltype = reltype or relation_type()
    for relid in relation_ids(reltype):
        for relation in relations_for_id(relid):
            relation['__relid__'] = relid
            relation_data.append(relation)
    return relation_data
|
|
||||||
|
|
||||||
|
|
||||||
@cached
def metadata():
    """Get the current charm metadata.yaml contents as a python object."""
    with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
        return yaml.safe_load(md)
|
|
||||||
|
|
||||||
|
|
||||||
@cached
def relation_types():
    """Return the relation types (names) declared in metadata.yaml
    under the provides, requires and peers sections.
    """
    md = metadata()
    # Each section maps relation name -> interface spec; iterating the
    # dict yields the relation names.
    return [rel
            for section in ('provides', 'requires', 'peers')
            for rel in (md.get(section) or {})]
|
|
||||||
|
|
||||||
|
|
||||||
@cached
def charm_name():
    """Get the name of the current charm as is specified on metadata.yaml"""
    return metadata().get('name')
|
|
||||||
|
|
||||||
|
|
||||||
@cached
def relations():
    """Get a nested dictionary of relation data for all related units.

    Shape: {reltype: {relid: {unit_name: relation_data_dict}}}.
    The local unit's own relation data is included under each relid.
    """
    rels = {}
    for reltype in relation_types():
        relids = {}
        for relid in relation_ids(reltype):
            # Seed with the local unit's view of the relation.
            units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
            for unit in related_units(relid):
                reldata = relation_get(unit=unit, rid=relid)
                units[unit] = reldata
            relids[relid] = units
        rels[reltype] = relids
    return rels
|
|
||||||
|
|
||||||
|
|
||||||
@cached
def is_relation_made(relation, keys='private-address'):
    '''
    Determine whether a relation is established by checking for
    presence of key(s). If a list of keys is provided, they
    must all be present for the relation to be identified as made.

    :param relation: relation name to inspect.
    :param keys: key name or list of key names that must all be set on
        at least one remote unit.
    :returns: True as soon as one remote unit has every key set.
    '''
    if isinstance(keys, str):
        keys = [keys]
    for r_id in relation_ids(relation):
        for unit in related_units(r_id):
            context = {}
            for k in keys:
                context[k] = relation_get(k, rid=r_id,
                                          unit=unit)
            # relation_get returns None for unset keys.
            if None not in context.values():
                return True
    return False
|
|
||||||
|
|
||||||
|
|
||||||
def open_port(port, protocol="TCP"):
    """Open a service network port via Juju's ``open-port`` tool."""
    subprocess.check_call(['open-port', '{}/{}'.format(port, protocol)])
|
|
||||||
|
|
||||||
|
|
||||||
def close_port(port, protocol="TCP"):
    """Close a service network port via Juju's ``close-port`` tool."""
    subprocess.check_call(['close-port', '{}/{}'.format(port, protocol)])
|
|
||||||
|
|
||||||
|
|
||||||
@cached
def unit_get(attribute):
    """Fetch *attribute* (e.g. 'private-address') for the local unit
    via the ``unit-get`` hook tool.

    :returns: the parsed JSON value, or None if the output was not
        valid JSON.
    """
    _args = ['unit-get', '--format=json', attribute]
    try:
        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
    except ValueError:
        return None
|
|
||||||
|
|
||||||
|
|
||||||
def unit_private_ip():
    """Get this unit's private IP address"""
    return unit_get('private-address')
|
|
||||||
|
|
||||||
|
|
||||||
class UnregisteredHookError(Exception):
    """Raised when an undefined hook is called"""
    pass
|
|
||||||
|
|
||||||
|
|
||||||
class Hooks(object):
    """A convenient handler for hook functions.

    Example::

        hooks = Hooks()

        # register a hook, taking its name from the function name
        @hooks.hook()
        def install():
            pass  # your code here

        # register a hook, providing a custom hook name
        @hooks.hook("config-changed")
        def config_changed():
            pass  # your code here

        if __name__ == "__main__":
            # execute a hook based on the name the program is called by
            hooks.execute(sys.argv)
    """

    def __init__(self, config_save=True):
        super(Hooks, self).__init__()
        # Mapping of hook name -> callable.
        self._hooks = {}
        # When True, execute() saves the charm config after a
        # successful hook run (unless the config opts out).
        self._config_save = config_save

    def register(self, name, function):
        """Register *function* to handle the hook called *name*."""
        self._hooks[name] = function

    def execute(self, args):
        """Execute the registered hook named by ``basename(args[0])``.

        :raises UnregisteredHookError: if no hook is registered under
            that name.
        """
        hook_name = os.path.basename(args[0])
        if hook_name not in self._hooks:
            raise UnregisteredHookError(hook_name)
        self._hooks[hook_name]()
        if self._config_save:
            cfg = config()
            if cfg.implicit_save:
                cfg.save()

    def hook(self, *hook_names):
        """Decorator registering the function as a hook handler.

        The function is registered under each name in *hook_names*,
        under its own name, and — when its name contains underscores —
        under the dash-separated variant (Juju hook names use dashes).
        """
        def wrapper(decorated):
            for name in hook_names:
                self.register(name, decorated)
            # The function name (and its dashed alias) is always
            # registered in addition to any explicit names.
            self.register(decorated.__name__, decorated)
            if '_' in decorated.__name__:
                self.register(
                    decorated.__name__.replace('_', '-'), decorated)
            return decorated
        return wrapper
|
|
||||||
|
|
||||||
|
|
||||||
def charm_dir():
    """Return the root directory of the current charm, as published by
    Juju in the CHARM_DIR environment variable (None when unset).
    """
    return os.getenv('CHARM_DIR')
|
|
||||||
|
|
||||||
|
|
||||||
@cached
def action_get(key=None):
    """Gets the value of an action parameter, or all key/value param pairs.

    :param key: single parameter name; when None the full parameter
        dict is returned.
    """
    cmd = ['action-get']
    if key is not None:
        cmd.append(key)
    cmd.append('--format=json')
    action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
    return action_data
|
|
||||||
|
|
||||||
|
|
||||||
def action_set(values):
    """Publish *values* (a dict) as the action's results via the
    ``action-set`` hook tool.
    """
    cmd = ['action-set']
    cmd.extend('{}={}'.format(k, v) for k, v in values.items())
    subprocess.check_call(cmd)
|
|
||||||
|
|
||||||
|
|
||||||
def action_fail(message):
    """Sets the action status to failed and sets the error message.

    The results set by action_set are preserved."""
    subprocess.check_call(['action-fail', message])
|
|
|
@ -1,450 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
"""Tools for working with the host system"""
|
|
||||||
# Copyright 2012 Canonical Ltd.
|
|
||||||
#
|
|
||||||
# Authors:
|
|
||||||
# Nick Moffitt <nick.moffitt@canonical.com>
|
|
||||||
# Matthew Wedgwood <matthew.wedgwood@canonical.com>
|
|
||||||
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
import pwd
|
|
||||||
import grp
|
|
||||||
import random
|
|
||||||
import string
|
|
||||||
import subprocess
|
|
||||||
import hashlib
|
|
||||||
from contextlib import contextmanager
|
|
||||||
from collections import OrderedDict
|
|
||||||
|
|
||||||
import six
|
|
||||||
|
|
||||||
from .hookenv import log
|
|
||||||
from .fstab import Fstab
|
|
||||||
|
|
||||||
|
|
||||||
def service_start(service_name):
    """Start a system service"""
    return service('start', service_name)
|
|
||||||
|
|
||||||
|
|
||||||
def service_stop(service_name):
    """Stop a system service"""
    return service('stop', service_name)
|
|
||||||
|
|
||||||
|
|
||||||
def service_restart(service_name):
    """Restart a system service"""
    return service('restart', service_name)
|
|
||||||
|
|
||||||
|
|
||||||
def service_reload(service_name, restart_on_failure=False):
    """Reload a system service, optionally falling back to a restart
    when the reload fails.

    :returns: True if the reload (or fallback restart) succeeded.
    """
    reloaded = service('reload', service_name)
    if not reloaded and restart_on_failure:
        return service('restart', service_name)
    return reloaded
|
|
||||||
|
|
||||||
|
|
||||||
def service(action, service_name):
    """Control a system service.

    :param action: e.g. 'start', 'stop', 'restart', 'reload'.
    :returns: True when the ``service`` command exits 0.
    """
    cmd = ['service', service_name, action]
    return subprocess.call(cmd) == 0
|
|
||||||
|
|
||||||
|
|
||||||
def service_running(service):
    """Determine whether a system service is running."""
    try:
        output = subprocess.check_output(
            ['service', service, 'status'],
            stderr=subprocess.STDOUT).decode('UTF-8')
    except subprocess.CalledProcessError:
        return False
    # Upstart jobs report "start/running"; SysV scripts say "is running".
    return "start/running" in output or "is running" in output
|
|
||||||
|
|
||||||
|
|
||||||
def service_available(service_name):
    """Determine whether a system service is available (known to the
    init system), as opposed to running.

    :returns: True unless ``service <name> status`` fails with an
        "unrecognized service" message.
    """
    try:
        subprocess.check_output(
            ['service', service_name, 'status'],
            stderr=subprocess.STDOUT).decode('UTF-8')
    except subprocess.CalledProcessError as e:
        # e.output is bytes on Python 3 (str on Python 2); compare with
        # a bytes literal so the membership test works on both — the
        # previous str-in-bytes test raised TypeError on Python 3.
        return b'unrecognized service' not in e.output
    else:
        return True
|
|
||||||
|
|
||||||
|
|
||||||
def adduser(username, password=None, shell='/bin/bash', system_user=False):
    """Add a user to the system (no-op if the user already exists).

    :param password: NOTE(review): ``useradd --password`` expects an
        already-encrypted (crypt) value — confirm callers pass a hash,
        not plaintext.
    :param system_user: create a --system account; also implied when
        no password is given.
    :returns: the ``pwd`` entry for the (possibly pre-existing) user.
    """
    try:
        user_info = pwd.getpwnam(username)
        log('user {0} already exists!'.format(username))
    except KeyError:
        log('creating user {0}'.format(username))
        cmd = ['useradd']
        if system_user or password is None:
            cmd.append('--system')
        else:
            cmd.extend([
                '--create-home',
                '--shell', shell,
                '--password', password,
            ])
        cmd.append(username)
        subprocess.check_call(cmd)
        user_info = pwd.getpwnam(username)
    return user_info
|
|
||||||
|
|
||||||
|
|
||||||
def add_group(group_name, system_group=False):
    """Add a group to the system (no-op if the group already exists).

    :param system_group: create a --system group.
    :returns: the ``grp`` entry for the (possibly pre-existing) group.
    """
    try:
        group_info = grp.getgrnam(group_name)
        log('group {0} already exists!'.format(group_name))
    except KeyError:
        log('creating group {0}'.format(group_name))
        cmd = ['addgroup']
        if system_group:
            cmd.append('--system')
        else:
            cmd.extend([
                '--group',
            ])
        cmd.append(group_name)
        subprocess.check_call(cmd)
        group_info = grp.getgrnam(group_name)
    return group_info
|
|
||||||
|
|
||||||
|
|
||||||
def add_user_to_group(username, group):
    """Add a user to a group (via ``gpasswd -a``)."""
    cmd = [
        'gpasswd', '-a',
        username,
        group
    ]
    log("Adding user {} to group {}".format(username, group))
    subprocess.check_call(cmd)
|
|
||||||
|
|
||||||
|
|
||||||
def rsync(from_path, to_path, flags='-r', options=None):
    """Replicate the contents of a path using rsync.

    :param flags: single flags string passed to rsync (default '-r').
    :param options: list of extra rsync options; defaults to
        ['--delete', '--executability'].
    :returns: rsync's stdout, stripped.
    """
    options = options or ['--delete', '--executability']
    cmd = ['/usr/bin/rsync', flags]
    cmd.extend(options)
    cmd.append(from_path)
    cmd.append(to_path)
    log(" ".join(cmd))
    return subprocess.check_output(cmd).decode('UTF-8').strip()
|
|
||||||
|
|
||||||
|
|
||||||
def symlink(source, destination):
    """Create a symbolic link (``ln -sf``, overwriting any existing
    link at *destination*)."""
    log("Symlinking {} as {}".format(source, destination))
    cmd = [
        'ln',
        '-sf',
        source,
        destination,
    ]
    subprocess.check_call(cmd)
|
|
||||||
|
|
||||||
|
|
||||||
def mkdir(path, owner='root', group='root', perms=0o555, force=False):
    """Create a directory with the given ownership and permissions.

    :param force: when True and *path* exists as a non-directory,
        remove it and create the directory in its place.
    """
    log("Making dir {} {}:{} {:o}".format(path, owner, group,
                                          perms))
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid
    realpath = os.path.abspath(path)
    path_exists = os.path.exists(realpath)
    if path_exists and force:
        if not os.path.isdir(realpath):
            log("Removing non-directory file {} prior to mkdir()".format(path))
            os.unlink(realpath)
            os.makedirs(realpath, perms)
    elif not path_exists:
        os.makedirs(realpath, perms)
    # NOTE(review): makedirs' mode is subject to the umask, so chmod
    # the leaf explicitly; intermediate parents keep the masked mode.
    os.chown(realpath, uid, gid)
    os.chmod(realpath, perms)
|
|
||||||
|
|
||||||
|
|
||||||
def write_file(path, content, owner='root', group='root', perms=0o444):
    """Create or overwrite a file with the contents of a byte string.

    Ownership and permissions are applied to the open file descriptor
    before writing, so the file never exists with wrong permissions.
    """
    log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid
    with open(path, 'wb') as target:
        os.fchown(target.fileno(), uid, gid)
        os.fchmod(target.fileno(), perms)
        target.write(content)
|
|
||||||
|
|
||||||
|
|
||||||
def fstab_remove(mp):
    """Remove the given mountpoint entry from /etc/fstab
    """
    return Fstab.remove_by_mountpoint(mp)
|
|
||||||
|
|
||||||
|
|
||||||
def fstab_add(dev, mp, fs, options=None):
    """Adds the given device entry to the /etc/fstab file
    """
    return Fstab.add(dev, mp, fs, options=options)
|
|
||||||
|
|
||||||
|
|
||||||
def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
    """Mount a filesystem at a particular mountpoint.

    :param persist: when True, also add an /etc/fstab entry.
    :returns: True on success; False (after logging) when the mount
        command fails.
    """
    cmd_args = ['mount']
    if options is not None:
        cmd_args.extend(['-o', options])
    cmd_args.extend([device, mountpoint])
    try:
        subprocess.check_output(cmd_args)
    except subprocess.CalledProcessError as e:
        log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
        return False

    if persist:
        return fstab_add(device, mountpoint, filesystem, options=options)
    return True
|
|
||||||
|
|
||||||
|
|
||||||
def umount(mountpoint, persist=False):
    """Unmount a filesystem.

    :param persist: when True, also remove the /etc/fstab entry.
    :returns: True on success; False (after logging) when the umount
        command fails.
    """
    cmd_args = ['umount', mountpoint]
    try:
        subprocess.check_output(cmd_args)
    except subprocess.CalledProcessError as e:
        log('Error unmounting {}\n{}'.format(mountpoint, e.output))
        return False

    if persist:
        return fstab_remove(mountpoint)
    return True
|
|
||||||
|
|
||||||
|
|
||||||
def mounts():
    """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
    with open('/proc/mounts') as proc_mounts:
        entries = [line.strip().split() for line in proc_mounts]
    # /proc/mounts lines are "device mountpoint fstype ..."; swap the
    # first two fields to yield [mountpoint, device] pairs.
    return [[fields[1], fields[0]] for fields in entries]
|
|
||||||
|
|
||||||
|
|
||||||
def file_hash(path, hash_type='md5'):
    """
    Generate a hash checksum of the contents of 'path' or None if not found.

    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
        such as md5, sha1, sha256, sha512, etc.
    :returns: hex digest string, or None when *path* does not exist.
    """
    if not os.path.exists(path):
        return None
    h = getattr(hashlib, hash_type)()
    with open(path, 'rb') as source:
        # Feed the hash in chunks so arbitrarily large files do not
        # have to fit in memory at once.
        for chunk in iter(lambda: source.read(65536), b''):
            h.update(chunk)
    return h.hexdigest()
|
|
||||||
|
|
||||||
|
|
||||||
def check_hash(path, checksum, hash_type='md5'):
    """
    Validate a file using a cryptographic checksum.

    :param str checksum: Value of the checksum used to validate the file.
    :param str hash_type: Hash algorithm used to generate `checksum`.
        Can be any hash algorithm supported by :mod:`hashlib`,
        such as md5, sha1, sha256, sha512, etc.
    :raises ChecksumError: If the file fails the checksum

    """
    actual_checksum = file_hash(path, hash_type)
    if checksum != actual_checksum:
        raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
|
|
||||||
|
|
||||||
|
|
||||||
class ChecksumError(ValueError):
    """Raised by check_hash() when a file fails checksum validation."""
    pass
|
|
||||||
|
|
||||||
|
|
||||||
def restart_on_change(restart_map, stopstart=False):
    """Restart services based on configuration files changing

    This function is used as a decorator, for example::

        @restart_on_change({
            '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
        })
        def ceph_client_changed():
            pass  # your code here

    In this example, the cinder-api and cinder-volume services
    would be restarted if /etc/ceph/ceph.conf is changed by the
    ceph_client_changed function.

    :param restart_map: dict mapping file path -> list of service names
        to restart when that file's checksum changes.
    :param stopstart: when True, stop then start each service instead
        of issuing a single restart.
    """
    # Local import keeps this block self-contained; functools is stdlib.
    from functools import wraps

    def wrap(f):
        @wraps(f)  # preserve the wrapped hook's name and docstring
        def wrapped_f(*args, **kwargs):
            # Snapshot each watched file's checksum before the hook...
            checksums = {path: file_hash(path) for path in restart_map}
            f(*args, **kwargs)
            # ...then collect the services of every file that changed.
            restarts = []
            for path in restart_map:
                if checksums[path] != file_hash(path):
                    restarts += restart_map[path]
            # De-duplicate while preserving declaration order.
            services_list = list(OrderedDict.fromkeys(restarts))
            if not stopstart:
                for service_name in services_list:
                    service('restart', service_name)
            else:
                for action in ['stop', 'start']:
                    for service_name in services_list:
                        service(action, service_name)
        return wrapped_f
    return wrap
|
|
||||||
|
|
||||||
|
|
||||||
def lsb_release():
    """Return /etc/lsb-release in a dict"""
    with open('/etc/lsb-release', 'r') as lsb:
        # Each line is KEY=value; whitespace is stripped on both sides.
        return {key.strip(): value.strip()
                for key, value in (line.split('=') for line in lsb)}
|
|
||||||
|
|
||||||
|
|
||||||
def pwgen(length=None):
    """Generate a random password.

    :param length: desired length; when None a random length in
        [35, 44] is used.
    """
    if length is None:
        # A random length is ok to use a weak PRNG for.
        length = random.choice(range(35, 45))
    # Exclude easily-confused characters and vowels.
    allowed = [c for c in string.ascii_letters + string.digits
               if c not in 'l0QD1vAEIOUaeiou']
    # Use a crypto-friendly PRNG (e.g. /dev/urandom) for the actual
    # password characters.
    rng = random.SystemRandom()
    return ''.join(rng.choice(allowed) for _ in range(length))
|
|
||||||
|
|
||||||
|
|
||||||
def list_nics(nic_type):
    '''Return a list of nics of given type(s).

    :param nic_type: interface-name prefix (e.g. 'eth') or list of
        prefixes.
    :returns: list of matching interface names; VLAN sub-interfaces
        (e.g. eth0.100) are reported without the @parent suffix.
    '''
    if isinstance(nic_type, six.string_types):
        int_types = [nic_type]
    else:
        int_types = nic_type
    interfaces = []
    for int_type in int_types:
        cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
        ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
        # Drop empty lines before parsing.
        ip_output = (line for line in ip_output if line)
        for line in ip_output:
            if line.split()[1].startswith(int_type):
                # Match VLAN interfaces of the form "<type><n>.<vlan>@parent".
                matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line)
                if matched:
                    interface = matched.groups()[0]
                else:
                    interface = line.split()[1].replace(":", "")
                interfaces.append(interface)

    return interfaces
|
|
||||||
|
|
||||||
|
|
||||||
def set_nic_mtu(nic, mtu):
    '''Set MTU on a network interface.

    NOTE(review): *mtu* is placed directly in the argv list, so it
    must already be a string — confirm callers do not pass an int.
    '''
    cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
    subprocess.check_call(cmd)
|
|
||||||
|
|
||||||
|
|
||||||
def get_nic_mtu(nic):
    """Return the MTU of *nic* as reported by ``ip addr show``, or an
    empty string when no mtu field is found."""
    cmd = ['ip', 'addr', 'show', nic]
    ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
    mtu = ""
    for line in ip_output:
        words = line.split()
        if 'mtu' in words:
            # The value follows the 'mtu' token.
            mtu = words[words.index("mtu") + 1]
    return mtu
|
|
||||||
|
|
||||||
|
|
||||||
def get_nic_hwaddr(nic):
    """Return the hardware (MAC) address of *nic*, or an empty string
    when ``ip addr show`` reports no link/ether field."""
    cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
    ip_output = subprocess.check_output(cmd).decode('UTF-8')
    hwaddr = ""
    words = ip_output.split()
    if 'link/ether' in words:
        # The MAC follows the 'link/ether' token.
        hwaddr = words[words.index('link/ether') + 1]
    return hwaddr
|
|
||||||
|
|
||||||
|
|
||||||
def cmp_pkgrevno(package, revno, pkgcache=None):
    '''Compare supplied revno with the revno of the installed package

    * 1 => Installed revno is greater than supplied arg
    * 0 => Installed revno is the same as supplied arg
    * -1 => Installed revno is less than supplied arg

    This function imports apt_cache function from charmhelpers.fetch if
    the pkgcache argument is None. Be sure to add charmhelpers.fetch if
    you call this function, or pass an apt_pkg.Cache() instance.
    '''
    import apt_pkg
    if not pkgcache:
        # Imported lazily to avoid a hard dependency for callers that
        # supply their own cache.
        from charmhelpers.fetch import apt_cache
        pkgcache = apt_cache()
    pkg = pkgcache[package]
    return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
|
|
||||||
|
|
||||||
|
|
||||||
@contextmanager
def chdir(d):
    """Context manager that changes the working directory to *d* and
    restores the previous directory on exit (even on error).
    """
    previous = os.getcwd()
    try:
        os.chdir(d)
        yield
    finally:
        os.chdir(previous)
|
|
||||||
|
|
||||||
|
|
||||||
def chownr(path, owner, group, follow_links=True):
    """Recursively change ownership of everything under *path*.

    :param follow_links: when True, chown the link target; when False,
        chown the link itself (lchown).
    """
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid
    if follow_links:
        chown = os.chown
    else:
        chown = os.lchown

    for root, dirs, files in os.walk(path):
        for name in dirs + files:
            full = os.path.join(root, name)
            # Skip symlinks whose target no longer exists; chown on
            # them would raise.
            broken_symlink = os.path.lexists(full) and not os.path.exists(full)
            if not broken_symlink:
                chown(full, uid, gid)
|
|
||||||
|
|
||||||
|
|
||||||
def lchownr(path, owner, group):
    """Recursive chown that does not follow symlinks (see chownr)."""
    chownr(path, owner, group, follow_links=False)
|
|
|
@ -1,329 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
import json
|
|
||||||
from collections import Iterable
|
|
||||||
|
|
||||||
from charmhelpers.core import host
|
|
||||||
from charmhelpers.core import hookenv
|
|
||||||
|
|
||||||
|
|
||||||
__all__ = ['ServiceManager', 'ManagerCallback',
|
|
||||||
'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
|
|
||||||
'service_restart', 'service_stop']
|
|
||||||
|
|
||||||
|
|
||||||
class ServiceManager(object):
    def __init__(self, services=None):
        """
        Register a list of services, given their definitions.

        Service definitions are dicts in the following formats (all keys except
        'service' are optional)::

            {
                "service": <service name>,
                "required_data": <list of required data contexts>,
                "provided_data": <list of provided data contexts>,
                "data_ready": <one or more callbacks>,
                "data_lost": <one or more callbacks>,
                "start": <one or more callbacks>,
                "stop": <one or more callbacks>,
                "ports": <list of ports to manage>,
            }

        The 'required_data' list should contain dicts of required data (or
        dependency managers that act like dicts and know how to collect the data).
        Only when all items in the 'required_data' list are populated are the list
        of 'data_ready' and 'start' callbacks executed.  See `is_ready()` for more
        information.

        The 'provided_data' list should contain relation data providers, most likely
        a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
        that will indicate a set of data to set on a given relation.

        The 'data_ready' value should be either a single callback, or a list of
        callbacks, to be called when all items in 'required_data' pass `is_ready()`.
        Each callback will be called with the service name as the only parameter.
        After all of the 'data_ready' callbacks are called, the 'start' callbacks
        are fired.

        The 'data_lost' value should be either a single callback, or a list of
        callbacks, to be called when a 'required_data' item no longer passes
        `is_ready()`.  Each callback will be called with the service name as the
        only parameter.  After all of the 'data_lost' callbacks are called,
        the 'stop' callbacks are fired.

        The 'start' value should be either a single callback, or a list of
        callbacks, to be called when starting the service, after the 'data_ready'
        callbacks are complete.  Each callback will be called with the service
        name as the only parameter.  This defaults to
        `[host.service_start, services.open_ports]`.

        The 'stop' value should be either a single callback, or a list of
        callbacks, to be called when stopping the service.  If the service is
        being stopped because it no longer has all of its 'required_data', this
        will be called after all of the 'data_lost' callbacks are complete.
        Each callback will be called with the service name as the only parameter.
        This defaults to `[services.close_ports, host.service_stop]`.

        The 'ports' value should be a list of ports to manage.  The default
        'start' handler will open the ports after the service is started,
        and the default 'stop' handler will close the ports prior to stopping
        the service.


        Examples:

        The following registers an Upstart service called bingod that depends on
        a mongodb relation and which runs a custom `db_migrate` function prior to
        restarting the service, and a Runit service called spadesd::

            manager = services.ServiceManager([
                {
                    'service': 'bingod',
                    'ports': [80, 443],
                    'required_data': [MongoRelation(), config(), {'my': 'data'}],
                    'data_ready': [
                        services.template(source='bingod.conf'),
                        services.template(source='bingod.ini',
                                          target='/etc/bingod.ini',
                                          owner='bingo', perms=0400),
                    ],
                },
                {
                    'service': 'spadesd',
                    'data_ready': services.template(source='spadesd_run.j2',
                                                    target='/etc/sv/spadesd/run',
                                                    perms=0555),
                    'start': runit_start,
                    'stop': runit_stop,
                },
            ])
            manager.manage()
        """
        # Persisted set of service names that reached data_ready, so a later
        # hook can detect the ready -> not-ready transition (see was_ready).
        self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
        self._ready = None  # lazily loaded from _ready_file
        self.services = {}
        for service in services or []:
            service_name = service['service']
            self.services[service_name] = service

    def manage(self):
        """
        Handle the current hook by doing The Right Thing with the registered services.
        """
        hook_name = hookenv.hook_name()
        if hook_name == 'stop':
            self.stop_services()
        else:
            self.provide_data()
            self.reconfigure_services()
        # Persist config changes seen during this hook unless the charm has
        # opted out of implicit saving.
        cfg = hookenv.config()
        if cfg.implicit_save:
            cfg.save()

    def provide_data(self):
        """
        Set the relation data for each provider in the ``provided_data`` list.

        A provider must have a `name` attribute, which indicates which relation
        to set data on, and a `provide_data()` method, which returns a dict of
        data to set.
        """
        hook_name = hookenv.hook_name()
        for service in self.services.values():
            for provider in service.get('provided_data', []):
                # Only publish during this provider's own -joined/-changed hooks.
                if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
                    data = provider.provide_data()
                    # Providers may veto publication via an optional _is_ready
                    # hook; otherwise any truthy data dict is published.
                    _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data
                    if _ready:
                        hookenv.relation_set(None, data)

    def reconfigure_services(self, *service_names):
        """
        Update all files for one or more registered services, and,
        if ready, optionally restart them.

        If no service names are given, reconfigures all registered services.
        """
        for service_name in service_names or self.services.keys():
            if self.is_ready(service_name):
                self.fire_event('data_ready', service_name)
                self.fire_event('start', service_name, default=[
                    service_restart,
                    manage_ports])
                self.save_ready(service_name)
            else:
                # data_lost fires only on the ready -> not-ready edge;
                # stop/save_lost run every time the service is not ready.
                if self.was_ready(service_name):
                    self.fire_event('data_lost', service_name)
                self.fire_event('stop', service_name, default=[
                    manage_ports,
                    service_stop])
                self.save_lost(service_name)

    def stop_services(self, *service_names):
        """
        Stop one or more registered services, by name.

        If no service names are given, stops all registered services.
        """
        for service_name in service_names or self.services.keys():
            self.fire_event('stop', service_name, default=[
                manage_ports,
                service_stop])

    def get_service(self, service_name):
        """
        Given the name of a registered service, return its service definition.

        :raises KeyError: if no service was registered under ``service_name``
        """
        service = self.services.get(service_name)
        if not service:
            raise KeyError('Service not registered: %s' % service_name)
        return service

    def fire_event(self, event_name, service_name, default=None):
        """
        Fire a data_ready, data_lost, start, or stop event on a given service.
        """
        service = self.get_service(service_name)
        callbacks = service.get(event_name, default)
        if not callbacks:
            return
        # Accept a single bare callable as well as a list of callables.
        if not isinstance(callbacks, Iterable):
            callbacks = [callbacks]
        for callback in callbacks:
            if isinstance(callback, ManagerCallback):
                # ManagerCallbacks receive the full (manager, service, event)
                # context; plain callables only get the service name.
                callback(self, service_name, event_name)
            else:
                callback(service_name)

    def is_ready(self, service_name):
        """
        Determine if a registered service is ready, by checking its 'required_data'.

        A 'required_data' item can be any mapping type, and is considered ready
        if `bool(item)` evaluates as True.
        """
        service = self.get_service(service_name)
        reqs = service.get('required_data', [])
        return all(bool(req) for req in reqs)

    def _load_ready_file(self):
        # Cache the ready-set in memory; the JSON file is read at most once.
        if self._ready is not None:
            return
        if os.path.exists(self._ready_file):
            with open(self._ready_file) as fp:
                self._ready = set(json.load(fp))
        else:
            self._ready = set()

    def _save_ready_file(self):
        # No-op until _load_ready_file (or a save_* call) initialised the set.
        if self._ready is None:
            return
        with open(self._ready_file, 'w') as fp:
            json.dump(list(self._ready), fp)

    def save_ready(self, service_name):
        """
        Save an indicator that the given service is now data_ready.
        """
        self._load_ready_file()
        self._ready.add(service_name)
        self._save_ready_file()

    def save_lost(self, service_name):
        """
        Save an indicator that the given service is no longer data_ready.
        """
        self._load_ready_file()
        self._ready.discard(service_name)
        self._save_ready_file()

    def was_ready(self, service_name):
        """
        Determine if the given service was previously data_ready.
        """
        self._load_ready_file()
        return service_name in self._ready
|
||||||
class ManagerCallback(object):
    """
    Special case of a callback that receives the `ServiceManager` instance
    in addition to the service name.

    Subclasses must implement `__call__`, which accepts three parameters:

        * `manager`       The `ServiceManager` instance
        * `service_name`  The name of the service being triggered
        * `event_name`    The name of the event this callback is handling
    """
    def __call__(self, manager, service_name, event_name):
        # Abstract: concrete behaviour is supplied by subclasses.
        raise NotImplementedError()
|
||||||
class PortManagerCallback(ManagerCallback):
    """
    Callback class that will open or close ports, for use as either
    a start or stop action.
    """
    def __call__(self, manager, service_name, event_name):
        service = manager.get_service(service_name)
        new_ports = service.get('ports', [])
        # Per-service record of the ports managed last run, so ports removed
        # from the service definition can be closed.
        port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
        if os.path.exists(port_file):
            with open(port_file) as fp:
                old_ports = fp.read().split(',')
            for old_port in old_ports:
                if bool(old_port):  # skip the empty string from a blank file
                    old_port = int(old_port)
                    # Close only ports that are no longer in the definition.
                    if old_port not in new_ports:
                        hookenv.close_port(old_port)
        # Rewrite the record with the current port list.
        with open(port_file, 'w') as fp:
            fp.write(','.join(str(port) for port in new_ports))
        for port in new_ports:
            if event_name == 'start':
                hookenv.open_port(port)
            elif event_name == 'stop':
                hookenv.close_port(port)
|
||||||
def service_stop(service_name):
    """Stop ``service_name`` only if it is currently running.

    Wrapper around host.service_stop to prevent spurious "unknown service"
    messages in the logs.
    """
    if not host.service_running(service_name):
        return
    host.service_stop(service_name)
|
||||||
def service_restart(service_name):
    """Restart (or start) ``service_name`` if the init system knows it.

    Wrapper around host.service_restart to prevent spurious "unknown service"
    messages in the logs.  A service that is available but not running is
    started rather than restarted.
    """
    if not host.service_available(service_name):
        return
    if host.service_running(service_name):
        host.service_restart(service_name)
    else:
        host.service_start(service_name)
|
||||||
# Convenience aliases
|
|
||||||
open_ports = close_ports = manage_ports = PortManagerCallback()
|
|
|
@ -1,267 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import os
|
|
||||||
import yaml
|
|
||||||
from charmhelpers.core import hookenv
|
|
||||||
from charmhelpers.core import templating
|
|
||||||
|
|
||||||
from charmhelpers.core.services.base import ManagerCallback
|
|
||||||
|
|
||||||
|
|
||||||
__all__ = ['RelationContext', 'TemplateCallback',
|
|
||||||
'render_template', 'template']
|
|
||||||
|
|
||||||
|
|
||||||
class RelationContext(dict):
    """
    Base class for a context generator that gets relation data from juju.

    Subclasses must provide the attributes `name`, which is the name of the
    interface of interest, `interface`, which is the type of the interface of
    interest, and `required_keys`, which is the set of keys required for the
    relation to be considered complete.  The data for all interfaces matching
    the `name` attribute that are complete will used to populate the dictionary
    values (see `get_data`, below).

    The generated context will be namespaced under the relation :attr:`name`,
    to prevent potential naming conflicts.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    # Subclasses override these to identify the relation and interface type.
    name = None
    interface = None

    def __init__(self, name=None, additional_required_keys=None):
        # Subclasses usually assign required_keys before calling up; fall
        # back to an empty list so is_ready()/_is_ready() still work.
        if not hasattr(self, 'required_keys'):
            self.required_keys = []

        if name is not None:
            self.name = name
        if additional_required_keys:
            self.required_keys.extend(additional_required_keys)
        # Eagerly collect relation data at construction time.
        self.get_data()

    def __bool__(self):
        """
        Returns True if all of the required_keys are available.
        """
        return self.is_ready()

    # Python 2 truthiness hook delegates to __bool__.
    __nonzero__ = __bool__

    def __repr__(self):
        return super(RelationContext, self).__repr__()

    def is_ready(self):
        """
        Returns True if all of the `required_keys` are available from any units.
        """
        # get_data() only appends complete unit data, so a non-empty list
        # under self[self.name] means at least one unit is ready.
        ready = len(self.get(self.name, [])) > 0
        if not ready:
            hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
        return ready

    def _is_ready(self, unit_data):
        """
        Helper method that tests a set of relation data and returns True if
        all of the `required_keys` are present.
        """
        return set(unit_data.keys()).issuperset(set(self.required_keys))

    def get_data(self):
        """
        Retrieve the relation data for each unit involved in a relation and,
        if complete, store it in a list under `self[self.name]`.  This
        is automatically called when the RelationContext is instantiated.

        The units are sorted lexographically first by the service ID, then by
        the unit ID.  Thus, if an interface has two other services, 'db:1'
        and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
        and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
        set of data, the relation data for the units will be stored in the
        order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.

        If you only care about a single unit on the relation, you can just
        access it as `{{ interface[0]['key'] }}`.  However, if you can at all
        support multiple units on a relation, you should iterate over the list,
        like::

            {% for unit in interface -%}
                {{ unit['key'] }}{% if not loop.last %},{% endif %}
            {%- endfor %}

        Note that since all sets of relation data from all related services and
        units are in a single list, if you need to know which service or unit a
        set of data came from, you'll need to extend this class to preserve
        that information.
        """
        if not hookenv.relation_ids(self.name):
            return

        ns = self.setdefault(self.name, [])
        for rid in sorted(hookenv.relation_ids(self.name)):
            for unit in sorted(hookenv.related_units(rid)):
                reldata = hookenv.relation_get(rid=rid, unit=unit)
                # Record only units that have published a complete data set.
                if self._is_ready(reldata):
                    ns.append(reldata)

    def provide_data(self):
        """
        Return data to be relation_set for this interface.
        """
        return {}
|
||||||
class MysqlRelation(RelationContext):
    """
    Relation context for the `mysql` interface.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    name = 'db'
    interface = 'mysql'

    def __init__(self, *args, **kwargs):
        # A complete mysql relation publishes connection credentials.
        self.required_keys = ['host', 'user', 'password', 'database']
        super(MysqlRelation, self).__init__(*args, **kwargs)
|
||||||
class HttpRelation(RelationContext):
    """
    Relation context for the `http` interface.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    name = 'website'
    interface = 'http'

    def __init__(self, *args, **kwargs):
        # A complete http relation publishes where the site is served.
        self.required_keys = ['host', 'port']
        super(HttpRelation, self).__init__(*args, **kwargs)

    def provide_data(self):
        # Advertise this unit's private address on the standard HTTP port.
        return {
            'host': hookenv.unit_get('private-address'),
            'port': 80,
        }
|
||||||
class RequiredConfig(dict):
    """
    Data context that loads config options with one or more mandatory options.

    Once the required options have been changed from their default values, all
    config options will be available, namespaced under `config` to prevent
    potential naming conflicts (for example, between a config option and a
    relation property).

    :param list *args: List of options that must be changed from their default values.
    """

    def __init__(self, *args):
        self.required_options = args
        self['config'] = hookenv.config()
        with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
            # NOTE(review): yaml.load without an explicit Loader can construct
            # arbitrary Python objects; config.yaml ships with the charm, but
            # consider yaml.safe_load here.
            self.config = yaml.load(fp).get('options', {})

    def __bool__(self):
        # An option counts as satisfied only once it differs from its
        # declared default, and empty/None never satisfies the requirement.
        for option in self.required_options:
            if option not in self['config']:
                return False
            current_value = self['config'][option]
            default_value = self.config[option].get('default')
            if current_value == default_value:
                return False
            if current_value in (None, '') and default_value in (None, ''):
                return False
        return True

    def __nonzero__(self):
        # Python 2 truthiness hook delegating to __bool__.
        return self.__bool__()
|
||||||
class StoredContext(dict):
    """
    A data context that always returns the data that it was first created with.

    This is useful to do a one-time generation of things like passwords, that
    will thereafter use the same value that was originally generated, instead
    of generating a new value each time it is run.
    """
    def __init__(self, file_name, config_data):
        """
        If the file exists, populate `self` with the data from the file.
        Otherwise, populate with the given data and persist it to the file.
        """
        if os.path.exists(file_name):
            self.update(self.read_context(file_name))
        else:
            self.store_context(file_name, config_data)
            self.update(config_data)

    def store_context(self, file_name, config_data):
        # Relative paths are resolved against the charm directory.
        if not os.path.isabs(file_name):
            file_name = os.path.join(hookenv.charm_dir(), file_name)
        with open(file_name, 'w') as file_stream:
            # 0600: the stored context may hold secrets such as passwords.
            os.fchmod(file_stream.fileno(), 0o600)
            yaml.dump(config_data, file_stream)

    def read_context(self, file_name):
        # Relative paths are resolved against the charm directory.
        if not os.path.isabs(file_name):
            file_name = os.path.join(hookenv.charm_dir(), file_name)
        with open(file_name, 'r') as file_stream:
            # NOTE(review): yaml.load without an explicit Loader can construct
            # arbitrary Python objects; this file is charm-written, but
            # consider yaml.safe_load here.
            data = yaml.load(file_stream)
            if not data:
                raise OSError("%s is empty" % file_name)
            return data
|
||||||
class TemplateCallback(ManagerCallback):
    """
    Callback class that will render a Jinja2 template, for use as a ready
    action.

    :param str source: The template source file, relative to
        `$CHARM_DIR/templates`

    :param str target: The target to write the rendered template to
    :param str owner: The owner of the rendered file
    :param str group: The group of the rendered file
    :param int perms: The permissions of the rendered file
    """
    def __init__(self, source, target,
                 owner='root', group='root', perms=0o444):
        self.source = source
        self.target = target
        self.owner = owner
        self.group = group
        self.perms = perms

    def __call__(self, manager, service_name, event_name):
        service = manager.get_service(service_name)
        # Merge every required_data context into one template context;
        # later contexts win on key collisions.
        context = {}
        for ctx in service.get('required_data', []):
            context.update(ctx)
        templating.render(self.source, self.target, context,
                          self.owner, self.group, self.perms)
|
||||||
# Convenience aliases for templates
|
|
||||||
render_template = template = TemplateCallback
|
|
|
@ -1,56 +0,0 @@
|
||||||
#!/usr/bin/env python
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import yaml
|
|
||||||
|
|
||||||
from subprocess import check_call
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
DEBUG,
|
|
||||||
ERROR,
|
|
||||||
)
|
|
||||||
|
|
||||||
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
|
|
||||||
|
|
||||||
|
|
||||||
def create(sysctl_dict, sysctl_file):
    """Creates a sysctl.conf file from a YAML associative array

    :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
    :type sysctl_dict: str
    :param sysctl_file: path to the sysctl file to be saved
    :type sysctl_file: str or unicode
    :returns: None
    """
    try:
        sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
    except yaml.YAMLError:
        # Best-effort: malformed input is logged and ignored rather than
        # raised, so a bad config value cannot abort the hook.
        log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
            level=ERROR)
        return

    with open(sysctl_file, "w") as fd:
        # One "key=value" line per setting — the format `sysctl -p` expects.
        for key, value in sysctl_dict_parsed.items():
            fd.write("{}={}\n".format(key, value))

    log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
        level=DEBUG)

    # Apply the new settings immediately.
    check_call(["sysctl", "-p", sysctl_file])
|
|
|
@ -1,68 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import os
|
|
||||||
|
|
||||||
from charmhelpers.core import host
|
|
||||||
from charmhelpers.core import hookenv
|
|
||||||
|
|
||||||
|
|
||||||
def render(source, target, context, owner='root', group='root',
           perms=0o444, templates_dir=None, encoding='UTF-8'):
    """
    Render a Jinja2 template to a file.

    The `source` path, if not absolute, is relative to the `templates_dir`.

    The `target` path should be absolute.

    The context should be a dict containing the values to be replaced in the
    template.

    The `owner`, `group`, and `perms` options will be passed to `write_file`.

    If omitted, `templates_dir` defaults to the `templates` folder in the charm.

    Note: Using this requires python-jinja2; if it is not installed, calling
    this will attempt to use charmhelpers.fetch.apt_install to install it.

    :raises jinja2.exceptions.TemplateNotFound: if `source` does not exist
        under `templates_dir`
    """
    try:
        from jinja2 import FileSystemLoader, Environment, exceptions
    except ImportError:
        try:
            from charmhelpers.fetch import apt_install
        except ImportError:
            hookenv.log('Could not import jinja2, and could not import '
                        'charmhelpers.fetch to install it',
                        level=hookenv.ERROR)
            raise
        apt_install('python-jinja2', fatal=True)
        from jinja2 import FileSystemLoader, Environment, exceptions

    if templates_dir is None:
        templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
    loader = Environment(loader=FileSystemLoader(templates_dir))
    try:
        # (fix: removed the redundant no-op `source = source` statement)
        template = loader.get_template(source)
    except exceptions.TemplateNotFound as e:
        hookenv.log('Could not load template %s from %s.' %
                    (source, templates_dir),
                    level=hookenv.ERROR)
        raise e
    content = template.render(context)
    # Ensure the parent directory exists before writing the rendered output.
    host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
    host.write_file(target, content.encode(encoding), owner, group, perms)
|
|
@ -1,477 +0,0 @@
|
||||||
#!/usr/bin/env python
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
#
|
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
#
|
|
||||||
#
|
|
||||||
# Authors:
|
|
||||||
# Kapil Thangavelu <kapil.foss@gmail.com>
|
|
||||||
#
|
|
||||||
"""
|
|
||||||
Intro
|
|
||||||
-----
|
|
||||||
|
|
||||||
A simple way to store state in units. This provides a key value
|
|
||||||
storage with support for versioned, transactional operation,
|
|
||||||
and can calculate deltas from previous values to simplify unit logic
|
|
||||||
when processing changes.
|
|
||||||
|
|
||||||
|
|
||||||
Hook Integration
|
|
||||||
----------------
|
|
||||||
|
|
||||||
There are several extant frameworks for hook execution, including
|
|
||||||
|
|
||||||
- charmhelpers.core.hookenv.Hooks
|
|
||||||
- charmhelpers.core.services.ServiceManager
|
|
||||||
|
|
||||||
The storage classes are framework agnostic, one simple integration is
|
|
||||||
via the HookData contextmanager. It will record the current hook
|
|
||||||
execution environment (including relation data, config data, etc.),
|
|
||||||
setup a transaction and allow easy access to the changes from
|
|
||||||
previously seen values. One consequence of the integration is the
|
|
||||||
reservation of particular keys ('rels', 'unit', 'env', 'config',
|
|
||||||
'charm_revisions') for their respective values.
|
|
||||||
|
|
||||||
Here's a fully worked integration example using hookenv.Hooks::
|
|
||||||
|
|
||||||
from charmhelper.core import hookenv, unitdata
|
|
||||||
|
|
||||||
hook_data = unitdata.HookData()
|
|
||||||
db = unitdata.kv()
|
|
||||||
hooks = hookenv.Hooks()
|
|
||||||
|
|
||||||
@hooks.hook
|
|
||||||
def config_changed():
|
|
||||||
# Print all changes to configuration from previously seen
|
|
||||||
# values.
|
|
||||||
for changed, (prev, cur) in hook_data.conf.items():
|
|
||||||
print('config changed', changed,
|
|
||||||
'previous value', prev,
|
|
||||||
'current value', cur)
|
|
||||||
|
|
||||||
          # Get some unit specific bookkeeping
|
|
||||||
if not db.get('pkg_key'):
|
|
||||||
key = urllib.urlopen('https://example.com/pkg_key').read()
|
|
||||||
db.set('pkg_key', key)
|
|
||||||
|
|
||||||
# Directly access all charm config as a mapping.
|
|
||||||
conf = db.getrange('config', True)
|
|
||||||
|
|
||||||
# Directly access all relation data as a mapping
|
|
||||||
rels = db.getrange('rels', True)
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
with hook_data():
|
|
||||||
           hooks.execute(sys.argv)
|
|
||||||
|
|
||||||
|
|
||||||
A more basic integration is via the hook_scope context manager which simply
|
|
||||||
manages transaction scope (and records hook name, and timestamp)::
|
|
||||||
|
|
||||||
>>> from unitdata import kv
|
|
||||||
>>> db = kv()
|
|
||||||
>>> with db.hook_scope('install'):
|
|
||||||
... # do work, in transactional scope.
|
|
||||||
... db.set('x', 1)
|
|
||||||
>>> db.get('x')
|
|
||||||
1
|
|
||||||
|
|
||||||
|
|
||||||
Usage
|
|
||||||
-----
|
|
||||||
|
|
||||||
Values are automatically json de/serialized to preserve basic typing
|
|
||||||
and complex data struct capabilities (dicts, lists, ints, booleans, etc).
|
|
||||||
|
|
||||||
Individual values can be manipulated via get/set::
|
|
||||||
|
|
||||||
>>> kv.set('y', True)
|
|
||||||
>>> kv.get('y')
|
|
||||||
True
|
|
||||||
|
|
||||||
# We can set complex values (dicts, lists) as a single key.
|
|
||||||
  >>> kv.set('config', {'a': 1, 'b': True})
|
|
||||||
|
|
||||||
# Also supports returning dictionaries as a record which
|
|
||||||
# provides attribute access.
|
|
||||||
>>> config = kv.get('config', record=True)
|
|
||||||
>>> config.b
|
|
||||||
True
|
|
||||||
|
|
||||||
|
|
||||||
Groups of keys can be manipulated with update/getrange::
|
|
||||||
|
|
||||||
>>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
|
|
||||||
>>> kv.getrange('gui.', strip=True)
|
|
||||||
{'z': 1, 'y': 2}
|
|
||||||
|
|
||||||
When updating values, its very helpful to understand which values
|
|
||||||
have actually changed and how have they changed. The storage
|
|
||||||
provides a delta method to provide for this::
|
|
||||||
|
|
||||||
>>> data = {'debug': True, 'option': 2}
|
|
||||||
>>> delta = kv.delta(data, 'config.')
|
|
||||||
>>> delta.debug.previous
|
|
||||||
None
|
|
||||||
>>> delta.debug.current
|
|
||||||
True
|
|
||||||
>>> delta
|
|
||||||
{'debug': (None, True), 'option': (None, 2)}
|
|
||||||
|
|
||||||
Note the delta method does not persist the actual change, it needs to
|
|
||||||
be explicitly saved via 'update' method::
|
|
||||||
|
|
||||||
>>> kv.update(data, 'config.')
|
|
||||||
|
|
||||||
Values modified in the context of a hook scope retain historical values
|
|
||||||
associated to the hookname.
|
|
||||||
|
|
||||||
>>> with db.hook_scope('config-changed'):
|
|
||||||
... db.set('x', 42)
|
|
||||||
>>> db.gethistory('x')
|
|
||||||
[(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
|
|
||||||
(2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
import collections
|
|
||||||
import contextlib
|
|
||||||
import datetime
|
|
||||||
import json
|
|
||||||
import os
|
|
||||||
import pprint
|
|
||||||
import sqlite3
|
|
||||||
import sys
|
|
||||||
|
|
||||||
__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
|
|
||||||
|
|
||||||
|
|
||||||
class Storage(object):
    """Simple key value database for local unit state within charms.

    Modifications are automatically committed at hook exit. That's
    currently regardless of exit code.

    To support dicts, lists, integer, floats, and booleans values
    are automatically json encoded/decoded.
    """
    def __init__(self, path=None):
        """Open (creating if needed) the sqlite database backing this store.

        :param path: database file location; defaults to
            ``$CHARM_DIR/.unit-state.db``.
        """
        self.db_path = path
        if path is None:
            self.db_path = os.path.join(
                os.environ.get('CHARM_DIR', ''), '.unit-state.db')
        self.conn = sqlite3.connect('%s' % self.db_path)
        self.cursor = self.conn.cursor()
        # Non-None only inside hook_scope(); used to record history rows.
        self.revision = None
        self._closed = False
        self._init()

    def close(self):
        """Roll back any uncommitted changes and close the connection."""
        if self._closed:
            return
        self.flush(False)
        self.cursor.close()
        self.conn.close()
        self._closed = True

    def _scoped_query(self, stmt, params=None):
        # Hook point for scoping queries; currently a pass-through.
        if params is None:
            params = []
        return stmt, params

    def get(self, key, default=None, record=False):
        """Return the deserialized value stored under ``key``.

        :param default: value returned when the key is absent.
        :param record: when True, wrap dict results in a ``Record`` to
            allow attribute-style access.
        """
        self.cursor.execute(
            *self._scoped_query(
                'select data from kv where key=?', [key]))
        result = self.cursor.fetchone()
        if not result:
            return default
        if record:
            return Record(json.loads(result[0]))
        return json.loads(result[0])

    def getrange(self, key_prefix, strip=False):
        """Return a dict of all keys beginning with ``key_prefix``.

        :param strip: when True, remove the prefix from returned keys.
        :returns: mapping of key -> deserialized value, or None when
            nothing matches.
        """
        # Bind the LIKE pattern as a parameter instead of interpolating it
        # into the SQL text: a prefix containing a single quote would
        # otherwise produce a malformed (and injectable) statement.
        stmt = "select key, data from kv where key like ?"
        self.cursor.execute(*self._scoped_query(stmt, [key_prefix + '%']))
        result = self.cursor.fetchall()

        if not result:
            return None
        if not strip:
            key_prefix = ''
        return dict([
            (k[len(key_prefix):], json.loads(v)) for k, v in result])

    def update(self, mapping, prefix=""):
        """Set every key/value in ``mapping``, each key prepended with
        ``prefix``."""
        for k, v in mapping.items():
            self.set("%s%s" % (prefix, k), v)

    def unset(self, key):
        """Remove ``key``; in a hook scope a 'DELETED' marker is recorded
        in the revision history."""
        self.cursor.execute('delete from kv where key=?', [key])
        if self.revision and self.cursor.rowcount:
            self.cursor.execute(
                'insert into kv_revisions values (?, ?, ?)',
                [key, self.revision, json.dumps('DELETED')])

    def set(self, key, value):
        """Store ``value`` (json serialized) under ``key`` and return it.

        Writing an identical value is a no-op. Inside a hook scope the
        revision history row for (key, revision) is inserted or updated.
        """
        serialized = json.dumps(value)

        self.cursor.execute(
            'select data from kv where key=?', [key])
        exists = self.cursor.fetchone()

        # Skip mutations to the same value
        if exists:
            if exists[0] == serialized:
                return value

        if not exists:
            self.cursor.execute(
                'insert into kv (key, data) values (?, ?)',
                (key, serialized))
        else:
            self.cursor.execute('''
            update kv
            set data = ?
            where key = ?''', [serialized, key])

        # Save the revision history entry when inside a hook scope.
        if not self.revision:
            return value

        self.cursor.execute(
            'select 1 from kv_revisions where key=? and revision=?',
            [key, self.revision])
        exists = self.cursor.fetchone()

        if not exists:
            self.cursor.execute(
                '''insert into kv_revisions (
                revision, key, data) values (?, ?, ?)''',
                (self.revision, key, serialized))
        else:
            self.cursor.execute(
                '''
                update kv_revisions
                set data = ?
                where key = ?
                and revision = ?''',
                [serialized, key, self.revision])

        return value

    def delta(self, mapping, prefix):
        """Return a DeltaSet describing how stored values under ``prefix``
        differ from ``mapping``.

        Keys present on only one side appear with None as the missing
        value. Nothing is persisted; call update() to save ``mapping``.
        """
        previous = self.getrange(prefix, strip=True)
        if not previous:
            pk = set()
        else:
            pk = set(previous.keys())
        ck = set(mapping.keys())
        delta = DeltaSet()

        # added
        for k in ck.difference(pk):
            delta[k] = Delta(None, mapping[k])

        # removed
        for k in pk.difference(ck):
            delta[k] = Delta(previous[k], None)

        # changed
        for k in pk.intersection(ck):
            c = mapping[k]
            p = previous[k]
            if c != p:
                delta[k] = Delta(p, c)

        return delta

    @contextlib.contextmanager
    def hook_scope(self, name=""):
        """Scope all future interactions to the current hook execution
        revision."""
        assert not self.revision
        self.cursor.execute(
            'insert into hooks (hook, date) values (?, ?)',
            (name or sys.argv[0],
             datetime.datetime.utcnow().isoformat()))
        self.revision = self.cursor.lastrowid
        try:
            yield self.revision
            self.revision = None
        except:  # noqa: E722
            # Intentionally broad: roll back on ANY failure, then re-raise
            # the original exception unmodified.
            self.flush(False)
            self.revision = None
            raise
        else:
            self.flush()

    def flush(self, save=True):
        """Commit (save=True) or roll back the current transaction."""
        if save:
            self.conn.commit()
        elif self._closed:
            return
        else:
            self.conn.rollback()

    def _init(self):
        # Create the schema on first use; idempotent thereafter.
        self.cursor.execute('''
            create table if not exists kv (
               key text,
               data text,
               primary key (key)
               )''')
        self.cursor.execute('''
            create table if not exists kv_revisions (
               key text,
               revision integer,
               data text,
               primary key (key, revision)
               )''')
        self.cursor.execute('''
            create table if not exists hooks (
               version integer primary key autoincrement,
               hook text,
               date text
               )''')
        self.conn.commit()

    def gethistory(self, key, deserialize=False):
        """Return recorded (revision, key, data, hook, date) rows for
        ``key``, json-decoding the data column when ``deserialize``.
        """
        self.cursor.execute(
            '''
            select kv.revision, kv.key, kv.data, h.hook, h.date
            from kv_revisions kv,
                 hooks h
            where kv.key=?
             and kv.revision = h.version
            ''', [key])
        if deserialize is False:
            return self.cursor.fetchall()
        # Return a list rather than a lazy ``map`` object so the result
        # behaves identically under python 2 and 3.
        return [_parse_history(i) for i in self.cursor.fetchall()]

    def debug(self, fh=sys.stderr):
        """Dump the kv and kv_revisions tables to ``fh`` for debugging."""
        self.cursor.execute('select * from kv')
        pprint.pprint(self.cursor.fetchall(), stream=fh)
        self.cursor.execute('select * from kv_revisions')
        pprint.pprint(self.cursor.fetchall(), stream=fh)
|
|
||||||
|
|
||||||
def _parse_history(d):
|
|
||||||
return (d[0], d[1], json.loads(d[2]), d[3],
|
|
||||||
datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"))
|
|
||||||
|
|
||||||
|
|
||||||
class HookData(object):
    """Simple integration for existing hook exec frameworks.

    Records all unit information, and stores deltas for processing
    by the hook.

    Sample::

       from charmhelpers.core import hookenv, unitdata

       changes = unitdata.HookData()
       db = unitdata.kv()
       hooks = hookenv.Hooks()

       @hooks.hook
       def config_changed():
           # View all changes to configuration
           for changed, (prev, cur) in changes.conf.items():
               print('config changed', changed,
                     'previous value', prev,
                     'current value', cur)

           # Get some unit specific bookkeeping
           if not db.get('pkg_key'):
               key = urllib.urlopen('https://example.com/pkg_key').read()
               db.set('pkg_key', key)

       if __name__ == '__main__':
           with changes():
               hooks.execute(sys.argv)

    """
    def __init__(self):
        # Shared unit-local store; per-hook deltas are filled in by
        # _record_hook() when the instance is used as a context manager.
        self.kv = kv()
        self.conf = None
        self.rels = None

    @contextlib.contextmanager
    def __call__(self):
        from charmhelpers.core import hookenv
        hook_name = hookenv.hook_name()

        # Everything inside runs under a single hook revision so that
        # history can be associated to this hook execution.
        with self.kv.hook_scope(hook_name):
            self._record_charm_version(hookenv.charm_dir())
            delta_config, delta_relation = self._record_hook(hookenv)
            yield self.kv, delta_config, delta_relation

    def _record_charm_version(self, charm_dir):
        # Record revisions.. charm revisions are meaningless
        # to charm authors as they don't control the revision.
        # so logic dependent on revision is not particularly
        # useful, however it is useful for debugging analysis.
        #
        # Use a context manager so the revision file handle is closed
        # promptly instead of leaking until garbage collection.
        with open(os.path.join(charm_dir, 'revision')) as f:
            charm_rev = f.read().strip()
        charm_rev = charm_rev or '0'
        revs = self.kv.get('charm_revisions', [])
        if charm_rev not in revs:
            revs.append(charm_rev.strip() or '0')
            self.kv.set('charm_revisions', revs)

    def _record_hook(self, hookenv):
        # Snapshot the execution environment and compute config /
        # relation deltas against the previously stored values.
        data = hookenv.execution_environment()
        self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
        self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
        self.kv.set('env', dict(data['env']))
        self.kv.set('unit', data['unit'])
        self.kv.set('relid', data.get('relid'))
        return conf_delta, rels_delta
|
|
||||||
|
|
||||||
class Record(dict):
    """A dict whose keys are also readable as attributes.

    Lookup of a missing attribute raises AttributeError (plain key
    access keeps normal dict semantics and raises KeyError).
    """

    __slots__ = ()

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)
|
|
||||||
|
|
||||||
class DeltaSet(Record):
    """A Record mapping key -> Delta, as produced by Storage.delta()."""

    __slots__ = ()
|
||||||
# (previous, current) pair describing how a single key changed.
Delta = collections.namedtuple('Delta', ['previous', 'current'])

# Process-wide Storage singleton, created lazily by kv().
_KV = None


def kv():
    """Return the shared Storage instance, creating it on first use."""
    global _KV
    if _KV is None:
        _KV = Storage()
    return _KV
|
|
0
tests/10-outofthebox-testing → tests/deprecated/10-outofthebox-testing
Executable file → Normal file
0
tests/10-outofthebox-testing → tests/deprecated/10-outofthebox-testing
Executable file → Normal file
0
tests/10_basic_deploy_test.py → tests/deprecated/10_basic_deploy_test.py
Executable file → Normal file
0
tests/10_basic_deploy_test.py → tests/deprecated/10_basic_deploy_test.py
Executable file → Normal file
0
tests/20_deploy_relations_test.py → tests/deprecated/20_deploy_relations_test.py
Executable file → Normal file
0
tests/20_deploy_relations_test.py → tests/deprecated/20_deploy_relations_test.py
Executable file → Normal file
0
tests/30_configuration_test.py → tests/deprecated/30_configuration_test.py
Executable file → Normal file
0
tests/30_configuration_test.py → tests/deprecated/30_configuration_test.py
Executable file → Normal file
0
tests/40_test_mirroring_queues.py → tests/deprecated/40_test_mirroring_queues.py
Executable file → Normal file
0
tests/40_test_mirroring_queues.py → tests/deprecated/40_test_mirroring_queues.py
Executable file → Normal file
|
@ -0,0 +1,71 @@
|
||||||
|
#!/usr/bin/python3

# This Amulet test performs a basic deploy and checks if rabbitmq is running.

import amulet
import time

# The number of seconds to wait for the environment to setup.
seconds = 900

# Create a dictionary for the rabbitmq configuration.
rabbitmq_configuration = {
    'stats_cron_schedule': '*/1 * * * *'
}
d = amulet.Deployment(series='trusty')
# Add the rabbitmq-server charm to the deployment.
d.add('rabbitmq-server')
# Configure options on the rabbitmq-server.
d.configure('rabbitmq-server', rabbitmq_configuration)
# Expose the server so we can connect.
d.expose('rabbitmq-server')
# XXX Remove charm= once this branch lands in the charm store
d.add('nrpe-external-master',
      charm='lp:~gnuoy/charms/trusty/nrpe/services-rewrite')
d.relate('rabbitmq-server:nrpe-external-master',
         'nrpe-external-master:nrpe-external-master')

try:
    # Execute the deployer with the current mapping.
    d.setup(timeout=seconds)
except amulet.helpers.TimeoutError:
    message = 'The environment did not setup in %d seconds.' % seconds
    # The SKIP status enables skip or fail the test based on configuration.
    amulet.raise_status(amulet.SKIP, msg=message)
# NOTE: a redundant "except: raise" clause previously followed here; it
# re-raised exactly what would propagate anyway and has been removed.

print('The rabbitmq-server has been successfully deployed and related '
      'to nrpe-external-master.')

###############################################################################
# # Verify nagios checks
###############################################################################
rabbitmq_sentry = d.sentry.unit['rabbitmq-server/0']


def check_nrpe(cfg, label):
    """Run the nagios check embedded in the nrpe cfg file *cfg*.

    The cfg file contains a /usr/local/... command line; extract it with
    egrep and execute it on the sentry, failing the whole test on a
    non-zero exit code.
    """
    command = 'bash -c "$(egrep -oh /usr/local.* %s)"' % cfg
    print(command)
    output, code = rabbitmq_sentry.run(command)
    print(output)
    if (code != 0):
        message = 'The ' + command + ' did not return the expected code of 0.'
        amulet.raise_status(amulet.FAIL, msg=message)
    else:
        print('The rabbitmq-server %s is OK' % label)


check_nrpe('/etc/nagios/nrpe.d/check_rabbitmq.cfg', 'check_rabbitmq')

print('Sleeping 70 seconds to make sure the monitoring cron has run')
time.sleep(70)

check_nrpe('/etc/nagios/nrpe.d/check_rabbitmq_queue.cfg',
           'check_rabbitmq_queue')

# Success!
print('The rabbitmq-server passed the monitoring tests!')
|
|
@ -0,0 +1,20 @@
|
||||||
|
# bundletester configuration for the rabbitmq-server charm tests.
bootstrap: true
reset: true
virtualenv: true
# Makefile targets run before the amulet tests.
makefile:
  - lint
  - test
sources:
  - ppa:juju/stable
# Packages required by the amulet / OpenStack client test scripts.
packages:
  - amulet
  - python-amulet
  - python-cinderclient
  - python-distro-info
  - python-glanceclient
  - python-heatclient
  - python-keystoneclient
  - python-neutronclient
  - python-novaclient
  - python-pika
  - python-swiftclient
|
|
@ -129,3 +129,20 @@ class RelationUtil(TestCase):
|
||||||
mock_peer_store_and_set.assert_called_with(
|
mock_peer_store_and_set.assert_called_with(
|
||||||
relation_settings={'private-address': ipv6_addr},
|
relation_settings={'private-address': ipv6_addr},
|
||||||
relation_id=None)
|
relation_id=None)
|
||||||
|
|
||||||
|
@patch.object(rabbitmq_server_relations, 'related_units')
@patch.object(rabbitmq_server_relations, 'relation_ids')
@patch.object(rabbitmq_server_relations, 'config')
def test_is_sufficient_peers(self, mock_config, mock_relation_ids,
                             mock_related_units):
    """is_sufficient_peers() against varying min-cluster-size settings."""
    # No min-cluster-size configured: any peer count is sufficient.
    _config = {'min-cluster-size': None}
    # The lambda closes over the _config name, so rebinding _config
    # below changes what subsequent config() calls return.
    mock_config.side_effect = lambda key: _config.get(key)
    self.assertTrue(rabbitmq_server_relations.is_sufficient_peers())

    # min-cluster-size 3 with only one related peer (2 units total
    # including this one): not sufficient.
    mock_relation_ids.return_value = ['cluster:0']
    mock_related_units.return_value = ['test/0']
    _config = {'min-cluster-size': 3}
    self.assertFalse(rabbitmq_server_relations.is_sufficient_peers())

    # Two related peers (3 units including this one): sufficient.
    mock_related_units.return_value = ['test/0', 'test/1']
    self.assertTrue(rabbitmq_server_relations.is_sufficient_peers())
||||||
|
|
Loading…
Reference in New Issue