Merge /next in

Liam Young 2015-09-12 11:58:20 +01:00
commit 54c20f0499
13 changed files with 331 additions and 182 deletions


@@ -2,12 +2,19 @@
PYTHON := /usr/bin/env python
lint:
@flake8 --exclude hooks/charmhelpers actions hooks unit_tests tests
@flake8 --exclude hooks/charmhelpers,tests/charmhelpers \
actions hooks unit_tests tests
@charm proof
unit_test:
test:
@# Bundletester expects unit tests here.
@echo Starting tests...
@$(PYTHON) /usr/bin/nosetests --nologcapture unit_tests
@$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
functional_test:
@echo Starting Amulet tests...
# https://bugs.launchpad.net/amulet/+bug/1320357
@juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700
bin/charm_helpers_sync.py:
@mkdir -p bin
@@ -18,13 +25,6 @@ sync: bin/charm_helpers_sync.py
@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
test:
@echo Starting Amulet tests...
# coreycb note: The -v should only be temporary until Amulet sends
# raise_status() messages to stderr:
# https://bugs.launchpad.net/amulet/+bug/1320357
@juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700
publish: lint test
bzr push lp:charms/openstack-dashboard
bzr push lp:charms/trusty/openstack-dashboard


@@ -435,8 +435,12 @@ def get_hostname(address, fqdn=True):
rev = dns.reversename.from_address(address)
result = ns_query(rev)
if not result:
return None
try:
result = socket.gethostbyaddr(address)[0]
except:
return None
else:
result = address
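
To make the resolved control flow of that hunk easier to follow, here is a minimal, self-contained sketch of the fallback it introduces (the function name and the narrowed exception types are illustrative, not taken from the charm):

import socket


def reverse_dns_with_fallback(address, ns_result=None):
    """Sketch: if the charm's ns_query() reverse lookup returned nothing
    (ns_result is falsy), fall back to the system resolver before giving up."""
    result = ns_result
    if not result:
        try:
            result = socket.gethostbyaddr(address)[0]
        except (socket.herror, socket.gaierror, OSError):
            # The hunk above uses a bare except; narrower exceptions shown here.
            return None
    return result


print(reverse_dns_with_fallback('127.0.0.1'))  # typically 'localhost'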


@@ -895,6 +895,18 @@ class NeutronContext(OSContextGenerator):
'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
return ctxt
def pg_ctxt(self):
driver = neutron_plugin_attribute(self.plugin, 'driver',
self.network_manager)
config = neutron_plugin_attribute(self.plugin, 'config',
self.network_manager)
ovs_ctxt = {'core_plugin': driver,
'neutron_plugin': 'plumgrid',
'neutron_security_groups': self.neutron_security_groups,
'local_ip': unit_private_ip(),
'config': config}
return ovs_ctxt
def __call__(self):
if self.network_manager not in ['quantum', 'neutron']:
return {}
@@ -914,6 +926,8 @@ class NeutronContext(OSContextGenerator):
ctxt.update(self.calico_ctxt())
elif self.plugin == 'vsp':
ctxt.update(self.nuage_ctxt())
elif self.plugin == 'plumgrid':
ctxt.update(self.pg_ctxt())
alchemy_flags = config('neutron-alchemy-flags')
if alchemy_flags:


@@ -195,6 +195,20 @@ def neutron_plugins():
'packages': [],
'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
'server_services': ['neutron-server']
},
'plumgrid': {
'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2',
'contexts': [
context.SharedDBContext(user=config('database-user'),
database=config('database'),
ssl_dir=NEUTRON_CONF_DIR)],
'services': [],
'packages': [['plumgrid-lxc'],
['iovisor-dkms']],
'server_packages': ['neutron-server',
'neutron-plugin-plumgrid'],
'server_services': ['neutron-server']
}
}
if release >= 'icehouse':
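
The new 'plumgrid' entry is consumed through neutron_plugin_attribute(), as the pg_ctxt() hunk above shows. A brief usage sketch follows; note it only works inside a hook environment, because building the plugin map reads charm config:

from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute

# Both lookups resolve against the 'plumgrid' dict added above.
driver = neutron_plugin_attribute('plumgrid', 'driver', 'neutron')
conf_file = neutron_plugin_attribute('plumgrid', 'config', 'neutron')
# driver    -> the PLUMgrid core plugin class path
# conf_file -> '/etc/neutron/plugins/plumgrid/plumgrid.ini'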


@@ -1,5 +1,3 @@
#!/usr/bin/python
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
@@ -144,6 +142,9 @@ PACKAGE_CODENAMES = {
'glance-common': OrderedDict([
('11.0.0', 'liberty'),
]),
'openstack-dashboard': OrderedDict([
('8.0.0', 'liberty'),
]),
}
DEFAULT_LOOPBACK_SIZE = '5G'
@@ -195,9 +196,9 @@ def get_os_codename_version(vers):
error_out(e)
def get_os_version_codename(codename):
def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
'''Determine OpenStack version number from codename.'''
for k, v in six.iteritems(OPENSTACK_CODENAMES):
for k, v in six.iteritems(version_map):
if v == codename:
return k
e = 'Could not derive OpenStack version for '\
@@ -229,7 +230,7 @@ def get_os_codename_package(package, fatal=True):
error_out(e)
vers = apt.upstream_version(pkg.current_ver.ver_str)
match = re.match('^(\d)\.(\d)\.(\d)', vers)
match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
if match:
vers = match.group(0)
@@ -250,6 +251,8 @@ def get_os_codename_package(package, fatal=True):
vers = vers[:6]
return OPENSTACK_CODENAMES[vers]
except KeyError:
if not fatal:
return None
e = 'Could not determine OpenStack codename for version %s' % vers
error_out(e)
@@ -429,7 +432,11 @@ def openstack_upgrade_available(package):
import apt_pkg as apt
src = config('openstack-origin')
cur_vers = get_os_version_package(package)
available_vers = get_os_version_install_source(src)
if "swift" in package:
codename = get_os_codename_install_source(src)
available_vers = get_os_version_codename(codename, SWIFT_CODENAMES)
else:
available_vers = get_os_version_install_source(src)
apt.init()
return apt.version_compare(available_vers, cur_vers) == 1
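
The swift special case is needed because Swift versions its packages on its own scheme, so the available version has to be resolved against SWIFT_CODENAMES rather than OPENSTACK_CODENAMES; that is what the new version_map argument enables. A self-contained sketch of the reverse lookup (the tables below are abbreviated, illustrative stand-ins, not the real charm-helpers maps):

EXAMPLE_OPENSTACK_CODENAMES = {'2015.1': 'kilo', '2015.2': 'liberty'}
EXAMPLE_SWIFT_CODENAMES = {'2.2.2': 'kilo', '2.5.0': 'liberty'}


def example_get_os_version_codename(codename,
                                    version_map=EXAMPLE_OPENSTACK_CODENAMES):
    """Reverse-search a version map for the version matching a codename."""
    for version, name in version_map.items():
        if name == codename:
            return version
    raise KeyError('Could not derive version for %s' % codename)


# Non-swift packages resolve against the OpenStack table, swift against its own:
print(example_get_os_version_codename('kilo'))                           # '2015.1'
print(example_get_os_version_codename('kilo', EXAMPLE_SWIFT_CODENAMES))  # '2.2.2'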


@@ -767,21 +767,23 @@ def status_set(workload_state, message):
def status_get():
"""Retrieve the previously set juju workload state
"""Retrieve the previously set juju workload state and message
If the status-get command is not found then assume this is juju < 1.23 and
return 'unknown', ""
If the status-set command is not found then assume this is juju < 1.23 and
return 'unknown'
"""
cmd = ['status-get']
cmd = ['status-get', "--format=json", "--include-data"]
try:
raw_status = subprocess.check_output(cmd, universal_newlines=True)
status = raw_status.rstrip()
return status
raw_status = subprocess.check_output(cmd)
except OSError as e:
if e.errno == errno.ENOENT:
return 'unknown'
return ('unknown', "")
else:
raise
else:
status = json.loads(raw_status.decode("UTF-8"))
return (status["status"], status["message"])
def translate_exc(from_exc, to_exc):
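
With the --format=json call, status_get() now returns a (state, message) tuple instead of a bare string. A short, hypothetical hook snippet showing how a charm might consume it (the maintenance message is made up):

from charmhelpers.core.hookenv import log, status_get, status_set

state, message = status_get()
log('previous workload state: %s (%s)' % (state, message))
if state == 'unknown':
    # Either juju < 1.23 (no status-get command) or no status recorded yet.
    status_set('maintenance', 'configuring openstack-dashboard')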


@@ -4,7 +4,9 @@ maintainer: Adam Gandelman <adamg@canonical.com>
description: |
The OpenStack Dashboard provides a full feature web interface for interacting
with instances, images, volumes and networks within an OpenStack deployment.
tags: ["misc"]
tags:
- openstack
- misc
provides:
nrpe-external-master:
interface: nrpe-external-master


@@ -4,9 +4,13 @@ set -ex
sudo add-apt-repository --yes ppa:juju/stable
sudo apt-get update --yes
sudo apt-get install --yes python-amulet \
sudo apt-get install --yes amulet \
python-cinderclient \
python-distro-info \
python-neutronclient \
python-glanceclient \
python-heatclient \
python-keystoneclient \
python-neutronclient \
python-novaclient \
python-glanceclient
python-pika \
python-swiftclient

tests/019-basic-vivid-kilo: Normal file → Executable file (0 lines changed)


@@ -1,6 +1,16 @@
This directory provides Amulet tests that focus on verification of
openstack-dashboard deployments.
test_* methods are called in lexical sort order, although each individual test
should be idempotent, and expected to pass regardless of run order.
Test name convention to ensure desired test order:
1xx service and endpoint checks
2xx relation checks
3xx config checks
4xx functional checks
9xx restarts and other final checks
In order to run tests, you'll need charm-tools installed (in addition to
juju, of course):
sudo add-apt-repository ppa:juju/stable
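
Returning to the test-name convention listed above, a minimal illustration (the class and method names below are placeholders; the real tests appear in the deployment test diff further down):

class ExampleDashboardTests(object):
    """Placeholder sketch: lexical sorting of test_* names fixes the run order."""

    def test_100_services(self):
        """1xx: service and endpoint checks run first."""

    def test_200_identity_service_relation(self):
        """2xx: relation checks."""

    def test_300_config_defaults(self):
        """3xx: config checks."""

    def test_900_restart_on_config_change(self):
        """9xx: restarts and other final checks run last."""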


@@ -12,8 +12,8 @@ from charmhelpers.contrib.openstack.amulet.deployment import (
from charmhelpers.contrib.openstack.amulet.utils import (
OpenStackAmuletUtils,
DEBUG, # flake8: noqa
ERROR
DEBUG,
# ERROR
)
# Use DEBUG to turn on debug logging
@@ -26,8 +26,10 @@ class OpenstackDashboardBasicDeployment(OpenStackAmuletDeployment):
def __init__(self, series, openstack=None, source=None, git=False,
stable=False):
"""Deploy the entire test environment."""
super(OpenstackDashboardBasicDeployment, self).__init__(series, openstack,
source, stable)
super(OpenstackDashboardBasicDeployment, self).__init__(series,
openstack,
source,
stable)
self.git = git
self._add_services()
self._add_relations()
@@ -36,24 +38,26 @@ class OpenstackDashboardBasicDeployment(OpenStackAmuletDeployment):
self._initialize_tests()
def _add_services(self):
"""Add services
Add the services that we're testing, where openstack-dashboard is local,
and the rest of the services are from lp branches that are
compatible with the local charm (e.g. stable or next).
"""
"""Add the services that we're testing, where openstack-dashboard is
local, and the rest of the services are from lp branches that are
compatible with the local charm (e.g. stable or next).
"""
this_service = {'name': 'openstack-dashboard'}
other_services = [{'name': 'keystone'}, {'name': 'mysql'}]
super(OpenstackDashboardBasicDeployment, self)._add_services(this_service,
other_services)
other_services = [{'name': 'keystone'},
{'name': 'mysql'}]
super(OpenstackDashboardBasicDeployment, self)._add_services(
this_service,
other_services)
def _add_relations(self):
"""Add all of the relations for the services."""
relations = {
'openstack-dashboard:identity-service': 'keystone:identity-service',
'keystone:shared-db': 'mysql:shared-db',
'openstack-dashboard:identity-service':
'keystone:identity-service',
'keystone:shared-db': 'mysql:shared-db',
}
super(OpenstackDashboardBasicDeployment, self)._add_relations(relations)
super(OpenstackDashboardBasicDeployment, self)._add_relations(
relations)
def _configure_services(self):
"""Configure all of the services."""
@@ -75,16 +79,17 @@ class OpenstackDashboardBasicDeployment(OpenStackAmuletDeployment):
{'name': 'requirements',
'repository': reqs_repo,
'branch': branch},
# NOTE(coreycb): Pin oslo libraries here because they're not
# capped and recently released versions causing issues for juno.
# NOTE(coreycb): Pin oslo libraries here because
# they're not capped and recently released versions
# causing issues for juno.
{'name': 'oslo-config',
'repository': 'git://github.com/openstack/oslo.config',
'repository': 'git://github.com/openstack/oslo.config', # noqa
'branch': '1.6.0'},
{'name': 'oslo-i18n',
'repository': 'git://github.com/openstack/oslo.i18n',
'branch': '1.3.1'},
{'name': 'oslo-serialization',
'repository': 'git://github.com/openstack/oslo.serialization',
'repository': 'git://github.com/openstack/oslo.serialization', # noqa
'branch': '1.2.0'},
{'name': 'oslo-utils',
'repository': 'git://github.com/openstack/oslo.utils',
@@ -111,7 +116,8 @@ class OpenstackDashboardBasicDeployment(OpenStackAmuletDeployment):
'http_proxy': amulet_http_proxy,
'https_proxy': amulet_http_proxy,
}
horizon_config['openstack-origin-git'] = yaml.dump(openstack_origin_git)
horizon_config['openstack-origin-git'] = \
yaml.dump(openstack_origin_git)
keystone_config = {'admin-password': 'openstack',
'admin-token': 'ubuntutesting'}
@@ -119,98 +125,53 @@ class OpenstackDashboardBasicDeployment(OpenStackAmuletDeployment):
configs = {'openstack-dashboard': horizon_config,
'mysql': mysql_config,
'keystone': keystone_config}
super(OpenstackDashboardBasicDeployment, self)._configure_services(configs)
super(OpenstackDashboardBasicDeployment, self)._configure_services(
configs)
def _initialize_tests(self):
"""Perform final initialization before tests get run."""
# Access the sentries for inspecting service units
self.keystone_sentry = self.d.sentry.unit['keystone/0']
self.openstack_dashboard_sentry = self.d.sentry.unit['openstack-dashboard/0']
self.openstack_dashboard_sentry = \
self.d.sentry.unit['openstack-dashboard/0']
def test_services(self):
u.log.debug('openstack release val: {}'.format(
self._get_openstack_release()))
u.log.debug('openstack release str: {}'.format(
self._get_openstack_release_string()))
# Let things settle a bit before moving forward
time.sleep(30)
# NOTE(beisner): Switch to helper once the rabbitmq test refactor lands.
def crude_py_parse(self, file_contents, expected):
for line in file_contents.split('\n'):
if '=' in line:
args = line.split('=')
if len(args) <= 1:
continue
key = args[0].strip()
value = args[1].strip()
if key in expected.keys():
if expected[key] != value:
msg = "Mismatch %s != %s" % (expected[key], value)
amulet.raise_status(amulet.FAIL, msg=msg)
def test_100_services(self):
"""Verify the expected services are running on the corresponding
service units."""
dashboard_services = ['apache2']
services = {
self.keystone_sentry: ['keystone'],
self.openstack_dashboard_sentry: dashboard_services,
self.openstack_dashboard_sentry: ['apache2']
}
ret = u.validate_services_by_name(services)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def crude_py_parse(self, file_contents, expected):
for line in file_contents.split('\n'):
if '=' in line:
args = line.split('=')
if len(args) <= 1:
continue
key = args[0].strip()
value = args[1].strip()
if key in expected.keys():
if expected[key] != value:
msg="Mismatch %s != %s" % (expected[key], value)
amulet.raise_status(amulet.FAIL, msg=msg)
def test_local_settings(self):
unit = self.openstack_dashboard_sentry
ksentry = self.keystone_sentry
conf = '/etc/openstack-dashboard/local_settings.py'
file_contents = unit.file_contents(conf)
rdata = ksentry.relation('identity-service', 'openstack-dashboard:identity-service')
expected = {
'LOGIN_REDIRECT_URL': """'/horizon'""",
'OPENSTACK_HOST': '"%s"' % (rdata['private-address']),
'OPENSTACK_KEYSTONE_DEFAULT_ROLE': '"Member"'
}
self.crude_py_parse(file_contents, expected)
def test_router_settings(self):
if self.openstack > "icehouse":
unit = self.openstack_dashboard_sentry
conf = ('/usr/share/openstack-dashboard/openstack_dashboard/'
'enabled/_40_router.py')
file_contents = unit.file_contents(conf)
expected = {
'DISABLED': "True",
}
self.crude_py_parse(file_contents, expected)
def test_connection(self):
unit = self.openstack_dashboard_sentry
dashboard_relation = unit.relation('identity-service',
'keystone:identity-service')
dashboard_ip = dashboard_relation['private-address']
response = urllib2.urlopen('http://%s/horizon' % (dashboard_ip))
html = response.read()
if 'OpenStack Dashboard' not in html:
msg="Dashboard frontpage check failed"
amulet.raise_status(amulet.FAIL, msg=msg)
def test_z_restart_on_config_change(self):
"""Verify that the specified services are restarted when the config
is changed.
Note(coreycb): The method name with the _z_ is a little odd
but it forces the test to run last. It just makes things
easier because restarting services requires re-authorization.
"""
conf = '/etc/openstack-dashboard/local_settings.py'
services = ['apache2']
self.d.configure('openstack-dashboard', {'use-syslog': 'True'})
time = 120
for s in services:
if not u.service_restarted(self.openstack_dashboard_sentry, s, conf,
pgrep_full=True, sleep_time=time):
self.d.configure('openstack-dashboard', {'use-syslog': 'False'})
msg = "service {} didn't restart after config change".format(s)
time = 0
self.d.configure('openstack-dashboard', {'use-syslog': 'False'})
def test_openstack_dashboard_identity_service_relation(self):
"""Verify the openstack-dashboard to keystone identity-service relation data."""
def test_200_openstack_dashboard_identity_service_relation(self):
"""Verify the openstack-dashboard to keystone identity-service
relation data."""
u.log.debug('Checking dashboard:keystone id relation data...')
unit = self.openstack_dashboard_sentry
relation = ['identity-service', 'keystone:identity-service']
expected = {
@@ -220,11 +181,14 @@ class OpenstackDashboardBasicDeployment(OpenStackAmuletDeployment):
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('openstack-dashboard identity-service', ret)
message = u.relation_error('openstack-dashboard identity-service',
ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_keystone_identity_service_relation(self):
"""Verify the keystone to openstack-dashboard identity-service relation data."""
def test_202_keystone_identity_service_relation(self):
"""Verify the keystone to openstack-dashboard identity-service
relation data."""
u.log.debug('Checking keystone:dashboard id relation data...')
unit = self.keystone_sentry
relation = ['identity-service', 'openstack-dashboard:identity-service']
expected = {
@@ -242,3 +206,63 @@ class OpenstackDashboardBasicDeployment(OpenStackAmuletDeployment):
if ret:
message = u.relation_error('keystone identity-service', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_302_router_settings(self):
if self._get_openstack_release() > self.trusty_icehouse:
u.log.debug('Checking dashboard router settings...')
unit = self.openstack_dashboard_sentry
conf = ('/usr/share/openstack-dashboard/openstack_dashboard/'
'enabled/_40_router.py')
file_contents = unit.file_contents(conf)
expected = {
'DISABLED': "True",
}
self.crude_py_parse(file_contents, expected)
def test_400_connection(self):
u.log.debug('Checking dashboard http response...')
unit = self.openstack_dashboard_sentry
dashboard_relation = unit.relation('identity-service',
'keystone:identity-service')
dashboard_ip = dashboard_relation['private-address']
response = urllib2.urlopen('http://%s/horizon' % (dashboard_ip))
html = response.read()
if 'OpenStack Dashboard' not in html:
msg = "Dashboard frontpage check failed"
amulet.raise_status(amulet.FAIL, msg=msg)
def test_900_restart_on_config_change(self):
"""Verify that the specified services are restarted when the
config is changed."""
sentry = self.openstack_dashboard_sentry
juju_service = 'openstack-dashboard'
# Expected default and alternate values
set_default = {'use-syslog': 'False'}
set_alternate = {'use-syslog': 'True'}
# Services which are expected to restart upon config change,
# and corresponding config files affected by the change
services = {'apache2': '/etc/openstack-dashboard/local_settings.py'}
# Make config change, check for service restarts
u.log.debug('Making config change on {}...'.format(juju_service))
mtime = u.get_sentry_time(sentry)
self.d.configure(juju_service, set_alternate)
sleep_time = 30
for s, conf_file in services.iteritems():
u.log.debug("Checking that service restarted: {}".format(s))
if not u.validate_service_config_changed(sentry, mtime, s,
conf_file,
retry_count=6,
retry_sleep_time=20,
sleep_time=sleep_time):
self.d.configure(juju_service, set_default)
msg = "service {} didn't restart after config change".format(s)
amulet.raise_status(amulet.FAIL, msg=msg)
sleep_time = 0
self.d.configure(juju_service, set_default)


@@ -114,7 +114,7 @@ class AmuletUtils(object):
# /!\ DEPRECATION WARNING (beisner):
# New and existing tests should be rewritten to use
# validate_services_by_name() as it is aware of init systems.
self.log.warn('/!\\ DEPRECATION WARNING: use '
self.log.warn('DEPRECATION WARNING: use '
'validate_services_by_name instead of validate_services '
'due to init system differences.')
@@ -269,33 +269,52 @@ class AmuletUtils(object):
"""Get last modification time of directory."""
return sentry_unit.directory_stat(directory)['mtime']
def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
"""Get process' start time.
def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None):
"""Get start time of a process based on the last modification time
of the /proc/pid directory.
Determine start time of the process based on the last modification
time of the /proc/pid directory. If pgrep_full is True, the process
name is matched against the full command line.
"""
if pgrep_full:
cmd = 'pgrep -o -f {}'.format(service)
else:
cmd = 'pgrep -o {}'.format(service)
cmd = cmd + ' | grep -v pgrep || exit 0'
cmd_out = sentry_unit.run(cmd)
self.log.debug('CMDout: ' + str(cmd_out))
if cmd_out[0]:
self.log.debug('Pid for %s %s' % (service, str(cmd_out[0])))
proc_dir = '/proc/{}'.format(cmd_out[0].strip())
return self._get_dir_mtime(sentry_unit, proc_dir)
:sentry_unit: The sentry unit to check for the service on
:service: service name to look for in process table
:pgrep_full: [Deprecated] Use full command line search mode with pgrep
:returns: epoch time of service process start
"""
if pgrep_full is not None:
# /!\ DEPRECATION WARNING (beisner):
# No longer implemented, as pidof is now used instead of pgrep.
# https://bugs.launchpad.net/charm-helpers/+bug/1474030
self.log.warn('DEPRECATION WARNING: pgrep_full bool is no '
'longer implemented re: lp 1474030.')
pid_list = self.get_process_id_list(sentry_unit, service)
pid = pid_list[0]
proc_dir = '/proc/{}'.format(pid)
self.log.debug('Pid for {} on {}: {}'.format(
service, sentry_unit.info['unit_name'], pid))
return self._get_dir_mtime(sentry_unit, proc_dir)
def service_restarted(self, sentry_unit, service, filename,
pgrep_full=False, sleep_time=20):
pgrep_full=None, sleep_time=20):
"""Check if service was restarted.
Compare a service's start time vs a file's last modification time
(such as a config file for that service) to determine if the service
has been restarted.
"""
# /!\ DEPRECATION WARNING (beisner):
# This method is prone to races in that no before-time is known.
# Use validate_service_config_changed instead.
# NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
# used instead of pgrep. pgrep_full is still passed through to ensure
# deprecation WARNS. lp1474030
self.log.warn('DEPRECATION WARNING: use '
'validate_service_config_changed instead of '
'service_restarted due to known races.')
time.sleep(sleep_time)
if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
self._get_file_mtime(sentry_unit, filename)):
@@ -304,15 +323,15 @@ class AmuletUtils(object):
return False
def service_restarted_since(self, sentry_unit, mtime, service,
pgrep_full=False, sleep_time=20,
retry_count=2):
pgrep_full=None, sleep_time=20,
retry_count=2, retry_sleep_time=30):
"""Check if service was been started after a given time.
Args:
sentry_unit (sentry): The sentry unit to check for the service on
mtime (float): The epoch time to check against
service (string): service name to look for in process table
pgrep_full (boolean): Use full command line search mode with pgrep
pgrep_full: [Deprecated] Use full command line search mode with pgrep
sleep_time (int): Seconds to sleep before looking for process
retry_count (int): If service is not found, how many times to retry
@@ -321,30 +340,44 @@ class AmuletUtils(object):
False if service is older than mtime or if service was
not found.
"""
self.log.debug('Checking %s restarted since %s' % (service, mtime))
# NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
# used instead of pgrep. pgrep_full is still passed through to ensure
# deprecation WARNS. lp1474030
unit_name = sentry_unit.info['unit_name']
self.log.debug('Checking that %s service restarted since %s on '
'%s' % (service, mtime, unit_name))
time.sleep(sleep_time)
proc_start_time = self._get_proc_start_time(sentry_unit, service,
pgrep_full)
while retry_count > 0 and not proc_start_time:
self.log.debug('No pid file found for service %s, will retry %i '
'more times' % (service, retry_count))
time.sleep(30)
proc_start_time = self._get_proc_start_time(sentry_unit, service,
pgrep_full)
retry_count = retry_count - 1
proc_start_time = None
tries = 0
while tries <= retry_count and not proc_start_time:
try:
proc_start_time = self._get_proc_start_time(sentry_unit,
service,
pgrep_full)
self.log.debug('Attempt {} to get {} proc start time on {} '
'OK'.format(tries, service, unit_name))
except IOError:
# NOTE(beisner) - race avoidance, proc may not exist yet.
# https://bugs.launchpad.net/charm-helpers/+bug/1474030
self.log.debug('Attempt {} to get {} proc start time on {} '
'failed'.format(tries, service, unit_name))
time.sleep(retry_sleep_time)
tries += 1
if not proc_start_time:
self.log.warn('No proc start time found, assuming service did '
'not start')
return False
if proc_start_time >= mtime:
self.log.debug('proc start time is newer than provided mtime'
'(%s >= %s)' % (proc_start_time, mtime))
self.log.debug('Proc start time is newer than provided mtime'
'(%s >= %s) on %s (OK)' % (proc_start_time,
mtime, unit_name))
return True
else:
self.log.warn('proc start time (%s) is older than provided mtime '
'(%s), service did not restart' % (proc_start_time,
mtime))
self.log.warn('Proc start time (%s) is older than provided mtime '
'(%s) on %s, service did not '
'restart' % (proc_start_time, mtime, unit_name))
return False
def config_updated_since(self, sentry_unit, filename, mtime,
@@ -361,12 +394,15 @@ class AmuletUtils(object):
bool: True if file was modified more recently than mtime, False if
file was modified before mtime,
"""
self.log.debug('Checking %s updated since %s' % (filename, mtime))
self.log.debug('Checking that %s file updated since '
'%s' % (filename, mtime))
unit_name = sentry_unit.info['unit_name']
time.sleep(sleep_time)
file_mtime = self._get_file_mtime(sentry_unit, filename)
if file_mtime >= mtime:
self.log.debug('File mtime is newer than provided mtime '
'(%s >= %s)' % (file_mtime, mtime))
'(%s >= %s) on %s (OK)' % (file_mtime, mtime,
unit_name))
return True
else:
self.log.warn('File mtime %s is older than provided mtime %s'
@@ -374,8 +410,9 @@ class AmuletUtils(object):
return False
def validate_service_config_changed(self, sentry_unit, mtime, service,
filename, pgrep_full=False,
sleep_time=20, retry_count=2):
filename, pgrep_full=None,
sleep_time=20, retry_count=2,
retry_sleep_time=30):
"""Check service and file were updated after mtime
Args:
@@ -383,9 +420,10 @@ class AmuletUtils(object):
mtime (float): The epoch time to check against
service (string): service name to look for in process table
filename (string): The file to check mtime of
pgrep_full (boolean): Use full command line search mode with pgrep
sleep_time (int): Seconds to sleep before looking for process
pgrep_full: [Deprecated] Use full command line search mode with pgrep
sleep_time (int): Initial sleep in seconds to pass to test helpers
retry_count (int): If service is not found, how many times to retry
retry_sleep_time (int): Time in seconds to wait between retries
Typical Usage:
u = OpenStackAmuletUtils(ERROR)
@@ -402,15 +440,25 @@ class AmuletUtils(object):
mtime, False if service is older than mtime or if service was
not found or if filename was modified before mtime.
"""
self.log.debug('Checking %s restarted since %s' % (service, mtime))
time.sleep(sleep_time)
service_restart = self.service_restarted_since(sentry_unit, mtime,
service,
pgrep_full=pgrep_full,
sleep_time=0,
retry_count=retry_count)
config_update = self.config_updated_since(sentry_unit, filename, mtime,
sleep_time=0)
# NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
# used instead of pgrep. pgrep_full is still passed through to ensure
# deprecation WARNS. lp1474030
service_restart = self.service_restarted_since(
sentry_unit, mtime,
service,
pgrep_full=pgrep_full,
sleep_time=sleep_time,
retry_count=retry_count,
retry_sleep_time=retry_sleep_time)
config_update = self.config_updated_since(
sentry_unit,
filename,
mtime,
sleep_time=0)
return service_restart and config_update
def get_sentry_time(self, sentry_unit):

tests/tests.yaml: new file (20 lines added)

@@ -0,0 +1,20 @@
bootstrap: true
reset: true
virtualenv: true
makefile:
- lint
- test
sources:
- ppa:juju/stable
packages:
- amulet
- python-amulet
- python-cinderclient
- python-distro-info
- python-glanceclient
- python-heatclient
- python-keystoneclient
- python-neutronclient
- python-novaclient
- python-pika
- python-swiftclient