Add pause/resume actions and sync charm-helpers

Adds pause and resume actions to the charm so that the
charm stays paused during maintenance operations.

Change-Id: I2ee7c87549279b29a9cb2e4e6747953cd6825b79
Partial-Bug: 1558642
This commit is contained in:
Liam Young 2016-04-04 14:57:29 +00:00
parent 8a459218dc
commit ef90f88c5f
16 changed files with 421 additions and 59 deletions

4
.gitignore vendored
View File

@ -6,3 +6,7 @@ tags
*.pyc
*.sw[nop]
*.pyc
precise
trusty
wily
xenial

View File

@ -2,3 +2,7 @@ git-reinstall:
description: Reinstall quantum-gateway from the openstack-origin-git repositories.
openstack-upgrade:
description: Perform openstack upgrades. Config option action-managed-upgrade must be set to True.
pause:
description: Pause the neutron-gateway unit.
resume:
description: Resume the neutron-gateway unit.

50
actions/actions.py Executable file
View File

@ -0,0 +1,50 @@
#!/usr/bin/python
import os
import sys
sys.path.append('hooks/')
from charmhelpers.core.hookenv import action_fail
from neutron_utils import (
pause_unit_helper,
resume_unit_helper,
register_configs,
)
def pause(args):
    """Pause all the neutron-gateway services.

    @raises Exception should a service fail to stop.
    """
    pause_unit_helper(register_configs())
def resume(args):
    """Resume all the neutron-gateway services.

    @raises Exception should a service fail to start.
    """
    resume_unit_helper(register_configs())
# Map action names (as invoked via the symlinked action scripts) to the
# callables implementing them; each callable receives the raw argv list.
ACTIONS = {"pause": pause, "resume": resume}


def main(args):
    """Dispatch to the action named by the invoked script (args[0]).

    Calls action_fail() and returns the error string when the action is
    unknown; calls action_fail() when the action raises.  Returns None
    on success.
    """
    action_name = os.path.basename(args[0])
    action = ACTIONS.get(action_name)
    if action is None:
        msg = "Action {} undefined".format(action_name)
        action_fail(msg)
        return msg
    try:
        action(args)
    except Exception as e:
        action_fail("Action {} failed: {}".format(action_name, str(e)))


if __name__ == "__main__":
    sys.exit(main(sys.argv))

1
actions/pause Symbolic link
View File

@ -0,0 +1 @@
actions.py

1
actions/resume Symbolic link
View File

@ -0,0 +1 @@
actions.py

View File

@ -191,6 +191,15 @@ get_iface_for_address = partial(_get_for_address, key='iface')
get_netmask_for_address = partial(_get_for_address, key='netmask')
def resolve_network_cidr(ip_address):
    '''Return (as a string) the CIDR of the configured local network that
    contains ip_address, derived from that interface's netmask.
    '''
    netmask = get_netmask_for_address(ip_address)
    network = netaddr.IPNetwork("%s/%s" % (ip_address, netmask))
    return str(network.cidr)
def format_ipv6_addr(address):
"""If address is IPv6, wrap it in '[]' otherwise return None.

View File

@ -126,7 +126,9 @@ class OpenStackAmuletDeployment(AmuletDeployment):
# Charms which can not use openstack-origin, ie. many subordinates
no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
'cinder-backup']
'cinder-backup', 'nexentaedge-data',
'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
'cinder-nexentaedge', 'nexentaedge-mgmt']
if self.openstack:
for svc in services:

View File

@ -1479,3 +1479,15 @@ class NetworkServiceContext(OSContextGenerator):
if self.context_complete(ctxt):
return ctxt
return {}
class InternalEndpointContext(OSContextGenerator):
    """Internal endpoint context.

    Exposes whether inter-service communication (e.g. Nova to Cinder)
    should use internal endpoints; OpenStack defaults to public
    endpoints, so this lets admins opt in to internal ones via the
    'use-internal-endpoints' config option.
    """
    def __call__(self):
        ctxt = {'use_internal_endpoints': config('use-internal-endpoints')}
        return ctxt

View File

@ -14,16 +14,19 @@
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
from charmhelpers.core.hookenv import (
config,
unit_get,
service_name,
network_get_primary_address,
)
from charmhelpers.contrib.network.ip import (
get_address_in_network,
is_address_in_network,
is_ipv6,
get_ipv6_addr,
resolve_network_cidr,
)
from charmhelpers.contrib.hahelpers.cluster import is_clustered
@ -33,16 +36,19 @@ ADMIN = 'admin'
ADDRESS_MAP = {
PUBLIC: {
'binding': 'public',
'config': 'os-public-network',
'fallback': 'public-address',
'override': 'os-public-hostname',
},
INTERNAL: {
'binding': 'internal',
'config': 'os-internal-network',
'fallback': 'private-address',
'override': 'os-internal-hostname',
},
ADMIN: {
'binding': 'admin',
'config': 'os-admin-network',
'fallback': 'private-address',
'override': 'os-admin-hostname',
@ -110,7 +116,7 @@ def resolve_address(endpoint_type=PUBLIC):
correct network. If clustered with no nets defined, return primary vip.
If not clustered, return unit address ensuring address is on configured net
split if one is configured.
split if one is configured, or a Juju 2.0 extra-binding has been used.
:param endpoint_type: Network endpoing type
"""
@ -125,23 +131,45 @@ def resolve_address(endpoint_type=PUBLIC):
net_type = ADDRESS_MAP[endpoint_type]['config']
net_addr = config(net_type)
net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
binding = ADDRESS_MAP[endpoint_type]['binding']
clustered = is_clustered()
if clustered:
if not net_addr:
# If no net-splits defined, we expect a single vip
resolved_address = vips[0]
else:
if clustered and vips:
if net_addr:
for vip in vips:
if is_address_in_network(net_addr, vip):
resolved_address = vip
break
else:
# NOTE: endeavour to check vips against network space
# bindings
try:
bound_cidr = resolve_network_cidr(
network_get_primary_address(binding)
)
for vip in vips:
if is_address_in_network(bound_cidr, vip):
resolved_address = vip
break
except NotImplementedError:
# If no net-splits configured and no support for extra
# bindings/network spaces so we expect a single vip
resolved_address = vips[0]
else:
if config('prefer-ipv6'):
fallback_addr = get_ipv6_addr(exc_list=vips)[0]
else:
fallback_addr = unit_get(net_fallback)
resolved_address = get_address_in_network(net_addr, fallback_addr)
if net_addr:
resolved_address = get_address_in_network(net_addr, fallback_addr)
else:
# NOTE: only try to use extra bindings if legacy network
# configuration is not in use
try:
resolved_address = network_get_primary_address(binding)
except NotImplementedError:
resolved_address = fallback_addr
if resolved_address is None:
raise ValueError("Unable to resolve a suitable IP address based on "

View File

@ -128,6 +128,13 @@ def service(action, service_name):
return subprocess.call(cmd) == 0
def systemv_services_running():
    """Return names of System V services that 'service --status-all'
    reports as running (rows flagged with '[ + ]')."""
    status = subprocess.check_output(
        ['service', '--status-all'],
        stderr=subprocess.STDOUT).decode('UTF-8')
    running = []
    for line in status.split('\n'):
        if '[ + ]' in line:
            # Service name is the last whitespace-separated field.
            running.append(line.split()[-1])
    return running
def service_running(service_name):
"""Determine whether a system service is running"""
if init_is_systemd():
@ -140,11 +147,15 @@ def service_running(service_name):
except subprocess.CalledProcessError:
return False
else:
# This works for upstart scripts where the 'service' command
# returns a consistent string to represent running 'start/running'
if ("start/running" in output or "is running" in output or
"up and running" in output):
return True
else:
return False
# Check System V scripts init script return codes
if service_name in systemv_services_running():
return True
return False
def service_available(service_name):

View File

@ -19,7 +19,6 @@ from charmhelpers.fetch import (
apt_purge,
)
from charmhelpers.core.host import (
restart_on_change,
lsb_release,
)
from charmhelpers.contrib.hahelpers.cluster import(
@ -34,7 +33,7 @@ from charmhelpers.contrib.openstack.utils import (
configure_installation_source,
openstack_upgrade_available,
os_requires_version,
set_os_workload_status,
pausable_restart_on_change as restart_on_change,
)
from charmhelpers.payload.execd import execd_preinstall
from charmhelpers.core.sysctl import create as create_sysctl
@ -65,9 +64,8 @@ from neutron_utils import (
reassign_agent_resources,
stop_neutron_ha_monitor_daemon,
use_l3ha,
REQUIRED_INTERFACES,
check_optional_relations,
NEUTRON_COMMON,
assess_status,
)
hooks = Hooks()
@ -334,5 +332,4 @@ if __name__ == '__main__':
hooks.execute(sys.argv)
except UnregisteredHookError as e:
log('Unknown hook {} - skipping.'.format(e))
set_os_workload_status(CONFIGS, REQUIRED_INTERFACES,
charm_func=check_optional_relations)
assess_status(CONFIGS)

View File

@ -49,6 +49,9 @@ from charmhelpers.contrib.openstack.utils import (
git_pip_venv_dir,
get_hostname,
set_os_workload_status,
make_assess_status_func,
pause_unit,
resume_unit,
)
from charmhelpers.contrib.openstack.neutron import (
@ -187,7 +190,6 @@ GIT_PACKAGE_BLACKLIST = [
'neutron-plugin-cisco',
'neutron-plugin-metering-agent',
'neutron-plugin-openvswitch-agent',
'neutron-plugin-vpn-agent',
'neutron-vpn-agent',
'python-neutron-fwaas',
'python-oslo.config',
@ -269,6 +271,8 @@ def use_l3ha():
EXT_PORT_CONF = '/etc/init/ext-port.conf'
PHY_NIC_MTU_CONF = '/etc/init/os-charm-phy-nic-mtu.conf'
STOPPED_SERVICES = ['os-charm-phy-nic-mtu', 'ext-port']
TEMPLATES = 'templates'
QUANTUM_CONF = "/etc/quantum/quantum.conf"
@ -331,7 +335,6 @@ NEUTRON_OVS_CONFIG_FILES = {
'neutron-plugin-metering-agent',
'neutron-metering-agent',
'neutron-lbaas-agent',
'neutron-plugin-vpn-agent',
'neutron-vpn-agent']
},
NEUTRON_L3_AGENT_CONF: {
@ -351,8 +354,7 @@ NEUTRON_OVS_CONFIG_FILES = {
},
NEUTRON_VPNAAS_AGENT_CONF: {
'hook_contexts': [NeutronGatewayContext()],
'services': ['neutron-plugin-vpn-agent',
'neutron-vpn-agent']
'services': ['neutron-vpn-agent']
},
NEUTRON_FWAAS_CONF: {
'hook_contexts': [NeutronGatewayContext()],
@ -394,7 +396,6 @@ NEUTRON_OVS_ODL_CONFIG_FILES = {
'neutron-plugin-metering-agent',
'neutron-metering-agent',
'neutron-lbaas-agent',
'neutron-plugin-vpn-agent',
'neutron-vpn-agent']
},
NEUTRON_L3_AGENT_CONF: {
@ -414,8 +415,7 @@ NEUTRON_OVS_ODL_CONFIG_FILES = {
},
NEUTRON_VPNAAS_AGENT_CONF: {
'hook_contexts': [NeutronGatewayContext()],
'services': ['neutron-plugin-vpn-agent',
'neutron-vpn-agent']
'services': ['neutron-vpn-agent']
},
NEUTRON_FWAAS_CONF: {
'hook_contexts': [NeutronGatewayContext()],
@ -468,10 +468,12 @@ CONFIG_FILES = {
}
SERVICE_RENAMES = {
'icehouse': {
'neutron-plugin-metering-agent': 'neutron-metering-agent',
},
'mitaka': {
'neutron-plugin-openvswitch-agent': 'neutron-openvswitch-agent',
'neutron-plugin-metering-agent': 'neutron-metering-agent',
}
},
}
@ -568,10 +570,15 @@ def restart_map():
plugin = config('plugin')
config_files = resolve_config_files(plugin, release)
_map = {}
enable_vpn_agent = 'neutron-vpn-agent' in get_packages()
for f, ctxt in config_files[plugin].iteritems():
svcs = set()
for svc in ctxt['services']:
svcs.add(remap_service(svc))
if not enable_vpn_agent and 'neutron-vpn-agent' in svcs:
svcs.remove('neutron-vpn-agent')
if 'neutron-vpn-agent' in svcs and 'neutron-l3-agent' in svcs:
svcs.remove('neutron-l3-agent')
if svcs:
_map[f] = list(svcs)
return _map
@ -1214,3 +1221,72 @@ def check_optional_relations(configs):
return status_get()
else:
return 'unknown', 'No optional relations'
def assess_status(configs):
    """Assess the workload status of the current unit.

    Decides what the state of the unit should be based on the current
    configuration.

    SIDE EFFECT: the generated status function calls
    set_os_workload_status(...), which sets the unit's workload status,
    and calls status_set(...) directly if the paused state isn't complete.

    @param configs: a templating.OSConfigRenderer() object
    @returns None - this function is executed for its side-effect
    """
    status_func = assess_status_func(configs)
    status_func()
def assess_status_func(configs):
    """Build the function that assesses the unit's workload status.

    Wraps charmhelpers.contrib.openstack.utils.make_assess_status_func()
    with this charm's required interfaces, optional-relation checks and
    the services expected to be running (deliberately stopped services
    are excluded).  Used by assess_status() and by pause/resume.

    NOTE(ajkavanagh) ports are not checked due to race hazards with
    services that don't behave sychronously w.r.t their service scripts,
    e.g. apache2.

    @param configs: a templating.OSConfigRenderer() object
    @return f() -> None : a function that assesses the unit's workload status
    """
    stopped = set(STOPPED_SERVICES)
    active_services = [svc for svc in services() if svc not in stopped]
    return make_assess_status_func(
        configs, REQUIRED_INTERFACES,
        charm_func=check_optional_relations,
        services=active_services, ports=None)
def pause_unit_helper(configs):
    """Pause the unit, then refresh its workload status.

    Delegates to charmhelpers.contrib.openstack.utils.pause_unit() via
    _pause_resume_helper() so the status is re-assessed afterwards.

    @param configs: a templating.OSConfigRenderer() object
    @returns None - this function is executed for its side-effect
    """
    _pause_resume_helper(pause_unit, configs)
def resume_unit_helper(configs):
    """Resume the unit, then refresh its workload status.

    Delegates to charmhelpers.contrib.openstack.utils.resume_unit() via
    _pause_resume_helper() so the status is re-assessed afterwards.

    @param configs: a templating.OSConfigRenderer() object
    @returns None - this function is executed for its side-effect
    """
    _pause_resume_helper(resume_unit, configs)
def _pause_resume_helper(f, configs):
    """Run pause_unit/resume_unit (f) with a freshly built status
    assessor and the charm's list of active services.

    @param f: pause_unit or resume_unit from
        charmhelpers.contrib.openstack.utils
    @param configs: a templating.OSConfigRenderer() object
    @returns None - this function is executed for its side-effect
    """
    stopped = set(STOPPED_SERVICES)
    active_services = [svc for svc in services() if svc not in stopped]
    # TODO(ajkavanagh) - ports= has been left off because of the race hazard
    # that exists due to service_start()
    f(assess_status_func(configs),
      services=active_services,
      ports=None)

View File

@ -1,6 +1,9 @@
import amulet
import os
import yaml
import time
import subprocess
import json
from neutronclient.v2_0 import client as neutronclient
@ -151,6 +154,31 @@ class NeutronGatewayBasicDeployment(OpenStackAmuletDeployment):
'nova-cloud-controller': nova_cc_config}
super(NeutronGatewayBasicDeployment, self)._configure_services(configs)
def _run_action(self, unit_id, action, *args):
    """Queue a juju action on unit_id and return the queued action id."""
    command = ["juju", "action", "do", "--format=json", unit_id, action]
    command.extend(args)
    raw = subprocess.check_output(command).decode(encoding="UTF-8")
    result = json.loads(raw)
    return result[u'Action queued with id']
def _wait_on_action(self, action_id, timeout=None):
    """Poll a juju action until it completes or fails.

    @param action_id: id returned by _run_action()
    @param timeout: optional maximum number of seconds to wait; the
        default of None polls forever (the original behaviour).  A
        timeout prevents a stuck action hanging the whole test run.
    @returns True if the action completed; False if it failed, the
        fetch command errored, or the timeout expired.
    """
    command = ["juju", "action", "fetch", "--format=json", action_id]
    deadline = None if timeout is None else time.time() + timeout
    while True:
        try:
            output = subprocess.check_output(command)
        except Exception as e:
            # Best effort: any fetch failure is reported as a failure.
            print(e)
            return False
        data = json.loads(output.decode(encoding="UTF-8"))
        if data[u"status"] == "completed":
            return True
        elif data[u"status"] == "failed":
            return False
        if deadline is not None and time.time() > deadline:
            return False
        time.sleep(2)
def _initialize_tests(self):
"""Perform final initialization before tests get run."""
# Access the sentries for inspecting service units
@ -1027,3 +1055,20 @@ class NeutronGatewayBasicDeployment(OpenStackAmuletDeployment):
# sleep_time = 0
self.d.configure(juju_service, set_default)
def test_910_pause_and_resume(self):
    """The services can be paused and resumed. """
    u.log.debug('Checking pause and resume actions...')
    unit_name = "neutron-gateway/0"
    unit = self.d.sentry.unit[unit_name]

    assert u.status_get(unit)[0] == "active"

    # Pause then resume, verifying the workload state after each action.
    for action, expected_state in (("pause", "maintenance"),
                                   ("resume", "active")):
        action_id = self._run_action(unit_name, action)
        assert self._wait_on_action(action_id), \
            "%s action failed." % action.capitalize()
        assert u.status_get(unit)[0] == expected_state

    u.log.debug('OK')

View File

@ -126,7 +126,9 @@ class OpenStackAmuletDeployment(AmuletDeployment):
# Charms which can not use openstack-origin, ie. many subordinates
no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
'cinder-backup']
'cinder-backup', 'nexentaedge-data',
'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
'cinder-nexentaedge', 'nexentaedge-mgmt']
if self.openstack:
for svc in services:

View File

@ -0,0 +1,78 @@
import sys
import mock
from mock import patch, MagicMock
from test_utils import CharmTestCase
# python-apt is not installed as part of test-requirements but is imported by
# some charmhelpers modules so create a fake import.
sys.modules['apt'] = MagicMock()
sys.modules['apt_pkg'] = MagicMock()
with patch('charmhelpers.core.hookenv.config'):
with patch('neutron_utils.restart_map'):
with patch('neutron_utils.register_configs') as configs:
with patch('charmhelpers.contrib.hardening.harden.harden') as \
mock_dec:
mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f:
lambda *args, **kwargs:
f(*args, **kwargs))
with patch('charmhelpers.core.hookenv.status_set'):
configs.return_value = 'test-config'
import actions
class PauseTestCase(CharmTestCase):
    """Tests for the pause action handler."""

    def setUp(self):
        super(PauseTestCase, self).setUp(
            actions, ["pause_unit_helper"])

    def test_pauses_services(self):
        """pause() delegates to pause_unit_helper with the configs."""
        actions.pause([])
        self.pause_unit_helper.assert_called_once_with('test-config')
class ResumeTestCase(CharmTestCase):
    """Tests for the resume action handler."""

    def setUp(self):
        super(ResumeTestCase, self).setUp(
            actions, ["resume_unit_helper"])

    # Renamed from 'test_pauses_services' (copy-paste from PauseTestCase).
    def test_resumes_services(self):
        """resume() delegates to resume_unit_helper with the configs."""
        actions.resume([])
        self.resume_unit_helper.assert_called_once_with('test-config')
class MainTestCase(CharmTestCase):
    """Tests for the actions.main() dispatcher."""

    def setUp(self):
        super(MainTestCase, self).setUp(actions, ["action_fail"])

    def test_invokes_action(self):
        """main() runs the action matching the supplied name."""
        calls = []

        def fake_action(args):
            calls.append(True)

        with mock.patch.dict(actions.ACTIONS, {"foo": fake_action}):
            actions.main(["foo"])
        self.assertEqual(calls, [True])

    def test_unknown_action(self):
        """Unknown actions aren't a traceback."""
        exit_string = actions.main(["foo"])
        self.assertEqual("Action foo undefined", exit_string)

    def test_failing_action(self):
        """Actions which traceback trigger action_fail() calls."""
        failures = []
        self.action_fail.side_effect = failures.append

        def fake_action(args):
            raise ValueError("uh oh")

        with mock.patch.dict(actions.ACTIONS, {"foo": fake_action}):
            actions.main(["foo"])
        self.assertEqual(failures, ["Action foo failed: uh oh"])

View File

@ -312,18 +312,18 @@ class TestNeutronUtils(CharmTestCase):
for conf in confs:
configs.register.assert_any_call(conf, ANY)
def test_restart_map_ovs(self):
@patch.object(neutron_utils, 'get_packages')
def test_restart_map_ovs(self, mock_get_packages):
self.config.return_value = 'ovs'
self.get_os_codename_install_source.return_value = 'havana'
mock_get_packages.return_value = ['neutron-vpn-agent']
ex_map = {
neutron_utils.NEUTRON_CONF: ['neutron-l3-agent',
'neutron-dhcp-agent',
neutron_utils.NEUTRON_CONF: ['neutron-dhcp-agent',
'neutron-metadata-agent',
'neutron-plugin-openvswitch-agent',
'neutron-plugin-metering-agent',
'neutron-metering-agent',
'neutron-lbaas-agent',
'neutron-plugin-vpn-agent',
'neutron-vpn-agent'],
neutron_utils.NEUTRON_DNSMASQ_CONF: ['neutron-dhcp-agent'],
neutron_utils.NEUTRON_LBAAS_AGENT_CONF:
@ -333,13 +333,10 @@ class TestNeutronUtils(CharmTestCase):
neutron_utils.NEUTRON_METADATA_AGENT_CONF:
['neutron-metadata-agent'],
neutron_utils.NEUTRON_VPNAAS_AGENT_CONF: [
'neutron-plugin-vpn-agent',
'neutron-vpn-agent'],
neutron_utils.NEUTRON_L3_AGENT_CONF: ['neutron-l3-agent',
'neutron-vpn-agent'],
neutron_utils.NEUTRON_L3_AGENT_CONF: ['neutron-vpn-agent'],
neutron_utils.NEUTRON_DHCP_AGENT_CONF: ['neutron-dhcp-agent'],
neutron_utils.NEUTRON_FWAAS_CONF: ['neutron-l3-agent',
'neutron-vpn-agent'],
neutron_utils.NEUTRON_FWAAS_CONF: ['neutron-vpn-agent'],
neutron_utils.NEUTRON_METERING_AGENT_CONF:
['neutron-metering-agent', 'neutron-plugin-metering-agent'],
neutron_utils.NOVA_CONF: ['nova-api-metadata'],
@ -349,17 +346,17 @@ class TestNeutronUtils(CharmTestCase):
self.assertDictEqual(neutron_utils.restart_map(), ex_map)
def test_restart_map_ovs_mitaka(self):
@patch.object(neutron_utils, 'get_packages')
def test_restart_map_ovs_mitaka(self, mock_get_packages):
self.config.return_value = 'ovs'
mock_get_packages.return_value = ['neutron-vpn-agent']
self.get_os_codename_install_source.return_value = 'mitaka'
ex_map = {
neutron_utils.NEUTRON_CONF: ['neutron-l3-agent',
'neutron-dhcp-agent',
neutron_utils.NEUTRON_CONF: ['neutron-dhcp-agent',
'neutron-metadata-agent',
'neutron-openvswitch-agent',
'neutron-metering-agent',
'neutron-lbaas-agent',
'neutron-plugin-vpn-agent',
'neutron-vpn-agent'],
neutron_utils.NEUTRON_DNSMASQ_CONF: ['neutron-dhcp-agent'],
neutron_utils.NEUTRON_LBAAS_AGENT_CONF:
@ -368,14 +365,10 @@ class TestNeutronUtils(CharmTestCase):
['neutron-openvswitch-agent'],
neutron_utils.NEUTRON_METADATA_AGENT_CONF:
['neutron-metadata-agent'],
neutron_utils.NEUTRON_VPNAAS_AGENT_CONF: [
'neutron-plugin-vpn-agent',
'neutron-vpn-agent'],
neutron_utils.NEUTRON_L3_AGENT_CONF: ['neutron-l3-agent',
'neutron-vpn-agent'],
neutron_utils.NEUTRON_VPNAAS_AGENT_CONF: ['neutron-vpn-agent'],
neutron_utils.NEUTRON_L3_AGENT_CONF: ['neutron-vpn-agent'],
neutron_utils.NEUTRON_DHCP_AGENT_CONF: ['neutron-dhcp-agent'],
neutron_utils.NEUTRON_FWAAS_CONF: ['neutron-l3-agent',
'neutron-vpn-agent'],
neutron_utils.NEUTRON_FWAAS_CONF: ['neutron-vpn-agent'],
neutron_utils.NEUTRON_METERING_AGENT_CONF:
['neutron-metering-agent'],
neutron_utils.NOVA_CONF: ['nova-api-metadata'],
@ -384,33 +377,37 @@ class TestNeutronUtils(CharmTestCase):
}
self.assertEqual(ex_map, neutron_utils.restart_map())
def test_restart_map_ovs_odl(self):
@patch.object(neutron_utils, 'get_packages')
def test_restart_map_ovs_post_trusty(self, mock_get_packages):
self.config.return_value = 'ovs'
# No VPN agent after trusty
mock_get_packages.return_value = ['neutron-l3-agent']
rmap = neutron_utils.restart_map()
for services in rmap.itervalues():
self.assertFalse('neutron-vpn-agent' in services)
@patch.object(neutron_utils, 'get_packages')
def test_restart_map_ovs_odl(self, mock_get_packages):
self.config.return_value = 'ovs-odl'
mock_get_packages.return_value = ['neutron-vpn-agent']
self.get_os_codename_install_source.return_value = 'icehouse'
ex_map = {
neutron_utils.NEUTRON_CONF: ['neutron-l3-agent',
'neutron-dhcp-agent',
neutron_utils.NEUTRON_CONF: ['neutron-dhcp-agent',
'neutron-metadata-agent',
'neutron-plugin-metering-agent',
'neutron-metering-agent',
'neutron-lbaas-agent',
'neutron-plugin-vpn-agent',
'neutron-vpn-agent'],
neutron_utils.NEUTRON_DNSMASQ_CONF: ['neutron-dhcp-agent'],
neutron_utils.NEUTRON_LBAAS_AGENT_CONF:
['neutron-lbaas-agent'],
neutron_utils.NEUTRON_METADATA_AGENT_CONF:
['neutron-metadata-agent'],
neutron_utils.NEUTRON_VPNAAS_AGENT_CONF: [
'neutron-plugin-vpn-agent',
'neutron-vpn-agent'],
neutron_utils.NEUTRON_L3_AGENT_CONF: ['neutron-l3-agent',
'neutron-vpn-agent'],
neutron_utils.NEUTRON_VPNAAS_AGENT_CONF: ['neutron-vpn-agent'],
neutron_utils.NEUTRON_L3_AGENT_CONF: ['neutron-vpn-agent'],
neutron_utils.NEUTRON_DHCP_AGENT_CONF: ['neutron-dhcp-agent'],
neutron_utils.NEUTRON_FWAAS_CONF: ['neutron-l3-agent',
'neutron-vpn-agent'],
neutron_utils.NEUTRON_FWAAS_CONF: ['neutron-vpn-agent'],
neutron_utils.NEUTRON_METERING_AGENT_CONF:
['neutron-metering-agent', 'neutron-plugin-metering-agent'],
['neutron-metering-agent'],
neutron_utils.NOVA_CONF: ['nova-api-metadata'],
neutron_utils.EXT_PORT_CONF: ['ext-port'],
neutron_utils.PHY_NIC_MTU_CONF: ['os-charm-phy-nic-mtu'],
@ -1171,3 +1168,48 @@ class TestNeutronAgentReallocation(CharmTestCase):
neutron_vpn_agent_context, perms=0o644),
]
self.assertEquals(render.call_args_list, expected)
def test_assess_status(self):
    """assess_status() builds the status function and invokes it."""
    with patch.object(neutron_utils, 'assess_status_func') as asf:
        inner = MagicMock()
        asf.return_value = inner
        neutron_utils.assess_status('test-config')
        asf.assert_called_once_with('test-config')
        inner.assert_called_once_with()
@patch.object(neutron_utils, 'check_optional_relations')
@patch.object(neutron_utils, 'REQUIRED_INTERFACES')
@patch.object(neutron_utils, 'services')
@patch.object(neutron_utils, 'make_assess_status_func')
def test_assess_status_func(self, make_assess_status_func, services,
                            REQUIRED_INTERFACES, check_optional_relations):
    """assess_status_func() wires the charm specifics into the helper."""
    services.return_value = ['s1']
    neutron_utils.assess_status_func('test-config')
    # ports=None whilst port checks are disabled.
    make_assess_status_func.assert_called_once_with(
        'test-config', REQUIRED_INTERFACES,
        charm_func=check_optional_relations, services=['s1'], ports=None)
def test_pause_unit_helper(self):
    """Both helpers delegate to _pause_resume_helper()."""
    for helper, delegate in (
            (neutron_utils.pause_unit_helper, neutron_utils.pause_unit),
            (neutron_utils.resume_unit_helper, neutron_utils.resume_unit)):
        with patch.object(neutron_utils, '_pause_resume_helper') as prh:
            helper('random-config')
            prh.assert_called_once_with(delegate, 'random-config')
@patch.object(neutron_utils, 'services')
def test_pause_resume_helper(self, services):
    """_pause_resume_helper() hands the assessor and services to f."""
    f = MagicMock()
    services.return_value = ['s1']
    with patch.object(neutron_utils, 'assess_status_func') as asf:
        asf.return_value = 'assessor'
        neutron_utils._pause_resume_helper(f, 'some-config')
        asf.assert_called_once_with('some-config')
        # ports=None whilst port checks are disabled.
        f.assert_called_once_with('assessor', services=['s1'], ports=None)