From a227ab593b602549dbd3ec41d754193a3b463bc4 Mon Sep 17 00:00:00 2001 From: David Lenwell Date: Wed, 24 Jun 2015 20:58:58 -0700 Subject: [PATCH] naming convention change vm to instance vm_manager = instance_manager VMManager = InstanceManager vm = instance Change-Id: I8ad2bc3f72888dfe43dd3a966cafee4ee2163131 --- .gitignore | 3 + akanda/rug/api/nova.py | 6 +- .../{vm_manager.py => instance_manager.py} | 16 +- akanda/rug/state.py | 189 ++++----- akanda/rug/test/functional/base.py | 17 +- .../test/functional/test_service_instance.py | 22 ++ akanda/rug/test/functional/test_service_vm.py | 22 -- .../rug/test/unit/api/test_configuration.py | 14 +- ..._vmmanager.py => test_instance_manager.py} | 372 +++++++++--------- akanda/rug/test/unit/test_state.py | 251 ++++++------ akanda/rug/test/unit/test_tenant.py | 11 +- 11 files changed, 488 insertions(+), 435 deletions(-) mode change 100644 => 100755 .gitignore mode change 100644 => 100755 akanda/rug/api/nova.py rename akanda/rug/{vm_manager.py => instance_manager.py} (98%) mode change 100644 => 100755 mode change 100644 => 100755 akanda/rug/state.py mode change 100644 => 100755 akanda/rug/test/functional/base.py create mode 100755 akanda/rug/test/functional/test_service_instance.py delete mode 100644 akanda/rug/test/functional/test_service_vm.py mode change 100644 => 100755 akanda/rug/test/unit/api/test_configuration.py rename akanda/rug/test/unit/{test_vmmanager.py => test_instance_manager.py} (67%) mode change 100644 => 100755 mode change 100644 => 100755 akanda/rug/test/unit/test_state.py mode change 100644 => 100755 akanda/rug/test/unit/test_tenant.py diff --git a/.gitignore b/.gitignore old mode 100644 new mode 100755 index fb8a8b41..59752833 --- a/.gitignore +++ b/.gitignore @@ -36,3 +36,6 @@ ChangeLog test.conf *.swp + +#pycharm cruft +.idea/* diff --git a/akanda/rug/api/nova.py b/akanda/rug/api/nova.py old mode 100644 new mode 100755 index ae5f09f9..bf64fd27 --- a/akanda/rug/api/nova.py +++ b/akanda/rug/api/nova.py @@ -28,7 +28,7 @@ OPTIONS = [ cfg.StrOpt( 'router_ssh_public_key', help="Path to the SSH public key for the 'akanda' user within " - "router appliance VMs", + "router appliance instances", default='/etc/akanda-rug/akanda.pub') ] cfg.CONF.register_opts(OPTIONS) @@ -91,7 +91,7 @@ class Nova(object): nics = [{'net-id': p.network_id, 'v4-fixed-ip': '', 'port-id': p.id} for p in ([mgt_port] + instance_ports)] - LOG.debug('creating vm for router %s with image %s', + LOG.debug('creating instance for router %s with image %s', router_id, image_uuid) name = 'ak-' + router_id @@ -146,7 +146,7 @@ class Nova(object): def destroy_instance(self, instance_info): if instance_info: - LOG.debug('deleting vm for router %s', instance_info.name) + LOG.debug('deleting instance for router %s', instance_info.name) self.client.servers.delete(instance_info.id_) def boot_instance(self, prev_instance_info, router_id, router_image_uuid, diff --git a/akanda/rug/vm_manager.py b/akanda/rug/instance_manager.py old mode 100644 new mode 100755 similarity index 98% rename from akanda/rug/vm_manager.py rename to akanda/rug/instance_manager.py index 29badd43..0d45fd40 --- a/akanda/rug/vm_manager.py +++ b/akanda/rug/instance_manager.py @@ -44,11 +44,11 @@ STATUS_MAP = { CONF = cfg.CONF -VM_MANAGER_OPTS = [ +INSTANCE_MANAGER_OPTS = [ cfg.IntOpt( 'hotplug_timeout', default=10, help='The amount of time to wait for nova to hotplug/unplug ' - 'networks from the router VMs'), + 'networks from the router instances'), cfg.IntOpt( 'boot_timeout', default=600), cfg.IntOpt( @@ 
-58,7 +58,7 @@ VM_MANAGER_OPTS = [ 'into ERROR state'), ), ] -CONF.register_opts(VM_MANAGER_OPTS) +CONF.register_opts(INSTANCE_MANAGER_OPTS) def synchronize_router_status(f): @@ -94,7 +94,7 @@ class BootAttemptCounter(object): return self._attempts -class VmManager(object): +class InstanceManager(object): def __init__(self, router_id, tenant_id, log, worker_context): self.router_id = router_id @@ -149,7 +149,7 @@ class VmManager(object): self.router_id ) if instance is None and self.state != ERROR: - self.log.info('No router VM was found; rebooting') + self.log.info('No instance was found; rebooting') self.state = DOWN self.instance_info = None @@ -425,7 +425,7 @@ class VmManager(object): self.instance_info.id_ ) - # For each port that doesn't have a mac address on the VM... + # For each port that doesn't have a mac address on the instance... for network_id in logical_networks - instance_networks: port = worker_context.neutron.create_vrrp_port( self.router_obj.id, @@ -463,7 +463,7 @@ class VmManager(object): # The action of attaching/detaching interfaces in Nova happens via the # message bus and is *not* blocking. We need to wait a few seconds to # see if the list of tap devices on the appliance actually changed. If - # not, assume the hotplug failed, and reboot the VM. + # not, assume the hotplug failed, and reboot the Instance. replug_seconds = cfg.CONF.hotplug_timeout while replug_seconds > 0: self.log.debug( @@ -521,7 +521,7 @@ class VmManager(object): if self.state != ERROR: self.state = BOOTING else: - # If the VM was created more than `boot_timeout` seconds + # If the instance was created more than `boot_timeout` seconds # ago, log an error and set the state set to DOWN self.log.info( 'Router is DOWN. Created over %d secs ago.', diff --git a/akanda/rug/state.py b/akanda/rug/state.py old mode 100644 new mode 100755 index 9548bfcf..2ec307f6 --- a/akanda/rug/state.py +++ b/akanda/rug/state.py @@ -29,13 +29,13 @@ from oslo_config import cfg from oslo_log import log as logging from akanda.rug.event import POLL, CREATE, READ, UPDATE, DELETE, REBUILD -from akanda.rug import vm_manager +from akanda.rug import instance_manager class StateParams(object): - def __init__(self, vm, log, queue, bandwidth_callback, + def __init__(self, instance, log, queue, bandwidth_callback, reboot_error_threshold, router_image_uuid): - self.vm = vm + self.instance = instance self.log = log self.queue = queue self.bandwidth_callback = bandwidth_callback @@ -57,8 +57,8 @@ class State(object): return self.params.queue @property - def vm(self): - return self.params.vm + def instance(self): + return self.params.instance @property def router_image_uuid(self): @@ -136,26 +136,26 @@ class CalcAction(State): return action def transition(self, action, worker_context): - if self.vm.state == vm_manager.GONE: - next_action = StopVM(self.params) + if self.instance.state == instance_manager.GONE: + next_action = StopInstance(self.params) elif action == DELETE: - next_action = StopVM(self.params) + next_action = StopInstance(self.params) elif action == REBUILD: - next_action = RebuildVM(self.params) - elif self.vm.state == vm_manager.BOOTING: + next_action = RebuildInstance(self.params) + elif self.instance.state == instance_manager.BOOTING: next_action = CheckBoot(self.params) - elif self.vm.state == vm_manager.DOWN: - next_action = CreateVM(self.params) + elif self.instance.state == instance_manager.DOWN: + next_action = CreateInstance(self.params) else: next_action = Alive(self.params) - if self.vm.state == 
vm_manager.ERROR: + if self.instance.state == instance_manager.ERROR: if action == POLL: # If the selected action is to poll, and we are in an # error state, then an event slipped through the # filter in send_message() and we should ignore it # here. next_action = self - elif self.vm.error_cooldown: + elif self.instance.error_cooldown: self.log.debug('Router is in ERROR cooldown, ignoring ' 'event.') next_action = self @@ -180,7 +180,7 @@ class PushUpdate(State): class ClearError(State): - """Remove the error state from the VM. + """Remove the error state from the instance. """ def __init__(self, params, next_state=None): @@ -188,9 +188,9 @@ class ClearError(State): self._next_state = next_state def execute(self, action, worker_context): - # If we are being told explicitly to update the VM, we should + # If we are being told explicitly to update the instance, we should # ignore any error status. - self.vm.clear_error(worker_context) + self.instance.clear_error(worker_context) return action def transition(self, action, worker_context): @@ -201,123 +201,128 @@ class ClearError(State): class Alive(State): def execute(self, action, worker_context): - self.vm.update_state(worker_context) + self.instance.update_state(worker_context) return action def transition(self, action, worker_context): - if self.vm.state == vm_manager.GONE: - return StopVM(self.params) - elif self.vm.state == vm_manager.DOWN: - return CreateVM(self.params) - elif action == POLL and self.vm.state == vm_manager.CONFIGURED: + if self.instance.state == instance_manager.GONE: + return StopInstance(self.params) + elif self.instance.state == instance_manager.DOWN: + return CreateInstance(self.params) + elif action == POLL and \ + self.instance.state == instance_manager.CONFIGURED: return CalcAction(self.params) - elif action == READ and self.vm.state == vm_manager.CONFIGURED: + elif action == READ and \ + self.instance.state == instance_manager.CONFIGURED: return ReadStats(self.params) else: - return ConfigureVM(self.params) + return ConfigureInstance(self.params) -class CreateVM(State): +class CreateInstance(State): def execute(self, action, worker_context): # Check for a loop where the router keeps failing to boot or # accept the configuration. 
- if self.vm.attempts >= self.params.reboot_error_threshold: + if self.instance.attempts >= self.params.reboot_error_threshold: self.log.info('dropping out of boot loop after %s trials', - self.vm.attempts) - self.vm.set_error(worker_context) + self.instance.attempts) + self.instance.set_error(worker_context) return action - self.vm.boot(worker_context, self.router_image_uuid) - self.log.debug('CreateVM attempt %s/%s', - self.vm.attempts, + self.instance.boot(worker_context, self.router_image_uuid) + self.log.debug('CreateInstance attempt %s/%s', + self.instance.attempts, self.params.reboot_error_threshold) return action def transition(self, action, worker_context): - if self.vm.state == vm_manager.GONE: - return StopVM(self.params) - elif self.vm.state == vm_manager.ERROR: + if self.instance.state == instance_manager.GONE: + return StopInstance(self.params) + elif self.instance.state == instance_manager.ERROR: return CalcAction(self.params) - elif self.vm.state == vm_manager.DOWN: - return CreateVM(self.params) + elif self.instance.state == instance_manager.DOWN: + return CreateInstance(self.params) return CheckBoot(self.params) class CheckBoot(State): def execute(self, action, worker_context): - self.vm.check_boot(worker_context) + self.instance.check_boot(worker_context) # Put the action back on the front of the queue so that we can yield # and handle it in another state machine traversal (which will proceed # from CalcAction directly to CheckBoot). - if self.vm.state not in (vm_manager.DOWN, vm_manager.GONE): + if self.instance.state not in (instance_manager.DOWN, + instance_manager.GONE): self.queue.appendleft(action) return action def transition(self, action, worker_context): - if self.vm.state in (vm_manager.DOWN, - vm_manager.GONE): - return StopVM(self.params) - if self.vm.state == vm_manager.UP: - return ConfigureVM(self.params) + if self.instance.state in (instance_manager.DOWN, + instance_manager.GONE): + return StopInstance(self.params) + if self.instance.state == instance_manager.UP: + return ConfigureInstance(self.params) return CalcAction(self.params) -class ReplugVM(State): +class ReplugInstance(State): def execute(self, action, worker_context): - self.vm.replug(worker_context) + self.instance.replug(worker_context) return action def transition(self, action, worker_context): - if self.vm.state == vm_manager.RESTART: - return StopVM(self.params) - return ConfigureVM(self.params) + if self.instance.state == instance_manager.RESTART: + return StopInstance(self.params) + return ConfigureInstance(self.params) -class StopVM(State): +class StopInstance(State): def execute(self, action, worker_context): - self.vm.stop(worker_context) - if self.vm.state == vm_manager.GONE: + self.instance.stop(worker_context) + if self.instance.state == instance_manager.GONE: # Force the action to delete since the router isn't there # any more. 
return DELETE return action def transition(self, action, worker_context): - if self.vm.state not in (vm_manager.DOWN, vm_manager.GONE): + if self.instance.state not in (instance_manager.DOWN, + instance_manager.GONE): return self - if self.vm.state == vm_manager.GONE: + if self.instance.state == instance_manager.GONE: return Exit(self.params) if action == DELETE: return Exit(self.params) - return CreateVM(self.params) + return CreateInstance(self.params) -class RebuildVM(State): +class RebuildInstance(State): def execute(self, action, worker_context): - self.vm.stop(worker_context) - if self.vm.state == vm_manager.GONE: + self.instance.stop(worker_context) + if self.instance.state == instance_manager.GONE: # Force the action to delete since the router isn't there # any more. return DELETE - # Re-create the VM - self.vm.reset_boot_counter() + # Re-create the instance + self.instance.reset_boot_counter() return CREATE def transition(self, action, worker_context): - if self.vm.state not in (vm_manager.DOWN, vm_manager.GONE): + if self.instance.state not in (instance_manager.DOWN, + instance_manager.GONE): return self - if self.vm.state == vm_manager.GONE: + if self.instance.state == instance_manager.GONE: return Exit(self.params) - return CreateVM(self.params) + return CreateInstance(self.params) class Exit(State): pass -class ConfigureVM(State): +class ConfigureInstance(State): def execute(self, action, worker_context): - self.vm.configure(worker_context) - if self.vm.state == vm_manager.CONFIGURED: + self.instance.configure(worker_context) + if self.instance.state == instance_manager.CONFIGURED: if action == READ: return READ else: @@ -326,15 +331,15 @@ class ConfigureVM(State): return action def transition(self, action, worker_context): - if self.vm.state == vm_manager.REPLUG: - return ReplugVM(self.params) - if self.vm.state in (vm_manager.RESTART, - vm_manager.DOWN, - vm_manager.GONE): - return StopVM(self.params) - if self.vm.state == vm_manager.UP: + if self.instance.state == instance_manager.REPLUG: + return ReplugInstance(self.params) + if self.instance.state in (instance_manager.RESTART, + instance_manager.DOWN, + instance_manager.GONE): + return StopInstance(self.params) + if self.instance.state == instance_manager.UP: return PushUpdate(self.params) - # Below here, assume vm.state == vm_manager.CONFIGURED + # Below here, assume instance.state == instance_manager.CONFIGURED if action == READ: return ReadStats(self.params) return CalcAction(self.params) @@ -342,7 +347,7 @@ class ConfigureVM(State): class ReadStats(State): def execute(self, action, worker_context): - stats = self.vm.read_stats() + stats = self.instance.read_stats() self.params.bandwidth_callback(stats) return POLL @@ -363,9 +368,8 @@ class Automaton(object): :param delete_callback: Invoked when the Automaton decides the router should be deleted. :type delete_callback: callable - :param bandwidth_callback: To be invoked when the Automaton - needs to report how much bandwidth - a router has used. + :param bandwidth_callback: To be invoked when the Automaton needs to + report how much bandwidth a router has used. :type bandwidth_callback: callable taking router_id and bandwidth info dict :param worker_context: a WorkerContext @@ -388,10 +392,12 @@ class Automaton(object): self.log = logging.getLogger(__name__ + '.' 
+ router_id) self.action = POLL - self.vm = vm_manager.VmManager(router_id, tenant_id, self.log, - worker_context) + self.instance = instance_manager.InstanceManager(router_id, + tenant_id, + self.log, + worker_context) self._state_params = StateParams( - self.vm, + self.instance, self.log, self._queue, self.bandwidth_callback, @@ -423,14 +429,18 @@ class Automaton(object): return try: - self.log.debug('%s.execute(%s) vm.state=%s', - self.state, self.action, self.vm.state) + self.log.debug('%s.execute(%s) instance.state=%s', + self.state, + self.action, + self.instance.state) self.action = self.state.execute( self.action, worker_context, ) - self.log.debug('%s.execute -> %s vm.state=%s', - self.state, self.action, self.vm.state) + self.log.debug('%s.execute -> %s instance.state=%s', + self.state, + self.action, + self.instance.state) except: self.log.exception( '%s.execute() failed for action: %s', @@ -443,9 +453,9 @@ class Automaton(object): self.action, worker_context, ) - self.log.debug('%s.transition(%s) -> %s vm.state=%s', + self.log.debug('%s.transition(%s) -> %s instance.state=%s', old_state, self.action, self.state, - self.vm.state) + self.instance.state) # Yield control each time we stop to figure out what # to do next. @@ -473,7 +483,8 @@ class Automaton(object): # down on the number of times a worker thread wakes up to # process something on a router that isn't going to actually # do any work. - if message.crud == POLL and self.vm.state == vm_manager.ERROR: + if message.crud == POLL and \ + self.instance.state == instance_manager.ERROR: self.log.info( 'Router status is ERROR, ignoring POLL message: %s', message, @@ -512,4 +523,4 @@ class Automaton(object): return (not self.deleted) and bool(self._queue) def has_error(self): - return self.vm.state == vm_manager.ERROR + return self.instance.state == instance_manager.ERROR diff --git a/akanda/rug/test/functional/base.py b/akanda/rug/test/functional/base.py old mode 100644 new mode 100755 index 73fc3a53..a02b897f --- a/akanda/rug/test/functional/base.py +++ b/akanda/rug/test/functional/base.py @@ -78,17 +78,18 @@ class AkandaFunctionalBase(testtools.TestCase): if self._management_address: return self._management_address['addr'] - # TODO(adam_g): Deal with multiple service VMs - service_vm = [vm for vm in self.novaclient.servers.list(search_opts={ - 'all_tenants': 1, - 'tenant_id': self.config['service_tenant_id'], - }) if vm.name.startswith('ak-')][0] + # TODO(adam_g): Deal with multiple service instances + service_instance = \ + [instance for instance in self.novaclient.servers.list( + search_opts={'all_tenants': 1, + 'tenant_id': self.config['service_tenant_id']} + ) if instance.name.startswith('ak-')][0] try: - self._management_address = service_vm.addresses['mgt'][0] + self._management_address = service_instance.addresses['mgt'][0] except KeyError: - self.fail('"mgt" port not found on service vm %s (%s)' % - (service_vm.id, service_vm.name)) + self.fail('"mgt" port not found on service instance %s (%s)' % + (service_instance.id, service_instance.name)) return self._management_address['addr'] def assert_router_is_active(self, router_uuid=None): diff --git a/akanda/rug/test/functional/test_service_instance.py b/akanda/rug/test/functional/test_service_instance.py new file mode 100755 index 00000000..e4e29f74 --- /dev/null +++ b/akanda/rug/test/functional/test_service_instance.py @@ -0,0 +1,22 @@ + +from akanda.rug.test.functional import base + + +class AkandaApplianceInstanceTest(base.AkandaFunctionalBase): + """Basic tests to 
ensure a service instance and its associated router is + alive and well. + """ + def setUp(self): + super(AkandaApplianceInstanceTest, self).setUp() + # ensure the devstack spawned router instance becomes active before + # starting to run any test cases. this in itself is a test that + # devstack produced a functional router. + self.assert_router_is_active() + + def test_appliance_is_alive(self): + self.assertTrue( + self.ak_client.is_alive( + host=self.management_address, + port=self.config['appliance_api_port'], + ), + ) diff --git a/akanda/rug/test/functional/test_service_vm.py b/akanda/rug/test/functional/test_service_vm.py deleted file mode 100644 index 45ac2b31..00000000 --- a/akanda/rug/test/functional/test_service_vm.py +++ /dev/null @@ -1,22 +0,0 @@ - -from akanda.rug.test.functional import base - - -class AkandaApplianceVMTest(base.AkandaFunctionalBase): - """Basic tests to ensure a service VM and its associated router is alive - and well. - """ - def setUp(self): - super(AkandaApplianceVMTest, self).setUp() - # ensure the devstack spawned router VM becomes active before starting - # to run any test cases. this in itself is a test that devstack - # produced a functional router. - self.assert_router_is_active() - - def test_appliance_is_alive(self): - self.assertTrue( - self.ak_client.is_alive( - host=self.management_address, - port=self.config['appliance_api_port'], - ), - ) diff --git a/akanda/rug/test/unit/api/test_configuration.py b/akanda/rug/test/unit/api/test_configuration.py old mode 100644 new mode 100755 index 27f282ce..fea273c6 --- a/akanda/rug/test/unit/api/test_configuration.py +++ b/akanda/rug/test/unit/api/test_configuration.py @@ -54,7 +54,7 @@ fake_int_port = FakeModel( fixed_ips=[FakeModel('', ip_address='192.168.1.1', subnet_id='s1')], device_id='i-i-i-i') -fake_vm_port = FakeModel( +fake_instance_port = FakeModel( '4', name='foo', mac_address='aa:aa:aa:aa:aa:bb', @@ -63,7 +63,7 @@ fake_vm_port = FakeModel( first_v4='192.168.1.2', device_id='v-v-v-v') -fake_vm_mgt_port = FakeModel( +fake_instance_mgt_port = FakeModel( '4', name='AKANDA:MGT:foo', mac_address='aa:aa:aa:aa:aa:bb', @@ -72,7 +72,7 @@ fake_vm_mgt_port = FakeModel( first_v4='192.168.1.2', device_id='v-v-v-v') -fake_vm_vrrp_port = FakeModel( +fake_instance_vrrp_port = FakeModel( '4', name='AKANDA:VRRP:foo', mac_address='aa:aa:aa:aa:aa:bb', @@ -283,7 +283,7 @@ class TestAkandaClient(unittest.TestCase): 'dhcp', 'ra', subnets_dict={fake_subnet.id: fake_subnet}, - network_ports=[fake_vm_port]) + network_ports=[fake_instance_port]) expected = { 'interface': interface, @@ -376,7 +376,8 @@ class TestAkandaClient(unittest.TestCase): def test_allocation_config_vrrp(self): subnets_dict = {fake_subnet.id: fake_subnet} self.assertEqual( - conf_mod._allocation_config([fake_vm_vrrp_port], subnets_dict), + conf_mod._allocation_config([fake_instance_vrrp_port], + subnets_dict), [] ) @@ -389,7 +390,8 @@ class TestAkandaClient(unittest.TestCase): 'device_id': 'v-v-v-v'} ] self.assertEqual( - conf_mod._allocation_config([fake_vm_mgt_port], subnets_dict), + conf_mod._allocation_config([fake_instance_mgt_port], + subnets_dict), expected ) diff --git a/akanda/rug/test/unit/test_vmmanager.py b/akanda/rug/test/unit/test_instance_manager.py old mode 100644 new mode 100755 similarity index 67% rename from akanda/rug/test/unit/test_vmmanager.py rename to akanda/rug/test/unit/test_instance_manager.py index 12689395..c6d32556 --- a/akanda/rug/test/unit/test_vmmanager.py +++ b/akanda/rug/test/unit/test_instance_manager.py @@ -20,11 
+20,11 @@ import mock import unittest2 as unittest from datetime import datetime, timedelta -from akanda.rug import vm_manager +from akanda.rug import instance_manager from akanda.rug.api import neutron, nova -vm_manager.RETRY_DELAY = 0.4 -vm_manager.BOOT_WAIT = 1 +instance_manager.RETRY_DELAY = 0.4 +instance_manager.BOOT_WAIT = 1 LOG = logging.getLogger(__name__) @@ -60,12 +60,12 @@ fake_add_port = FakeModel( fixed_ips=[FakeModel('', ip_address='8.8.8.8', subnet_id='s3')]) -class TestVmManager(unittest.TestCase): +class TestInstanceManager(unittest.TestCase): def setUp(self): self.ctx = mock.Mock() self.neutron = self.ctx.neutron - self.conf = mock.patch.object(vm_manager.cfg, 'CONF').start() + self.conf = mock.patch.object(instance_manager.cfg, 'CONF').start() self.conf.boot_timeout = 1 self.conf.akanda_mgt_service_port = 5000 self.conf.max_retries = 3 @@ -73,7 +73,7 @@ class TestVmManager(unittest.TestCase): self.log = mock.Mock() self.update_state_p = mock.patch.object( - vm_manager.VmManager, + instance_manager.InstanceManager, 'update_state' ) @@ -88,31 +88,34 @@ class TestVmManager(unittest.TestCase): ) self.mock_update_state = self.update_state_p.start() - self.vm_mgr = vm_manager.VmManager('the_id', 'tenant_id', - self.log, self.ctx) - self.vm_mgr.instance_info = self.INSTANCE_INFO - mock.patch.object(self.vm_mgr, '_ensure_cache', mock.Mock) + self.instance_mgr = instance_manager.InstanceManager('the_id', + 'tenant_id', + self.log, + self.ctx) + self.instance_mgr.instance_info = self.INSTANCE_INFO + mock.patch.object(self.instance_mgr, '_ensure_cache', mock.Mock) self.next_state = None def next_state(*args, **kwargs): if self.next_state: - self.vm_mgr.state = self.next_state - return self.vm_mgr.state + self.instance_mgr.state = self.next_state + return self.instance_mgr.state self.mock_update_state.side_effect = next_state - @mock.patch('akanda.rug.vm_manager.router_api') + @mock.patch('akanda.rug.instance_manager.router_api') def test_update_state_is_alive(self, router_api): self.update_state_p.stop() router_api.is_alive.return_value = True - self.assertEqual(self.vm_mgr.update_state(self.ctx), vm_manager.UP) + self.assertEqual(self.instance_mgr.update_state(self.ctx), + instance_manager.UP) router_api.is_alive.assert_called_once_with( self.INSTANCE_INFO.management_address, self.conf.akanda_mgt_service_port) @mock.patch('time.sleep', lambda *a: None) - @mock.patch('akanda.rug.vm_manager.router_api') + @mock.patch('akanda.rug.instance_manager.router_api') @mock.patch('akanda.rug.api.configuration.build_config') def test_router_status_sync(self, config, router_api): self.update_state_p.stop() @@ -125,27 +128,28 @@ class TestVmManager(unittest.TestCase): n = self.neutron # Router state should start down - self.vm_mgr.update_state(self.ctx) + self.instance_mgr.update_state(self.ctx) n.update_router_status.assert_called_once_with('R1', 'DOWN') n.update_router_status.reset_mock() # Bring the router to UP with `is_alive = True` router_api.is_alive.return_value = True - self.vm_mgr.update_state(self.ctx) + self.instance_mgr.update_state(self.ctx) n.update_router_status.assert_called_once_with('R1', 'BUILD') n.update_router_status.reset_mock() # Configure the router and make sure state is synchronized as ACTIVE - with mock.patch.object(self.vm_mgr, '_verify_interfaces') as verify: + with mock.patch.object(self.instance_mgr, + '_verify_interfaces') as verify: verify.return_value = True - self.vm_mgr.last_boot = datetime.utcnow() - self.vm_mgr.configure(self.ctx) - 
self.vm_mgr.update_state(self.ctx) + self.instance_mgr.last_boot = datetime.utcnow() + self.instance_mgr.configure(self.ctx) + self.instance_mgr.update_state(self.ctx) n.update_router_status.assert_called_once_with('R1', 'ACTIVE') n.update_router_status.reset_mock() @mock.patch('time.sleep', lambda *a: None) - @mock.patch('akanda.rug.vm_manager.router_api') + @mock.patch('akanda.rug.instance_manager.router_api') @mock.patch('akanda.rug.api.configuration.build_config') def test_router_status_caching(self, config, router_api): self.update_state_p.stop() @@ -158,26 +162,26 @@ class TestVmManager(unittest.TestCase): n = self.neutron # Router state should start down - self.vm_mgr.update_state(self.ctx) + self.instance_mgr.update_state(self.ctx) n.update_router_status.assert_called_once_with('R1', 'DOWN') n.update_router_status.reset_mock() # Router state should not be updated in neutron if it didn't change - self.vm_mgr.update_state(self.ctx) + self.instance_mgr.update_state(self.ctx) self.assertEqual(n.update_router_status.call_count, 0) @mock.patch('time.sleep') - @mock.patch('akanda.rug.vm_manager.router_api') + @mock.patch('akanda.rug.instance_manager.router_api') def test_boot_timeout_still_booting(self, router_api, sleep): now = datetime.utcnow() self.INSTANCE_INFO.last_boot = now - self.vm_mgr.last_boot = now + self.instance_mgr.last_boot = now self.update_state_p.stop() router_api.is_alive.return_value = False self.assertEqual( - self.vm_mgr.update_state(self.ctx), - vm_manager.BOOTING + self.instance_mgr.update_state(self.ctx), + instance_manager.BOOTING ) router_api.is_alive.assert_has_calls([ mock.call(self.INSTANCE_INFO.management_address, 5000), @@ -186,16 +190,16 @@ class TestVmManager(unittest.TestCase): ]) @mock.patch('time.sleep') - @mock.patch('akanda.rug.vm_manager.router_api') + @mock.patch('akanda.rug.instance_manager.router_api') def test_boot_timeout_error(self, router_api, sleep): - self.vm_mgr.state = vm_manager.ERROR - self.vm_mgr.last_boot = datetime.utcnow() + self.instance_mgr.state = instance_manager.ERROR + self.instance_mgr.last_boot = datetime.utcnow() self.update_state_p.stop() router_api.is_alive.return_value = False self.assertEqual( - self.vm_mgr.update_state(self.ctx), - vm_manager.ERROR, + self.instance_mgr.update_state(self.ctx), + instance_manager.ERROR, ) router_api.is_alive.assert_has_calls([ mock.call(self.INSTANCE_INFO.management_address, 5000), @@ -204,16 +208,16 @@ class TestVmManager(unittest.TestCase): ]) @mock.patch('time.sleep') - @mock.patch('akanda.rug.vm_manager.router_api') + @mock.patch('akanda.rug.instance_manager.router_api') def test_boot_timeout_error_no_last_boot(self, router_api, sleep): - self.vm_mgr.state = vm_manager.ERROR - self.vm_mgr.last_boot = None + self.instance_mgr.state = instance_manager.ERROR + self.instance_mgr.last_boot = None self.update_state_p.stop() router_api.is_alive.return_value = False self.assertEqual( - self.vm_mgr.update_state(self.ctx), - vm_manager.ERROR, + self.instance_mgr.update_state(self.ctx), + instance_manager.ERROR, ) router_api.is_alive.assert_has_calls([ mock.call(self.INSTANCE_INFO.management_address, 5000), @@ -222,30 +226,32 @@ class TestVmManager(unittest.TestCase): ]) @mock.patch('time.sleep') - @mock.patch('akanda.rug.vm_manager.router_api') + @mock.patch('akanda.rug.instance_manager.router_api') def test_boot_timeout(self, router_api, sleep): - self.vm_mgr.last_boot = datetime.utcnow() - timedelta(minutes=5) + self.instance_mgr.last_boot = datetime.utcnow() - timedelta(minutes=5) 
self.update_state_p.stop() router_api.is_alive.return_value = False - self.assertEqual(self.vm_mgr.update_state(self.ctx), vm_manager.DOWN) + self.assertEqual(self.instance_mgr.update_state(self.ctx), + instance_manager.DOWN) router_api.is_alive.assert_has_calls([ mock.call(self.INSTANCE_INFO.management_address, 5000), mock.call(self.INSTANCE_INFO.management_address, 5000), mock.call(self.INSTANCE_INFO.management_address, 5000), ]) - self.vm_mgr.log.info.assert_called_once_with( + self.instance_mgr.log.info.assert_called_once_with( mock.ANY, self.conf.boot_timeout ) @mock.patch('time.sleep') - @mock.patch('akanda.rug.vm_manager.router_api') + @mock.patch('akanda.rug.instance_manager.router_api') def test_update_state_is_down(self, router_api, sleep): self.update_state_p.stop() router_api.is_alive.return_value = False - self.assertEqual(self.vm_mgr.update_state(self.ctx), vm_manager.DOWN) + self.assertEqual(self.instance_mgr.update_state(self.ctx), + instance_manager.DOWN) router_api.is_alive.assert_has_calls([ mock.call(self.INSTANCE_INFO.management_address, 5000), mock.call(self.INSTANCE_INFO.management_address, 5000), @@ -253,13 +259,13 @@ class TestVmManager(unittest.TestCase): ]) @mock.patch('time.sleep') - @mock.patch('akanda.rug.vm_manager.router_api') + @mock.patch('akanda.rug.instance_manager.router_api') def test_update_state_retry_delay(self, router_api, sleep): self.update_state_p.stop() router_api.is_alive.side_effect = [False, False, True] max_retries = 5 self.conf.max_retries = max_retries - self.vm_mgr.update_state(self.ctx, silent=False) + self.instance_mgr.update_state(self.ctx, silent=False) self.assertEqual(sleep.call_count, 2) self.log.debug.assert_has_calls([ mock.call('Alive check failed. Attempt %d of %d', 0, max_retries), @@ -268,7 +274,7 @@ class TestVmManager(unittest.TestCase): @mock.patch('time.sleep') def test_boot_success(self, sleep): - self.next_state = vm_manager.UP + self.next_state = instance_manager.UP rtr = mock.sentinel.router self.ctx.neutron.get_router_detail.return_value = rtr rtr.id = 'ROUTER1' @@ -276,15 +282,15 @@ class TestVmManager(unittest.TestCase): rtr.external_port = None rtr.ports = mock.MagicMock() rtr.ports.__iter__.return_value = [] - self.vm_mgr.boot(self.ctx, 'GLANCE-IMAGE-123') - self.assertEqual(self.vm_mgr.state, vm_manager.BOOTING) # async + self.instance_mgr.boot(self.ctx, 'GLANCE-IMAGE-123') + self.assertEqual(self.instance_mgr.state, instance_manager.BOOTING) self.ctx.nova_client.boot_instance.assert_called_once_with( self.INSTANCE_INFO, rtr.id, 'GLANCE-IMAGE-123', mock.ANY) - self.assertEqual(1, self.vm_mgr.attempts) + self.assertEqual(1, self.instance_mgr.attempts) @mock.patch('time.sleep') def test_boot_fail(self, sleep): - self.next_state = vm_manager.DOWN + self.next_state = instance_manager.DOWN rtr = mock.sentinel.router self.ctx.neutron.get_router_detail.return_value = rtr rtr.id = 'ROUTER1' @@ -292,11 +298,11 @@ class TestVmManager(unittest.TestCase): rtr.external_port = None rtr.ports = mock.MagicMock() rtr.ports.__iter__.return_value = [] - self.vm_mgr.boot(self.ctx, 'GLANCE-IMAGE-123') - self.assertEqual(self.vm_mgr.state, vm_manager.BOOTING) + self.instance_mgr.boot(self.ctx, 'GLANCE-IMAGE-123') + self.assertEqual(self.instance_mgr.state, instance_manager.BOOTING) self.ctx.nova_client.boot_instance.assert_called_once_with( self.INSTANCE_INFO, rtr.id, 'GLANCE-IMAGE-123', mock.ANY) - self.assertEqual(1, self.vm_mgr.attempts) + self.assertEqual(1, self.instance_mgr.attempts) @mock.patch('time.sleep') def 
test_boot_exception(self, sleep): @@ -309,15 +315,15 @@ class TestVmManager(unittest.TestCase): rtr.ports.__iter__.return_value = [] self.ctx.nova_client.boot_instance.side_effect = RuntimeError - self.vm_mgr.boot(self.ctx, 'GLANCE-IMAGE-123') - self.assertEqual(self.vm_mgr.state, vm_manager.DOWN) + self.instance_mgr.boot(self.ctx, 'GLANCE-IMAGE-123') + self.assertEqual(self.instance_mgr.state, instance_manager.DOWN) self.ctx.nova_client.boot_instance.assert_called_once_with( self.INSTANCE_INFO, rtr.id, 'GLANCE-IMAGE-123', mock.ANY) - self.assertEqual(1, self.vm_mgr.attempts) + self.assertEqual(1, self.instance_mgr.attempts) @mock.patch('time.sleep') def test_boot_with_port_cleanup(self, sleep): - self.next_state = vm_manager.UP + self.next_state = instance_manager.UP management_port = mock.Mock(id='mgmt', device_id='INSTANCE1') external_port = mock.Mock(id='ext', device_id='INSTANCE1') @@ -334,8 +340,8 @@ class TestVmManager(unittest.TestCase): rtr.ports = mock.MagicMock() rtr.ports.__iter__.return_value = [management_port, external_port, internal_port] - self.vm_mgr.boot(self.ctx, 'GLANCE-IMAGE-123') - self.assertEqual(self.vm_mgr.state, vm_manager.BOOTING) # async + self.instance_mgr.boot(self.ctx, 'GLANCE-IMAGE-123') + self.assertEqual(self.instance_mgr.state, instance_manager.BOOTING) self.ctx.nova_client.boot_instance.assert_called_once_with( self.INSTANCE_INFO, rtr.id, @@ -345,98 +351,98 @@ class TestVmManager(unittest.TestCase): def test_boot_check_up(self): with mock.patch.object( - vm_manager.VmManager, + instance_manager.InstanceManager, 'update_state' ) as update_state: with mock.patch.object( - vm_manager.VmManager, + instance_manager.InstanceManager, 'configure' ) as configure: - update_state.return_value = vm_manager.UP + update_state.return_value = instance_manager.UP configure.side_effect = lambda *a, **kw: setattr( - self.vm_mgr, + self.instance_mgr, 'state', - vm_manager.CONFIGURED + instance_manager.CONFIGURED ) - assert self.vm_mgr.check_boot(self.ctx) is True + assert self.instance_mgr.check_boot(self.ctx) is True update_state.assert_called_once_with(self.ctx, silent=True) configure.assert_called_once_with( self.ctx, - vm_manager.BOOTING, + instance_manager.BOOTING, attempts=1 ) def test_boot_check_configured(self): with mock.patch.object( - vm_manager.VmManager, + instance_manager.InstanceManager, 'update_state' ) as update_state: with mock.patch.object( - vm_manager.VmManager, + instance_manager.InstanceManager, 'configure' ) as configure: - update_state.return_value = vm_manager.CONFIGURED + update_state.return_value = instance_manager.CONFIGURED configure.side_effect = lambda *a, **kw: setattr( - self.vm_mgr, + self.instance_mgr, 'state', - vm_manager.CONFIGURED + instance_manager.CONFIGURED ) - assert self.vm_mgr.check_boot(self.ctx) is True + assert self.instance_mgr.check_boot(self.ctx) is True update_state.assert_called_once_with(self.ctx, silent=True) configure.assert_called_once_with( self.ctx, - vm_manager.BOOTING, + instance_manager.BOOTING, attempts=1 ) def test_boot_check_still_booting(self): with mock.patch.object( - vm_manager.VmManager, + instance_manager.InstanceManager, 'update_state' ) as update_state: - update_state.return_value = vm_manager.BOOTING - assert self.vm_mgr.check_boot(self.ctx) is False + update_state.return_value = instance_manager.BOOTING + assert self.instance_mgr.check_boot(self.ctx) is False update_state.assert_called_once_with(self.ctx, silent=True) def test_boot_check_unsuccessful_initial_config_update(self): with 
mock.patch.object( - vm_manager.VmManager, + instance_manager.InstanceManager, 'update_state' ) as update_state: with mock.patch.object( - vm_manager.VmManager, + instance_manager.InstanceManager, 'configure' ) as configure: - update_state.return_value = vm_manager.CONFIGURED + update_state.return_value = instance_manager.CONFIGURED configure.side_effect = lambda *a, **kw: setattr( - self.vm_mgr, + self.instance_mgr, 'state', - vm_manager.BOOTING + instance_manager.BOOTING ) - assert self.vm_mgr.check_boot(self.ctx) is False + assert self.instance_mgr.check_boot(self.ctx) is False update_state.assert_called_once_with(self.ctx, silent=True) configure.assert_called_once_with( self.ctx, - vm_manager.BOOTING, + instance_manager.BOOTING, attempts=1 ) @mock.patch('time.sleep') def test_stop_success(self, sleep): - self.vm_mgr.state = vm_manager.UP + self.instance_mgr.state = instance_manager.UP self.ctx.nova_client.get_instance_by_id.return_value = None - self.vm_mgr.stop(self.ctx) + self.instance_mgr.stop(self.ctx) self.ctx.nova_client.destroy_instance.assert_called_once_with( self.INSTANCE_INFO ) - self.assertEqual(self.vm_mgr.state, vm_manager.DOWN) + self.assertEqual(self.instance_mgr.state, instance_manager.DOWN) @mock.patch('time.sleep') def test_stop_fail(self, sleep): - self.vm_mgr.state = vm_manager.UP + self.instance_mgr.state = instance_manager.UP self.ctx.nova_client.get_router_instance_status.return_value = 'UP' - self.vm_mgr.stop(self.ctx) - self.assertEqual(self.vm_mgr.state, vm_manager.UP) + self.instance_mgr.stop(self.ctx) + self.assertEqual(self.instance_mgr.state, instance_manager.UP) self.ctx.nova_client.destroy_instance.assert_called_once_with( self.INSTANCE_INFO ) @@ -444,13 +450,13 @@ class TestVmManager(unittest.TestCase): @mock.patch('time.sleep') def test_stop_router_already_deleted_from_neutron(self, sleep): - self.vm_mgr.state = vm_manager.GONE - self.vm_mgr.stop(self.ctx) + self.instance_mgr.state = instance_manager.GONE + self.instance_mgr.stop(self.ctx) self.ctx.nova_client.destroy_instance.assert_called_once_with( self.INSTANCE_INFO) - self.assertEqual(self.vm_mgr.state, vm_manager.GONE) + self.assertEqual(self.instance_mgr.state, instance_manager.GONE) - @mock.patch('akanda.rug.vm_manager.router_api') + @mock.patch('akanda.rug.instance_manager.router_api') @mock.patch('akanda.rug.api.configuration.build_config') def test_configure_success(self, config, router_api): rtr = mock.sentinel.router @@ -459,9 +465,10 @@ class TestVmManager(unittest.TestCase): config.return_value = 'fake_config' router_api.get_interfaces.return_value = [] - with mock.patch.object(self.vm_mgr, '_verify_interfaces') as verify: + with mock.patch.object(self.instance_mgr, + '_verify_interfaces') as verify: verify.return_value = True - self.vm_mgr.configure(self.ctx) + self.instance_mgr.configure(self.ctx) verify.assert_called_once_with(rtr, []) config.assert_called_once_with( @@ -469,27 +476,29 @@ class TestVmManager(unittest.TestCase): router_api.update_config.assert_called_once_with( self.INSTANCE_INFO.management_address, 5000, 'fake_config', ) - self.assertEqual(self.vm_mgr.state, vm_manager.CONFIGURED) + self.assertEqual(self.instance_mgr.state, + instance_manager.CONFIGURED) - @mock.patch('akanda.rug.vm_manager.router_api') + @mock.patch('akanda.rug.instance_manager.router_api') def test_configure_mismatched_interfaces(self, router_api): rtr = mock.sentinel.router self.neutron.get_router_detail.return_value = rtr - with mock.patch.object(self.vm_mgr, '_verify_interfaces') as verify: + 
with mock.patch.object(self.instance_mgr, + '_verify_interfaces') as verify: verify.return_value = False - self.vm_mgr.configure(self.ctx) + self.instance_mgr.configure(self.ctx) interfaces = router_api.get_interfaces.return_value verify.assert_called_once_with(rtr, interfaces) self.assertFalse(router_api.update_config.called) - self.assertEqual(self.vm_mgr.state, vm_manager.REPLUG) + self.assertEqual(self.instance_mgr.state, instance_manager.REPLUG) @mock.patch('time.sleep') - @mock.patch('akanda.rug.vm_manager.router_api') + @mock.patch('akanda.rug.instance_manager.router_api') @mock.patch('akanda.rug.api.configuration.build_config') def test_configure_failure(self, config, router_api, sleep): rtr = {'id': 'the_id'} @@ -499,9 +508,10 @@ class TestVmManager(unittest.TestCase): router_api.update_config.side_effect = Exception config.return_value = 'fake_config' - with mock.patch.object(self.vm_mgr, '_verify_interfaces') as verify: + with mock.patch.object(self.instance_mgr, + '_verify_interfaces') as verify: verify.return_value = True - self.vm_mgr.configure(self.ctx) + self.instance_mgr.configure(self.ctx) interfaces = router_api.get_interfaces.return_value verify.assert_called_once_with(rtr, interfaces) @@ -513,19 +523,19 @@ class TestVmManager(unittest.TestCase): 'fake_config') for i in range(0, 2)] router_api.update_config.assert_has_calls(expected_calls) - self.assertEqual(self.vm_mgr.state, vm_manager.RESTART) + self.assertEqual(self.instance_mgr.state, instance_manager.RESTART) @mock.patch('time.sleep', lambda *a: None) - @mock.patch('akanda.rug.vm_manager.router_api') + @mock.patch('akanda.rug.instance_manager.router_api') def test_replug_add_new_port_success(self, router_api): - self.vm_mgr.state = vm_manager.REPLUG + self.instance_mgr.state = instance_manager.REPLUG fake_router = mock.Mock() fake_router.id = 'fake_router_id' fake_router.ports = [fake_ext_port, fake_int_port, fake_add_port] self.neutron.get_router_detail.return_value = fake_router - self.vm_mgr.router_obj = fake_router + self.instance_mgr.router_obj = fake_router router_api.get_interfaces.return_value = [ {'lladdr': fake_mgt_port.mac_address}, {'lladdr': fake_ext_port.mac_address}, @@ -539,30 +549,31 @@ class TestVmManager(unittest.TestCase): fake_new_port = mock.Mock(id='fake_new_port_id') self.ctx.neutron.create_vrrp_port.return_value = fake_new_port - with mock.patch.object(self.vm_mgr, '_verify_interfaces') as verify: + with mock.patch.object(self.instance_mgr, + '_verify_interfaces') as verify: verify.return_value = True # the hotplug worked! 
- self.vm_mgr.replug(self.ctx) + self.instance_mgr.replug(self.ctx) self.ctx.neutron.create_vrrp_port.assert_called_with( fake_router.id, 'additional-net' ) - self.assertEqual(self.vm_mgr.state, vm_manager.REPLUG) + self.assertEqual(self.instance_mgr.state, instance_manager.REPLUG) fake_instance.interface_attach.assert_called_once_with( fake_new_port.id, None, None ) self.assertIn(fake_new_port, self.INSTANCE_INFO.ports) @mock.patch('time.sleep', lambda *a: None) - @mock.patch('akanda.rug.vm_manager.router_api') + @mock.patch('akanda.rug.instance_manager.router_api') def test_replug_add_new_port_failure(self, router_api): - self.vm_mgr.state = vm_manager.REPLUG + self.instance_mgr.state = instance_manager.REPLUG fake_router = mock.Mock() fake_router.id = 'fake_router_id' fake_router.ports = [fake_ext_port, fake_int_port, fake_add_port] self.neutron.get_router_detail.return_value = fake_router - self.vm_mgr.router_obj = fake_router + self.instance_mgr.router_obj = fake_router router_api.get_interfaces.return_value = [ {'lladdr': fake_mgt_port.mac_address}, {'lladdr': fake_ext_port.mac_address}, @@ -577,19 +588,20 @@ class TestVmManager(unittest.TestCase): fake_new_port = mock.Mock(id='fake_new_port_id') self.ctx.neutron.create_vrrp_port.return_value = fake_new_port - with mock.patch.object(self.vm_mgr, '_verify_interfaces') as verify: + with mock.patch.object(self.instance_mgr, + '_verify_interfaces') as verify: verify.return_value = False # The hotplug didn't work! - self.vm_mgr.replug(self.ctx) - self.assertEqual(self.vm_mgr.state, vm_manager.RESTART) + self.instance_mgr.replug(self.ctx) + self.assertEqual(self.instance_mgr.state, instance_manager.RESTART) fake_instance.interface_attach.assert_called_once_with( fake_new_port.id, None, None ) @mock.patch('time.sleep', lambda *a: None) - @mock.patch('akanda.rug.vm_manager.router_api') + @mock.patch('akanda.rug.instance_manager.router_api') def test_replug_remove_port_success(self, router_api): - self.vm_mgr.state = vm_manager.REPLUG + self.instance_mgr.state = instance_manager.REPLUG fake_router = mock.Mock() fake_router.id = 'fake_router_id' @@ -598,7 +610,7 @@ class TestVmManager(unittest.TestCase): fake_router.ports = [fake_mgt_port, fake_int_port] self.neutron.get_router_detail.return_value = fake_router - self.vm_mgr.router_obj = fake_router + self.instance_mgr.router_obj = fake_router router_api.get_interfaces.return_value = [ {'lladdr': fake_mgt_port.mac_address}, {'lladdr': fake_ext_port.mac_address}, @@ -610,19 +622,20 @@ class TestVmManager(unittest.TestCase): self.ctx.nova_client.get_instance_by_id = mock.Mock( return_value=fake_instance) - with mock.patch.object(self.vm_mgr, '_verify_interfaces') as verify: + with mock.patch.object(self.instance_mgr, + '_verify_interfaces') as verify: verify.return_value = True # the unplug worked! 
- self.vm_mgr.replug(self.ctx) - self.assertEqual(self.vm_mgr.state, vm_manager.REPLUG) + self.instance_mgr.replug(self.ctx) + self.assertEqual(self.instance_mgr.state, instance_manager.REPLUG) fake_instance.interface_detach.assert_called_once_with( fake_ext_port.id ) self.assertNotIn(fake_ext_port, self.INSTANCE_INFO.ports) @mock.patch('time.sleep', lambda *a: None) - @mock.patch('akanda.rug.vm_manager.router_api') + @mock.patch('akanda.rug.instance_manager.router_api') def test_replug_remove_port_failure(self, router_api): - self.vm_mgr.state = vm_manager.REPLUG + self.instance_mgr.state = instance_manager.REPLUG fake_router = mock.Mock() fake_router.id = 'fake_router_id' @@ -631,7 +644,7 @@ class TestVmManager(unittest.TestCase): fake_router.ports = [fake_mgt_port, fake_int_port] self.neutron.get_router_detail.return_value = fake_router - self.vm_mgr.router_obj = fake_router + self.instance_mgr.router_obj = fake_router router_api.get_interfaces.return_value = [ {'lladdr': fake_mgt_port.mac_address}, {'lladdr': fake_ext_port.mac_address}, @@ -643,10 +656,12 @@ class TestVmManager(unittest.TestCase): self.ctx.nova_client.get_instance_by_id = mock.Mock( return_value=fake_instance) - with mock.patch.object(self.vm_mgr, '_verify_interfaces') as verify: + with mock.patch.object(self.instance_mgr, + '_verify_interfaces') as verify: verify.return_value = False # the unplug failed! - self.vm_mgr.replug(self.ctx) - self.assertEquals(self.vm_mgr.state, vm_manager.RESTART) + self.instance_mgr.replug(self.ctx) + self.assertEquals(self.instance_mgr.state, + instance_manager.RESTART) fake_instance.interface_detach.assert_called_once_with( fake_ext_port.id ) @@ -666,7 +681,7 @@ class TestVmManager(unittest.TestCase): {'lladdr': fake_int_port.mac_address} ] - self.assertTrue(self.vm_mgr._verify_interfaces(rtr, interfaces)) + self.assertTrue(self.instance_mgr._verify_interfaces(rtr, interfaces)) def test_verify_interfaces_with_cleared_gateway(self): rtr = mock.Mock() @@ -683,58 +698,59 @@ class TestVmManager(unittest.TestCase): {'lladdr': 'a:a:a:a'} ] - self.assertFalse(self.vm_mgr._verify_interfaces(rtr, interfaces)) + self.assertFalse(self.instance_mgr._verify_interfaces(rtr, interfaces)) def test_ensure_provider_ports(self): rtr = mock.Mock() rtr.external_port = None - self.assertEqual(self.vm_mgr._ensure_provider_ports(rtr, self.ctx), + self.assertEqual(self.instance_mgr._ensure_provider_ports(rtr, + self.ctx), rtr) self.neutron.create_router_external_port.assert_called_once_with(rtr) def test_set_error_when_gone(self): - self.vm_mgr.state = vm_manager.GONE + self.instance_mgr.state = instance_manager.GONE rtr = mock.sentinel.router rtr.id = 'R1' self.ctx.neutron.get_router_detail.return_value = rtr - self.vm_mgr.set_error(self.ctx) + self.instance_mgr.set_error(self.ctx) self.neutron.update_router_status.assert_called_once_with('R1', 'ERROR') - self.assertEqual(vm_manager.GONE, self.vm_mgr.state) + self.assertEqual(instance_manager.GONE, self.instance_mgr.state) def test_set_error_when_booting(self): - self.vm_mgr.state = vm_manager.BOOTING + self.instance_mgr.state = instance_manager.BOOTING rtr = mock.sentinel.router rtr.id = 'R1' self.ctx.neutron.get_router_detail.return_value = rtr - self.vm_mgr.set_error(self.ctx) + self.instance_mgr.set_error(self.ctx) self.neutron.update_router_status.assert_called_once_with('R1', 'ERROR') - self.assertEqual(vm_manager.ERROR, self.vm_mgr.state) + self.assertEqual(instance_manager.ERROR, self.instance_mgr.state) def test_clear_error_when_gone(self): - 
self.vm_mgr.state = vm_manager.GONE + self.instance_mgr.state = instance_manager.GONE rtr = mock.sentinel.router rtr.id = 'R1' self.ctx.neutron.get_router_detail.return_value = rtr - self.vm_mgr.clear_error(self.ctx) + self.instance_mgr.clear_error(self.ctx) self.neutron.update_router_status.assert_called_once_with('R1', 'ERROR') - self.assertEqual(vm_manager.GONE, self.vm_mgr.state) + self.assertEqual(instance_manager.GONE, self.instance_mgr.state) def test_set_error_when_error(self): - self.vm_mgr.state = vm_manager.ERROR + self.instance_mgr.state = instance_manager.ERROR rtr = mock.sentinel.router rtr.id = 'R1' self.ctx.neutron.get_router_detail.return_value = rtr - self.vm_mgr.clear_error(self.ctx) + self.instance_mgr.clear_error(self.ctx) self.neutron.update_router_status.assert_called_once_with('R1', 'DOWN') - self.assertEqual(vm_manager.DOWN, self.vm_mgr.state) + self.assertEqual(instance_manager.DOWN, self.instance_mgr.state) @mock.patch('time.sleep') def test_boot_success_after_error(self, sleep): - self.next_state = vm_manager.UP + self.next_state = instance_manager.UP rtr = mock.sentinel.router self.ctx.neutron.get_router_detail.return_value = rtr rtr.id = 'ROUTER1' @@ -742,29 +758,29 @@ class TestVmManager(unittest.TestCase): rtr.external_port = None rtr.ports = mock.MagicMock() rtr.ports.__iter__.return_value = [] - self.vm_mgr.set_error(self.ctx) - self.vm_mgr.boot(self.ctx, 'GLANCE-IMAGE-123') - self.assertEqual(self.vm_mgr.state, vm_manager.BOOTING) # async + self.instance_mgr.set_error(self.ctx) + self.instance_mgr.boot(self.ctx, 'GLANCE-IMAGE-123') + self.assertEqual(self.instance_mgr.state, instance_manager.BOOTING) self.ctx.nova_client.boot_instance.assert_called_once_with( self.INSTANCE_INFO, rtr.id, 'GLANCE-IMAGE-123', mock.ANY) def test_error_cooldown(self): self.conf.error_state_cooldown = 30 - self.assertIsNone(self.vm_mgr.last_error) - self.assertFalse(self.vm_mgr.error_cooldown) + self.assertIsNone(self.instance_mgr.last_error) + self.assertFalse(self.instance_mgr.error_cooldown) - self.vm_mgr.state = vm_manager.ERROR - self.vm_mgr.last_error = datetime.utcnow() - timedelta(seconds=1) - self.assertTrue(self.vm_mgr.error_cooldown) + self.instance_mgr.state = instance_manager.ERROR + self.instance_mgr.last_error = datetime.utcnow() - timedelta(seconds=1) + self.assertTrue(self.instance_mgr.error_cooldown) - self.vm_mgr.last_error = datetime.utcnow() - timedelta(minutes=5) - self.assertFalse(self.vm_mgr.error_cooldown) + self.instance_mgr.last_error = datetime.utcnow() - timedelta(minutes=5) + self.assertFalse(self.instance_mgr.error_cooldown) class TestBootAttemptCounter(unittest.TestCase): def setUp(self): - self.c = vm_manager.BootAttemptCounter() + self.c = instance_manager.BootAttemptCounter() def test_start(self): self.c.start() @@ -781,39 +797,39 @@ class TestBootAttemptCounter(unittest.TestCase): class TestSynchronizeRouterStatus(unittest.TestCase): def setUp(self): - self.test_vm_manager = mock.Mock(spec=('router_obj', - '_last_synced_status', - 'state')) + self.test_instance_manager = mock.Mock(spec=('router_obj', + '_last_synced_status', + 'state')) self.test_context = mock.Mock() def test_router_is_deleted(self): - self.test_vm_manager.router_obj = None - v = vm_manager.synchronize_router_status( - lambda vm_manager_inst, ctx, silent: 1) - self.assertEqual(v(self.test_vm_manager, {}), 1) + self.test_instance_manager.router_obj = None + v = instance_manager.synchronize_router_status( + lambda instance_manager_inst, ctx, silent: 1) + 
self.assertEqual(v(self.test_instance_manager, {}), 1) def test_router_status_changed(self): - self.test_vm_manager.router_obj = mock.Mock(id='ABC123') - self.test_vm_manager._last_synced_status = neutron.STATUS_ACTIVE - self.test_vm_manager.state = vm_manager.DOWN - v = vm_manager.synchronize_router_status( - lambda vm_manager_inst, ctx, silent: 1) - self.assertEqual(v(self.test_vm_manager, self.test_context), 1) + self.test_instance_manager.router_obj = mock.Mock(id='ABC123') + self.test_instance_manager._last_synced_status = neutron.STATUS_ACTIVE + self.test_instance_manager.state = instance_manager.DOWN + v = instance_manager.synchronize_router_status( + lambda instance_manager_inst, ctx, silent: 1) + self.assertEqual(v(self.test_instance_manager, self.test_context), 1) self.test_context.neutron.update_router_status.\ assert_called_once_with( 'ABC123', neutron.STATUS_DOWN) - self.assertEqual(self.test_vm_manager._last_synced_status, + self.assertEqual(self.test_instance_manager._last_synced_status, neutron.STATUS_DOWN) def test_router_status_same(self): - self.test_vm_manager.router_obj = mock.Mock(id='ABC123') - self.test_vm_manager._last_synced_status = neutron.STATUS_ACTIVE - self.test_vm_manager.state = vm_manager.CONFIGURED - v = vm_manager.synchronize_router_status( - lambda vm_manager_inst, ctx, silent: 1) - self.assertEqual(v(self.test_vm_manager, self.test_context), 1) + self.test_instance_manager.router_obj = mock.Mock(id='ABC123') + self.test_instance_manager._last_synced_status = neutron.STATUS_ACTIVE + self.test_instance_manager.state = instance_manager.CONFIGURED + v = instance_manager.synchronize_router_status( + lambda instance_manager_inst, ctx, silent: 1) + self.assertEqual(v(self.test_instance_manager, self.test_context), 1) self.assertEqual( self.test_context.neutron.update_router_status.call_count, 0) - self.assertEqual( - self.test_vm_manager._last_synced_status, neutron.STATUS_ACTIVE) + self.assertEqual(self.test_instance_manager._last_synced_status, + neutron.STATUS_ACTIVE) diff --git a/akanda/rug/test/unit/test_state.py b/akanda/rug/test/unit/test_state.py old mode 100644 new mode 100755 index a75d29f6..e083e746 --- a/akanda/rug/test/unit/test_state.py +++ b/akanda/rug/test/unit/test_state.py @@ -23,7 +23,7 @@ import unittest2 as unittest from akanda.rug import event from akanda.rug import state -from akanda.rug import vm_manager +from akanda.rug import instance_manager from akanda.rug.api.neutron import RouterGone @@ -33,11 +33,12 @@ class BaseTestStateCase(unittest.TestCase): def setUp(self): self.ctx = mock.Mock() # worker context log = logging.getLogger(__name__) - vm_mgr_cls = mock.patch('akanda.rug.vm_manager.VmManager').start() + instance_mgr_cls = \ + mock.patch('akanda.rug.instance_manager.InstanceManager').start() self.addCleanup(mock.patch.stopall) - self.vm = vm_mgr_cls.return_value + self.instance = instance_mgr_cls.return_value self.params = state.StateParams( - vm=self.vm, + instance=self.instance, log=log, queue=deque(), bandwidth_callback=mock.Mock(), @@ -47,8 +48,8 @@ class BaseTestStateCase(unittest.TestCase): self.state = self.state_cls(self.params) def _test_transition_hlpr(self, action, expected_class, - vm_state=state.vm_manager.UP): - self.vm.state = vm_state + instance_state=state.instance_manager.UP): + self.instance.state = instance_state result = self.state.transition(action, self.ctx) self.assertIsInstance(result, expected_class) return result @@ -134,7 +135,7 @@ class TestCalcActionState(BaseTestStateCase): 
         self._test_transition_hlpr(
             event.UPDATE,
             state.CheckBoot,
-            vm_manager.BOOTING
+            instance_manager.BOOTING
         )
 
     def test_transition_update_missing_router_not_down(self):
@@ -143,7 +144,7 @@ class TestCalcActionState(BaseTestStateCase):
         self._test_transition_hlpr(
             event.UPDATE,
             state.CheckBoot,
-            vm_manager.BOOTING
+            instance_manager.BOOTING
         )
 
     def test_transition_delete_missing_router_down(self):
@@ -151,8 +152,8 @@ class TestCalcActionState(BaseTestStateCase):
         self.ctx.neutron.get_router_detail.side_effect = RouterGone
         self._test_transition_hlpr(
             event.DELETE,
-            state.StopVM,
-            vm_manager.DOWN
+            state.StopInstance,
+            instance_manager.DOWN
         )
 
     def test_transition_delete_missing_router_not_down(self):
@@ -160,56 +161,62 @@ class TestCalcActionState(BaseTestStateCase):
         self.ctx.neutron.get_router_detail.side_effect = RouterGone
         self._test_transition_hlpr(
             event.DELETE,
-            state.StopVM,
-            vm_manager.BOOTING
+            state.StopInstance,
+            instance_manager.BOOTING
         )
 
-    def test_transition_delete_down_vm(self):
-        self._test_transition_hlpr(event.DELETE, state.StopVM, vm_manager.DOWN)
+    def test_transition_delete_down_instance(self):
+        self._test_transition_hlpr(event.DELETE,
+                                   state.StopInstance,
+                                   instance_manager.DOWN)
 
-    def test_transition_delete_up_vm(self):
-        self._test_transition_hlpr(event.DELETE, state.StopVM)
+    def test_transition_delete_up_instance(self):
+        self._test_transition_hlpr(event.DELETE, state.StopInstance)
 
-    def test_transition_create_down_vm(self):
+    def test_transition_create_down_instance(self):
         for evt in [event.POLL, event.READ, event.UPDATE, event.CREATE]:
-            self._test_transition_hlpr(evt, state.CreateVM, vm_manager.DOWN)
+            self._test_transition_hlpr(evt,
+                                       state.CreateInstance,
+                                       instance_manager.DOWN)
 
-    def test_transition_poll_up_vm(self):
-        self._test_transition_hlpr(event.POLL, state.Alive, vm_manager.UP)
+    def test_transition_poll_up_instance(self):
+        self._test_transition_hlpr(event.POLL,
+                                   state.Alive,
+                                   instance_manager.UP)
 
-    def test_transition_poll_configured_vm(self):
+    def test_transition_poll_configured_instance(self):
         self._test_transition_hlpr(
             event.POLL,
             state.Alive,
-            vm_manager.CONFIGURED
+            instance_manager.CONFIGURED
         )
 
-    def test_transition_other_up_vm(self):
+    def test_transition_other_up_instance(self):
         for evt in [event.READ, event.UPDATE, event.CREATE]:
             self._test_transition_hlpr(evt, state.Alive)
 
-    def test_transition_update_error_vm(self):
-        self.vm.error_cooldown = False
+    def test_transition_update_error_instance(self):
+        self.instance.error_cooldown = False
         result = self._test_transition_hlpr(
             event.UPDATE,
             state.ClearError,
-            vm_manager.ERROR,
+            instance_manager.ERROR,
         )
         self.assertIsInstance(result._next_state, state.Alive)
 
-    def test_transition_update_error_vm_in_error_cooldown(self):
-        self.vm.error_cooldown = True
+    def test_transition_update_error_instance_in_error_cooldown(self):
+        self.instance.error_cooldown = True
         self._test_transition_hlpr(
             event.UPDATE,
             state.CalcAction,
-            vm_manager.ERROR,
+            instance_manager.ERROR,
         )
 
-    def test_transition_poll_error_vm(self):
+    def test_transition_poll_error_instance(self):
         self._test_transition_hlpr(
             event.POLL,
             state.CalcAction,
-            vm_manager.ERROR,
+            instance_manager.ERROR,
         )
 
 
@@ -221,104 +228,107 @@ class TestAliveState(BaseTestStateCase):
             self.state.execute('passthrough', self.ctx),
             'passthrough'
         )
-        self.vm.update_state.assert_called_once_with(self.ctx)
+        self.instance.update_state.assert_called_once_with(self.ctx)
 
-    def test_transition_vm_down(self):
+    def test_transition_instance_down(self):
         for evt in [event.POLL, event.READ, event.UPDATE, event.CREATE]:
-            self._test_transition_hlpr(evt, state.CreateVM, vm_manager.DOWN)
+            self._test_transition_hlpr(evt,
+                                       state.CreateInstance,
+                                       instance_manager.DOWN)
 
-    def test_transition_poll_vm_configured(self):
+    def test_transition_poll_instance_configured(self):
         self._test_transition_hlpr(
             event.POLL,
             state.CalcAction,
-            vm_manager.CONFIGURED
+            instance_manager.CONFIGURED
         )
 
-    def test_transition_read_vm_configured(self):
+    def test_transition_read_instance_configured(self):
         self._test_transition_hlpr(
             event.READ,
             state.ReadStats,
-            vm_manager.CONFIGURED
+            instance_manager.CONFIGURED
         )
 
     def test_transition_up_to_configured(self):
         self._test_transition_hlpr(
             event.CREATE,
-            state.ConfigureVM,
-            vm_manager.UP
+            state.ConfigureInstance,
+            instance_manager.UP
         )
 
-    def test_transition_configured_vm_configured(self):
+    def test_transition_configured_instance_configured(self):
         self._test_transition_hlpr(
             event.CREATE,
-            state.ConfigureVM,
-            vm_manager.CONFIGURED
+            state.ConfigureInstance,
+            instance_manager.CONFIGURED
         )
 
 
-class TestCreateVMState(BaseTestStateCase):
-    state_cls = state.CreateVM
+class TestCreateInstanceState(BaseTestStateCase):
+    state_cls = state.CreateInstance
 
     def test_execute(self):
-        self.vm.attempts = 0
+        self.instance.attempts = 0
         self.assertEqual(
             self.state.execute('passthrough', self.ctx),
             'passthrough'
         )
-        self.vm.boot.assert_called_once_with(self.ctx, 'GLANCE-IMAGE-123')
+        self.instance.boot.assert_called_once_with(self.ctx,
+                                                   'GLANCE-IMAGE-123')
 
     def test_execute_too_many_attempts(self):
-        self.vm.attempts = self.params.reboot_error_threshold
+        self.instance.attempts = self.params.reboot_error_threshold
         self.assertEqual(
             self.state.execute('passthrough', self.ctx),
             'passthrough'
        )
-        self.assertEqual([], self.vm.boot.mock_calls)
-        self.vm.set_error.assert_called_once_with(self.ctx)
+        self.assertEqual([], self.instance.boot.mock_calls)
+        self.instance.set_error.assert_called_once_with(self.ctx)
 
-    def test_transition_vm_down(self):
+    def test_transition_instance_down(self):
         self._test_transition_hlpr(
             event.READ,
             state.CheckBoot,
-            vm_manager.BOOTING
+            instance_manager.BOOTING
         )
 
-    def test_transition_vm_up(self):
+    def test_transition_instance_up(self):
         self._test_transition_hlpr(
             event.READ,
             state.CheckBoot,
-            vm_state=state.vm_manager.BOOTING
+            instance_state=state.instance_manager.BOOTING
         )
 
-    def test_transition_vm_missing(self):
+    def test_transition_instance_missing(self):
         self._test_transition_hlpr(
             event.READ,
-            state.CreateVM,
-            vm_state=state.vm_manager.DOWN
+            state.CreateInstance,
+            instance_state=state.instance_manager.DOWN
         )
 
-    def test_transition_vm_error(self):
+    def test_transition_instance_error(self):
         self._test_transition_hlpr(event.READ,
                                    state.CalcAction,
-                                   vm_state=state.vm_manager.ERROR)
+                                   instance_state=state.instance_manager.ERROR)
 
 
-class TestRebuildVMState(BaseTestStateCase):
-    state_cls = state.RebuildVM
+class TestRebuildInstanceState(BaseTestStateCase):
+    state_cls = state.RebuildInstance
 
     def test_execute(self):
         self.assertEqual(
             self.state.execute('ignored', self.ctx),
             event.CREATE,
         )
-        self.vm.stop.assert_called_once_with(self.ctx)
+        self.instance.stop.assert_called_once_with(self.ctx)
 
     def test_execute_gone(self):
-        self.vm.state = vm_manager.GONE
+        self.instance.state = instance_manager.GONE
         self.assertEqual(
             self.state.execute('ignored', self.ctx),
             event.DELETE,
         )
-        self.vm.stop.assert_called_once_with(self.ctx)
+        self.instance.stop.assert_called_once_with(self.ctx)
 
 
 class TestClearErrorState(BaseTestStateCase):
@@ -329,15 +339,15 @@ class TestClearErrorState(BaseTestStateCase):
         self.assertEqual(
             self.state.execute('passthrough', self.ctx),
             'passthrough',
         )
-        self.vm.clear_error.assert_called_once_with(self.ctx)
+        self.instance.clear_error.assert_called_once_with(self.ctx)
 
     def test_execute_after_error(self):
-        self.vm.state = vm_manager.ERROR
+        self.instance.state = instance_manager.ERROR
         self.assertEqual(
             self.state.execute('passthrough', self.ctx),
             'passthrough',
         )
-        self.vm.clear_error.assert_called_once_with(self.ctx)
+        self.instance.clear_error.assert_called_once_with(self.ctx)
 
     def test_transition_default(self):
         st = self.state_cls(self.params)
@@ -362,66 +372,70 @@ class TestCheckBootState(BaseTestStateCase):
             self.state.execute('passthrough', self.ctx),
             'passthrough'
         )
-        self.vm.check_boot.assert_called_once_with(self.ctx)
+        self.instance.check_boot.assert_called_once_with(self.ctx)
         assert list(self.params.queue) == ['passthrough']
 
-    def test_transition_vm_configure(self):
+    def test_transition_instance_configure(self):
         self._test_transition_hlpr(
             event.UPDATE,
-            state.ConfigureVM,
-            vm_manager.UP
+            state.ConfigureInstance,
+            instance_manager.UP
         )
 
-    def test_transition_vm_booting(self):
+    def test_transition_instance_booting(self):
         self._test_transition_hlpr(
             event.UPDATE,
             state.CalcAction,
-            vm_manager.BOOTING
+            instance_manager.BOOTING
         )
 
 
-class TestStopVMState(BaseTestStateCase):
-    state_cls = state.StopVM
+class TestStopInstanceState(BaseTestStateCase):
+    state_cls = state.StopInstance
 
     def test_execute(self):
         self.assertEqual(
             self.state.execute('passthrough', self.ctx),
             'passthrough'
         )
-        self.vm.stop.assert_called_once_with(self.ctx)
+        self.instance.stop.assert_called_once_with(self.ctx)
 
-    def test_transition_vm_still_up(self):
-        self._test_transition_hlpr(event.DELETE, state.StopVM)
+    def test_transition_instance_still_up(self):
+        self._test_transition_hlpr(event.DELETE, state.StopInstance)
 
-    def test_transition_delete_vm_down(self):
-        self._test_transition_hlpr(event.DELETE, state.Exit, vm_manager.DOWN)
+    def test_transition_delete_instance_down(self):
+        self._test_transition_hlpr(event.DELETE,
+                                   state.Exit,
+                                   instance_manager.DOWN)
 
-    def test_transition_restart_vm_down(self):
-        self._test_transition_hlpr(event.READ, state.CreateVM, vm_manager.DOWN)
+    def test_transition_restart_instance_down(self):
+        self._test_transition_hlpr(event.READ,
+                                   state.CreateInstance,
+                                   instance_manager.DOWN)
 
 
 class TestReplugState(BaseTestStateCase):
-    state_cls = state.ReplugVM
+    state_cls = state.ReplugInstance
 
     def test_execute(self):
         self.assertEqual(
             self.state.execute('update', self.ctx),
             'update'
         )
-        self.vm.replug.assert_called_once_with(self.ctx)
+        self.instance.replug.assert_called_once_with(self.ctx)
 
     def test_transition_hotplug_succeeded(self):
         self._test_transition_hlpr(
             event.UPDATE,
-            state.ConfigureVM,
-            vm_manager.REPLUG
+            state.ConfigureInstance,
+            instance_manager.REPLUG
         )
 
     def test_transition_hotplug_failed(self):
         self._test_transition_hlpr(
             event.UPDATE,
-            state.StopVM,
-            vm_manager.RESTART
+            state.StopInstance,
+            instance_manager.RESTART
         )
 
 
@@ -429,51 +443,55 @@ class TestExitState(TestBaseState):
     state_cls = state.Exit
 
 
-class TestConfigureVMState(BaseTestStateCase):
-    state_cls = state.ConfigureVM
+class TestConfigureInstanceState(BaseTestStateCase):
+    state_cls = state.ConfigureInstance
 
     def test_execute_read_configure_success(self):
-        self.vm.state = vm_manager.CONFIGURED
+        self.instance.state = instance_manager.CONFIGURED
         self.assertEqual(self.state.execute(event.READ, self.ctx),
                          event.READ)
-        self.vm.configure.assert_called_once_with(self.ctx)
+        self.instance.configure.assert_called_once_with(self.ctx)
 
     def test_execute_update_configure_success(self):
-        self.vm.state = vm_manager.CONFIGURED
+        self.instance.state = instance_manager.CONFIGURED
         self.assertEqual(self.state.execute(event.UPDATE, self.ctx),
                          event.POLL)
-        self.vm.configure.assert_called_once_with(self.ctx)
+        self.instance.configure.assert_called_once_with(self.ctx)
 
     def test_execute_configure_failure(self):
         self.assertEqual(
             self.state.execute(event.CREATE, self.ctx),
             event.CREATE
         )
-        self.vm.configure.assert_called_once_with(self.ctx)
+        self.instance.configure.assert_called_once_with(self.ctx)
 
     def test_transition_not_configured_down(self):
-        self._test_transition_hlpr(event.READ, state.StopVM, vm_manager.DOWN)
+        self._test_transition_hlpr(event.READ,
+                                   state.StopInstance,
+                                   instance_manager.DOWN)
 
     def test_transition_not_configured_restart(self):
-        self._test_transition_hlpr(event.READ, state.StopVM,
-                                   vm_manager.RESTART)
+        self._test_transition_hlpr(event.READ,
+                                   state.StopInstance,
+                                   instance_manager.RESTART)
 
     def test_transition_not_configured_up(self):
-        self._test_transition_hlpr(event.READ, state.PushUpdate,
-                                   vm_manager.UP)
+        self._test_transition_hlpr(event.READ,
+                                   state.PushUpdate,
+                                   instance_manager.UP)
 
     def test_transition_read_configured(self):
         self._test_transition_hlpr(
             event.READ,
             state.ReadStats,
-            vm_manager.CONFIGURED
+            instance_manager.CONFIGURED
         )
 
     def test_transition_other_configured(self):
         self._test_transition_hlpr(
             event.POLL,
             state.CalcAction,
-            vm_manager.CONFIGURED
+            instance_manager.CONFIGURED
         )
 
 
@@ -481,13 +499,13 @@ class TestReadStatsState(BaseTestStateCase):
     state_cls = state.ReadStats
 
     def test_execute(self):
-        self.vm.read_stats.return_value = 'foo'
+        self.instance.read_stats.return_value = 'foo'
         self.assertEqual(
             self.state.execute(event.READ, self.ctx),
             event.POLL
         )
-        self.vm.read_stats.assert_called_once_with()
+        self.instance.read_stats.assert_called_once_with()
         self.params.bandwidth_callback.assert_called_once_with('foo')
 
     def test_transition(self):
@@ -500,7 +518,8 @@ class TestAutomaton(unittest.TestCase):
 
         self.ctx = mock.Mock()  # worker context
-        self.vm_mgr_cls = mock.patch('akanda.rug.vm_manager.VmManager').start()
+        self.instance_mgr_cls = \
+            mock.patch('akanda.rug.instance_manager.InstanceManager').start()
         self.addCleanup(mock.patch.stopall)
 
         self.delete_callback = mock.Mock()
@@ -548,8 +567,8 @@ class TestAutomaton(unittest.TestCase):
         self.assertFalse(self.sm.has_more_work())
 
     def test_send_message_in_error(self):
-        vm = self.vm_mgr_cls.return_value
-        vm.state = state.vm_manager.ERROR
+        instance = self.instance_mgr_cls.return_value
+        instance.state = state.instance_manager.ERROR
         message = mock.Mock()
         message.crud = 'poll'
         self.sm.send_message(message)
@@ -567,9 +586,9 @@ class TestAutomaton(unittest.TestCase):
         )
 
     def test_send_rebuild_message_with_custom_image(self):
-        vm = self.vm_mgr_cls.return_value
-        vm.state = state.vm_manager.DOWN
-        with mock.patch.object(vm_manager.cfg, 'CONF') as conf:
+        instance = self.instance_mgr_cls.return_value
+        instance.state = state.instance_manager.DOWN
+        with mock.patch.object(instance_manager.cfg, 'CONF') as conf:
             conf.router_image_uuid = 'DEFAULT'
             self.sm.state.params.router_image_uuid = conf.router_image_uuid
@@ -645,7 +664,7 @@ class TestAutomaton(unittest.TestCase):
 
             execute.called_once_with(
                 event.POLL,
-                self.vm_mgr_cls.return_value,
+                self.instance_mgr_cls.return_value,
                 self.ctx,
                 self.sm._queue
             )
@@ -663,17 +682,17 @@ class TestAutomaton(unittest.TestCase):
 
            execute.called_once_with(
                 event.POLL,
-                self.vm_mgr_cls.return_value,
+                self.instance_mgr_cls.return_value,
                 self.ctx,
                 self.bandwidth_callback
             )
 
     def test_has_error(self):
-        with mock.patch.object(self.sm, 'vm') as vm:
-            vm.state = vm_manager.ERROR
+        with mock.patch.object(self.sm, 'instance') as instance:
+            instance.state = instance_manager.ERROR
             self.assertTrue(self.sm.has_error())
 
     def test_has_no_error(self):
-        with mock.patch.object(self.sm, 'vm') as vm:
-            vm.state = vm_manager.UP
+        with mock.patch.object(self.sm, 'instance') as instance:
+            instance.state = instance_manager.UP
             self.assertFalse(self.sm.has_error())
diff --git a/akanda/rug/test/unit/test_tenant.py b/akanda/rug/test/unit/test_tenant.py
old mode 100644
new mode 100755
index ac894ca3..9b610c3a
--- a/akanda/rug/test/unit/test_tenant.py
+++ b/akanda/rug/test/unit/test_tenant.py
@@ -21,7 +21,7 @@ import unittest2 as unittest
 from akanda.rug import event
 from akanda.rug import tenant
 from akanda.rug import state
-from akanda.rug import vm_manager
+from akanda.rug import instance_manager
 
 
 class TestTenantRouterManager(unittest.TestCase):
@@ -29,7 +29,8 @@ class TestTenantRouterManager(unittest.TestCase):
 
     def setUp(self):
         super(TestTenantRouterManager, self).setUp()
-        self.vm_mgr = mock.patch('akanda.rug.vm_manager.VmManager').start()
+        self.instance_mgr = \
+            mock.patch('akanda.rug.instance_manager.InstanceManager').start()
         self.addCleanup(mock.patch.stopall)
         self.notifier = mock.Mock()
         self.trm = tenant.TenantRouterManager(
@@ -93,10 +94,10 @@ class TestTenantRouterManager(unittest.TestCase):
                                  None, None, None, 5, 5)
             # Replace the default mock with one that has 'state' set.
             if i == 2:
-                status = vm_manager.ERROR
+                status = instance_manager.ERROR
             else:
-                status = vm_manager.UP
-            sm.vm = mock.Mock(state=status)
+                status = instance_manager.UP
+            sm.instance = mock.Mock(state=status)
             self.trm.state_machines.state_machines[str(i)] = sm
         msg = event.Event(
             tenant_id='1234',
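The unit-test changes above all reduce to one pattern: mocks that used to target akanda.rug.vm_manager.VmManager now target akanda.rug.instance_manager.InstanceManager, and assertions read the state machine's 'instance' attribute instead of 'vm'. A minimal sketch of that pattern is shown below, for illustration only; FakeStateMachine and its plain-string state are assumptions, not code from this tree.

# Illustration only -- not part of the patch. Mirrors the renamed mock target
# and attribute used by the updated unit tests.
import mock
import unittest


class FakeStateMachine(object):
    """Hypothetical stand-in exposing the renamed 'instance' attribute."""

    def __init__(self, instance):
        self.instance = instance

    def has_error(self):
        # The real tests compare against instance_manager.ERROR; a plain
        # string keeps this sketch self-contained.
        return self.instance.state == 'error'


class TestRenamedMockTarget(unittest.TestCase):
    def setUp(self):
        # Patch the renamed path (previously 'akanda.rug.vm_manager.VmManager'),
        # mirroring the updated setUp() calls in the diff above.
        self.instance_mgr_cls = mock.patch(
            'akanda.rug.instance_manager.InstanceManager').start()
        self.addCleanup(mock.patch.stopall)

    def test_has_error(self):
        instance = self.instance_mgr_cls.return_value
        instance.state = 'error'
        sm = FakeStateMachine(instance)
        self.assertTrue(sm.has_error())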