Add compute_id to Instance object
This adds the compute_id field to the Instance object and adds checks
in save() and create() to make sure we no longer update node without
also updating compute_id.

Related to blueprint compute-object-ids

Change-Id: I0740a2e4e09a526da8565a18e6761b4dbdc4ec0b
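As a quick sketch of the new invariant (not part of the change itself; it
mirrors the object tests added below and assumes a usable RequestContext
`ctx`):

    from oslo_utils import uuidutils

    from nova import exception
    from nova import objects

    inst = objects.Instance(ctx, uuid=uuidutils.generate_uuid(),
                            user_id=ctx.user_id, project_id=ctx.project_id)
    inst.create()       # creating without node or compute_id is still okay

    inst.node = 'foo'   # updating node alone...
    try:
        inst.save()     # ...raises once the object is at version 2.8
    except exception.ObjectActionError:
        pass

    inst.compute_id = 1  # paired with the matching ComputeNode id...
    inst.save()          # ...the save succeeds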
parent 70516d4ff9
commit 625fb569a7
@@ -707,6 +707,7 @@ class ComputeManager(manager.Manager):
         # to the database layer.
         instance.host = None
         instance.node = None
+        instance.compute_id = None
         # ResourceTracker._set_instance_host_and_node also sets launched_on
         # to the same value as host and is really only ever used by legacy
         # nova-network code, but we should also null it out to avoid confusion
@@ -5094,7 +5095,12 @@ class ComputeManager(manager.Manager):
         """Revert host, node and flavor fields after a resize-revert."""
         self._set_instance_info(instance, instance.old_flavor)
         instance.host = migration.source_compute
-        instance.node = migration.source_node
+        # NOTE(danms): This could fail if we somehow ended up on the wrong
+        # host without the desired node. That's a big problem, so let the
+        # ComputeHostNotFound raise from here.
+        cn = self.rt.get_node_by_name(migration.source_node)
+        instance.node = cn.hypervisor_hostname
+        instance.compute_id = cn.id
         instance.save(expected_task_state=[task_states.RESIZE_REVERTING])

     @wrap_exception()
@@ -6009,6 +6015,11 @@ class ComputeManager(manager.Manager):

         instance.host = migration.dest_compute
         instance.node = migration.dest_node
+        # NOTE(danms): This could fail if the dest_compute_id was not set
+        # by the sending host *and* migration.dest_node does not point to
+        # a legit node on the dest_compute. Since that would be very bad,
+        # let the ComputeHostNotFound raise from here.
+        instance.compute_id = migration.get_dest_compute_id()
         # NOTE(gibi): as the instance now tracked on the destination we
         # have to make sure that the source compute resource track can
         # track this instance as a migration. For that the resource tracker
@@ -6394,7 +6405,12 @@ class ComputeManager(manager.Manager):
         # Setting the host/node values will make the ResourceTracker continue
         # to track usage for this instance on this host.
         instance.host = migration.dest_compute
-        instance.node = migration.dest_node
+        # NOTE(danms): This could fail if we somehow ended up on the wrong
+        # host without the desired node. That's a big problem, so let the
+        # ComputeHostNotFound raise from here.
+        cn = self.rt.get_node_by_name(migration.dest_node)
+        instance.compute_id = cn.id
+        instance.node = cn.hypervisor_hostname
         instance.save(expected_task_state=task_states.RESIZE_FINISH)

         # Broadcast to all schedulers that the instance is on this host.
@@ -9174,6 +9190,7 @@ class ComputeManager(manager.Manager):
                 source_bdms)
         except Exception:
             # Restore the instance object
+            compute_node = None
             node_name = None
             try:
                 # get node name of compute, where instance will be
@@ -9195,6 +9212,7 @@ class ComputeManager(manager.Manager):
             instance.host = dest
             instance.task_state = None
             instance.node = node_name
+            instance.compute_id = compute_node and compute_node.id or None
             instance.progress = 0
             instance.save()
             raise
@@ -9420,6 +9438,7 @@ class ComputeManager(manager.Manager):
         finally:
             # Restore instance state and update host
             current_power_state = self._get_power_state(instance)
+            compute_node = None
             node_name = None
             prev_host = instance.host
             try:
@@ -9442,6 +9461,7 @@ class ComputeManager(manager.Manager):
             instance.power_state = current_power_state
             instance.task_state = None
             instance.node = node_name
+            instance.compute_id = compute_node and compute_node.id or None
             instance.progress = 0
             instance.save(expected_task_state=task_states.MIGRATING)

@@ -196,7 +196,7 @@ class ResourceTracker(object):
             # so that the resource audit knows about any cpus we've pinned.
             instance_numa_topology = claim.claimed_numa_topology
             instance.numa_topology = instance_numa_topology
-            self._set_instance_host_and_node(instance, nodename)
+            self._set_instance_host_and_node(instance, cn)

             if self.pci_tracker:
                 # NOTE(jaypipes): ComputeNode.pci_device_pools is set below
@@ -542,7 +542,7 @@ class ResourceTracker(object):
             rc = resource.resource_class
             self.assigned_resources[rp_uuid][rc].add(resource)

-    def _set_instance_host_and_node(self, instance, nodename):
+    def _set_instance_host_and_node(self, instance, node):
         """Tag the instance as belonging to this host. This should be done
         while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource claim
         will not be lost if the audit process starts.
@@ -552,7 +552,15 @@ class ResourceTracker(object):
         # method changes that method might need to be updated.
         instance.host = self.host
         instance.launched_on = self.host
-        instance.node = nodename
+        if node is not None:
+            # NOTE(danms): This method can be called when the node is disabled,
+            # which means not in our list. The note in instance_claim()
+            # explains that we should actually never get there (here) and that
+            # if we do, we will likely record the instance membership
+            # incorrectly. As such, if we do not have a compute node to
+            # update the instance, just avoid making a change.
+            instance.node = node.hypervisor_hostname
+            instance.compute_id = node.id
         instance.save()

     def _unset_instance_host_and_node(self, instance):
@@ -563,6 +571,7 @@ class ResourceTracker(object):
         """
         instance.host = None
         instance.node = None
+        instance.compute_id = None
         instance.save()

     @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
@@ -2080,6 +2089,9 @@ class ResourceTracker(object):
         # entry.
         instance.host = self.host
         instance.node = node
+        # NOTE(danms): We can be called with node=None, which means compute_id
+        # should also be None
+        instance.compute_id = node and self.compute_nodes[node].id or None
         instance.save()
         instance.drop_migration_context()
@@ -2106,3 +2118,13 @@ class ResourceTracker(object):
             # up the cache.
             self.remove_node(stale_cn)
             self.reportclient.invalidate_resource_provider(stale_cn)
+
+    def get_node_by_name(self, nodename):
+        """Get a node from our list by name.
+
+        :raises: ComputeHostNotFound if missing
+        """
+        try:
+            return self.compute_nodes[nodename]
+        except KeyError:
+            raise exception.ComputeHostNotFound(host=nodename)

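Taken together with the manager hunks above, the intended call pattern for
filling in both fields from a bare node name is roughly the following (a
sketch only; `rt` is this host's ResourceTracker and `migration.source_node`
stands in for wherever the node name comes from):

    # Resolve the node name to a ComputeNode record; a missing node
    # raises ComputeHostNotFound rather than being silently ignored.
    cn = rt.get_node_by_name(migration.source_node)
    instance.node = cn.hypervisor_hostname
    instance.compute_id = cn.id
    instance.save()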
@@ -95,6 +95,7 @@ class BuildRequest(base.NovaObject):
         self.instance.terminated_at = None
         self.instance.host = None
         self.instance.node = None
+        self.instance.compute_id = None
         self.instance.launched_at = None
         self.instance.launched_on = None
         self.instance.cell_name = None

@@ -115,7 +115,8 @@ class Instance(base.NovaPersistentObject, base.NovaObject,
     # Version 2.5: Added hard_delete kwarg in destroy
     # Version 2.6: Added hidden
     # Version 2.7: Added resources
-    VERSION = '2.7'
+    # Version 2.8: Added compute_id
+    VERSION = '2.8'

     fields = {
         'id': fields.IntegerField(),
@@ -146,6 +147,7 @@ class Instance(base.NovaPersistentObject, base.NovaObject,

         'host': fields.StringField(nullable=True),
         'node': fields.StringField(nullable=True),
+        'compute_id': fields.IntegerField(nullable=True),

         # TODO(stephenfin): Remove this in version 3.0 of the object as it has
         # been replaced by 'flavor'
@@ -231,6 +233,8 @@ class Instance(base.NovaPersistentObject, base.NovaObject,
     def obj_make_compatible(self, primitive, target_version):
         super(Instance, self).obj_make_compatible(primitive, target_version)
         target_version = versionutils.convert_version_to_tuple(target_version)
+        if target_version < (2, 8) and 'compute_id' in primitive:
+            del primitive['compute_id']
         if target_version < (2, 7) and 'resources' in primitive:
             del primitive['resources']
         if target_version < (2, 6) and 'hidden' in primitive:
@@ -546,6 +550,21 @@ class Instance(base.NovaPersistentObject, base.NovaObject,
             raise exception.ObjectActionError(action='create',
                                               reason='already deleted')
         updates = self.obj_get_changes()
+        version = versionutils.convert_version_to_tuple(self.VERSION)
+
+        if 'node' in updates and 'compute_id' not in updates:
+            # NOTE(danms): This is not really the best idea, as we should try
+            # not to have different behavior based on the version of the
+            # object. However, this exception helps us find cases in testing
+            # where these may not be updated together. We can remove this
+            # later.
+            if version >= (2, 8):
+                raise exception.ObjectActionError(
+                    ('Instance is being created with node (%r) '
+                     'but not compute_id') % updates['node'])
+            else:
+                LOG.warning('Instance is being created with node %r but '
+                            'no compute_id', updates['node'])
+
         # NOTE(danms): We know because of the check above that deleted
         # is either unset or false. Since we need to avoid passing False
@@ -780,6 +799,21 @@ class Instance(base.NovaPersistentObject, base.NovaObject,
         updates = {}
         changes = self.obj_what_changed()

+        version = versionutils.convert_version_to_tuple(self.VERSION)
+        if 'node' in changes and 'compute_id' not in changes:
+            # NOTE(danms): This is not really the best idea, as we should try
+            # not to have different behavior based on the version of the
+            # object. However, this exception helps us find cases in testing
+            # where these may not be updated together. We can remove this
+            # later.
+            if version >= (2, 8):
+                raise exception.ObjectActionError(
+                    ('Instance.node is being updated (%r) '
+                     'but compute_id is not') % self.node)
+            else:
+                LOG.warning('Instance %s node is being updated to %r but '
+                            'compute_id is not', self.uuid, self.node)
+
         for field in self.fields:
             # NOTE(danms): For object fields, we construct and call a
             # helper method like self._save_$attrname()

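For mixed-version deployments, the compat code above means an Instance
backleveled to 2.7 simply loses the new field and only draws a warning, not
a failure, on save(). A rough sketch of that round trip, assuming an
existing `inst` (this mirrors the *_compat tests added below):

    from oslo_versionedobjects import base as ovo_base

    from nova import objects

    manifest = ovo_base.obj_tree_get_versions('Instance')
    old = objects.Instance.obj_from_primitive(
        inst.obj_to_primitive(target_version='2.7',
                              version_manifest=manifest))
    # obj_make_compatible() stripped 'compute_id', so updating node
    # alone on this 2.7 object logs a warning instead of raising.
    old.node = 'foo'
    old.save()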
@@ -158,7 +158,8 @@ class InstanceObjectTestCase(test.TestCase):

     def test_numa_topology_online_migration(self):
         """Ensure legacy NUMA topology objects are reserialized to o.vo's."""
-        instance = self._create_instance(host='fake-host', node='fake-node')
+        instance = self._create_instance(host='fake-host', node='fake-node',
+                                         compute_id=123)

         legacy_topology = jsonutils.dumps({
             "cells": [

@@ -169,6 +169,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
         with context.target_cell(ctx, cell) as cctx:
             instance = objects.Instance(context=cctx,
                                         image_ref=uuidsentinel.fake_image_ref,
+                                        compute_id=123,
                                         node='node1', reservation_id='a',
                                         host='host1',
                                         project_id=fakes.FAKE_PROJECT_ID,

@@ -431,7 +431,8 @@ def stub_instance(id=1, user_id=None, project_id=None, host=None,
                   memory_mb=0, vcpus=0, root_gb=0, ephemeral_gb=0,
                   flavor=None, launch_index=0, kernel_id="",
                   ramdisk_id="", user_data=None, system_metadata=None,
-                  services=None, trusted_certs=None, hidden=False):
+                  services=None, trusted_certs=None, hidden=False,
+                  compute_id=None):
     if user_id is None:
         user_id = 'fake_user'
     if project_id is None:
@@ -504,6 +505,7 @@ def stub_instance(id=1, user_id=None, project_id=None, host=None,
         "ephemeral_key_uuid": None,
         "host": host,
         "node": node,
+        "compute_id": compute_id,
         "instance_type_id": flavor.id,
         "user_data": user_data,
         "reservation_id": reservation_id,

@@ -100,6 +100,7 @@ FAKE_IMAGE_REF = uuids.image_ref

 NODENAME = 'fakenode1'
 NODENAME2 = 'fakenode2'
+COMPUTE_ID = 123


 def fake_not_implemented(*args, **kwargs):
@@ -289,6 +290,7 @@ class BaseTestCase(test.TestCase):
         inst.project_id = self.project_id
         inst.host = self.compute.host
         inst.node = NODENAME
+        inst.compute_id = COMPUTE_ID
         inst.instance_type_id = flavor.id
         inst.ami_launch_index = 0
         inst.memory_mb = 0
@@ -5984,12 +5986,14 @@ class ComputeTestCase(BaseTestCase,
     # test. It introduced a hacky way to force the migration dest_compute
     # and makes it hard to keep _test_finish_revert_resize() generic
     # and have the resources correctly tracked.
-    def test_finish_revert_resize_validate_source_compute(self):
+    @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
+    def test_finish_revert_resize_validate_source_compute(self, mock_cn):
         def fake(*args, **kwargs):
             pass

         instance = self._create_fake_instance_obj()
         request_spec = objects.RequestSpec()
+        mock_cn.return_value = mock.MagicMock(id=123)

         self.stub_out('nova.virt.fake.FakeDriver.finish_migration', fake)
         self.stub_out('nova.virt.fake.FakeDriver.finish_revert_migration',

@@ -99,6 +99,11 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
         self.context = context.RequestContext(fakes.FAKE_USER_ID,
                                               fakes.FAKE_PROJECT_ID)

+        self.compute.rt.compute_nodes = {
+            uuids.compute: mock.MagicMock(id=123,
+                                          hypervisor_hostname=uuids.compute),
+        }
+
         self.useFixture(fixtures.SpawnIsSynchronousFixture())
         self.useFixture(fixtures.EventReporterStub())
         self.allocations = {
@@ -5486,7 +5491,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
             self.context, instance, None, None, None, None, None,
             None, recreate=True, on_shared_storage=None,
             preserve_ephemeral=False, migration=None,
-            scheduled_node='fake-node',
+            scheduled_node=uuids.compute,
             limits={}, request_spec=request_spec, accel_uuids=[],
             reimage_boot_volume=False,
             target_state=None)
@@ -5542,8 +5547,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
         self.assertEqual(vm_states.ACTIVE, instance.vm_state)

     def test_rebuild_node_not_updated_if_not_recreate(self):
-        node = uuidutils.generate_uuid()  # ironic node uuid
-        instance = fake_instance.fake_instance_obj(self.context, node=node)
+        instance = fake_instance.fake_instance_obj(self.context,
+                                                   node=uuids.compute)
         instance.migration_context = None
         migration = objects.Migration(status='accepted')
         with test.nested(
@@ -5558,7 +5563,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
                 False, False, migration, None, {}, None, [], False,
                 None)
             self.assertFalse(mock_get.called)
-            self.assertEqual(node, instance.node)
+            self.assertEqual(uuids.compute, instance.node)
+            self.assertEqual(123, instance.compute_id)
             self.assertEqual('done', migration.status)
             mock_migration_save.assert_called_once_with()
|
|||
mock.patch.object(objects.Flavor, 'get_by_id'),
|
||||
mock.patch.object(self.compute, '_terminate_volume_connections'),
|
||||
mock.patch.object(self.compute, 'compute_rpcapi'),
|
||||
mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename'),
|
||||
) as (
|
||||
migrate_disk_and_power_off, fault_create, instance_update,
|
||||
network_api, save_inst, notify, vol_block_info, bdm, flavor,
|
||||
terminate_volume_connections, compute_rpcapi
|
||||
terminate_volume_connections, compute_rpcapi, cn_get
|
||||
):
|
||||
fault_create.return_value = (
|
||||
test_instance_fault.fake_faults['fake-uuid'][0])
|
||||
cn_get.return_value = mock.MagicMock(id=123,
|
||||
hypervisor_hostname='fake')
|
||||
yield (migrate_disk_and_power_off, notify)
|
||||
|
||||
def test_resize_instance_failure(self):
|
||||
|
@ -9197,6 +9206,9 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
|
|||
self.migration.uuid = uuids.migration
|
||||
self.migration.source_compute = self.instance['host']
|
||||
self.migration.source_node = self.instance['host']
|
||||
self.compute.rt.get_node_by_name.return_value = (
|
||||
mock.MagicMock(id=123,
|
||||
hypervisor_hostname=self.migration.source_node))
|
||||
request_spec = objects.RequestSpec()
|
||||
self.compute.finish_revert_resize(context=self.context,
|
||||
migration=self.migration,
|
||||
|
@@ -9208,6 +9220,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
         # source_compute value.
         self.assertNotEqual(self.migration.dest_compute,
                             self.migration.source_compute)
+        self.compute.rt.get_node_by_name.assert_called_once_with(
+            self.migration.source_node)

         do_test()
@@ -9243,6 +9257,9 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,

         self.compute.rt.tracked_migrations[self.instance['uuid']] = (
             self.migration, None)
+        self.compute.rt.compute_nodes[self.instance['node']] = (
+            mock.MagicMock(id=123,
+                           hypervisor_hostname=self.instance['node']))
         self.instance.migration_context = objects.MigrationContext()
         self.migration.source_compute = self.instance['host']
         self.migration.source_node = self.instance['node']
@@ -9342,6 +9359,9 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
         mock_get_bdms.return_value = objects.BlockDeviceMappingList()
         request_spec = objects.RequestSpec()
         mock_revert_allocation.return_value = mock.sentinel.allocation
+        self.compute.rt.compute_nodes[self.migration.source_node] = (
+            mock.MagicMock(id=123,
+                           hypervisor_hostname=self.migration.source_node))

         with mock.patch.object(
                 self.compute.network_api, 'get_instance_nw_info'):
@@ -9374,6 +9394,9 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,

         mock_get_bdms.return_value = objects.BlockDeviceMappingList()
         mock_get_allocations.return_value = mock.sentinel.allocation
+        self.compute.rt.compute_nodes[self.migration.source_node] = (
+            mock.MagicMock(id=123,
+                           hypervisor_hostname=self.migration.source_node))

         with mock.patch.object(
                 self.compute.network_api, 'get_instance_nw_info'):
@@ -10270,8 +10293,9 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
             setup_networks_on_host, get_instance_nw_info, save,
             rt_mock, mock_apply_mig_ctxt, mock_drop_mig_ctxt):

-        cn = mock.Mock(spec_set=['hypervisor_hostname'])
+        cn = mock.Mock(spec_set=['hypervisor_hostname', 'id'])
         cn.hypervisor_hostname = 'test_host'
+        cn.id = 123
         _get_compute_info.return_value = cn
         cn_old = self.instance.host
         instance_old = self.instance
@@ -10392,8 +10416,9 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
             _notify_about_instance_usage, network_api, save,
             add_instance_fault_from_exc, rt_mock,
             mock_apply_mig_ctxt, mock_drop_mig_ctxt):
-        cn = mock.Mock(spec_set=['hypervisor_hostname'])
+        cn = mock.Mock(spec_set=['hypervisor_hostname', 'id'])
         cn.hypervisor_hostname = 'test_host'
+        cn.id = 123
         _get_compute_info.return_value = cn

         self.assertRaises(exception.NovaException,
@@ -10422,6 +10447,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
                           return_value=power_state.RUNNING)
         @mock.patch.object(self.compute, '_get_compute_info',
                            return_value=objects.ComputeNode(
+                               id=123,
                                hypervisor_hostname='fake-dest-host'))
         @mock.patch.object(self.instance, 'save')
         def _do_test(instance_save, get_compute_node, get_power_state,
@@ -10494,7 +10520,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
         @mock.patch.object(self.compute, '_get_compute_info')
         def _test_post_live_migration(_get_compute_info):
             dest_host = 'dest'
-            cn = objects.ComputeNode(hypervisor_hostname=dest_host)
+            cn = objects.ComputeNode(hypervisor_hostname=dest_host, id=123)
             _get_compute_info.return_value = cn
             instance = fake_instance.fake_instance_obj(self.context,
                                                        node='src',
@@ -12013,6 +12039,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
         self.migration.dest_compute = uuids.dest
         self.migration.dest_node = uuids.dest
+        self.migration.dest_compute_id = 123
+        self.compute.rt.compute_nodes[uuids.dest] = mock.MagicMock(
+            id=123, hypervisor_hostname=uuids.dest)

         with mock.patch.object(self.compute, 'network_api') as network_api:
             network_api.get_instance_nw_info.return_value = nwinfo
|
|||
# Configure the instance with an old_flavor for the revert.
|
||||
old_flavor = fake_flavor.fake_flavor_obj(self.context)
|
||||
self.instance.old_flavor = old_flavor
|
||||
self.compute.rt.compute_nodes[self.migration.source_node] = (
|
||||
mock.MagicMock(id=123,
|
||||
hypervisor_hostname=self.migration.source_node))
|
||||
with test.nested(
|
||||
mock.patch.object(self.compute.network_api,
|
||||
'get_instance_nw_info'),
|
||||
|
@@ -12765,6 +12796,9 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
         # Configure the instance with an old_flavor for the revert.
         old_flavor = fake_flavor.fake_flavor_obj(self.context)
         self.instance.old_flavor = old_flavor
+        self.compute.rt.compute_nodes[self.migration.source_node] = (
+            mock.MagicMock(id=123,
+                           hypervisor_hostname=self.migration.source_node))
         with test.nested(
             mock.patch.object(self.compute.network_api,
                               'get_instance_nw_info'),

@@ -2261,6 +2261,8 @@ class TestInstanceClaim(BaseTestCase):

         # Reset all changes to the instance to make sure that we can detect
         # any manipulation after the failure.
+        orig_node = self.instance.node
+        self.assertNotIn('compute_id', self.instance)
         self.instance.obj_reset_changes(recursive=True)

         with mock.patch.object(self.instance, 'save') as mock_save:
@@ -2272,6 +2274,8 @@ class TestInstanceClaim(BaseTestCase):

         # Make sure the instance was not touched by the failed claim process
         self.assertEqual(set(), self.instance.obj_what_changed())
+        self.assertEqual(orig_node, self.instance.node)
+        self.assertNotIn('compute_id', self.instance)

     @mock.patch('nova.compute.utils.is_volume_backed_instance')
     @mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@@ -4108,22 +4112,30 @@ class TestSetInstanceHostAndNode(BaseTestCase):
     @mock.patch('nova.objects.Instance.save')
     def test_set_instance_host_and_node(self, save_mock):
         inst = objects.Instance()
-        self.rt._set_instance_host_and_node(inst, _NODENAME)
+        self.rt._set_instance_host_and_node(inst, _COMPUTE_NODE_FIXTURES[0])
         save_mock.assert_called_once_with()
         self.assertEqual(self.rt.host, inst.host)
         self.assertEqual(_NODENAME, inst.node)
+        self.assertEqual(1, inst.compute_id)
         self.assertEqual(self.rt.host, inst.launched_on)

     @mock.patch('nova.objects.Instance.save')
     def test_unset_instance_host_and_node(self, save_mock):
         inst = objects.Instance()
-        self.rt._set_instance_host_and_node(inst, _NODENAME)
+        self.rt._set_instance_host_and_node(inst, _COMPUTE_NODE_FIXTURES[0])
         self.rt._unset_instance_host_and_node(inst)
         self.assertEqual(2, save_mock.call_count)
         self.assertIsNone(inst.host)
         self.assertIsNone(inst.node)
         self.assertEqual(self.rt.host, inst.launched_on)

+    def test_get_node_by_name(self):
+        self.assertRaises(exc.ComputeHostNotFound,
+                          self.rt.get_node_by_name, 'foo')
+        self.assertRaises(exc.ComputeHostNotFound,
+                          self.rt.get_node_by_name,
+                          _COMPUTE_NODE_FIXTURES[0].hypervisor_hostname)
+

 def _update_compute_node(node, **kwargs):
     for key, value in kwargs.items():

@@ -646,7 +646,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):

         self.compute.unshelve_instance(
             self.context, instance, image=None,
-            filter_properties={}, node='fakenode2', request_spec=request_spec,
+            filter_properties={}, node='fakenode1', request_spec=request_spec,
             accel_uuids=[])

         mock_update_pci.assert_called_once_with(
@@ -700,7 +700,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
         self.assertRaises(test.TestingException,
             self.compute.unshelve_instance, self.context, instance,
             image=shelved_image, filter_properties={},
-            node='fakenode2', request_spec=fake_spec, accel_uuids=[])
+            node='fakenode1', request_spec=fake_spec, accel_uuids=[])
         self.assertEqual(instance.image_ref, initial_image_ref)

     @mock.patch.object(objects.InstanceList, 'get_by_filters')

@@ -302,6 +302,77 @@ class _TestInstanceObject(object):

         self.assertFalse(mock_log.called)

+    def test_save_updates_host_not_compute_id(self):
+        instance = objects.Instance(self.context, uuid=uuids.instance,
+                                    user_id=self.context.user_id,
+                                    project_id=self.context.project_id)
+        # Instance create() without node or compute_id is okay
+        instance.create()
+
+        # Try to update node without compute_id and make sure save() fails
+        instance.node = 'foo'
+        self.assertRaises(exception.ObjectActionError, instance.save)
+
+        # If we update both, then save() should succeed
+        instance.compute_id = 1
+        instance.save()
+
+    # NOTE(danms): This ensures that older object versions are not held to the
+    # new standard of requiring compute_id. If we drop support for Instance
+    # <=2.7 we can remove this test.
+    def test_save_updates_host_not_compute_id_compat(self):
+        instance = objects.Instance(self.context, uuid=uuids.instance,
+                                    user_id=self.context.user_id,
+                                    project_id=self.context.project_id,
+                                    numa_topology=None)
+        # Instance create() without node or compute_id is okay
+        instance.create()
+
+        # Backlevel this to before the requirement
+        manifest = ovo_base.obj_tree_get_versions('Instance')
+        instance = objects.Instance.obj_from_primitive(
+            instance.obj_to_primitive(target_version='2.7',
+                                      version_manifest=manifest))
+        instance._context = self.context
+
+        # Try to update node without compute_id and make sure save() does not
+        # fail for this older version
+        instance.node = 'foo'
+        instance.save()
+
+    def test_create_with_host_not_compute_id(self):
+        instance = objects.Instance(self.context, uuid=uuids.instance,
+                                    user_id=self.context.user_id,
+                                    project_id=self.context.project_id)
+        # Instance create() with node but not compute_id should fail
+        instance.node = 'foo'
+        self.assertRaises(exception.ObjectActionError, instance.create)
+
+        # If we create with both, then create() should succeed
+        instance.compute_id = 1
+        instance.create()
+
+    # NOTE(danms): This ensures that older object versions are not held to the
+    # new standard of requiring compute_id. If we drop support for Instance
+    # <=2.7 we can remove this test.
+    def test_create_with_host_not_compute_id_compat(self):
+        instance = objects.Instance(self.context, uuid=uuids.instance,
+                                    user_id=self.context.user_id,
+                                    project_id=self.context.project_id,
+                                    numa_topology=None)
+
+        # Backlevel this to before the requirement
+        manifest = ovo_base.obj_tree_get_versions('Instance')
+        instance = objects.Instance.obj_from_primitive(
+            instance.obj_to_primitive(target_version='2.7',
+                                      version_manifest=manifest))
+
+        # Try to create this object without compute_id and make sure create()
+        # does not fail for this older version
+        instance._context = self.context
+        instance.node = 'foo'
+        instance.create()
+
     @mock.patch.object(db, 'instance_get')
     def test_get_by_id(self, mock_get):
         mock_get.return_value = self.fake_instance

@@ -1073,7 +1073,7 @@ object_data = {
     'IDEDeviceBus': '1.0-29d4c9f27ac44197f01b6ac1b7e16502',
     'ImageMeta': '1.8-642d1b2eb3e880a367f37d72dd76162d',
     'ImageMetaProps': '1.34-29b3a6b7fe703f36bfd240d914f16c21',
-    'Instance': '2.7-d187aec68cad2e4d8b8a03a68e4739ce',
+    'Instance': '2.8-2727dba5e4a078e6cc848c1f94f7eb24',
     'InstanceAction': '1.2-9a5abc87fdd3af46f45731960651efb5',
     'InstanceActionEvent': '1.4-5b1f361bd81989f8bb2c20bb7e8a4cb4',
     'InstanceActionEventList': '1.1-13d92fb953030cdbfee56481756e02be',

@@ -79,6 +79,7 @@ class NotificationsTestCase(test.TestCase):
                                 display_name='test_instance',
                                 hostname='test_instance_hostname',
                                 node='test_instance_node',
+                                compute_id=123,
                                 system_metadata={})
         inst._context = self.context
         if params:

@@ -345,6 +345,7 @@ class TestMachineTypeUtilsListUnset(test.NoDBTestCase):
             context=cctxt,
             host=node.host,
             node=node.hypervisor_hostname,
+            compute_id=node.id,
             uuid=uuidutils.generate_uuid())
         inst.create()