Release PCI devices on drop_move_claim()
On cold migration, drop_move_claim() is called in the confirm stage on
the source node. Since the migration is being tracked by the resource
tracker on the destination node, the source node has the instance in
its tracked_instances. In that case the PCI devices were only freed on
the next periodic audit. PCI resources such as PCI passthrough devices
are limited in number and should be freed right away.

This patch fixes drop_move_claim() to also free PCI devices when an
instance is in self.tracked_instances.

Co-Authored-By: Steven Webster <steven.webster@windriver.com>
Change-Id: Ie3392f80dfd2650048519c571ffaa11c025ad048
Closes-Bug: #1641750
commit 3a4909ae7e
parent a404681dcc
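To make the fix concrete, here is a minimal, self-contained sketch of the
bookkeeping involved (toy classes, not Nova's real API): before this patch,
only the tracked_migrations branch released PCI devices, so the source node
of a cold migration, whose instance sits in tracked_instances, kept its
devices until the next periodic audit.

# Toy model of the fix; class and attribute names are illustrative only.
class ToyPciTracker:
    def __init__(self):
        self.free_devices = set()

    def free_device(self, dev):
        self.free_devices.add(dev)


class ToyResourceTracker:
    def __init__(self):
        self.pci_tracker = ToyPciTracker()
        self.tracked_migrations = {}
        self.tracked_instances = {}

    def drop_move_claim(self, uuid, pci_devices):
        if uuid in self.tracked_migrations:
            # destination node: this branch always freed devices
            self.tracked_migrations.pop(uuid)
            for dev in pci_devices:
                self.pci_tracker.free_device(dev)
        elif uuid in self.tracked_instances:
            # source node: before the patch this branch skipped the
            # free_device() calls, leaking devices until the audit
            self.tracked_instances.pop(uuid)
            for dev in pci_devices:
                self.pci_tracker.free_device(dev)


rt = ToyResourceTracker()
rt.tracked_instances['fake-uuid'] = object()       # source-node bookkeeping
rt.drop_move_claim('fake-uuid', ['0000:81:00.1'])
assert '0000:81:00.1' in rt.pci_tracker.free_devices  # freed at confirm time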
@@ -364,10 +364,23 @@ class ResourceTracker(object):
 
         self._update(context.elevated(), self.compute_nodes[nodename])
 
+    def _drop_pci_devices(self, instance, nodename, prefix):
+        if self.pci_tracker:
+            # free old/new allocated pci devices
+            pci_devices = self._get_migration_context_resource(
+                'pci_devices', instance, prefix=prefix)
+            if pci_devices:
+                for pci_device in pci_devices:
+                    self.pci_tracker.free_device(pci_device, instance)
+
+                dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
+                self.compute_nodes[nodename].pci_device_pools = dev_pools_obj
+
     @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
     def drop_move_claim(self, context, instance, nodename,
                         instance_type=None, prefix='new_'):
-        """Remove usage for an incoming/outgoing migration."""
+        # Remove usage for an incoming/outgoing migration on the destination
+        # node.
         if instance['uuid'] in self.tracked_migrations:
             migration = self.tracked_migrations.pop(instance['uuid'])
 
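The new helper looks devices up on the instance's migration context via
_get_migration_context_resource() with an 'old_'/'new_' prefix. A rough,
self-contained illustration of that prefix convention follows;
SimpleNamespace stands in for objects.MigrationContext, whose
new_pci_devices field the test further down populates, and
pick_pci_devices is an invented name for the sketch.

from types import SimpleNamespace

def pick_pci_devices(migration_context, prefix='new_'):
    # 'new_' selects devices claimed for the move; 'old_' selects the
    # devices held before it (assumed semantics, matching the field
    # names on the migration context)
    return getattr(migration_context, prefix + 'pci_devices', None)

mc = SimpleNamespace(old_pci_devices=None, new_pci_devices=['0000:81:00.1'])
assert pick_pci_devices(mc, 'new_') == ['0000:81:00.1']
assert pick_pci_devices(mc, 'old_') is None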
@@ -381,17 +394,22 @@ class ResourceTracker(object):
                     'numa_topology', instance, prefix=prefix)
                 usage = self._get_usage_dict(
                         instance_type, numa_topology=numa_topology)
-                if self.pci_tracker:
-                    # free old/new allocated pci devices
-                    pci_devices = self._get_migration_context_resource(
-                        'pci_devices', instance, prefix=prefix)
-                    if pci_devices:
-                        for pci_device in pci_devices:
-                            self.pci_tracker.free_device(pci_device, instance)
+                self._drop_pci_devices(instance, nodename, prefix)
                 self._update_usage(usage, nodename, sign=-1)
 
                 ctxt = context.elevated()
                 self._update(ctxt, self.compute_nodes[nodename])
+        # Remove usage for an instance that is not tracked in migrations (such
+        # as on the source node after a migration).
+        # NOTE(lbeliveau): On resize on the same node, the instance is
+        # included in both tracked_migrations and tracked_instances.
+        elif (instance['uuid'] in self.tracked_instances):
+            self.tracked_instances.pop(instance['uuid'])
+            self._drop_pci_devices(instance, nodename, prefix)
+            # TODO(lbeliveau): Validate if numa needs the same treatment.
+
+            ctxt = context.elevated()
+            self._update(ctxt, self.compute_nodes[nodename])
 
     @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
     def update_usage(self, context, instance, nodename):
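Note also that after freeing, the helper refreshes the compute node's
pci_device_pools from the PCI tracker stats, so the released capacity is
reported immediately rather than after the next audit. A toy illustration
of that summarization step (function and field names invented for the
sketch; the real code uses pci_tracker.stats.to_device_pools_obj()):

from collections import Counter

def to_device_pools(free_devices):
    # summarize free devices per product id, the way a pool report
    # exposes per-type capacity to the scheduler
    return Counter(dev['product_id'] for dev in free_devices)

free_devices = [{'product_id': '10fb'}]       # already free
free_devices.append({'product_id': '10fb'})   # just released on confirm
assert to_device_pools(free_devices)['10fb'] == 2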
@@ -5481,8 +5481,11 @@ class ComputeTestCase(BaseTestCase):
             mock.patch.object(self.compute.network_api,
                               'migrate_instance_start'),
             mock.patch.object(self.rt.pci_tracker,
-                              'free_device')
-        ) as (mock_setup, mock_migrate, mock_pci_free_device):
+                              'free_device'),
+            mock.patch.object(self.rt.pci_tracker.stats, 'to_device_pools_obj',
+                              return_value=objects.PciDevicePoolList())
+        ) as (mock_setup, mock_migrate, mock_pci_free_device,
+              mock_to_device_pools_obj):
             method(self.context, instance=instance,
                    migration=migration, reservations=[])
             mock_pci_free_device.assert_called_once_with(
@@ -1764,7 +1764,7 @@ class TestResize(BaseTestCase):
         # PCI requests defined directly above.
         pci_req_mock.return_value = objects.InstancePCIRequests(requests=[])
 
-        # not using mock.sentinel.ctx because resize_claim calls #elevated
+        # not using mock.sentinel.ctx because resize_claim calls elevated
         ctx = mock.MagicMock()
 
         with test.nested(
@@ -1788,6 +1788,41 @@ class TestResize(BaseTestCase):
         self.assertEqual(request, pci_req_mock.return_value.requests[0])
         alloc_mock.assert_called_once_with(instance)
 
+    def test_drop_move_claim_on_revert(self):
+        self._setup_rt()
+        cn = _COMPUTE_NODE_FIXTURES[0].obj_clone()
+        self.rt.compute_nodes[_NODENAME] = cn
+
+        # TODO(jaypipes): Remove once the PCI tracker is always created
+        # upon the resource tracker being initialized...
+        self.rt.pci_tracker = pci_manager.PciDevTracker(mock.sentinel.ctx)
+
+        pci_dev = pci_device.PciDevice.create(
+            None, fake_pci_device.dev_dict)
+        pci_devs = [pci_dev]
+
+        instance = _INSTANCE_FIXTURES[0].obj_clone()
+        instance.task_state = task_states.RESIZE_MIGRATING
+        instance.new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2]
+        instance.migration_context = objects.MigrationContext()
+        instance.migration_context.new_pci_devices = objects.PciDeviceList(
+            objects=pci_devs)
+
+        self.rt.tracked_instances = {
+            instance.uuid: obj_base.obj_to_primitive(instance)
+        }
+
+        # not using mock.sentinel.ctx because drop_move_claim calls elevated
+        ctx = mock.MagicMock()
+
+        with test.nested(
+            mock.patch.object(self.rt, '_update'),
+            mock.patch.object(self.rt.pci_tracker, 'free_device')
+        ) as (update_mock, mock_pci_free_device):
+            self.rt.drop_move_claim(ctx, instance, _NODENAME)
+            mock_pci_free_device.assert_called_once_with(
+                pci_dev, mock.ANY)
+
     @mock.patch('nova.objects.InstancePCIRequests.get_by_instance',
                 return_value=objects.InstancePCIRequests(requests=[]))
     @mock.patch('nova.objects.PciDeviceList.get_by_compute_node',