compute: Skip driver detach calls for non local instances

Only call the virt driver to detach a volume if the instance is currently
associated with the local compute host. This avoids potential virt
driver and volume backend issues when attempting to disconnect volumes
that were never connected from the current host.

NOTE(lyarwood): Test conflict caused by the mox-to-mock migration in
I8d4d8a. As a result, test_rebuild_on_remote_host_with_volumes remains a
mox-based test.

Conflicts:
	nova/tests/unit/compute/test_compute.py

Closes-Bug: #1583284
Change-Id: I36b8532554d75b24130f456a35acd0be838b62d6
(cherry picked from commit fdf3328107)
Author: Lee Yarwood
Date: 2016-05-18 17:11:16 +01:00
parent 7cc81c0e1d
commit 76f1d60be8
3 changed files with 31 additions and 37 deletions
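
For orientation before the diff: the guard this commit adds to
ComputeManager._detach_volume reduces to the branch logic below. This is a
simplified, self-contained sketch distilled from the hunks that follow, not
the verbatim Nova code; should_driver_detach and local_host are names
invented here (local_host stands in for CONF.host).

    def should_driver_detach(local_host, instance_host, destroy_bdm):
        """Mirror the three branches added to _detach_volume."""
        if local_host == instance_host:
            # Safe: the volume was connected from this compute host.
            return 'detach'
        if not destroy_bdm:
            # Remote rebuild/evacuate: leave the connection alone.
            return 'skip'
        # Destroying the BDM of an instance registered to a remote host.
        return 'error'

    assert should_driver_detach('compute1', 'compute1', True) == 'detach'
    assert should_driver_detach('compute1', 'compute2', False) == 'skip'
    assert should_driver_detach('compute1', 'compute2', True) == 'error'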

--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py

@@ -4766,7 +4766,7 @@ class ComputeManager(manager.Manager):
         self._notify_about_instance_usage(
             context, instance, "volume.attach", extra_usage_info=info)
 
-    def _driver_detach_volume(self, context, instance, bdm):
+    def _driver_detach_volume(self, context, instance, bdm, connection_info):
         """Do the actual driver detach using block device mapping."""
         mp = bdm.device_name
         volume_id = bdm.volume_id
@@ -4775,11 +4775,6 @@ class ComputeManager(manager.Manager):
                  {'volume_id': volume_id, 'mp': mp},
                  context=context, instance=instance)
-        connection_info = jsonutils.loads(bdm.connection_info)
-        # NOTE(vish): We currently don't use the serial when disconnecting,
-        # but added for completeness in case we ever do.
-        if connection_info and 'serial' not in connection_info:
-            connection_info['serial'] = volume_id
         try:
             if not self.driver.instance_exists(instance):
                 LOG.warning(_LW('Detaching volume from unknown instance'),
@@ -4805,8 +4800,6 @@ class ComputeManager(manager.Manager):
                                   context=context, instance=instance)
                 self.volume_api.roll_detaching(context, volume_id)
 
-        return connection_info
-
     def _detach_volume(self, context, volume_id, instance, destroy_bdm=True,
                        attachment_id=None):
         """Detach a volume from an instance.
@@ -4851,8 +4844,21 @@ class ComputeManager(manager.Manager):
             self.notifier.info(context, 'volume.usage',
                                compute_utils.usage_volume_info(vol_usage))
 
-        connection_info = self._driver_detach_volume(context, instance, bdm)
+        connection_info = jsonutils.loads(bdm.connection_info)
         connector = self.driver.get_volume_connector(instance)
+
+        if CONF.host == instance.host:
+            # Only attempt to detach and disconnect from the volume if the
+            # instance is currently associated with the local compute host.
+            self._driver_detach_volume(context, instance, bdm, connection_info)
+        elif not destroy_bdm:
+            LOG.debug("Skipping _driver_detach_volume during remote rebuild.",
+                      instance=instance)
+        elif destroy_bdm:
+            LOG.error(_LE("Unable to call for a driver detach of volume "
+                          "%(vol_id)s due to the instance being registered to "
+                          "the remote host %(inst_host)s."),
+                      {'vol_id': volume_id, 'inst_host': instance.host},
+                      instance=instance)
 
         if connection_info and not destroy_bdm and (
            connector.get('host') != instance.host):
@@ -5041,7 +5047,8 @@ class ComputeManager(manager.Manager):
         try:
             bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
                     context, volume_id, instance.uuid)
-            self._driver_detach_volume(context, instance, bdm)
+            connection_info = jsonutils.loads(bdm.connection_info)
+            self._driver_detach_volume(context, instance, bdm, connection_info)
             connector = self.driver.get_volume_connector(instance)
             self.volume_api.terminate_connection(context, volume_id, connector)
         except exception.NotFound:
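
Note the second half of the refactor above: _driver_detach_volume no longer
deserializes (or returns) connection_info itself; every caller now loads it
from the BDM and passes the dict down. A minimal sketch of that calling
pattern, using oslo's jsonutils as the diff does (FakeBDM is invented for
the example):

    from oslo_serialization import jsonutils

    class FakeBDM(object):
        """Stand-in for objects.BlockDeviceMapping; only the field we need."""
        connection_info = jsonutils.dumps({'driver_volume_type': 'iscsi'})

    bdm = FakeBDM()
    # The caller deserializes once and hands the dict to the detach helper,
    # instead of the helper re-loading bdm.connection_info internally.
    connection_info = jsonutils.loads(bdm.connection_info)
    assert connection_info['driver_volume_type'] == 'iscsi'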

--- a/nova/tests/unit/compute/test_compute.py
+++ b/nova/tests/unit/compute/test_compute.py

@@ -363,7 +363,8 @@ class ComputeVolumeTestCase(BaseTestCase):
         }
         self.fake_volume = fake_block_device.FakeDbBlockDeviceDict(
                 {'source_type': 'volume', 'destination_type': 'volume',
-                 'volume_id': uuids.volume_id, 'device_name': '/dev/vdb'})
+                 'volume_id': uuids.volume_id, 'device_name': '/dev/vdb',
+                 'connection_info': jsonutils.dumps({})})
         self.instance_object = objects.Instance._from_db_object(
                 self.context, objects.Instance(),
                 fake_instance.fake_db_instance())
@@ -440,7 +441,7 @@ class ComputeVolumeTestCase(BaseTestCase):
                 self.context, 'fake', instance, 'fake_id')
             mock_internal_detach.assert_called_once_with(self.context,
                                                          instance,
-                                                         fake_bdm)
+                                                         fake_bdm, {})
             self.assertTrue(mock_destroy.called)
 
     def test_await_block_device_created_too_slow(self):
@@ -11552,8 +11553,10 @@ class EvacuateHostTestCase(BaseTestCase):
         instance = db.instance_get(self.context, self.inst.id)
         self.assertEqual(instance['host'], 'fake_host_2')
 
-    def test_rebuild_on_host_with_volumes(self):
-        """Confirm evacuate scenario reconnects volumes."""
+    def test_rebuild_on_remote_host_with_volumes(self):
+        """Confirm that the evacuate scenario does not attempt a driver detach
+        when rebuilding an instance with volumes on a remote host
+        """
         values = {'instance_uuid': self.inst.uuid,
                   'source_type': 'volume',
                   'device_name': '/dev/vdc',
@@ -11574,11 +11577,6 @@ class EvacuateHostTestCase(BaseTestCase):
             result["detached"] = volume["id"] == 'fake_volume_id'
         self.stubs.Set(cinder.API, "detach", fake_detach)
 
-        self.mox.StubOutWithMock(self.compute, '_driver_detach_volume')
-        self.compute._driver_detach_volume(mox.IsA(self.context),
-                                           mox.IsA(instance_obj.Instance),
-                                           mox.IsA(objects.BlockDeviceMapping))
-
         def fake_terminate_connection(self, context, volume, connector):
             return {}
         self.stubs.Set(cinder.API, "terminate_connection",
@@ -11598,6 +11596,7 @@ class EvacuateHostTestCase(BaseTestCase):
         self.mox.ReplayAll()
         self._rebuild()
+        self.mox.VerifyAll()
 
         # cleanup
         bdms = db.block_device_mapping_get_all_by_instance(self.context,
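
As the NOTE in the commit message says, the test above stays on mox, while
the mock-based tests in test_compute_mgr.py (next file) can assert the
negative directly via assert_not_called. A standalone sketch of that mock
idiom (detach_volume here is a hypothetical stand-in for the code path
under test):

    import mock  # matches these tests; unittest.mock works the same way

    compute = mock.Mock()

    def detach_volume(compute, local_host, instance_host):
        # Only driver-detach when the instance lives on this host.
        if local_host == instance_host:
            compute._driver_detach_volume()

    detach_volume(compute, 'compute1', 'compute2')
    # Fails loudly if the remote-host path called the driver detach.
    compute._driver_detach_volume.assert_not_called()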

--- a/nova/tests/unit/compute/test_compute_mgr.py
+++ b/nova/tests/unit/compute/test_compute_mgr.py

@@ -2170,6 +2170,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
     @mock.patch('nova.objects.Instance._from_db_object')
     def test_remove_volume_connection(self, inst_from_db, detach, bdm_get):
         bdm = mock.sentinel.bdm
+        bdm.connection_info = jsonutils.dumps({})
         inst_obj = mock.Mock()
         inst_obj.uuid = 'uuid'
         bdm_get.return_value = bdm
@@ -2177,7 +2178,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
         with mock.patch.object(self.compute, 'volume_api'):
             self.compute.remove_volume_connection(self.context, 'vol',
                                                   inst_obj)
-            detach.assert_called_once_with(self.context, inst_obj, bdm)
+            detach.assert_called_once_with(self.context, inst_obj, bdm, {})
             bdm_get.assert_called_once_with(self.context, 'vol', 'uuid')
 
     def test_detach_volume(self):
@@ -2195,10 +2196,12 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
         volume_id = uuids.volume
         inst_obj = mock.Mock()
         inst_obj.uuid = uuids.instance
+        inst_obj.host = CONF.host
         attachment_id = uuids.attachment
 
         bdm = mock.MagicMock(spec=objects.BlockDeviceMapping)
         bdm.device_name = 'vdb'
+        bdm.connection_info = jsonutils.dumps({})
         bdm_get.return_value = bdm
         detach.return_value = {}
@@ -2213,7 +2216,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
                 destroy_bdm=destroy_bdm,
                 attachment_id=attachment_id)
 
-            detach.assert_called_once_with(self.context, inst_obj, bdm)
+            detach.assert_called_once_with(self.context, inst_obj, bdm, {})
             driver.get_volume_connector.assert_called_once_with(inst_obj)
             volume_api.terminate_connection.assert_called_once_with(
                 self.context, volume_id, connector_sentinel)
@@ -2289,6 +2292,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
                 instance,
                 destroy_bdm=False)
 
+            driver._driver_detach_volume.assert_not_called()
             driver.get_volume_connector.assert_called_once_with(instance)
             volume_api.terminate_connection.assert_called_once_with(
                 self.context, volume_id, expected_connector)
@@ -2301,22 +2305,6 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
                 extra_usage_info={'volume_id': volume_id}
             )
 
-    def test__driver_detach_volume_return(self):
-        """_driver_detach_volume returns the connection_info from loads()."""
-        with mock.patch.object(jsonutils, 'loads') as loads:
-            conn_info_str = 'test-expected-loads-param'
-            bdm = mock.Mock()
-            bdm.connection_info = conn_info_str
-            loads.return_value = {'test-loads-key': 'test loads return value'}
-
-            instance = fake_instance.fake_instance_obj(self.context)
-            ret = self.compute._driver_detach_volume(self.context,
-                                                     instance,
-                                                     bdm)
-
-            self.assertEqual(loads.return_value, ret)
-            loads.assert_called_once_with(conn_info_str)
-
     def _test_rescue(self, clean_shutdown=True):
         instance = fake_instance.fake_instance_obj(
             self.context, vm_state=vm_states.ACTIVE)