Pass attachment_id to Cinder when detaching a volume

Cinder needs the attachment_id to properly identify which attachment
of a volume to detach. This patch adapts the Cinder driver to pass
the required information. The attachment_id is necessary for volumes
that enable multiple attachments in order to terminate the connection
properly. The attachment_id is retrieved in the API layer where
available and sent through RPC, the RPC version is bumped to 4.7.

The translation functions are also modified to retrieve all necessary
information from the volume info coming from Cinder, including multiple
attachments. The Nova API still returns the volume info in the same
format as before.

Co-Authored-By: Ildiko Vancsa <ildiko.vancsa@ericsson.com>

Partially-implements: blueprint multi-attach-volume
Change-Id: I3cdc49924acbdd21d1e6678a3bb4cf7de7f1db1a
This commit is contained in:
Krisztian Gacsal 2015-06-18 15:08:20 +02:00 committed by Ildiko Vancsa
parent 806113e4f4
commit cb3a6bfe1a
18 changed files with 340 additions and 86 deletions

View File

@ -59,9 +59,21 @@ def _translate_volume_summary_view(context, vol):
d['createdAt'] = vol['created_at']
if vol['attach_status'] == 'attached':
# NOTE(ildikov): The attachments field in the volume info that
# Cinder sends is converted to an OrderedDict with the
# instance_uuid as key to make it easier for the multiattach
# feature to check the required information. Multiattach will
# be enabled in the Nova API in Newton.
# The format looks like the following:
# attachments = {'instance_uuid': {
# 'attachment_id': 'attachment_uuid',
# 'mountpoint': '/dev/sda'
# }
# }
attachment = vol['attachments'].items()[0]
d['attachments'] = [_translate_attachment_detail_view(vol['id'],
vol['instance_uuid'],
vol['mountpoint'])]
attachment[0],
attachment[1].get('mountpoint'))]
else:
d['attachments'] = [{}]

View File

@ -55,9 +55,21 @@ def _translate_volume_summary_view(context, vol):
d['createdAt'] = vol['created_at']
if vol['attach_status'] == 'attached':
# NOTE(ildikov): The attachments field in the volume info that
# Cinder sends is converted to an OrderedDict with the
# instance_uuid as key to make it easier for the multiattach
# feature to check the required information. Multiattach will
# be enabled in the Nova API in Newton.
# The format looks like the following:
# attachments = {'instance_uuid': {
# 'attachment_id': 'attachment_uuid',
# 'mountpoint': '/dev/sda'
# }
# }
attachment = vol['attachments'].items()[0]
d['attachments'] = [_translate_attachment_detail_view(vol['id'],
vol['instance_uuid'],
vol['mountpoint'])]
attachment[0],
attachment[1].get('mountpoint'))]
else:
d['attachments'] = [{}]

View File

@ -1764,7 +1764,8 @@ class API(base.Base):
self.volume_api.terminate_connection(context,
bdm.volume_id,
connector)
self.volume_api.detach(elevated, bdm.volume_id)
self.volume_api.detach(elevated, bdm.volume_id,
instance.uuid)
if bdm.delete_on_termination:
self.volume_api.delete(context, bdm.volume_id)
except Exception as exc:
@ -3016,10 +3017,14 @@ class API(base.Base):
This method is separated to make it easier for cells version
to override.
"""
self.volume_api.check_detach(context, volume)
self.volume_api.check_detach(context, volume, instance=instance)
self.volume_api.begin_detaching(context, volume['id'])
attachments = volume.get('attachments', {})
attachment_id = None
if attachments and instance.uuid in attachments:
attachment_id = attachments[instance.uuid]['attachment_id']
self.compute_rpcapi.detach_volume(context, instance=instance,
volume_id=volume['id'])
volume_id=volume['id'], attachment_id=attachment_id)
@wrap_check_policy
@check_instance_lock
@ -3028,13 +3033,7 @@ class API(base.Base):
vm_states.SOFT_DELETED])
def detach_volume(self, context, instance, volume):
"""Detach a volume from an instance."""
if volume['attach_status'] == 'detached':
msg = _("Volume must be attached in order to detach.")
raise exception.InvalidVolume(reason=msg)
# The caller likely got the instance from volume['instance_uuid']
# in the first place, but let's sanity check.
if volume['instance_uuid'] != instance.uuid:
raise exception.VolumeUnattached(volume_id=volume['id'])
self._detach_volume(context, instance, volume)
@wrap_check_policy
@ -3046,9 +3045,9 @@ class API(base.Base):
"""Swap volume attached to an instance."""
if old_volume['attach_status'] == 'detached':
raise exception.VolumeUnattached(volume_id=old_volume['id'])
# The caller likely got the instance from volume['instance_uuid']
# The caller likely got the instance from volume['attachments']
# in the first place, but let's sanity check.
if old_volume['instance_uuid'] != instance.uuid:
if not old_volume.get('attachments', {}).get(instance.uuid):
msg = _("Old volume is attached to a different instance.")
raise exception.InvalidVolume(reason=msg)
if new_volume['attach_status'] == 'attached':

View File

@ -429,7 +429,7 @@ class ComputeCellsAPI(compute_api.API):
@check_instance_cell
def _detach_volume(self, context, instance, volume):
"""Detach a volume from an instance."""
self.volume_api.check_detach(context, volume)
self.volume_api.check_detach(context, volume, instance=instance)
self._cast_to_cells(context, instance, 'detach_volume',
volume)

View File

@ -674,7 +674,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
class ComputeManager(manager.Manager):
"""Manages the running instances from creation to destruction."""
target = messaging.Target(version='4.6')
target = messaging.Target(version='4.7')
# How long to wait in seconds before re-issuing a shutdown
# signal to an instance during power off. The overall
@ -2326,7 +2326,7 @@ class ComputeManager(manager.Manager):
self.volume_api.terminate_connection(context,
bdm.volume_id,
connector)
self.volume_api.detach(context, bdm.volume_id)
self.volume_api.detach(context, bdm.volume_id, instance.uuid)
except exception.DiskNotFound as exc:
LOG.debug('Ignoring DiskNotFound: %s', exc,
instance=instance)
@ -4737,7 +4737,8 @@ class ComputeManager(manager.Manager):
context=context, instance=instance)
self.volume_api.roll_detaching(context, volume_id)
def _detach_volume(self, context, volume_id, instance, destroy_bdm=True):
def _detach_volume(self, context, volume_id, instance, destroy_bdm=True,
attachment_id=None):
"""Detach a volume from an instance.
:param context: security context
@ -4790,14 +4791,16 @@ class ComputeManager(manager.Manager):
info = dict(volume_id=volume_id)
self._notify_about_instance_usage(
context, instance, "volume.detach", extra_usage_info=info)
self.volume_api.detach(context.elevated(), volume_id)
self.volume_api.detach(context.elevated(), volume_id, instance.uuid,
attachment_id)
@wrap_exception()
@wrap_instance_fault
def detach_volume(self, context, volume_id, instance):
def detach_volume(self, context, volume_id, instance, attachment_id=None):
"""Detach a volume from an instance."""
self._detach_volume(context, volume_id, instance)
self._detach_volume(context, volume_id, instance,
attachment_id=attachment_id)
def _init_volume_connection(self, context, new_volume_id,
old_volume_id, connector, instance, bdm):

View File

@ -319,6 +319,7 @@ class ComputeAPI(object):
* ... - Remove refresh_security_group_members()
* ... - Remove refresh_security_group_rules()
* 4.6 - Add trigger_crash_dump()
* 4.7 - Add attachment_id argument to detach_volume()
'''
VERSION_ALIASES = {
@ -467,12 +468,16 @@ class ComputeAPI(object):
cctxt.cast(ctxt, 'detach_interface',
instance=instance, port_id=port_id)
def detach_volume(self, ctxt, instance, volume_id):
version = '4.0'
def detach_volume(self, ctxt, instance, volume_id, attachment_id=None):
extra = {'attachment_id': attachment_id}
version = '4.7'
if not self.client.can_send_version(version):
version = '4.0'
extra.pop('attachment_id')
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'detach_volume',
instance=instance, volume_id=volume_id)
instance=instance, volume_id=volume_id, **extra)
def finish_resize(self, ctxt, instance, migration, image, disk_info,
host, reservations=None):

View File

@ -28,7 +28,7 @@ LOG = logging.getLogger(__name__)
# NOTE(danms): This is the global service version counter
SERVICE_VERSION = 4
SERVICE_VERSION = 5
# NOTE(danms): This is our SERVICE_VERSION history. The idea is that any
@ -60,6 +60,8 @@ SERVICE_VERSION_HISTORY = (
{'compute_rpc': '4.6'},
# Version 4: Add PciDevice.parent_addr (data migration needed)
{'compute_rpc': '4.6'},
# Version 5: Add attachment_id kwarg to detach_volume()
{'compute_rpc': '4.7'},
)

View File

@ -111,8 +111,6 @@ class VolumesSampleJsonTest(test_servers.ServersSampleBase):
'id': id,
'size': size,
'availability_zone': 'zone1:host1',
'instance_uuid': '3912f2b4-c5ba-4aec-9165-872876fe202e',
'mountpoint': '/',
'status': 'in-use',
'attach_status': 'attached',
'name': 'vol name',
@ -122,7 +120,14 @@ class VolumesSampleJsonTest(test_servers.ServersSampleBase):
'snapshot_id': None,
'volume_type_id': 'fakevoltype',
'volume_metadata': [],
'volume_type': {'name': 'Backup'}
'volume_type': {'name': 'Backup'},
'multiattach': False,
'attachments': {'3912f2b4-c5ba-4aec-9165-872876fe202e':
{'mountpoint': '/',
'attachment_id':
'a26887c6-c47b-4654-abb5-dfadf7d3f803'
}
}
}
return volume

View File

@ -60,7 +60,10 @@ def fake_get_instance(self, context, instance_id, want_objects=False,
def fake_get_volume(self, context, id):
return {'id': 'woot'}
return {'id': FAKE_UUID_A,
'status': 'available',
'attach_status': 'detached'
}
def fake_attach_volume(self, context, instance, volume_id, device):

View File

@ -581,8 +581,6 @@ def stub_volume(id, **kwargs):
'host': 'fakehost',
'size': 1,
'availability_zone': 'fakeaz',
'instance_uuid': 'fakeuuid',
'mountpoint': '/',
'status': 'fakestatus',
'attach_status': 'attached',
'name': 'vol name',
@ -592,7 +590,12 @@ def stub_volume(id, **kwargs):
'snapshot_id': None,
'volume_type_id': 'fakevoltype',
'volume_metadata': [],
'volume_type': {'name': 'vol_type_name'}}
'volume_type': {'name': 'vol_type_name'},
'multiattach': True,
'attachments': {'fakeuuid': {'mountpoint': '/'},
'fakeuuid2': {'mountpoint': '/dev/sdb'}
}
}
volume.update(kwargs)
return volume

View File

@ -367,7 +367,7 @@ class ComputeVolumeTestCase(BaseTestCase):
self.context, objects.Instance(),
fake_instance.fake_db_instance())
self.stubs.Set(self.compute.volume_api, 'get', lambda *a, **kw:
{'id': 'fake', 'size': 4,
{'id': uuids.volume_id, 'size': 4,
'attach_status': 'detached'})
self.stubs.Set(self.compute.driver, 'get_volume_connector',
lambda *a, **kw: None)
@ -436,7 +436,7 @@ class ComputeVolumeTestCase(BaseTestCase):
mock_get.return_value = fake_bdm
self.assertRaises(
test.TestingException, self.compute.detach_volume,
self.context, 'fake', instance)
self.context, 'fake', instance, 'fake_id')
mock_internal_detach.assert_called_once_with(self.context,
instance,
fake_bdm)
@ -748,6 +748,7 @@ class ComputeVolumeTestCase(BaseTestCase):
self.mox.StubOutWithMock(self.compute.driver, 'block_stats')
self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
self.mox.StubOutWithMock(self.compute.driver, 'get_all_volume_usage')
self.mox.StubOutWithMock(self.compute.driver, 'instance_exists')
# The following methods will be called
objects.BlockDeviceMapping.get_by_volume_and_instance(
@ -767,6 +768,8 @@ class ComputeVolumeTestCase(BaseTestCase):
'wr_bytes': 5,
'instance': instance}])
self.compute.driver.instance_exists(mox.IgnoreArg()).AndReturn(True)
self.mox.ReplayAll()
def fake_get_volume_encryption_metadata(self, context, volume_id):
@ -1081,10 +1084,19 @@ class ComputeVolumeTestCase(BaseTestCase):
)
for status, attach_status in status_values:
def fake_volume_get(self, ctxt, volume_id):
return {'id': volume_id,
'status': status,
'attach_status': attach_status}
if attach_status == 'attached':
def fake_volume_get(self, ctxt, volume_id):
return {'id': volume_id,
'status': status,
'attach_status': attach_status,
'multiattach': False,
'attachments': {}}
else:
def fake_volume_get(self, ctxt, volume_id):
return {'id': volume_id,
'status': status,
'attach_status': attach_status,
'multiattach': False}
self.stubs.Set(cinder.API, 'get', fake_volume_get)
self.assertRaises(exception.InvalidVolume,
self.compute_api._validate_bdm,
@ -1106,7 +1118,8 @@ class ComputeVolumeTestCase(BaseTestCase):
def fake_volume_get_ok(self, context, volume_id):
return {'id': volume_id,
'status': 'available',
'attach_status': 'detached'}
'attach_status': 'detached',
'multiattach': False}
self.stubs.Set(cinder.API, 'get', fake_volume_get_ok)
self.compute_api._validate_bdm(self.context, self.instance,
@ -1873,12 +1886,18 @@ class ComputeTestCase(BaseTestCase):
pass
def fake_volume_get(self, context, volume_id):
return {'id': volume_id}
return {'id': volume_id,
'attach_status': 'attached',
'attachments': {instance.uuid: {
'attachment_id': 'abc123'
}
}
}
def fake_terminate_connection(self, context, volume_id, connector):
pass
def fake_detach(self, context, volume_id):
def fake_detach(self, context, volume_id, instance_uuid):
pass
bdms = []
@ -9670,8 +9689,7 @@ class ComputeAPITestCase(BaseTestCase):
called = {}
instance = self._create_fake_instance_obj()
# Set attach_status to 'fake' as nothing is reading the value.
volume = {'id': 1, 'attach_status': 'fake',
'instance_uuid': instance['uuid']}
volume = {'id': 1, 'attach_status': 'fake'}
def fake_check_detach(*args, **kwargs):
called['fake_check_detach'] = True
@ -9701,7 +9719,7 @@ class ComputeAPITestCase(BaseTestCase):
'launched_at': timeutils.utcnow(),
'vm_state': vm_states.ACTIVE,
'task_state': None})
volume = {'id': 1, 'attach_status': 'detached'}
volume = {'id': 1, 'attach_status': 'detached', 'status': 'available'}
self.assertRaises(exception.InvalidVolume,
self.compute_api.detach_volume, self.context,
@ -9716,8 +9734,8 @@ class ComputeAPITestCase(BaseTestCase):
'launched_at': timeutils.utcnow(),
'vm_state': vm_states.ACTIVE,
'task_state': None})
volume = {'id': 1, 'attach_status': 'attached',
'instance_uuid': 'f7000000-0000-0000-0000-000000000002'}
volume = {'id': 1, 'attach_status': 'attached', 'status': 'in-use',
'attachments': {'fake_uuid': {'attachment_id': 'fakeid'}}}
self.assertRaises(exception.VolumeUnattached,
self.compute_api.detach_volume, self.context,
@ -9789,6 +9807,12 @@ class ComputeAPITestCase(BaseTestCase):
'connection_info': '{"test": "test"}'})
bdm = objects.BlockDeviceMapping(context=self.context, **fake_bdm)
# Stub out fake_volume_get so cinder api does not raise exception
# and manager gets to call bdm.destroy()
def fake_volume_get(self, context, volume_id):
return {'id': volume_id}
self.stub_out('nova.volume.cinder.API.get', fake_volume_get)
with test.nested(
mock.patch.object(self.compute.driver, 'detach_volume',
side_effect=exception.DiskNotFound('sdb')),
@ -9808,7 +9832,8 @@ class ComputeAPITestCase(BaseTestCase):
'fake-id',
'fake-connector')
mock_destroy.assert_called_once_with()
mock_detach.assert_called_once_with(mock.ANY, 'fake-id')
mock_detach.assert_called_once_with(mock.ANY, 'fake-id',
instance.uuid, None)
def test_terminate_with_volumes(self):
# Make sure that volumes get detached during instance termination.
@ -9831,7 +9856,7 @@ class ComputeAPITestCase(BaseTestCase):
# Stub out and record whether it gets detached
result = {"detached": False}
def fake_detach(self, context, volume_id_param):
def fake_detach(self, context, volume_id_param, instance_uuid):
result["detached"] = volume_id_param == volume_id
self.stubs.Set(cinder.API, "detach", fake_detach)
@ -9872,7 +9897,14 @@ class ComputeAPITestCase(BaseTestCase):
bdm_obj.create()
bdms.append(bdm_obj)
self.stubs.Set(self.compute, 'volume_api', mox.MockAnything())
self.stub_out('nova.volume.cinder.API.terminate_connection',
mox.MockAnything())
self.stub_out('nova.volume.cinder.API.detach', mox.MockAnything())
def fake_volume_get(self, context, volume_id):
return {'id': volume_id}
self.stub_out('nova.volume.cinder.API.get', fake_volume_get)
self.stubs.Set(self.compute, '_prep_block_device', mox.MockAnything())
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
@ -11409,7 +11441,7 @@ class EvacuateHostTestCase(BaseTestCase):
# Stub out and record whether it gets detached
result = {"detached": False}
def fake_detach(self, context, volume):
def fake_detach(self, context, volume, instance_uuid, attachment_id):
result["detached"] = volume["id"] == 'fake_volume_id'
self.stubs.Set(cinder.API, "detach", fake_detach)
@ -11425,7 +11457,8 @@ class EvacuateHostTestCase(BaseTestCase):
# make sure volumes attach, detach are called
self.mox.StubOutWithMock(self.compute.volume_api, 'detach')
self.compute.volume_api.detach(mox.IsA(self.context), mox.IgnoreArg())
self.compute.volume_api.detach(mox.IsA(self.context), mox.IgnoreArg(),
mox.IgnoreArg(), None)
self.mox.StubOutWithMock(self.compute, '_prep_block_device')
self.compute._prep_block_device(mox.IsA(self.context),

View File

@ -1796,16 +1796,21 @@ class _ComputeAPIUnitTestMixIn(object):
volumes[old_volume_id] = {'id': old_volume_id,
'display_name': 'old_volume',
'attach_status': 'attached',
'instance_uuid': uuids.vol_instance,
'size': 5,
'status': 'in-use'}
'status': 'in-use',
'multiattach': False,
'attachments': {uuids.vol_instance: {
'attachment_id': 'fakeid'
}
}
}
new_volume_id = uuidutils.generate_uuid()
volumes[new_volume_id] = {'id': new_volume_id,
'display_name': 'new_volume',
'attach_status': 'detached',
'instance_uuid': None,
'size': 5,
'status': 'available'}
'status': 'available',
'multiattach': False}
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.swap_volume, self.context, instance,
volumes[old_volume_id], volumes[new_volume_id])
@ -1822,13 +1827,15 @@ class _ComputeAPIUnitTestMixIn(object):
volumes[old_volume_id]['attach_status'] = 'attached'
# Should fail if old volume's instance_uuid is not that of the instance
volumes[old_volume_id]['instance_uuid'] = uuids.vol_instance_2
volumes[old_volume_id]['attachments'] = {uuids.vol_instance_2:
{'attachment_id': 'fakeid'}}
self.assertRaises(exception.InvalidVolume,
self.compute_api.swap_volume, self.context, instance,
volumes[old_volume_id], volumes[new_volume_id])
self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
self.assertEqual(volumes[new_volume_id]['status'], 'available')
volumes[old_volume_id]['instance_uuid'] = uuids.vol_instance
volumes[old_volume_id]['attachments'] = {uuids.vol_instance:
{'attachment_id': 'fakeid'}}
# Should fail if new volume is attached
volumes[new_volume_id]['attach_status'] = 'attached'

View File

@ -2201,9 +2201,10 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
'_notify_about_instance_usage')
def _test_detach_volume(self, notify_inst_usage, detach,
bdm_get, destroy_bdm=True):
volume_id = '123'
volume_id = uuids.volume
inst_obj = mock.Mock()
inst_obj.uuid = 'uuid'
inst_obj.uuid = uuids.instance
attachment_id = uuids.attachment
bdm = mock.MagicMock(spec=objects.BlockDeviceMapping)
bdm.device_name = 'vdb'
@ -2216,13 +2217,16 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
self.compute._detach_volume(self.context, volume_id,
inst_obj,
destroy_bdm=destroy_bdm)
destroy_bdm=destroy_bdm,
attachment_id=attachment_id)
detach.assert_called_once_with(self.context, inst_obj, bdm)
driver.get_volume_connector.assert_called_once_with(inst_obj)
volume_api.terminate_connection.assert_called_once_with(
self.context, volume_id, connector_sentinel)
volume_api.detach.assert_called_once_with(mock.ANY, volume_id)
volume_api.detach.assert_called_once_with(mock.ANY, volume_id,
inst_obj.uuid,
attachment_id)
notify_inst_usage.assert_called_once_with(
self.context, inst_obj, "volume.detach",
extra_usage_info={'volume_id': volume_id}

View File

@ -198,7 +198,31 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
def test_detach_volume(self):
self._test_compute_api('detach_volume', 'cast',
instance=self.fake_instance_obj, volume_id='id',
version='4.0')
attachment_id='fake_id', version='4.7')
def test_detach_volume_no_attachment_id(self):
ctxt = context.RequestContext('fake_user', 'fake_project')
instance = self.fake_instance_obj
rpcapi = compute_rpcapi.ComputeAPI()
cast_mock = mock.Mock()
cctxt_mock = mock.Mock(cast=cast_mock)
with test.nested(
mock.patch.object(rpcapi.client, 'can_send_version',
return_value=False),
mock.patch.object(rpcapi.client, 'prepare',
return_value=cctxt_mock)
) as (
can_send_mock, prepare_mock
):
rpcapi.detach_volume(ctxt, instance=instance,
volume_id='id', attachment_id='fake_id')
# assert our mocks were called as expected
can_send_mock.assert_called_once_with('4.7')
prepare_mock.assert_called_once_with(server=instance['host'],
version='4.0')
cast_mock.assert_called_once_with(ctxt, 'detach_volume',
instance=instance,
volume_id='id')
def test_finish_resize(self):
self._test_compute_api('finish_resize', 'cast',

View File

@ -66,7 +66,8 @@ class fake_volume(object):
'display_description': description,
'provider_location': 'fake-location',
'provider_auth': 'fake-auth',
'volume_type_id': 99
'volume_type_id': 99,
'multiattach': False
}
def get(self, key, default=None):
@ -193,31 +194,38 @@ class API(object):
msg = "Instance and volume not in same availability_zone"
raise exception.InvalidVolume(reason=msg)
def check_detach(self, context, volume):
def check_detach(self, context, volume, instance=None):
    """Fake pre-detach validation mirroring nova.volume.cinder.API.

    Raises InvalidVolume when the volume is not attached, and
    VolumeUnattached when *instance* is given but has no attachment
    recorded on the volume.
    """
    if volume['status'] == "available":
        raise exception.InvalidVolume(reason="already detached")
    if volume['attach_status'] == 'detached':
        raise exception.InvalidVolume(
            reason="Volume must be attached in order to detach.")
    attachments = volume.get('attachments', {})
    if instance and not attachments.get(instance.uuid):
        raise exception.VolumeUnattached(volume_id=volume['id'])
def attach(self, context, volume_id, instance_uuid, mountpoint, mode='rw'):
LOG.info('attaching volume %s', volume_id)
volume = self.get(context, volume_id)
volume['status'] = 'in-use'
volume['mountpoint'] = mountpoint
volume['attach_status'] = 'attached'
volume['instance_uuid'] = instance_uuid
volume['attach_time'] = timeutils.utcnow()
volume['multiattach'] = True
volume['attachments'] = {instance_uuid:
{'attachment_id': str(uuid.uuid4()),
'mountpoint': mountpoint}}
def reset_fake_api(self, context):
del self.volume_list[:]
del self.snapshot_list[:]
def detach(self, context, volume_id):
def detach(self, context, volume_id, instance_uuid, attachment_id=None):
LOG.info('detaching volume %s', volume_id)
volume = self.get(context, volume_id)
volume['status'] = 'available'
volume['mountpoint'] = None
volume['attach_status'] = 'detached'
volume['instance_uuid'] = None
def initialize_connection(self, context, volume_id, connector):
return {'driver_volume_type': 'iscsi', 'data': {}}

View File

@ -12,6 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import collections
from cinderclient.v1 import client as cinder_client_v1
from cinderclient.v2 import client as cinder_client_v2
from requests_mock.contrib import fixture
@ -29,6 +31,42 @@ _image_metadata = {
}
_volume_id = "6edbc2f4-1507-44f8-ac0d-eed1d2608d38"
_instance_uuid = "f4fda93b-06e0-4743-8117-bc8bcecd651b"
_instance_uuid_2 = "f4fda93b-06e0-4743-8117-bc8bcecd651c"
_attachment_id = "3b4db356-253d-4fab-bfa0-e3626c0b8405"
_attachment_id_2 = "3b4db356-253d-4fab-bfa0-e3626c0b8406"
_device = "/dev/vdb"
_device_2 = "/dev/vdc"
_volume_attachment = \
[{"server_id": _instance_uuid,
"attachment_id": _attachment_id,
"host_name": "",
"volume_id": _volume_id,
"device": _device,
"id": _volume_id
}]
_volume_attachment_2 = _volume_attachment
_volume_attachment_2.append({"server_id": _instance_uuid_2,
"attachment_id": _attachment_id_2,
"host_name": "",
"volume_id": _volume_id,
"device": _device_2,
"id": _volume_id})
exp_volume_attachment = collections.OrderedDict()
exp_volume_attachment[_instance_uuid] = {'attachment_id': _attachment_id,
'mountpoint': _device}
exp_volume_attachment_2 = exp_volume_attachment
exp_volume_attachment_2[_instance_uuid_2] = {'attachment_id': _attachment_id_2,
'mountpoint': _device_2}
class BaseCinderTestCase(object):
def setUp(self):
@ -97,13 +135,14 @@ class CinderTestCase(BaseCinderTestCase, test.NoDBTestCase):
"attachments": [],
"availability_zone": "cinder",
"created_at": "2012-09-10T00:00:00.000000",
"id": '00000000-0000-0000-0000-000000000000',
"id": _volume_id,
"metadata": {},
"size": 1,
"snapshot_id": None,
"status": "available",
"volume_type": "None",
"bootable": "true"
"bootable": "true",
"multiattach": "true"
}
volume.update(kwargs)
return volume
@ -161,13 +200,14 @@ class CinderV2TestCase(BaseCinderTestCase, test.NoDBTestCase):
"attachments": [],
"availability_zone": "cinderv2",
"created_at": "2013-08-10T00:00:00.000000",
"id": '00000000-0000-0000-0000-000000000000',
"id": _volume_id,
"metadata": {},
"size": 1,
"snapshot_id": None,
"status": "available",
"volume_type": "None",
"bootable": "true"
"bootable": "true",
"multiattach": "true"
}
volume.update(kwargs)
return volume
@ -191,3 +231,23 @@ class CinderV2TestCase(BaseCinderTestCase, test.NoDBTestCase):
volume = self.api.get(self.context, '5678')
self.assertIn('volume_image_metadata', volume)
self.assertEqual(_image_metadata, volume['volume_image_metadata'])
def test_volume_without_attachment(self):
v = self.stub_volume(id='1234')
self.requests.get(self.URL + '/volumes/5678', json={'volume': v})
volume = self.api.get(self.context, '5678')
self.assertIsNone(volume.get('attachments'))
def test_volume_with_one_attachment(self):
v = self.stub_volume(id='1234', attachments=_volume_attachment)
self.requests.get(self.URL + '/volumes/5678', json={'volume': v})
volume = self.api.get(self.context, '5678')
self.assertIn('attachments', volume)
self.assertEqual(exp_volume_attachment, volume['attachments'])
def test_volume_with_two_attachments(self):
v = self.stub_volume(id='1234', attachments=_volume_attachment_2)
self.requests.get(self.URL + '/volumes/5678', json={'volume': v})
volume = self.api.get(self.context, '5678')
self.assertIn('attachments', volume)
self.assertEqual(exp_volume_attachment_2, volume['attachments'])

View File

@ -19,6 +19,8 @@ import mock
from nova import context
from nova import exception
from nova import test
from nova.tests.unit.fake_instance import fake_instance_obj
from nova.tests import uuidsentinel as uuids
from nova.volume import cinder
@ -171,6 +173,7 @@ class CinderApiTestCase(test.NoDBTestCase):
volume = {'status': 'available'}
volume['attach_status'] = "detached"
volume['availability_zone'] = 'zone1'
volume['multiattach'] = False
instance = {'availability_zone': 'zone1', 'host': 'fakehost'}
cinder.CONF.set_override('cross_az_attach', False, group='cinder')
@ -182,11 +185,27 @@ class CinderApiTestCase(test.NoDBTestCase):
cinder.CONF.reset()
def test_check_detach(self):
volume = {'id': 'fake', 'status': 'available'}
volume = {'id': 'fake', 'status': 'in-use',
'attach_status': 'attached',
'attachments': {uuids.instance: {
'attachment_id': uuids.attachment}}
}
self.assertIsNone(self.api.check_detach(self.ctx, volume))
instance = fake_instance_obj(self.ctx)
instance.uuid = uuids.instance
self.assertIsNone(self.api.check_detach(self.ctx, volume, instance))
instance.uuid = uuids.instance2
self.assertRaises(exception.VolumeUnattached,
self.api.check_detach, self.ctx, volume, instance)
volume['attachments'] = {}
self.assertRaises(exception.VolumeUnattached,
self.api.check_detach, self.ctx, volume, instance)
volume['status'] = 'available'
self.assertRaises(exception.InvalidVolume,
self.api.check_detach, self.ctx, volume)
volume['attach_status'] = 'detached'
self.assertRaises(exception.InvalidVolume,
self.api.check_detach, self.ctx, volume)
volume['status'] = 'non-available'
self.assertIsNone(self.api.check_detach(self.ctx, volume))
def test_reserve_volume(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
@ -251,14 +270,24 @@ class CinderApiTestCase(test.NoDBTestCase):
mode='ro')
def test_detach(self):
self.mox.StubOutWithMock(self.api,
'get',
use_mock_anything=True)
self.api.get(self.ctx, 'id1').\
AndReturn({'id': 'id1', 'status': 'in-use',
'multiattach': True,
'attach_status': 'attached',
'attachments': {'fake_uuid':
{'attachment_id': 'fakeid'}}
})
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
self.mox.StubOutWithMock(self.cinderclient.volumes,
'detach',
use_mock_anything=True)
self.cinderclient.volumes.detach('id1')
self.cinderclient.volumes.detach('id1', 'fakeid')
self.mox.ReplayAll()
self.api.detach(self.ctx, 'id1')
self.api.detach(self.ctx, 'id1', instance_uuid='fake_uuid')
def test_initialize_connection(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)

View File

@ -18,6 +18,7 @@
Handles all requests relating to volumes + cinder.
"""
import collections
import copy
import sys
@ -160,12 +161,18 @@ def _untranslate_volume_summary_view(context, vol):
# removed.
d['attach_time'] = ""
d['mountpoint'] = ""
d['multiattach'] = getattr(vol, 'multiattach', False)
if vol.attachments:
att = vol.attachments[0]
d['attachments'] = collections.OrderedDict()
for attachment in vol.attachments:
a = {attachment['server_id']:
{'attachment_id': attachment.get('attachment_id'),
'mountpoint': attachment.get('device')}
}
d['attachments'].update(a.items())
d['attach_status'] = 'attached'
d['instance_uuid'] = att['server_id']
d['mountpoint'] = att['device']
else:
d['attach_status'] = 'detached'
# NOTE(dzyu) volume(cinder) v2 API uses 'name' instead of 'display_name',
@ -316,12 +323,24 @@ class API(object):
'vol_zone': volume['availability_zone']}
raise exception.InvalidVolume(reason=msg)
def check_detach(self, context, volume):
def check_detach(self, context, volume, instance=None):
    """Raise if *volume* cannot be detached.

    :param volume: volume dict as returned by the untranslate helpers
    :param instance: when given, also verify that this instance owns an
        attachment on the volume
    :raises: exception.InvalidVolume if the volume is not attached
    :raises: exception.VolumeUnattached if *instance* has no attachment
    """
    # TODO(vish): abstract status checking?
    if volume['status'] == "available":
        raise exception.InvalidVolume(
            reason=_("volume %s already detached") % volume['id'])

    if volume['attach_status'] == 'detached':
        raise exception.InvalidVolume(
            reason=_("Volume must be attached in order to detach."))

    # NOTE(ildikov): Preparation for multiattach support: a volume may
    # be attached to multiple hosts and/or instances, so only check the
    # attachment that belongs to the given instance.
    if instance is not None and instance.uuid not in volume['attachments']:
        # TODO(ildikov): raise a more specific exception once
        # multi-attach is enabled.
        raise exception.VolumeUnattached(volume_id=volume['id'])
@translate_volume_exception
def reserve_volume(self, context, volume_id):
cinderclient(context).volumes.reserve(volume_id)
@ -344,8 +363,34 @@ class API(object):
mountpoint, mode=mode)
@translate_volume_exception
def detach(self, context, volume_id):
cinderclient(context).volumes.detach(volume_id)
def detach(self, context, volume_id, instance_uuid=None,
           attachment_id=None):
    """Detach a volume, resolving the attachment_id when possible.

    When no attachment_id is supplied and the volume is multiattach,
    look the attachment up by *instance_uuid* so Cinder can terminate
    the correct attachment; otherwise warn that the detach may fail.
    """
    if attachment_id is None:
        volume = self.get(context, volume_id)
        # NOTE(review): use .get() — stubs/old volumes may lack the
        # 'multiattach' key and a plain lookup would raise KeyError.
        if volume.get('multiattach', False):
            attachments = volume.get('attachments', {})
            if instance_uuid:
                attachment_id = attachments.get(
                    instance_uuid, {}).get('attachment_id')
                if not attachment_id:
                    LOG.warning(_LW("attachment_id couldn't be retrieved "
                                    "for volume %(volume_id)s with "
                                    "instance_uuid %(instance_id)s. The "
                                    "volume has the 'multiattach' flag "
                                    "enabled, without the attachment_id "
                                    "Cinder most probably cannot perform "
                                    "the detach."),
                                {'volume_id': volume_id,
                                 'instance_id': instance_uuid})
            else:
                LOG.warning(_LW("attachment_id couldn't be retrieved for "
                                "volume %(volume_id)s. The volume has the "
                                "'multiattach' flag enabled, without the "
                                "attachment_id Cinder most probably "
                                "cannot perform the detach."),
                            {'volume_id': volume_id})

    cinderclient(context).volumes.detach(volume_id, attachment_id)
@translate_volume_exception
def initialize_connection(self, context, volume_id, connector):