Expose volume_attachments in Volume OVO
In change-id Iabf9c3fab56ffef50695ce45745f193273822b39 we left the `volume_attachment` out of the expected attributes of the Volume OVO (even when the DB layer is providing that information) because it was breaking some unit tests. This means that in some cases we are unnecessarily loading the attachments again (manually or via lazy loading) once we have the volume OVO because that field is not set. In this patch we populate the `volume_attachment` when we load the Volume OVO from the DB. Change-Id: I6576832b2c2722ab749cfe70bbc2058ead816c36 (cherry picked from commit e07bf3787a
) Conflicts: cinder/objects/volume.py cinder/tests/unit/objects/test_volume.py (cherry picked from commit e85c22cef7
) (cherry picked from commit a5b67fd959
) Conflicts: cinder/tests/unit/volume/flows/test_create_volume_flow.py
This commit is contained in:
parent
3339bfb839
commit
bc6585577e
|
@ -138,7 +138,8 @@ class Volume(cleanable.CinderCleanableObject, base.CinderObject,
|
|||
|
||||
@classmethod
|
||||
def _get_expected_attrs(cls, context, *args, **kwargs):
|
||||
expected_attrs = ['metadata', 'volume_type', 'volume_type.extra_specs']
|
||||
expected_attrs = ['metadata', 'volume_type', 'volume_type.extra_specs',
|
||||
'volume_attachment']
|
||||
if context.is_admin:
|
||||
expected_attrs.append('admin_metadata')
|
||||
|
||||
|
@ -490,7 +491,8 @@ class Volume(cleanable.CinderCleanableObject, base.CinderObject,
|
|||
# end of migration because we want to keep the original volume id
|
||||
# in the DB but now pointing to the migrated volume.
|
||||
skip = ({'id', 'provider_location', 'glance_metadata',
|
||||
'volume_type'} | set(self.obj_extra_fields))
|
||||
'volume_type', 'volume_attachment'}
|
||||
| set(self.obj_extra_fields))
|
||||
for key in set(dest_volume.fields.keys()) - skip:
|
||||
# Only swap attributes that are already set. We do not want to
|
||||
# unexpectedly trigger a lazy-load.
|
||||
|
|
|
@ -107,8 +107,11 @@ def fake_volume_obj(context, **updates):
|
|||
if updates.get('encryption_key_id'):
|
||||
assert is_uuid_like(updates['encryption_key_id'])
|
||||
|
||||
updates['volume_attachment'] = updates.get('volume_attachment') or []
|
||||
|
||||
expected_attrs = updates.pop('expected_attrs',
|
||||
['metadata', 'admin_metadata'])
|
||||
['metadata', 'admin_metadata',
|
||||
'volume_attachment'])
|
||||
vol = objects.Volume._from_db_object(context, objects.Volume(),
|
||||
fake_db_volume(**updates),
|
||||
expected_attrs=expected_attrs)
|
||||
|
|
|
@ -430,7 +430,8 @@ class TestVolume(test_objects.BaseObjectsTestCase):
|
|||
# finish_volume_migration
|
||||
ignore_keys = ('id', 'provider_location', '_name_id',
|
||||
'migration_status', 'display_description', 'status',
|
||||
'volume_glance_metadata', 'volume_type')
|
||||
'volume_glance_metadata', 'volume_type',
|
||||
'volume_attachment')
|
||||
|
||||
dest_vol_dict = {k: updated_dest_volume[k] for k in
|
||||
updated_dest_volume.keys() if k not in ignore_keys}
|
||||
|
@ -478,7 +479,8 @@ class TestVolume(test_objects.BaseObjectsTestCase):
|
|||
self, volume_attachment_get,
|
||||
volume_detached,
|
||||
metadata_delete):
|
||||
va_objs = [objects.VolumeAttachment(context=self.context, id=i)
|
||||
va_objs = [objects.VolumeAttachment(context=self.context, id=i,
|
||||
volume_id=fake.VOLUME_ID)
|
||||
for i in [fake.OBJECT_ID, fake.OBJECT2_ID, fake.OBJECT3_ID]]
|
||||
# As changes are not saved, we need reset it here. Later changes
|
||||
# will be checked.
|
||||
|
@ -491,6 +493,7 @@ class TestVolume(test_objects.BaseObjectsTestCase):
|
|||
admin_context = context.get_admin_context()
|
||||
volume = fake_volume.fake_volume_obj(
|
||||
admin_context,
|
||||
id=fake.VOLUME_ID,
|
||||
volume_attachment=va_list,
|
||||
volume_admin_metadata=[{'key': 'attached_mode',
|
||||
'value': 'rw'}])
|
||||
|
@ -522,7 +525,7 @@ class TestVolume(test_objects.BaseObjectsTestCase):
|
|||
admin_context,
|
||||
volume_admin_metadata=[{'key': 'attached_mode',
|
||||
'value': 'rw'}])
|
||||
self.assertFalse(volume.obj_attr_is_set('volume_attachment'))
|
||||
self.assertEqual([], volume.volume_attachment.objects)
|
||||
volume_detached.return_value = ({'status': 'in-use'}, None)
|
||||
with mock.patch.object(admin_context, 'elevated') as mock_elevated:
|
||||
mock_elevated.return_value = admin_context
|
||||
|
|
|
@ -108,6 +108,12 @@ def create_volume(ctxt,
|
|||
def attach_volume(ctxt, volume_id, instance_uuid, attached_host,
|
||||
mountpoint, mode='rw'):
|
||||
|
||||
if isinstance(volume_id, objects.Volume):
|
||||
volume_ovo = volume_id
|
||||
volume_id = volume_ovo.id
|
||||
else:
|
||||
volume_ovo = None
|
||||
|
||||
now = timeutils.utcnow()
|
||||
values = {}
|
||||
values['volume_id'] = volume_id
|
||||
|
@ -119,6 +125,13 @@ def attach_volume(ctxt, volume_id, instance_uuid, attached_host,
|
|||
volume, updated_values = db.volume_attached(
|
||||
ctxt, attachment['id'], instance_uuid,
|
||||
attached_host, mountpoint, mode)
|
||||
|
||||
if volume_ovo:
|
||||
cls = objects.Volume
|
||||
expected_attrs = cls._get_expected_attrs(ctxt)
|
||||
volume = cls._from_db_object(ctxt, cls(ctxt), volume,
|
||||
expected_attrs=expected_attrs)
|
||||
|
||||
return volume
|
||||
|
||||
|
||||
|
|
|
@ -205,7 +205,7 @@ class CreateVolumeFlowTestCase(test.TestCase):
|
|||
snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctxt)
|
||||
snapshot_get_by_id.return_value = snapshot_obj
|
||||
volume_get_by_id.return_value = volume_obj
|
||||
volume_create.return_value = {'id': '123456'}
|
||||
volume_create.return_value = {'id': '123456', 'volume_attachment': []}
|
||||
|
||||
task = create_volume.EntryCreateTask()
|
||||
|
||||
|
@ -254,7 +254,7 @@ class CreateVolumeFlowTestCase(test.TestCase):
|
|||
volume_db = {'bootable': bootable}
|
||||
volume_obj = fake_volume.fake_volume_obj(self.ctxt, **volume_db)
|
||||
volume_get_by_id.return_value = volume_obj
|
||||
volume_create.return_value = {'id': '123456'}
|
||||
volume_create.return_value = {'id': '123456', 'volume_attachment': []}
|
||||
task = create_volume.EntryCreateTask()
|
||||
|
||||
result = task.execute(self.ctxt,
|
||||
|
|
|
@ -233,12 +233,11 @@ class VolumeMigrationTestCase(base.BaseVolumeTestCase):
|
|||
volume_get.return_value = fake_new_volume
|
||||
volume = tests_utils.create_volume(self.context, size=1,
|
||||
host=CONF.host)
|
||||
volume_attach = tests_utils.attach_volume(
|
||||
self.context, volume['id'], fake_uuid, attached_host, '/dev/vda')
|
||||
self.assertIsNotNone(volume_attach['volume_attachment'][0]['id'])
|
||||
self.assertEqual(
|
||||
fake_uuid, volume_attach['volume_attachment'][0]['instance_uuid'])
|
||||
self.assertEqual('in-use', volume_attach['status'])
|
||||
volume = tests_utils.attach_volume(
|
||||
self.context, volume, fake_uuid, attached_host, '/dev/vda')
|
||||
self.assertIsNotNone(volume.volume_attachment[0].id)
|
||||
self.assertEqual(fake_uuid, volume.volume_attachment[0].instance_uuid)
|
||||
self.assertEqual('in-use', volume.status)
|
||||
self.volume._migrate_volume_generic(self.context, volume,
|
||||
host_obj, None)
|
||||
self.assertFalse(migrate_volume_completion.called)
|
||||
|
@ -603,11 +602,12 @@ class VolumeMigrationTestCase(base.BaseVolumeTestCase):
|
|||
previous_status=previous_status)
|
||||
attachment = None
|
||||
if status == 'in-use':
|
||||
vol = tests_utils.attach_volume(self.context, old_volume.id,
|
||||
instance_uuid, attached_host,
|
||||
'/dev/vda')
|
||||
self.assertEqual('in-use', vol['status'])
|
||||
attachment = vol['volume_attachment'][0]
|
||||
old_volume = tests_utils.attach_volume(self.context, old_volume,
|
||||
instance_uuid,
|
||||
attached_host,
|
||||
'/dev/vda')
|
||||
self.assertEqual('in-use', old_volume.status)
|
||||
attachment = old_volume.volume_attachment[0]
|
||||
target_status = 'target:%s' % old_volume.id
|
||||
new_host = CONF.host + 'new'
|
||||
new_volume = tests_utils.create_volume(self.context, size=0,
|
||||
|
|
Loading…
Reference in New Issue