Refactor block_device_mapping handling during boot

It has become clear that it will be necessary to store a copy of the
block_device_mapping on the BuildRequest object so that it can be
returned via the API (os-extended-volumes). In order to prepare for
doing so, the block_device_mapping needs to be validated and fully set,
but not persisted to the database. This patch refactors some methods to
allow that.

Change-Id: If2020f00e15ca1b067620f880a6f7d3a8ad5281c
This commit is contained in:
Andrew Laski 2016-06-24 17:07:26 -04:00
parent 6790b92c48
commit dc6a3f4abb
5 changed files with 222 additions and 283 deletions

View File

@ -107,6 +107,10 @@ class CellsScheduler(base.Base):
security_groups,
block_device_mapping,
num_instances, i)
block_device_mapping = (
self.compute_api._bdm_validate_set_size_and_instance(
ctxt, instance, instance_type, block_device_mapping))
self.compute_api._create_block_device_mapping(block_device_mapping)
instances.append(instance)
self.msg_runner.instance_update_at_top(ctxt, instance)

View File

@ -938,7 +938,12 @@ class API(base.Base):
instance_type, boot_meta, instance, security_groups,
block_device_mapping, num_instances, i,
shutdown_terminate, create_instance=False)
block_device_mapping = (
self._bdm_validate_set_size_and_instance(context,
instance, instance_type, block_device_mapping))
# TODO(alaski): Pass block_device_mapping here when the object
# supports it.
build_request = objects.BuildRequest(context,
instance=instance, instance_uuid=instance.uuid,
project_id=instance.project_id)
@ -963,8 +968,7 @@ class API(base.Base):
instance.create()
instances.append(instance)
self._create_block_device_mapping(
instance_type, instance.uuid, block_device_mapping)
self._create_block_device_mapping(block_device_mapping)
if instance_group:
if check_server_group_quota:
@ -1206,22 +1210,36 @@ class API(base.Base):
return prepared_mappings
def _create_block_device_mapping(self, instance_type, instance_uuid,
block_device_mapping):
"""Create the BlockDeviceMapping objects in the db.
def _bdm_validate_set_size_and_instance(self, context, instance,
instance_type,
block_device_mapping):
"""Ensure the bdms are valid, then set size and associate with instance
This method makes a copy of the list in order to avoid using the same
id field in case this is called for multiple instances.
Because this method can be called multiple times when more than one
instance is booted in a single request it makes a copy of the bdm list.
"""
LOG.debug("block_device_mapping %s", list(block_device_mapping),
instance_uuid=instance_uuid)
instance_block_device_mapping = copy.deepcopy(block_device_mapping)
instance_uuid=instance.uuid)
self._validate_bdm(
context, instance, instance_type, block_device_mapping)
instance_block_device_mapping = block_device_mapping.obj_clone()
for bdm in instance_block_device_mapping:
bdm.volume_size = self._volume_size(instance_type, bdm)
bdm.instance_uuid = instance.uuid
return instance_block_device_mapping
def _create_block_device_mapping(self, block_device_mapping):
# Copy the block_device_mapping because this method can be called
# multiple times when more than one instance is booted in a single
# request. This avoids 'id' being set and triggering the object dupe
# detection
db_block_device_mapping = copy.deepcopy(block_device_mapping)
# Create the BlockDeviceMapping objects in the db.
for bdm in db_block_device_mapping:
# TODO(alaski): Why is this done?
if bdm.volume_size == 0:
continue
bdm.instance_uuid = instance_uuid
bdm.update_or_create()
def _validate_bdm(self, context, instance, instance_type,
@ -1418,24 +1436,6 @@ class API(base.Base):
if create_instance:
instance.create()
# NOTE (ndipanov): This can now raise exceptions but the instance
# has been created, so delete it and re-raise so
# that other cleanup can happen.
try:
self._validate_bdm(
context, instance, instance_type, block_device_mapping)
except (exception.CinderConnectionFailed, exception.InvalidBDM,
exception.InvalidVolume):
with excutils.save_and_reraise_exception():
if create_instance:
instance.destroy()
if create_instance:
# Because of the foreign key the bdm can't be created unless the
# instance is.
self._create_block_device_mapping(
instance_type, instance.uuid, block_device_mapping)
return instance
def _check_multiple_instances_with_neutron_ports(self,

View File

@ -112,13 +112,14 @@ class CellsSchedulerTestCase(test.TestCase):
'project_id': self.ctxt.project_id}
call_info = {'uuids': []}
block_device_mapping = [
block_device_mapping = objects.BlockDeviceMappingList(
objects=[
objects.BlockDeviceMapping(context=self.ctxt,
**fake_block_device.FakeDbBlockDeviceDict(
block_device.create_image_bdm(
uuidsentinel.fake_image_ref),
anon=True))
]
])
def _fake_instance_update_at_top(_ctxt, instance):
call_info['uuids'].append(instance['uuid'])
@ -132,6 +133,9 @@ class CellsSchedulerTestCase(test.TestCase):
self.assertEqual(instance_uuids, call_info['uuids'])
for count, instance_uuid in enumerate(instance_uuids):
bdms = db.block_device_mapping_get_all_by_instance(self.ctxt,
instance_uuid)
self.assertIsNotNone(bdms)
instance = db.instance_get_by_uuid(self.ctxt, instance_uuid)
meta = utils.instance_meta(instance)
self.assertEqual('cow', meta['moo'])
@ -157,13 +161,22 @@ class CellsSchedulerTestCase(test.TestCase):
'pci_requests': 'no thanks',
'ec2_ids': 'prime',
}
block_device_mapping = [
objects.BlockDeviceMapping(context=self.ctxt,
**fake_block_device.FakeDbBlockDeviceDict(
block_device.create_image_bdm(
uuidsentinel.fake_image_ref),
anon=True))
]
@mock.patch.object(self.scheduler.compute_api,
'create_db_entry_for_new_instance')
def test(mock_create_db):
@mock.patch.object(self.scheduler.compute_api,
'_bdm_validate_set_size_and_instance')
def test(mock_bdm_validate, mock_create_db):
self.scheduler._create_instances_here(
self.ctxt, [uuidsentinel.instance], values,
objects.Flavor(), 'foo', [], [])
objects.Flavor(), 'foo', [], block_device_mapping)
test()

View File

@ -8471,172 +8471,6 @@ class ComputeAPITestCase(BaseTestCase):
return bdm
def test_create_block_device_mapping(self):
def _compare_bdm_object(obj1, obj2, extra_keys=()):
for key in (('device_name', 'source_type', 'destination_type')
+ extra_keys):
self.assertEqual(getattr(obj1, key), getattr(obj2, key))
swap_size = ephemeral_size = 1
instance_type = {'swap': swap_size, 'ephemeral_gb': ephemeral_size}
instance = self._create_fake_instance_obj()
mappings = [
{'virtual': 'ami', 'device': 'sda1'},
{'virtual': 'root', 'device': '/dev/sda1'},
{'virtual': 'swap', 'device': 'sdb4'},
{'virtual': 'swap', 'device': 'sdb3'},
{'virtual': 'swap', 'device': 'sdb2'},
{'virtual': 'swap', 'device': 'sdb1'},
{'virtual': 'ephemeral0', 'device': 'sdc1'},
{'virtual': 'ephemeral1', 'device': 'sdc2'},
{'virtual': 'ephemeral2', 'device': 'sdc3'}]
block_device_mapping = [
# root
{'device_name': '/dev/sda1',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '00000000-aaaa-bbbb-cccc-000000000000',
'delete_on_termination': False},
# overwrite swap
{'device_name': '/dev/sdb2',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '11111111-aaaa-bbbb-cccc-111111111111',
'delete_on_termination': False},
{'device_name': '/dev/sdb3',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '22222222-aaaa-bbbb-cccc-222222222222'},
{'device_name': '/dev/sdb4',
'no_device': True},
# overwrite ephemeral
{'device_name': '/dev/sdc1',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '33333333-aaaa-bbbb-cccc-333333333333',
'delete_on_termination': False},
{'device_name': '/dev/sdc2',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '33333333-aaaa-bbbb-cccc-444444444444',
'delete_on_termination': False},
{'device_name': '/dev/sdc3',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '44444444-aaaa-bbbb-cccc-555555555555'},
{'device_name': '/dev/sdc4',
'no_device': True},
# volume
{'device_name': '/dev/sdd1',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '55555555-aaaa-bbbb-cccc-666666666666',
'delete_on_termination': False},
{'device_name': '/dev/sdd2',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '66666666-aaaa-bbbb-cccc-777777777777'},
{'device_name': '/dev/sdd3',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '77777777-aaaa-bbbb-cccc-888888888888'},
{'device_name': '/dev/sdd4',
'no_device': True}]
image_mapping = self.compute_api._prepare_image_mapping(
instance_type, mappings)
image_mapping = block_device_obj.block_device_make_list_from_dicts(
self.context, image_mapping)
self.compute_api._create_block_device_mapping(
instance_type, instance['uuid'], image_mapping)
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
self.context, instance['uuid'])
expected_result = [
{'source_type': 'blank', 'destination_type': 'local',
'guest_format': 'swap', 'device_name': '/dev/sdb1',
'volume_size': swap_size, 'delete_on_termination': True},
{'source_type': 'blank', 'destination_type': 'local',
'guest_format': CONF.default_ephemeral_format,
'device_name': '/dev/sdc3', 'delete_on_termination': True},
{'source_type': 'blank', 'destination_type': 'local',
'guest_format': CONF.default_ephemeral_format,
'device_name': '/dev/sdc1', 'delete_on_termination': True},
{'source_type': 'blank', 'destination_type': 'local',
'guest_format': CONF.default_ephemeral_format,
'device_name': '/dev/sdc2', 'delete_on_termination': True},
]
expected_result = block_device_obj.block_device_make_list_from_dicts(
self.context,
map(fake_block_device.AnonFakeDbBlockDeviceDict,
expected_result))
bdms.sort(key=operator.attrgetter('device_name'))
expected_result.sort(key=operator.attrgetter('device_name'))
self.assertEqual(len(bdms), len(expected_result))
for expected, got in zip(expected_result, bdms):
_compare_bdm_object(
expected, got,
extra_keys=('guest_format', 'delete_on_termination'))
block_device_mapping = (
block_device_obj.block_device_make_list_from_dicts(
self.context,
map(fake_block_device.AnonFakeDbBlockDeviceDict,
block_device_mapping)))
self.compute_api._create_block_device_mapping(
flavors.get_default_flavor(), instance['uuid'],
block_device_mapping)
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
self.context, instance['uuid'])
expected_result = [
{'snapshot_id': '00000000-aaaa-bbbb-cccc-000000000000',
'device_name': '/dev/sda1', 'source_type': 'snapshot',
'destination_type': 'volume'},
{'source_type': 'blank', 'destination_type': 'local',
'guest_format': 'swap', 'device_name': '/dev/sdb1',
'volume_size': swap_size, 'delete_on_termination': True},
{'device_name': '/dev/sdb2',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '11111111-aaaa-bbbb-cccc-111111111111',
'delete_on_termination': False},
{'device_name': '/dev/sdb3',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '22222222-aaaa-bbbb-cccc-222222222222'},
{'device_name': '/dev/sdb4', 'no_device': True},
{'device_name': '/dev/sdc1',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '33333333-aaaa-bbbb-cccc-333333333333',
'delete_on_termination': False},
{'device_name': '/dev/sdc2',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '33333333-aaaa-bbbb-cccc-444444444444',
'delete_on_termination': False},
{'device_name': '/dev/sdc3',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '44444444-aaaa-bbbb-cccc-555555555555'},
{'no_device': True, 'device_name': '/dev/sdc4'},
{'device_name': '/dev/sdd1',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '55555555-aaaa-bbbb-cccc-666666666666',
'delete_on_termination': False},
{'device_name': '/dev/sdd2',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '66666666-aaaa-bbbb-cccc-777777777777'},
{'device_name': '/dev/sdd3',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '77777777-aaaa-bbbb-cccc-888888888888'},
{'no_device': True, 'device_name': '/dev/sdd4'}]
expected_result = block_device_obj.block_device_make_list_from_dicts(
self.context,
map(fake_block_device.AnonFakeDbBlockDeviceDict,
expected_result))
bdms.sort(key=operator.itemgetter('device_name'))
expected_result.sort(key=operator.itemgetter('device_name'))
self.assertEqual(len(bdms), len(expected_result))
for expected, got in zip(expected_result, bdms):
_compare_bdm_object(
expected, got,
extra_keys=('snapshot_id', 'delete_on_termination'))
def _test_check_and_transform_bdm(self, bdms, expected_bdms,
image_bdms=None, base_options=None,
legacy_bdms=False,

View File

@ -40,6 +40,7 @@ from nova import db
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.objects import block_device as block_device_obj
from nova.objects import fields as fields_obj
from nova.objects import quotas as quotas_obj
from nova import quota
@ -3018,81 +3019,17 @@ class _ComputeAPIUnitTestMixIn(object):
self.context,
instance, instance_type, bdms)
def _test_create_db_entry_for_new_instance_with_cinder_error(self,
expected_exception):
@mock.patch.object(objects.Instance, 'create')
@mock.patch.object(compute_api.SecurityGroupAPI, 'ensure_default')
@mock.patch.object(compute_api.API, '_populate_instance_names')
@mock.patch.object(compute_api.API, '_populate_instance_for_create')
def do_test(self, mock_create, mock_names, mock_ensure,
mock_inst_create):
instance = self._create_instance_obj()
instance['display_name'] = 'FAKE_DISPLAY_NAME'
instance['shutdown_terminate'] = False
instance_type = self._create_flavor()
fake_image = {
'id': 'fake-image-id',
'properties': {'mappings': []},
'status': 'fake-status',
'location': 'far-away'}
fake_security_group = None
fake_num_instances = 1
fake_index = 1
bdm = [objects.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict(
{
'id': 1,
'volume_id': 1,
'source_type': 'volume',
'destination_type': 'volume',
'device_name': 'vda',
'boot_index': 0,
}))]
with mock.patch.object(instance, "destroy") as destroy:
self.assertRaises(expected_exception,
self.compute_api.
create_db_entry_for_new_instance,
self.context,
instance_type,
fake_image,
instance,
fake_security_group,
bdm,
fake_num_instances,
fake_index)
destroy.assert_called_once_with()
# We use a nested method so we can decorate with the mocks.
do_test(self)
@mock.patch.object(cinder.API, 'get',
side_effect=exception.CinderConnectionFailed(reason='error'))
def test_create_db_entry_for_new_instancewith_cinder_down(self, mock_get):
self._test_create_db_entry_for_new_instance_with_cinder_error(
expected_exception=exception.CinderConnectionFailed)
@mock.patch.object(cinder.API, 'get',
return_value={'id': 1, 'status': 'error',
'attach_status': 'detached'})
def test_create_db_entry_for_new_instancewith_error_volume(self, mock_get):
self._test_create_db_entry_for_new_instance_with_cinder_error(
expected_exception=exception.InvalidVolume)
def test_provision_instances_creates_request_spec(self):
def _test_provision_instances_with_cinder_error(self,
expected_exception):
@mock.patch.object(self.compute_api, '_check_num_instances_quota')
@mock.patch.object(objects.Instance, 'create')
@mock.patch.object(self.compute_api.security_group_api,
'ensure_default')
@mock.patch.object(self.compute_api, '_validate_bdm')
@mock.patch.object(self.compute_api, '_create_block_device_mapping')
@mock.patch.object(objects.RequestSpec, 'from_components')
@mock.patch.object(objects, 'BuildRequest')
@mock.patch.object(objects.InstanceMapping, 'create')
def do_test(_mock_inst_mapping_create, _mock_build_req,
def do_test(
mock_req_spec_from_components, _mock_create_bdm,
_mock_validate_bdm, _mock_ensure_default, _mock_create,
mock_check_num_inst_quota):
_mock_ensure_default, _mock_create, mock_check_num_inst_quota):
quota_mock = mock.MagicMock()
req_spec_mock = mock.MagicMock()
@ -3140,21 +3077,29 @@ class _ComputeAPIUnitTestMixIn(object):
filter_properties = {'scheduler_hints': None,
'instance_type': flavor}
instances = self.compute_api._provision_instances(ctxt, flavor,
min_count, max_count, base_options, boot_meta,
security_groups, block_device_mapping, shutdown_terminate,
instance_group, check_server_group_quota,
filter_properties, None)
self.assertTrue(uuidutils.is_uuid_like(instances[0].uuid))
mock_req_spec_from_components.assert_called_once_with(ctxt,
mock.ANY, boot_meta, flavor, base_options['numa_topology'],
base_options['pci_requests'], filter_properties,
instance_group, base_options['availability_zone'])
req_spec_mock.create.assert_called_once_with()
self.assertRaises(expected_exception,
self.compute_api._provision_instances, ctxt,
flavor, min_count, max_count, base_options,
boot_meta, security_groups, block_device_mapping,
shutdown_terminate, instance_group,
check_server_group_quota, filter_properties,
None)
do_test()
@mock.patch.object(cinder.API, 'get',
side_effect=exception.CinderConnectionFailed(reason='error'))
def test_provision_instances_with_cinder_down(self, mock_get):
self._test_provision_instances_with_cinder_error(
expected_exception=exception.CinderConnectionFailed)
@mock.patch.object(cinder.API, 'get',
return_value={'id': 1, 'status': 'error',
'attach_status': 'detached'})
def test_provision_instances_with_error_volume(self, mock_get):
self._test_provision_instances_with_cinder_error(
expected_exception=exception.InvalidVolume)
@mock.patch('nova.objects.RequestSpec.from_components')
@mock.patch('nova.objects.BuildRequest')
@mock.patch('nova.objects.Instance')
@ -3167,8 +3112,10 @@ class _ComputeAPIUnitTestMixIn(object):
@mock.patch.object(self.compute_api, 'security_group_api')
@mock.patch.object(self.compute_api,
'create_db_entry_for_new_instance')
@mock.patch.object(self.compute_api,
'_bdm_validate_set_size_and_instance')
@mock.patch.object(self.compute_api, '_create_block_device_mapping')
def do_test(mock_cbdm, mock_cdb, mock_sg, mock_cniq):
def do_test(mock_cbdm, mock_bdm_v, mock_cdb, mock_sg, mock_cniq):
mock_cniq.return_value = 1, mock.MagicMock()
self.compute_api._provision_instances(self.context,
mock.sentinel.flavor,
@ -3243,7 +3190,8 @@ class _ComputeAPIUnitTestMixIn(object):
'numa_topology': None,
'pci_requests': None}
security_groups = {}
block_device_mapping = [objects.BlockDeviceMapping(
block_device_mapping = objects.BlockDeviceMappingList(
objects=[objects.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict(
{
'id': 1,
@ -3252,7 +3200,7 @@ class _ComputeAPIUnitTestMixIn(object):
'destination_type': 'volume',
'device_name': 'vda',
'boot_index': 0,
}))]
}))])
shutdown_terminate = True
instance_group = None
check_server_group_quota = False
@ -3331,7 +3279,8 @@ class _ComputeAPIUnitTestMixIn(object):
'numa_topology': None,
'pci_requests': None}
security_groups = {}
block_device_mapping = [objects.BlockDeviceMapping(
block_device_mapping = objects.BlockDeviceMappingList(
objects=[objects.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict(
{
'id': 1,
@ -3340,7 +3289,7 @@ class _ComputeAPIUnitTestMixIn(object):
'destination_type': 'volume',
'device_name': 'vda',
'boot_index': 0,
}))]
}))])
shutdown_terminate = True
instance_group = None
check_server_group_quota = False
@ -3360,6 +3309,93 @@ class _ComputeAPIUnitTestMixIn(object):
self.assertEqual(ctxt.project_id, inst_mapping_mock.project_id)
do_test()
@mock.patch.object(cinder.API, 'get')
@mock.patch.object(cinder.API, 'check_attach',
side_effect=(None, exception.InvalidVolume(reason='error')))
def test_provision_instances_cleans_up_when_volume_invalid(self,
_mock_cinder_get, _mock_cinder_check_attach):
@mock.patch.object(self.compute_api, '_check_num_instances_quota')
@mock.patch.object(objects, 'Instance')
@mock.patch.object(self.compute_api.security_group_api,
'ensure_default')
@mock.patch.object(self.compute_api, '_create_block_device_mapping')
@mock.patch.object(objects.RequestSpec, 'from_components')
@mock.patch.object(objects, 'BuildRequest')
@mock.patch.object(objects.InstanceMapping, 'create')
def do_test(_mock_inst_mapping_create, mock_build_req,
mock_req_spec_from_components, _mock_create_bdm,
_mock_ensure_default, mock_inst, mock_check_num_inst_quota):
quota_mock = mock.MagicMock()
min_count = 1
max_count = 2
mock_check_num_inst_quota.return_value = (2, quota_mock)
req_spec_mock = mock.MagicMock()
mock_req_spec_from_components.return_value = req_spec_mock
inst_mocks = [mock.MagicMock() for i in range(max_count)]
for inst_mock in inst_mocks:
inst_mock.project_id = 'fake-project'
mock_inst.side_effect = inst_mocks
build_req_mocks = [mock.MagicMock() for i in range(max_count)]
mock_build_req.side_effect = build_req_mocks
ctxt = context.RequestContext('fake-user', 'fake-project')
flavor = self._create_flavor()
boot_meta = {
'id': 'fake-image-id',
'properties': {'mappings': []},
'status': 'fake-status',
'location': 'far-away'}
base_options = {'image_ref': 'fake-ref',
'display_name': 'fake-name',
'project_id': 'fake-project',
'availability_zone': None,
'metadata': {},
'access_ip_v4': None,
'access_ip_v6': None,
'config_drive': None,
'key_name': None,
'reservation_id': None,
'kernel_id': None,
'ramdisk_id': None,
'root_device_name': None,
'user_data': None,
'numa_topology': None,
'pci_requests': None}
security_groups = {}
block_device_mapping = objects.BlockDeviceMappingList(
objects=[objects.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict(
{
'id': 1,
'volume_id': 1,
'source_type': 'volume',
'destination_type': 'volume',
'device_name': 'vda',
'boot_index': 0,
}))])
shutdown_terminate = True
instance_group = None
check_server_group_quota = False
filter_properties = {'scheduler_hints': None,
'instance_type': flavor}
self.assertRaises(exception.InvalidVolume,
self.compute_api._provision_instances, ctxt,
flavor, min_count, max_count, base_options,
boot_meta, security_groups, block_device_mapping,
shutdown_terminate, instance_group,
check_server_group_quota, filter_properties,
None)
# First instance is created and destroyed
self.assertTrue(inst_mocks[0].create.called)
self.assertTrue(inst_mocks[0].destroy.called)
# Second instance is not created nor destroyed
self.assertFalse(inst_mocks[1].create.called)
self.assertFalse(inst_mocks[1].destroy.called)
do_test()
def _test_rescue(self, vm_state=vm_states.ACTIVE, rescue_password=None,
rescue_image=None, clean_shutdown=True):
instance = self._create_instance_obj(params={'vm_state': vm_state})
@ -3535,6 +3571,58 @@ class _ComputeAPIUnitTestMixIn(object):
'volume_id': 'volume_id'}]
self._test_check_and_transform_bdm(block_device_mapping)
def test_bdm_validate_set_size_and_instance(self):
swap_size = 42
ephemeral_size = 24
instance = self._create_instance_obj()
instance_type = self._create_flavor(swap=swap_size,
ephemeral_gb=ephemeral_size)
block_device_mapping = [
{'device_name': '/dev/sda1',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '00000000-aaaa-bbbb-cccc-000000000000',
'delete_on_termination': False,
'boot_index': 0},
{'device_name': '/dev/sdb2',
'source_type': 'blank', 'destination_type': 'local',
'guest_format': 'swap', 'delete_on_termination': False},
{'device_name': '/dev/sdb3',
'source_type': 'blank', 'destination_type': 'local',
'guest_format': 'ext3', 'delete_on_termination': False}]
block_device_mapping = (
block_device_obj.block_device_make_list_from_dicts(
self.context,
map(fake_block_device.AnonFakeDbBlockDeviceDict,
block_device_mapping)))
with mock.patch.object(self.compute_api, '_validate_bdm'):
bdms = self.compute_api._bdm_validate_set_size_and_instance(
self.context, instance, instance_type, block_device_mapping)
expected = [{'device_name': '/dev/sda1',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '00000000-aaaa-bbbb-cccc-000000000000',
'delete_on_termination': False,
'boot_index': 0},
{'device_name': '/dev/sdb2',
'source_type': 'blank', 'destination_type': 'local',
'guest_format': 'swap', 'delete_on_termination': False},
{'device_name': '/dev/sdb3',
'source_type': 'blank', 'destination_type': 'local',
'delete_on_termination': False}]
# Check that the bdm matches what was asked for and that instance_uuid
# and volume_size are set properly.
for exp, bdm in zip(expected, bdms):
self.assertEqual(exp['device_name'], bdm.device_name)
self.assertEqual(exp['destination_type'], bdm.destination_type)
self.assertEqual(exp['source_type'], bdm.source_type)
self.assertEqual(exp['delete_on_termination'],
bdm.delete_on_termination)
self.assertEqual(instance.uuid, bdm.instance_uuid)
self.assertEqual(swap_size, bdms[1].volume_size)
self.assertEqual(ephemeral_size, bdms[2].volume_size)
@mock.patch.object(compute_api.API, '_get_instances_by_filters')
def test_tenant_to_project_conversion(self, mock_get):
mock_get.return_value = []