Save connection info in libvirt after volume connect

The libvirt driver will connect volumes for BFV while processing
the guest configuration, but not save the connection_info into
the database. This adds a save there to make sure that any
storage driver details get persisted.

Related-Bug: #1328245

Change-Id: Idd6ef94ba2129b65e685a0435bc79f213d6f3f04
This commit is contained in:
Dan Smith 2014-05-23 07:05:52 -07:00
parent f20424ab73
commit a2aca5c9fb
3 changed files with 43 additions and 21 deletions

View File

@ -136,6 +136,14 @@ _fake_NodeDevXml = \
</device>"""}
def mocked_bdm(id, bdm_info):
    """Build a MagicMock standing in for a BlockDeviceMapping object.

    Item access and ``.get()`` are delegated to *bdm_info*, and the mock
    carries an ``id`` attribute so code that persists connection_info by
    primary key can be exercised without a real database row.
    """
    fake = mock.MagicMock()
    fake.id = id

    # MagicMock supports assigning a plain function to a magic method;
    # it is called with the mock itself as the first argument.
    def _getitem(self, key):
        return bdm_info[key]

    fake.__getitem__ = _getitem
    # Bound-method delegation is equivalent to forwarding *args/**kwargs.
    fake.get = bdm_info.get
    return fake
def _concurrency(signal, wait, done, target):
signal.send()
wait.wait()
@ -841,8 +849,11 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(self.context, self.test_instance)
conn_info = {'driver_volume_type': 'fake'}
info = {'block_device_mapping': [
{'connection_info': conn_info, 'mount_device': '/dev/vdc'},
{'connection_info': conn_info, 'mount_device': '/dev/vdd'}]}
mocked_bdm(1, {'connection_info': conn_info,
'mount_device': '/dev/vdc'}),
mocked_bdm(1, {'connection_info': conn_info,
'mount_device': '/dev/vdd'}),
]}
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref, info)
@ -3528,8 +3539,8 @@ class LibvirtConnTestCase(test.TestCase):
block_device_info = {'root_device_name': '/dev/vda',
'block_device_mapping': [
{'mount_device': 'vda',
'boot_index': 0}
mocked_bdm(1, {'mount_device': 'vda',
'boot_index': 0}),
]
}
@ -3550,6 +3561,7 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref['image_ref'] = 'my_fake_image'
instance_ref['root_device_name'] = '/dev/vda'
instance_ref['uuid'] = uuidutils.generate_uuid()
block_device_info['block_device_mapping'][0].id = 2
instance = db.instance_create(self.context, instance_ref)
conn.spawn(self.context, instance, None, [], None,
@ -5647,16 +5659,19 @@ class LibvirtConnTestCase(test.TestCase):
self.stubs.Set(conn,
'_lookup_by_name',
fake_lookup_name)
block_device_info = {'block_device_mapping': [
{'guest_format': None,
'boot_index': 0,
'mount_device': '/dev/vda',
'connection_info':
{'driver_volume_type': 'iscsi'},
'disk_bus': 'virtio',
'device_type': 'disk',
'delete_on_termination': False}
]}
bdm = {'guest_format': None,
'boot_index': 0,
'mount_device': '/dev/vda',
'connection_info':
{'driver_volume_type': 'iscsi'},
'disk_bus': 'virtio',
'device_type': 'disk',
'delete_on_termination': False,
}
block_device_info = {
'block_device_mapping': [mocked_bdm(1, bdm)]
}
conn.post_live_migration_at_destination(self.context, instance,
network_info, True,
block_device_info=block_device_info)

View File

@ -28,6 +28,7 @@ from nova import test
from nova.tests.image import fake as fake_image
from nova.tests import utils as test_utils
from nova.tests.virt.libvirt import fake_libvirt_utils
from nova.tests.virt.libvirt import test_libvirt
from nova.virt import event as virtevent
from nova.virt import fake
from nova.virt.libvirt import imagebackend
@ -458,11 +459,7 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
self.connection.attach_volume(None, connection_info, instance_ref,
'/dev/sda')
bdm = {
'root_device_name': None,
'swap': None,
'ephemerals': [],
'block_device_mapping': [{
bdm_data = {
'instance_uuid': instance_ref['uuid'],
'connection_info': {'driver_volume_type': 'fake'},
'mount_device': '/dev/sda',
@ -471,8 +468,15 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
'snapshot_id': None,
'volume_id': 'abcdedf',
'volume_size': None,
'no_device': None
}]
'no_device': None,
}
bdm = {
'block_device_mapping': [
test_libvirt.mocked_bdm(1, bdm_data),
],
'root_device_name': None,
'swap': None,
'ephemerals': [],
}
self.connection.power_on(self.ctxt, instance_ref, network_info, bdm)
self.connection.detach_volume(connection_info,

View File

@ -2776,6 +2776,9 @@ class LibvirtDriver(driver.ComputeDriver):
connection_info,
info)
devices.append(cfg)
self.virtapi.block_device_mapping_update(
nova_context.get_admin_context(), vol.id,
{'connection_info': jsonutils.dumps(connection_info)})
if 'disk.config' in disk_mapping:
diskconfig = self.get_guest_disk_config(instance,