Detach disks using alias when possible
This makes us attempt to first look up a disk device by alias using the volume_uuid, before falling back to the old method of using the guest target device name. Related to blueprint libvirt-dev-alias. Change-Id: I1dfe4ad3df81bc810835af9b09cfc6c06e9a5388
This commit is contained in:
parent
9a62959edd
commit
1ed02d57c0
|
@ -9929,17 +9929,21 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
@mock.patch('threading.Event', new=mock.Mock())
|
||||
@mock.patch('nova.virt.libvirt.host.Host._get_domain')
|
||||
def test_detach_volume_with_vir_domain_affect_live_flag(self,
|
||||
mock_get_domain):
|
||||
mock_get_domain, use_alias=True):
|
||||
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
||||
instance = objects.Instance(**self.test_instance)
|
||||
volume_id = uuids.volume
|
||||
alias_xml = '<alias name="%s"/>' % vconfig.make_libvirt_device_alias(
|
||||
volume_id)
|
||||
mock_xml_with_disk = """<domain>
|
||||
<devices>
|
||||
<disk type='file'>
|
||||
%(alias)s
|
||||
<source file='/path/to/fake-volume'/>
|
||||
<target dev='vdc' bus='virtio'/>
|
||||
</disk>
|
||||
</devices>
|
||||
</domain>"""
|
||||
</domain>""" % {'alias': use_alias and alias_xml or ''}
|
||||
mock_xml_without_disk = """<domain>
|
||||
<devices>
|
||||
</devices>
|
||||
|
@ -9951,14 +9955,26 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
mock_xml_with_disk, # persistent domain
|
||||
mock_xml_with_disk, # live domain
|
||||
mock_xml_without_disk, # persistent gone
|
||||
mock_xml_without_disk # live gone
|
||||
mock_xml_without_disk, # persistent gone (no-alias retry)
|
||||
mock_xml_without_disk, # live gone
|
||||
mock_xml_without_disk, # live gone (no-alias retry)
|
||||
]
|
||||
if not use_alias:
|
||||
# If we're not using the alias to detach we will end up with two
|
||||
# extra checks of the XML with the alias (which will fail to find
|
||||
# anything) before we fall back to by-path and succeed.
|
||||
return_list = [
|
||||
mock_xml_with_disk, # persistent check for missing alias
|
||||
mock_xml_with_disk, # live check for missing alias
|
||||
] + return_list
|
||||
|
||||
# Doubling the size of return list because we test with two guest power
|
||||
# states
|
||||
mock_dom.XMLDesc.side_effect = return_list + return_list
|
||||
|
||||
connection_info = {"driver_volume_type": "fake",
|
||||
"data": {"device_path": "/fake",
|
||||
"volume_id": volume_id,
|
||||
"access_mode": "rw"}}
|
||||
|
||||
with mock.patch.object(drvr, '_disconnect_volume') as \
|
||||
|
@ -9972,9 +9988,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
|
||||
mock_get_domain.assert_called_with(instance)
|
||||
xml = """<disk type="file" device="disk">
|
||||
%(alias)s
|
||||
<source file="/path/to/fake-volume"/>
|
||||
<target bus="virtio" dev="vdc"/>
|
||||
</disk>"""
|
||||
</disk>""" % {'alias': use_alias and alias_xml or ''}
|
||||
# we expect two separate detach calls
|
||||
self.assertEqual(2, mock_dom.detachDeviceFlags.call_count)
|
||||
# one for the persistent domain
|
||||
|
@ -9993,6 +10010,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
mock_disconnect_volume.assert_called_with(
|
||||
self.context, connection_info, instance, encryption=None)
|
||||
|
||||
def test_detach_volume_with_vir_domain_affect_live_flag_legacy(self):
    # Re-run the detach test with use_alias=False to exercise the
    # legacy fallback path that looks the disk up by guest target
    # device name instead of by nova-assigned device alias.
    self.test_detach_volume_with_vir_domain_affect_live_flag(
        use_alias=False)
|
||||
|
||||
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
|
||||
@mock.patch('nova.virt.libvirt.host.Host._get_domain')
|
||||
def test_detach_volume_disk_not_found(self, mock_get_domain,
|
||||
|
@ -10090,7 +10111,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
mock.call.detach_volume(
|
||||
mock_guest,
|
||||
instance.uuid,
|
||||
# it is functools.partial(mock_guest.get_disk, 'vdc')
|
||||
# it is functools.partial(driver._get_guest_disk_device, 'vdc')
|
||||
# see assert below
|
||||
mock.ANY,
|
||||
device_name='vdc',
|
||||
|
@ -10103,8 +10124,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
)
|
||||
])
|
||||
get_device_conf_func = mock_detach_with_retry.mock_calls[0][1][2]
|
||||
self.assertEqual(mock_guest.get_disk, get_device_conf_func.func)
|
||||
self.assertEqual(('vdc',), get_device_conf_func.args)
|
||||
self.assertEqual(drvr._get_guest_disk_device,
|
||||
get_device_conf_func.func)
|
||||
self.assertEqual((mock_get_guest.return_value, 'vdc'),
|
||||
get_device_conf_func.args)
|
||||
|
||||
def test_extend_volume(self):
|
||||
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
|
||||
|
|
|
@ -260,6 +260,84 @@ class GuestTestCase(test.NoDBTestCase):
|
|||
self.assertEqual('kvm', result.virt_type)
|
||||
self.assertEqual('fake', result.name)
|
||||
|
||||
def test_get_device_by_alias(self):
    # Exercise Guest.get_device_by_alias against a domain XML that
    # contains a libvirt-generated alias ("qemu-disk1"), a
    # user-specified alias ("ua-netdisk"), and devices with no alias
    # at all (third disk, hostdevs, interface, controllers).
    xml = """
<domain type='qemu'>
  <name>QEMUGuest1</name>
  <uuid>c7a5fdbd-edaf-9455-926a-d65c16db1809</uuid>
  <memory unit='KiB'>219136</memory>
  <currentMemory unit='KiB'>219136</currentMemory>
  <vcpu placement='static'>1</vcpu>
  <os>
    <type arch='i686' machine='pc'>hvm</type>
    <boot dev='hd'/>
  </os>
  <clock offset='utc'/>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>destroy</on_crash>
  <devices>
    <emulator>/usr/bin/qemu</emulator>
    <disk type='block' device='disk'>
      <alias name="qemu-disk1"/>
      <driver name='qemu' type='raw'/>
      <source dev='/dev/HostVG/QEMUGuest2'/>
      <target dev='hda' bus='ide'/>
      <address type='drive' controller='0' bus='0' target='0' unit='0'/>
    </disk>
    <disk type='network' device='disk'>
      <alias name="ua-netdisk"/>
      <driver name='qemu' type='raw'/>
      <auth username='myname'>
        <secret type='iscsi' usage='mycluster_myname'/>
      </auth>
      <source protocol='iscsi' name='iqn.1992-01.com.example'>
        <host name='example.org' port='6000'/>
      </source>
      <target dev='vda' bus='virtio'/>
    </disk>
    <disk type='network' device='disk'>
      <driver name='qemu' type='raw'/>
      <source protocol='iscsi' name='iqn.1992-01.com.example/1'>
        <host name='example.org' port='6000'/>
      </source>
      <target dev='vdb' bus='virtio'/>
    </disk>
    <hostdev mode='subsystem' type='pci' managed='yes'>
      <source>
        <address domain='0x0000' bus='0x06' slot='0x12' function='0x5'/>
      </source>
    </hostdev>
    <hostdev mode='subsystem' type='pci' managed='yes'>
      <source>
        <address domain='0x0000' bus='0x06' slot='0x12' function='0x6'/>
      </source>
    </hostdev>
    <interface type="bridge">
      <mac address="fa:16:3e:f9:af:ae"/>
      <model type="virtio"/>
      <driver name="qemu"/>
      <source bridge="qbr84008d03-11"/>
      <target dev="tap84008d03-11"/>
    </interface>
    <controller type='usb' index='0'/>
    <controller type='pci' index='0' model='pci-root'/>
    <memballoon model='none'/>
  </devices>
</domain>
"""
    self.domain.XMLDesc.return_value = xml

    # Lookup by libvirt-generated alias returns the parsed disk config.
    dev = self.guest.get_device_by_alias('qemu-disk1')
    self.assertIsInstance(dev, vconfig.LibvirtConfigGuestDisk)
    self.assertEqual('hda', dev.target_dev)

    # Lookup by user-specified ("ua-" prefixed) alias also works.
    dev = self.guest.get_device_by_alias('ua-netdisk')
    self.assertIsInstance(dev, vconfig.LibvirtConfigGuestDisk)
    self.assertEqual('vda', dev.target_dev)

    # An unknown alias yields None rather than raising.
    self.assertIsNone(self.guest.get_device_by_alias('nope'))
|
||||
|
||||
def test_get_devices(self):
|
||||
xml = """
|
||||
<domain type='qemu'>
|
||||
|
|
|
@ -2719,6 +2719,33 @@ class LibvirtDriver(driver.ComputeDriver):
|
|||
'instance %s: %s', device_name, instance_uuid, str(ex))
|
||||
raise
|
||||
|
||||
def _get_guest_disk_device(self, guest, disk_dev, volume_uuid=None,
                           from_persistent_config=False):
    """Attempt to find the guest disk.

    If a volume_uuid is provided, we will look for the device based
    on the nova-specified alias. If not, or we do not find it that way,
    fall back to the old way of using the disk_dev.

    :param guest: the nova.virt.libvirt.guest.Guest to search
    :param disk_dev: guest target device name (e.g. 'vdc') used for
        the legacy by-target lookup
    :param volume_uuid: optional volume UUID from which the
        nova-specified device alias is derived for the primary lookup
    :param from_persistent_config: if True, search the persistent
        domain definition instead of the live one
    :returns: the matching disk device config object, or None if the
        disk was not found by either method
    """
    # NOTE: dev_alias must be initialized here; the original code
    # referenced it in the fallback log below even when volume_uuid
    # was None, which raised UnboundLocalError on the legacy path.
    dev_alias = None
    if volume_uuid is not None:
        dev_alias = vconfig.make_libvirt_device_alias(volume_uuid)
        dev = guest.get_device_by_alias(
            dev_alias,
            from_persistent_config=from_persistent_config)
        if dev:
            LOG.debug('Found disk %s by alias %s', disk_dev, dev_alias)
            return dev
    dev = guest.get_disk(disk_dev,
                         from_persistent_config=from_persistent_config)
    if dev and dev_alias is not None:
        # NOTE(danms): Only log that we fell back to the old way if it
        # worked. Since we call this method after detach is done to
        # ensure it is gone, we will always "fall back" to make sure it
        # is gone by the "old way" and thus shouldn't announce it.
        LOG.info('Device %s not found by alias %s, falling back',
                 disk_dev, dev_alias)
    return dev
|
||||
|
||||
def detach_volume(self, context, connection_info, instance, mountpoint,
|
||||
encryption=None):
|
||||
disk_dev = mountpoint.rpartition("/")[2]
|
||||
|
@ -2729,7 +2756,11 @@ class LibvirtDriver(driver.ComputeDriver):
|
|||
# detaching any attached encryptors or disconnecting the underlying
|
||||
# volume in _disconnect_volume. Otherwise, the encryptor or volume
|
||||
# driver may report that the volume is still in use.
|
||||
get_dev = functools.partial(guest.get_disk, disk_dev)
|
||||
volume_id = driver_block_device.get_volume_id(connection_info)
|
||||
get_dev = functools.partial(self._get_guest_disk_device,
|
||||
guest,
|
||||
disk_dev,
|
||||
volume_uuid=volume_id)
|
||||
self._detach_with_retry(
|
||||
guest,
|
||||
instance.uuid,
|
||||
|
|
|
@ -414,6 +414,12 @@ class Guest(object):
|
|||
|
||||
return self.get_all_devices(vconfig.LibvirtConfigGuestDisk)
|
||||
|
||||
def get_device_by_alias(self, devalias, devtype=None,
                        from_persistent_config=False):
    """Lookup a device in this guest by its libvirt <alias> name.

    :param devalias: the device alias name to match
    :param devtype: optional vconfig.LibvirtConfigGuestDevice subclass
        to restrict the search to (None searches all devices)
    :param from_persistent_config: if True, search the persistent
        (inactive) domain definition rather than the live one
    :returns: the first matching device config object, or None if no
        device carries that alias
    """
    # Thread from_persistent_config through to get_all_devices; the
    # parameter was previously accepted but ignored, so persistent
    # lookups silently searched the live domain instead.
    for dev in self.get_all_devices(
            devtype, from_persistent_config=from_persistent_config):
        if dev.alias == devalias:
            return dev
|
||||
|
||||
def get_all_devices(
|
||||
self,
|
||||
devtype: vconfig.LibvirtConfigGuestDevice = None,
|
||||
|
|
Loading…
Reference in New Issue