vSCSI In-tree Backports

This contains backports for minor code changes made during in-tree
driver vSCSI development [1].

[1] https://review.openstack.org/#/c/526094/

Change-Id: I60788e8756df2dfec5f0a407ca149a8ea3cedd53
This commit is contained in:
esberglu 2018-06-22 14:17:14 -05:00
parent edae7d1168
commit 066369594e
5 changed files with 50 additions and 48 deletions

View File

@ -1,4 +1,4 @@
# Copyright 2014, 2018 IBM Corp.
# Copyright IBM Corp. and contributors
#
# All Rights Reserved.
#
@ -23,6 +23,7 @@ from oslo_serialization import jsonutils
from nova import block_device as nova_block_device
from nova.compute import task_states
from nova import conf as cfg
from nova import exception as exc
from nova import objects
from nova.objects import base as obj_base
@ -49,6 +50,7 @@ from nova_powervm.virt.powervm import exception as p_exc
from nova_powervm.virt.powervm import live_migration as lpm
from nova_powervm.virt.powervm import vm
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
logging.basicConfig()
@ -218,7 +220,7 @@ class TestPowerVMDriver(test.NoDBTestCase):
vol_connector = self.drv.get_volume_connector(mock.Mock())
self.assertIsNotNone(vol_connector['wwpns'])
self.assertIsNotNone(vol_connector['host'])
self.assertEqual(vol_connector['host'], CONF.host)
self.assertEqual('fake_iqn1', vol_connector['initiator'])
def test_setup_disk_adapter(self):
@ -1269,7 +1271,7 @@ class TestPowerVMDriver(test.NoDBTestCase):
with mock.patch.object(self.inst, 'save') as mock_save:
# Invoke the method.
self.drv.attach_volume('context', mock_bdm.get('connection_info'),
self.inst, mock.Mock())
self.inst, mock.sentinel.stg_ftsk)
mock_bld_slot_mgr.assert_called_once_with(self.inst,
self.drv.store_api)
@ -1277,7 +1279,7 @@ class TestPowerVMDriver(test.NoDBTestCase):
self.vol_drv.connect_volume.assert_called_once_with(
mock_bld_slot_mgr.return_value)
mock_bld_slot_mgr.return_value.save.assert_called_once_with()
self.assertTrue(mock_save.called)
mock_save.assert_called_once_with()
@mock.patch('nova_powervm.virt.powervm.vm.instance_exists', autospec=True)
@mock.patch('nova_powervm.virt.powervm.slot.build_slot_mgr', autospec=True)
@ -1290,7 +1292,7 @@ class TestPowerVMDriver(test.NoDBTestCase):
mock_bdm = self._fake_bdms()['block_device_mapping'][0]
# Invoke the method, good path test.
self.drv.detach_volume('context', mock_bdm.get('connection_info'),
self.inst, mock.Mock())
self.inst, mock.sentinel.stg_ftsk)
mock_bld_slot_mgr.assert_called_once_with(self.inst,
self.drv.store_api)
@ -1302,7 +1304,7 @@ class TestPowerVMDriver(test.NoDBTestCase):
# Invoke the method, instance doesn't exist, no migration
self.vol_drv.disconnect_volume.reset_mock()
self.drv.detach_volume('context', mock_bdm.get('connection_info'),
self.inst, mock.Mock())
self.inst, mock.sentinel.stg_ftsk)
# Verify the disconnect volume was not invoked
self.assertEqual(0, self.vol_drv.disconnect_volume.call_count)
@ -1312,7 +1314,7 @@ class TestPowerVMDriver(test.NoDBTestCase):
self.drv.live_migrations[self.inst.uuid] = mig
with mock.patch.object(mig, 'cleanup_volume') as mock_clnup:
self.drv.detach_volume('context', mock_bdm.get('connection_info'),
self.inst, mock.Mock())
self.inst, mock.sentinel.stg_ftsk)
# The cleanup should have been called since there was a migration
self.assertEqual(1, mock_clnup.call_count)
# Verify the disconnect volume was not invoked

View File

@ -754,7 +754,13 @@ class PowerVMDriver(driver.ComputeDriver):
instance.save()
def extend_volume(self, connection_info, instance):
"""Resize an attached volume"""
"""Extend the disk attached to the instance.
:param dict connection_info: The connection for the extended volume.
:param nova.objects.instance.Instance instance:
The instance whose volume gets extended.
:return: None
"""
vol_drv = vol_attach.build_volume_driver(
self.adapter, self.host_uuid, instance, connection_info)
vol_drv.extend_volume()
@ -764,6 +770,9 @@ class PowerVMDriver(driver.ComputeDriver):
"""Detach the volume attached to the instance."""
self._log_operation('detach_volume', instance)
# Define the flow
flow = tf_lf.Flow("detach_volume")
# Get a volume adapter for this volume
vol_drv = vol_attach.build_volume_driver(
self.adapter, self.host_uuid, instance, connection_info)
@ -783,9 +792,6 @@ class PowerVMDriver(driver.ComputeDriver):
mig.cleanup_volume(vol_drv)
return
# Define the flow
flow = tf_lf.Flow("detach_volume")
# Add a task to detach the volume
slot_mgr = slot.build_slot_mgr(instance, self.store_api)
flow.add(tf_stg.DisconnectVolume(vol_drv, slot_mgr))
@ -1660,10 +1666,6 @@ class PowerVMDriver(driver.ComputeDriver):
"""Yields a bdm and volume driver."""
# Get a volume driver for each volume
for bdm in bdms or []:
# Ignore if it's not a volume
if not bdm.is_volume:
continue
vol_drv = vol_attach.build_volume_driver(
self.adapter, self.host_uuid, instance,
bdm.get('connection_info'), stg_ftsk=stg_ftsk)
@ -1688,7 +1690,7 @@ class PowerVMDriver(driver.ComputeDriver):
classes from nova.virt.block_device. Each block device
represents one volume connection.
An example string representation of the a DriverVolumeBlockDevice
An example string representation of a DriverVolumeBlockDevice
from the early Liberty time frame is:
{'guest_format': None,
'boot_index': 0,

View File

@ -55,8 +55,6 @@ class ConnectVolume(task.Task):
self.vol_drv.connect_volume(self.slot_mgr)
def revert(self, result, flow_failures):
# The parameters have to match the execute method, plus the response +
# failures even if only a subset are used.
LOG.warning('Rolling back connection for volume %(vol)s.',
{'vol': self.vol_id}, instance=self.vol_drv.instance)
@ -101,8 +99,6 @@ class DisconnectVolume(task.Task):
self.vol_drv.disconnect_volume(self.slot_mgr)
def revert(self, result, flow_failures):
# The parameters have to match the execute method, plus the response +
# failures even if only a subset are used.
LOG.warning('Reconnecting volume %(vol)s on disconnect rollback.',
{'vol': self.vol_id}, instance=self.vol_drv.instance)

View File

@ -1,4 +1,4 @@
# Copyright 2015, 2018 IBM Corp.
# Copyright IBM Corp. and contributors
#
# All Rights Reserved.
#
@ -152,8 +152,10 @@ class VscsiVolumeAdapter(object):
:param tag: String tag to set on the physical volume.
"""
def add_func(vios_w):
LOG.info("Adding vSCSI mapping to Physical Volume %(dev)s",
{'dev': device_name}, instance=self.instance)
LOG.info("Adding vSCSI mapping to Physical Volume %(dev)s on "
"vios %(vios)s.",
{'dev': device_name, 'vios': vios_w.name},
instance=self.instance)
pv = pvm_stor.PV.bld(self.adapter, device_name, udid=udid,
tag=tag)
v_map = tsk_map.build_vscsi_mapping(
@ -172,8 +174,8 @@ class VscsiVolumeAdapter(object):
except (KeyError, ValueError):
# It's common to lose our specific data in the BDM. The connection
# information can be 'refreshed' by operations like LPM and resize
LOG.info('Failed to retrieve device_id key from BDM for volume id '
'%s', self.volume_id, instance=self.instance)
LOG.info('Failed to retrieve target_UDID key from BDM for volume '
'id %s', self.volume_id, instance=self.instance)
return None
def _set_udid(self, udid):
@ -184,7 +186,7 @@ class VscsiVolumeAdapter(object):
self.connection_info['data'][UDID_KEY] = udid
def _add_remove_mapping(self, vm_uuid, vios_uuid, device_name, slot_mgr):
"""Adds a transaction to remove the storage mapping.
"""Adds a subtask to remove the storage mapping.
:param vm_uuid: The UUID of the VM instance
:param vios_uuid: The UUID of the vios for the pypowervm adapter.
@ -193,8 +195,10 @@ class VscsiVolumeAdapter(object):
used when a volume is detached from the VM.
"""
def rm_func(vios_w):
LOG.info("Removing vSCSI mapping from physical volume %(dev)s.",
{'dev': device_name}, instance=self.instance)
LOG.info("Removing vSCSI mapping from physical volume %(dev)s "
"on vios %(vios)s",
{'dev': device_name, 'vios': vios_w.name},
instance=self.instance)
removed_maps = tsk_map.remove_maps(
vios_w, vm_uuid,
tsk_map.gen_match_func(pvm_stor.PV, names=[device_name]))
@ -249,7 +253,7 @@ class VscsiVolumeAdapter(object):
from.
:param device_name: The hdisk name to remove.
:return: True is there are multiple instances using the given hdisk
:return: True if there are multiple instances using the given hdisk
"""
vios_scsi_mappings = next(v.scsi_mappings for v in self.stg_ftsk.feed
if v.uuid == vios_wrap.uuid)
@ -257,11 +261,10 @@ class VscsiVolumeAdapter(object):
vios_scsi_mappings, None,
tsk_map.gen_match_func(pvm_stor.PV, names=[device_name]))
LOG.info("%(num)d storage mappings found for %(dev)s on VIOS %(vios)s",
{'num': len(mappings), 'dev': device_name,
'vios': vios_wrap.name}, instance=self.instance)
# the mapping is still present as the task feed removes
# the mapping later
LOG.debug("%(num)d storage mapping(s) found for %(dev)s on VIOS "
"%(vios)s", {'num': len(mappings), 'dev': device_name,
'vios': vios_wrap.name}, instance=self.instance)
# the mapping is still present as the task feed removes it later
return len(mappings) > 1
def _cleanup_volume(self, udid=None, devname=None):

View File

@ -1,4 +1,4 @@
# Copyright 2015, 2018 IBM Corp.
# Copyright IBM Corp. and contributors
#
# All Rights Reserved.
#
@ -175,7 +175,7 @@ class PVVscsiFCVolumeAdapter(volume.VscsiVolumeAdapter,
:param volume_id: Volume to discover
:returns: Status of the volume or None
:returns: Device name or None
:returns: LUN or None
:returns: UDID or None
"""
# Get the initiator WWPNs, targets and LUN for the given VIOS.
vio_wwpns, t_wwpns, lun = self._get_hdisk_itls(vios_w)
@ -196,13 +196,13 @@ class PVVscsiFCVolumeAdapter(volume.VscsiVolumeAdapter,
LOG.info('Discovered %(hdisk)s on vios %(vios)s for volume '
'%(volume_id)s. Status code: %(status)s.',
{'hdisk': device_name, 'vios': vios_w.name,
'volume_id': volume_id, 'status': str(status)},
'volume_id': volume_id, 'status': status},
instance=self.instance)
elif status == hdisk.LUAStatus.DEVICE_IN_USE:
LOG.warning('Discovered device %(dev)s for volume %(volume)s '
'on %(vios)s is in use. Error code: %(status)s.',
{'dev': device_name, 'volume': volume_id,
'vios': vios_w.name, 'status': str(status)},
'vios': vios_w.name, 'status': status},
instance=self.instance)
return status, device_name, udid
@ -243,7 +243,9 @@ class PVVscsiFCVolumeAdapter(volume.VscsiVolumeAdapter,
# Save the UDID for the disk in the connection info. It is
# used for the detach.
self._set_udid(udid)
LOG.debug('Added deferred task to attach device %s', device_name,
LOG.debug('Added deferred task to attach device %(device_name)s '
'to vios %(vios_name)s.',
{'device_name': device_name, 'vios_name': vios_w.name},
instance=self.instance)
# Valid attachment
@ -267,10 +269,9 @@ class PVVscsiFCVolumeAdapter(volume.VscsiVolumeAdapter,
LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
dict(vol=self.volume_id, uuid=vios_w.uuid),
instance=self.instance)
udid, device_name = None, None
device_name = None
udid = self._get_udid()
try:
udid = self._get_udid()
if udid:
# This will only work if vios_w has the Storage XAG.
device_name = vios_w.hdisk_from_uuid(udid)
@ -280,10 +281,8 @@ class PVVscsiFCVolumeAdapter(volume.VscsiVolumeAdapter,
status, device_name, udid = self._discover_volume_on_vios(
vios_w, self.volume_id)
# If we have a device name, but not a udid, at this point
# we should not continue. The hdisk is in a bad state
# in the I/O Server. Subsequent scrub code on future
# deploys will clean this up.
# Check if the hdisk is in a bad state in the I/O Server.
# Subsequent scrub code on future deploys will clean it up.
if not hdisk.good_discovery(status, device_name):
LOG.warning(
"Disconnect Volume: The backing hdisk for volume "
@ -299,8 +298,8 @@ class PVVscsiFCVolumeAdapter(volume.VscsiVolumeAdapter,
"Disconnect Volume: Failed to find disk on Virtual I/O "
"Server %(vios_name)s for volume %(volume_id)s. Volume "
"UDID: %(volume_uid)s.",
{'volume_uid': udid, 'vios_name': vios_w.name,
'volume_id': self.volume_id}, instance=self.instance)
{'vios_name': vios_w.name, 'volume_id': self.volume_id,
'volume_uid': udid}, instance=self.instance)
return False
# We have found the device name