HPE3PAR: Fix pointing to backend in group failover

Issue: After a group failover, subsequent operations
such as attach and detach volume on volumes belonging
to the failed-over group point to the primary backend
instead of the secondary.

This patch fixes the issue by setting up the
appropriate backend for subsequent operations on
volumes that are part of a failed-over group.

Change-Id: I679b11317c91ad28cefdf995a8d6849dc71bc1c5
Closes-Bug: #1773069
commit a433c19ef0
parent 2cbab15f1c
Author: Vivek Soni
Date:   2018-06-15 01:31:17 -07:00
5 changed files with 72 additions and 18 deletions
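
In outline, the patch threads an optional array_id from the volume's
metadata through _login() -> do_setup() -> _get_3par_config() ->
_do_replication_setup(), which then activates the matching replication
target. Below is a minimal, self-contained sketch of that flow;
SketchDriver and its return values are illustrative stand-ins, not the
real driver classes:

    class SketchDriver(object):
        def get_volume_replication_driver_data(self, volume):
            # Volumes in a failed-over group carry the secondary array id
            # (as a string) in replication_driver_data.
            if (volume.get("group_id") and
                    volume.get("replication_status") == "failed-over"):
                return int(volume.get("replication_driver_data"))
            return None

        def _login(self, array_id=None):
            # The real _login() passes array_id down to
            # _do_replication_setup(), which sets _active_backend_id to the
            # matching target's name before creating the client.
            return "secondary" if array_id else "primary"

        def terminate_connection(self, volume):
            array_id = self.get_volume_replication_driver_data(volume)
            return self._login(array_id=array_id)

    vol = {"group_id": "g1", "replication_status": "failed-over",
           "replication_driver_data": "322204706"}
    print(SketchDriver().terminate_connection(vol))  # -> secondary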

cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py

@@ -3526,7 +3526,39 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):
         # host. We can assert these methods were not called to make sure
         # the proper exceptions are being raised.
         self.assertEqual(0, mock_client.deleteVLUN.call_count)
         self.assertEqual(0, mock_client.deleteHost.call_count)
 
+    def test_terminate_connection_from_primary_when_group_failed_over(self):
+        mock_conf = {
+            'getStorageSystemInfo.return_value': {
+                'id': self.REPLICATION_CLIENT_ID,
+                'name': 'CSIM-EOS12_1611702'}}
+
+        conf = self.setup_configuration()
+        conf.replication_device = self.replication_targets
+        mock_client = self.setup_driver(config=conf, mock_conf=mock_conf)
+        mock_client.getHostVLUNs.side_effect = hpeexceptions.HTTPNotFound(
+            error={'desc': 'The host does not exist.'})
+
+        with mock.patch.object(hpecommon.HPE3PARCommon,
+                               '_create_client') as mock_create_client:
+            mock_create_client.return_value = mock_client
+
+            volume = self.volume_tiramisu.copy()
+            volume['replication_status'] = 'failed-over'
+            volume['replication_driver_data'] = self.REPLICATION_CLIENT_ID
+
+            self.driver._active_backend_id = "CSIM-EOS12_1611702"
+            self.driver.terminate_connection(
+                self.volume,
+                self.connector,
+                force=True)
+
+            # When the volume is still attached to the primary array after a
+            # fail-over, there should be no call to delete the VLUN(s) or the
+            # host. We can assert these methods were not called to make sure
+            # the proper exceptions are being raised.
+            self.assertEqual(0, mock_client.deleteVLUN.call_count)
+
     def test_extend_volume(self):
         # setup_mock_client drive with default configuration

cinder/volume/drivers/hpe/hpe_3par_base.py

@@ -57,10 +57,12 @@ class HPE3PARDriverBase(driver.ManageableVD,
         1.0.2 - Adds capability.
         1.0.3 - Added Tiramisu feature on 3PAR.
         1.0.4 - Fixed Volume migration for "in-use" volume. bug #1744021
+        1.0.5 - Set proper backend on subsequent operation, after group
+                failover. bug #1773069
 
     """
 
-    VERSION = "1.0.4"
+    VERSION = "1.0.5"
 
     def __init__(self, *args, **kwargs):
         super(HPE3PARDriverBase, self).__init__(*args, **kwargs)
@@ -73,12 +75,13 @@ class HPE3PARDriverBase(driver.ManageableVD,
         return hpecommon.HPE3PARCommon(self.configuration,
                                        self._active_backend_id)
 
-    def _login(self, timeout=None):
+    def _login(self, timeout=None, array_id=None):
         common = self._init_common()
         # If replication is enabled and we cannot login, we do not want to
         # raise an exception so a failover can still be executed.
         try:
-            common.do_setup(None, timeout=timeout, stats=self._stats)
+            common.do_setup(None, timeout=timeout, stats=self._stats,
+                            array_id=array_id)
             common.client_login()
         except Exception:
             if common._replication_enabled:
@@ -105,6 +108,12 @@ class HPE3PARDriverBase(driver.ManageableVD,
                           'san_password']
         common.check_flags(self.configuration, required_flags)
 
+    def get_volume_replication_driver_data(self, volume):
+        if (volume.get("group_id") and volume.get("replication_status") and
+                volume.get("replication_status") == "failed-over"):
+            return int(volume.get("replication_driver_data"))
+        return None
+
     @utils.trace
     def get_volume_stats(self, refresh=False):
         common = self._login()
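
For illustration, the helper's behaviour in isolation, reusing the
SketchDriver stand-in from the sketch above (values illustrative): the
id comes back as an int only for volumes in a failed-over group;
otherwise None, so _login() keeps the default backend.

    vol = {"group_id": "g1",
           "replication_status": "failed-over",
           "replication_driver_data": "322204706"}
    SketchDriver().get_volume_replication_driver_data(vol)  # -> 322204706
    SketchDriver().get_volume_replication_driver_data({"group_id": "g1"})
    # -> None (not failed over, so the primary backend is used)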

cinder/volume/drivers/hpe/hpe_3par_common.py

@@ -267,11 +267,13 @@ class HPE3PARCommon(object):
         4.0.6 - Monitor task of promoting a virtual copy. bug #1749642
         4.0.7 - Handle force detach case. bug #1686745
         4.0.8 - Added support for report backend state in service list.
+        4.0.9 - Set proper backend on subsequent operation, after group
+                failover. bug #1773069
 
     """
 
-    VERSION = "4.0.8"
+    VERSION = "4.0.9"
 
     stats = {}
@@ -430,7 +432,7 @@ class HPE3PARCommon(object):
         if client is not None:
             client.logout()
 
-    def do_setup(self, context, timeout=None, stats=None):
+    def do_setup(self, context, timeout=None, stats=None, array_id=None):
         if hpe3parclient is None:
             msg = _('You must install hpe3parclient before using 3PAR'
                     ' drivers. Run "pip install python-3parclient" to'
@@ -442,7 +444,7 @@ class HPE3PARCommon(object):
         # to communicate with the 3PAR array. It will contain either
         # the values for the primary array or secondary array in the
         # case of a fail-over.
-        self._get_3par_config()
+        self._get_3par_config(array_id=array_id)
         self.client = self._create_client(timeout=timeout)
         wsapi_version = self.client.getWsApiVersion()
         self.API_VERSION = wsapi_version['build']
@@ -461,7 +463,7 @@ class HPE3PARCommon(object):
         except hpeexceptions.UnsupportedVersion as ex:
             # In the event we cannot contact the configured primary array,
             # we want to allow a failover if replication is enabled.
-            self._do_replication_setup()
+            self._do_replication_setup(array_id=array_id)
             if self._replication_enabled:
                 self.client = None
             raise exception.InvalidInput(ex)
@@ -3591,7 +3593,7 @@ class HPE3PARCommon(object):
         return True
 
-    def _do_replication_setup(self):
+    def _do_replication_setup(self, array_id=None):
         replication_targets = []
         replication_devices = self.config.replication_device
         if replication_devices:
@@ -3624,8 +3626,11 @@ class HPE3PARCommon(object):
                 cl = None
                 try:
                     cl = self._create_replication_client(remote_array)
-                    array_id = six.text_type(cl.getStorageSystemInfo()['id'])
-                    remote_array['id'] = array_id
+                    info = cl.getStorageSystemInfo()
+                    remote_array['id'] = six.text_type(info['id'])
+
+                    if array_id and array_id == info['id']:
+                        self._active_backend_id = six.text_type(info['name'])
                     wsapi_version = cl.getWsApiVersion()['build']
                     if wsapi_version < REMOTE_COPY_API_VERSION:
@@ -3779,8 +3784,8 @@ class HPE3PARCommon(object):
             ret_mode = self.PERIODIC
         return ret_mode
 
-    def _get_3par_config(self):
-        self._do_replication_setup()
+    def _get_3par_config(self, array_id=None):
+        self._do_replication_setup(array_id=array_id)
         conf = None
         if self._replication_enabled:
             for target in self._replication_targets:
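
The matching rule is small enough to show on its own. A minimal
standalone sketch, assuming targets whose ids were already fetched via
getStorageSystemInfo() (names and values illustrative):

    def pick_active_backend(targets, array_id):
        # Mirror of the new branch in _do_replication_setup(): return the
        # name of the target whose storage-system id matches the failed-over
        # volume's array id; None leaves the primary backend active.
        for target in targets:
            if array_id and array_id == target['id']:
                return target['name']
        return None

    targets = [{'id': 322204706, 'name': 'CSIM-EOS12_1611702'}]
    pick_active_backend(targets, 322204706)  # -> 'CSIM-EOS12_1611702'
    pick_active_backend(targets, None)       # -> None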

cinder/volume/drivers/hpe/hpe_3par_fc.py

@@ -109,10 +109,12 @@ class HPE3PARFCDriver(hpebasedriver.HPE3PARDriverBase):
         4.0.2 - Create one vlun in single path configuration. bug #1727176
         4.0.3 - Create FC vlun as host sees. bug #1734505
         4.0.4 - Handle force detach case. bug #1686745
+        4.0.5 - Set proper backend on subsequent operation, after group
+                failover. bug #1773069
 
     """
 
-    VERSION = "4.0.4"
+    VERSION = "4.0.5"
 
     # The name of the CI wiki page.
     CI_WIKI_NAME = "HPE_Storage_CI"
@@ -162,7 +164,8 @@ class HPE3PARFCDriver(hpebasedriver.HPE3PARDriverBase):
         * Create a VLUN for that HOST with the volume we want to export.
 
         """
-        common = self._login()
+        array_id = self.get_volume_replication_driver_data(volume)
+        common = self._login(array_id=array_id)
         try:
             # we have to make sure we have a host
             host = self._create_host(common, volume, connector)
@@ -207,7 +210,8 @@ class HPE3PARFCDriver(hpebasedriver.HPE3PARDriverBase):
     @utils.trace
     def terminate_connection(self, volume, connector, **kwargs):
         """Driver entry point to unattach a volume from an instance."""
-        common = self._login()
+        array_id = self.get_volume_replication_driver_data(volume)
+        common = self._login(array_id=array_id)
         try:
             is_force_detach = connector is None
             if is_force_detach:

cinder/volume/drivers/hpe/hpe_3par_iscsi.py

@@ -123,6 +123,8 @@ class HPE3PARISCSIDriver(hpebasedriver.HPE3PARDriverBase):
         4.0.1 - Update CHAP on host record when volume is migrated
                 to new compute host. bug # 1737181
         4.0.2 - Handle force detach case. bug #1686745
+        4.0.3 - Set proper backend on subsequent operation, after group
+                failover. bug #1773069
 
     """
@@ -232,7 +234,8 @@ class HPE3PARISCSIDriver(hpebasedriver.HPE3PARDriverBase):
         * Create a host on the 3par
         * create vlun on the 3par
 
         """
-        common = self._login()
+        array_id = self.get_volume_replication_driver_data(volume)
+        common = self._login(array_id=array_id)
         try:
             # If the volume has been failed over, we need to reinitialize
             # iSCSI ports so they represent the new array.
@@ -363,7 +366,8 @@ class HPE3PARISCSIDriver(hpebasedriver.HPE3PARDriverBase):
     @utils.trace
     def terminate_connection(self, volume, connector, **kwargs):
         """Driver entry point to unattach a volume from an instance."""
-        common = self._login()
+        array_id = self.get_volume_replication_driver_data(volume)
+        common = self._login(array_id=array_id)
         try:
             is_force_detach = connector is None
             if is_force_detach: