Merge "DS8K: correct in-use replication vol status"

Zuul 2018-03-21 06:39:03 +00:00 committed by Gerrit Code Review
commit 41321d4bc0
3 changed files with 259 additions and 51 deletions


@@ -908,6 +908,8 @@ FAKE_REST_API_RESPONSES = {
FAKE_MAP_VOLUME_RESPONSE,
TEST_SOURCE_DS8K_IP + '/ioports/get':
FAKE_GET_IOPORT_RESPONSE,
TEST_TARGET_DS8K_IP + '/ioports/get':
FAKE_GET_IOPORT_RESPONSE,
TEST_SOURCE_DS8K_IP + '/hosts/post':
FAKE_CREATE_HOST_RESPONSE,
TEST_SOURCE_DS8K_IP + '/host_ports/assign/post':
@@ -915,11 +917,18 @@ FAKE_REST_API_RESPONSES = {
TEST_SOURCE_DS8K_IP + '/hosts%5Bid=' + TEST_HOST_ID + '%5D/mappings/get':
FAKE_GET_MAPPINGS_RESPONSE,
TEST_SOURCE_DS8K_IP + '/hosts%5Bid=' + TEST_HOST_ID + '%5D/mappings/' +
TEST_LUN_ID + '/delete':
FAKE_DELETE_MAPPINGS_RESPONSE,
TEST_TARGET_DS8K_IP + '/hosts%5Bid=' + TEST_HOST_ID + '%5D/mappings/' +
TEST_LUN_ID + '/delete':
FAKE_DELETE_MAPPINGS_RESPONSE,
TEST_SOURCE_DS8K_IP + '/host_ports/' + TEST_SOURCE_WWPN_2 + '/delete':
FAKE_DELETE_HOST_PORTS_RESPONSE,
TEST_TARGET_DS8K_IP + '/host_ports/' + TEST_SOURCE_WWPN_2 + '/delete':
FAKE_DELETE_HOST_PORTS_RESPONSE,
TEST_SOURCE_DS8K_IP + '/hosts%5Bid=' + TEST_HOST_ID + '%5D/delete':
FAKE_DELETE_HOSTS_RESPONSE,
TEST_TARGET_DS8K_IP + '/hosts%5Bid=' + TEST_HOST_ID + '%5D/delete':
FAKE_DELETE_HOSTS_RESPONSE
}
@@ -2740,6 +2749,23 @@ class DS8KProxyTest(test.TestCase):
self.driver.terminate_connection(volume, TEST_CONNECTOR)
def test_terminate_connection_of_eckd_volume(self):
"""attach a ECKD volume to host."""
self.configuration.connection_type = (
storage.XIV_CONNECTION_TYPE_FC_ECKD)
self.configuration.ds8k_devadd_unitadd_mapping = 'C4-10'
self.configuration.ds8k_ssid_prefix = 'FF'
self.configuration.san_clustername = TEST_ECKD_POOL_ID
self.driver = FakeDS8KProxy(self.storage_info, self.logger,
self.exception, self)
self.driver.setup(self.ctxt)
vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {})
location = six.text_type({'vol_hex_id': TEST_ECKD_VOLUME_ID})
volume = self._create_volume(volume_type_id=vol_type.id,
provider_location=location)
unmap_data = self.driver.terminate_connection(volume, {})
self.assertIsNone(unmap_data)
@mock.patch.object(helper.DS8KCommonHelper, '_get_host_ports')
def test_terminate_connection_with_multiple_hosts(self,
mock_get_host_ports):
@@ -2795,8 +2821,9 @@ class DS8KProxyTest(test.TestCase):
}
]
mock_get_host_ports.side_effect = [host_ports]
unmap_data = self.driver.terminate_connection(volume, TEST_CONNECTOR)
self.assertIsNone(unmap_data)
self.assertRaises(exception.VolumeDriverException,
self.driver.terminate_connection, volume,
TEST_CONNECTOR)
@mock.patch.object(helper.DS8KCommonHelper, '_get_host_ports')
@mock.patch.object(helper.DS8KCommonHelper, '_get_mappings')
@@ -2838,6 +2865,128 @@ class DS8KProxyTest(test.TestCase):
mock_get_mappings.side_effect = [mappings]
self.driver.terminate_connection(volume, TEST_CONNECTOR)
@mock.patch.object(helper.DS8KCommonHelper, '_get_host_ports')
@mock.patch.object(helper.DS8KCommonHelper, '_get_mappings')
def test_detach_with_host_has_failed_over(self, mock_get_mappings,
mock_get_host_ports):
self.configuration.replication_device = [TEST_REPLICATION_DEVICE]
self.driver = FakeDS8KProxy(self.storage_info, self.logger,
self.exception, self, TEST_TARGET_DS8K_IP)
self.driver.setup(self.ctxt)
vol_type = volume_types.create(self.ctxt, 'VOL_TYPE',
{'replication_enabled': '<is> True'})
location = six.text_type({'vol_hex_id': TEST_VOLUME_ID})
data = json.dumps(
{'default': {'vol_hex_id': TEST_VOLUME_ID}})
volume = self._create_volume(volume_type_id=vol_type.id,
provider_location=location,
replication_driver_data=data)
host_ports_1 = [
{
"wwpn": TEST_SOURCE_WWPN_1,
"state": "logged in",
"hosttype": "LinuxRHEL",
"addrdiscovery": "lunpolling",
"host_id": TEST_HOST_ID
},
{
"wwpn": TEST_SOURCE_WWPN_2,
"state": "unconfigured",
"hosttype": "LinuxRHEL",
"addrdiscovery": "lunpolling",
"host_id": ''
}
]
host_ports_2 = [
{
"wwpn": TEST_SOURCE_WWPN_1,
"state": "logged in",
"hosttype": "LinuxRHEL",
"addrdiscovery": "lunpolling",
"host_id": TEST_HOST_ID
},
{
"wwpn": TEST_SOURCE_WWPN_2,
"state": "unconfigured",
"hosttype": "LinuxRHEL",
"addrdiscovery": "lunpolling",
"host_id": ''
}
]
mappings_1 = [
{
"lunid": TEST_LUN_ID,
"link": {},
"volume": {"id": TEST_VOLUME_ID_2, "link": {}}
}
]
mappings_2 = [
{
"lunid": TEST_LUN_ID,
"link": {},
"volume": {"id": TEST_VOLUME_ID, "link": {}}
}
]
mock_get_host_ports.side_effect = [host_ports_1, host_ports_2]
mock_get_mappings.side_effect = [mappings_1, mappings_2]
self.driver.terminate_connection(volume, TEST_CONNECTOR)
@mock.patch.object(helper.DS8KCommonHelper, '_get_host_ports')
@mock.patch.object(helper.DS8KCommonHelper, '_get_mappings')
def test_detach_with_group_has_failed_over(self, mock_get_mappings,
mock_get_host_ports):
self.configuration.replication_device = [TEST_REPLICATION_DEVICE]
self.driver = FakeDS8KProxy(self.storage_info, self.logger,
self.exception, self)
self.driver.setup(self.ctxt)
group_type = group_types.create(
self.ctxt,
'group',
{'consistent_group_snapshot_enabled': '<is> True'}
)
group = self._create_group(host=TEST_GROUP_HOST,
group_type_id=group_type.id,
replication_status='failed-over')
vol_type = volume_types.create(self.ctxt, 'VOL_TYPE',
{'replication_enabled': '<is> True'})
location = six.text_type({'vol_hex_id': TEST_VOLUME_ID})
data = json.dumps(
{'default': {'vol_hex_id': TEST_VOLUME_ID}})
volume = self._create_volume(volume_type_id=vol_type.id,
provider_location=location,
replication_driver_data=data,
group_id=group.id,
replication_status='failed-over')
host_ports = [
{
"wwpn": TEST_SOURCE_WWPN_1,
"state": "logged in",
"hosttype": "LinuxRHEL",
"addrdiscovery": "lunpolling",
"host_id": TEST_HOST_ID
},
{
"wwpn": TEST_SOURCE_WWPN_2,
"state": "unconfigured",
"hosttype": "LinuxRHEL",
"addrdiscovery": "lunpolling",
"host_id": ''
}
]
mappings = [
{
"lunid": TEST_LUN_ID,
"link": {},
"volume": {"id": TEST_VOLUME_ID, "link": {}}
}
]
mock_get_host_ports.side_effect = [host_ports]
mock_get_mappings.side_effect = [mappings]
self.driver.terminate_connection(volume, TEST_CONNECTOR)
def test_create_consistency_group(self):
"""user should reserve LSS for consistency group."""
self.driver = FakeDS8KProxy(self.storage_info, self.logger,


@@ -592,6 +592,33 @@ class DS8KCommonHelper(object):
htype = 'LinuxRHEL'
return collections.namedtuple('Host', ('name', 'type'))(hname, htype)
def check_vol_mapped_to_host(self, connector, vol_id):
map_info = {
'host_ports': [],
'mappings': [],
'lun_ids': []
}
host_wwpn_set = set(wwpn.upper() for wwpn in connector['wwpns'])
host_ports = self._get_host_ports(host_wwpn_set)
defined_hosts = set(
hp['host_id'] for hp in host_ports if hp['host_id'])
if not defined_hosts:
return False, None, map_info
elif len(defined_hosts) > 1:
raise restclient.APIException(_('More than one host found.'))
else:
host_id = defined_hosts.pop()
mappings = self._get_mappings(host_id)
lun_ids = [
m['lunid'] for m in mappings if m['volume']['id'] == vol_id]
map_info['host_ports'] = host_ports
map_info['mappings'] = mappings
map_info['lun_ids'] = lun_ids
if not lun_ids:
return False, host_id, map_info
else:
return True, host_id, map_info
@coordination.synchronized('ibm-ds8k-{connector[host]}')
def initialize_connection(self, vol_id, connector, **kwargs):
host = self._get_host(connector)
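The new check_vol_mapped_to_host helper splits the host and mapping lookup out of terminate_connection and returns a (mapped, host_id, map_info) tuple, where map_info bundles everything the later cleanup needs. A rough illustration of the returned structure for a volume mapped to a single host; all ids and WWPNs below are invented for illustration:

# Illustrative only: shape of the tuple returned by check_vol_mapped_to_host
# when the volume has one mapping; ids and WWPNs are made-up examples.
mapped, host_id, map_info = (
    True,
    'H1',                                                  # DS8K host id
    {
        'host_ports': [{'wwpn': '10000090FA3418BC', 'host_id': 'H1'}],
        'mappings': [{'lunid': '00', 'volume': {'id': '2200'}}],
        'lun_ids': ['00'],             # LUNs backing this volume on that host
    },
)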
@@ -640,53 +667,39 @@ class DS8KCommonHelper(object):
}
@coordination.synchronized('ibm-ds8k-{connector[host]}')
def terminate_connection(self, vol_id, connector, force, **kwargs):
def terminate_connection(self, vol_id, host_id, connector, map_info):
host = self._get_host(connector)
host_wwpn_set = set(wwpn.upper() for wwpn in connector['wwpns'])
host_ports = self._get_host_ports(host_wwpn_set)
defined_hosts = set(
hp['host_id'] for hp in host_ports if hp['host_id'])
host_ports = map_info['host_ports']
lun_ids = map_info['lun_ids']
mappings = map_info['mappings']
delete_ports = set(
hp['wwpn'] for hp in host_ports if not hp['host_id'])
LOG.debug("terminate_connection: host_ports: %(host)s, "
"defined_hosts: %(defined)s, delete_ports: %(delete)s.",
{"host": host_ports,
"defined": defined_hosts,
"defined": host_id,
"delete": delete_ports})
if not defined_hosts:
LOG.info('Could not find host.')
return None
elif len(defined_hosts) > 1:
raise restclient.APIException(_('More than one host found.'))
else:
host_id = defined_hosts.pop()
mappings = self._get_mappings(host_id)
lun_ids = [
m['lunid'] for m in mappings if m['volume']['id'] == vol_id]
LOG.info('Volumes attached to host %(host)s are %(vols)s.',
{'host': host_id, 'vols': ','.join(lun_ids)})
for lun_id in lun_ids:
self._delete_mappings(host_id, lun_id)
if not lun_ids:
LOG.warning("Volume %(vol)s is already not mapped to "
"host %(host)s.",
{'vol': vol_id, 'host': host.name})
# if this host only has volumes that have been detached,
# remove the host and its ports
ret_info = {
'driver_volume_type': 'fibre_channel',
'data': {}
}
if len(mappings) == len(lun_ids):
for port in delete_ports:
self._delete_host_ports(port)
self._delete_host(host_id)
target_ports = [p['wwpn'] for p in self._get_ioports()]
target_map = {initiator.upper(): target_ports
for initiator in connector['wwpns']}
ret_info['data']['initiator_target_map'] = target_map
return ret_info
def create_group(self, group):
return {'status': fields.GroupStatus.AVAILABLE}
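With this refactoring the helper no longer resolves the host itself: the proxy is expected to call check_vol_mapped_to_host first and feed its results into the new terminate_connection signature. A condensed sketch of that call sequence, assuming backend_helper, connector and vol_id are already in scope (replication fallback and error handling omitted):

# Condensed sketch of the two-step detach against a single DS8K helper.
mapped, host_id, map_info = backend_helper.check_vol_mapped_to_host(
    connector, vol_id)
if mapped:
    ret_info = backend_helper.terminate_connection(
        vol_id, host_id, connector, map_info)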


@@ -1047,14 +1047,65 @@ class DS8KProxy(proxy.IBMStorageProxy):
@proxy.logger
def terminate_connection(self, volume, connector, force=False, **kwargs):
"""Detach a volume from a host."""
ret_info = {
'driver_volume_type': 'fibre_channel',
'data': {}
}
lun = Lun(volume)
LOG.info('Detach the volume %s.', lun.ds_id)
if lun.group and lun.failed_over:
if (lun.group and lun.failed_over) and not self._active_backend_id:
backend_helper = self._replication.get_target_helper()
else:
backend_helper = self._helper
return backend_helper.terminate_connection(lun.ds_id, connector,
force, **kwargs)
if isinstance(backend_helper, helper.DS8KECKDHelper):
LOG.info('Detach the volume %s.', lun.ds_id)
return backend_helper.terminate_connection(lun.ds_id, connector,
force, **kwargs)
else:
vol_mapped, host_id, map_info = (
backend_helper.check_vol_mapped_to_host(connector, lun.ds_id))
if host_id is None or not vol_mapped:
if host_id is None and not lun.type_replication:
msg = (_('Failed to find the Host information.'))
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
if host_id and not lun.type_replication and not vol_mapped:
LOG.warning("Volume %(vol)s is already not mapped to "
"host %(host)s.",
{'vol': lun.ds_id, 'host': host_id})
return ret_info
if lun.type_replication:
if backend_helper == self._replication.get_target_helper():
backend_helper = self._replication.get_source_helper()
else:
backend_helper = self._replication.get_target_helper()
try:
if backend_helper.lun_exists(lun.replica_ds_id):
LOG.info('Detaching volume %s from the '
'Secondary site.', lun.replica_ds_id)
mapped, host_id, map_info = (
backend_helper.check_vol_mapped_to_host(
connector, lun.replica_ds_id))
else:
msg = (_('Failed to find the attached '
'Volume %s.') % lun.ds_id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
except Exception as ex:
LOG.warning('Failed to get host mapping for volume '
'%(volume)s in the secondary site. '
'Exception: %(err)s.',
{'volume': lun.replica_ds_id, 'err': ex})
return ret_info
if not mapped:
return ret_info
else:
LOG.info('Detach the volume %s.', lun.replica_ds_id)
return backend_helper.terminate_connection(
lun.replica_ds_id, host_id, connector, map_info)
elif host_id and vol_mapped:
LOG.info('Detaching volume %s.', lun.ds_id)
return backend_helper.terminate_connection(lun.ds_id, host_id,
connector, map_info)
@proxy.logger
def create_group(self, ctxt, group):
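Stripped of logging and error handling, the new FC detach flow above can be read as the simplified paraphrase below; variable names mirror the code, and other_helper stands for the source/target helper swap shown above rather than a real attribute:

# Simplified paraphrase of the FC detach flow; ECKD volumes keep using the
# old helper signature and are handled before this point.
mapped, host_id, map_info = backend_helper.check_vol_mapped_to_host(
    connector, lun.ds_id)
if mapped:
    return backend_helper.terminate_connection(lun.ds_id, host_id,
                                               connector, map_info)
if lun.type_replication:
    # Not mapped on this site: switch to the other site's helper and try
    # the replica volume there before giving up.
    backend_helper = other_helper
    mapped, host_id, map_info = backend_helper.check_vol_mapped_to_host(
        connector, lun.replica_ds_id)
    if mapped:
        return backend_helper.terminate_connection(
            lun.replica_ds_id, host_id, connector, map_info)
    return ret_info
if host_id is None:
    raise exception.VolumeDriverException(
        message=_('Failed to find the Host information.'))
return ret_info    # host exists but the volume is already unmapped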
@@ -1427,8 +1478,6 @@ class DS8KProxy(proxy.IBMStorageProxy):
volume_update = lun.get_volume_update()
# failover_host in base cinder has considered previous status
# of the volume, it doesn't need to return it for update.
volume_update['status'] = (
lun.previous_status or 'available')
volume_update['replication_status'] = (
fields.ReplicationStatus.FAILED_OVER
if self._active_backend_id else
@@ -1581,9 +1630,6 @@ class DS8KProxy(proxy.IBMStorageProxy):
volume_model_update = lun.get_volume_update()
# base cinder doesn't consider previous status of the volume
# in failover_replication, so here returns it for update.
volume_model_update['previous_status'] = lun.status
volume_model_update['status'] = (
lun.previous_status or 'available')
volume_model_update['replication_status'] = (
model_update['replication_status'])
volume_model_update['id'] = lun.os_id
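The last two hunks carry the fix named in the commit title: the per-volume updates returned from failover_host and failover_replication no longer force the volume status to lun.previous_status or 'available', so a volume that is attached keeps its in-use status and only the replication metadata changes. A hypothetical per-volume update after this change (the remaining keys come from lun.get_volume_update(); the point is that 'status' is absent):

# Hypothetical: the failover update now carries replication metadata only;
# with no 'status' key, an in-use volume stays in-use.
volume_update = {
    'replication_status': 'failed-over',   # or 'enabled' after failback
}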