Hitachi: Add port scheduler

This patch adds a feature "Port Scheduler" to the Hitachi VSP Driver.
On Hitachi VSP storage, host groups are created and the same WWNs are
registered in all of the ports that are specified for the parameter
hitachi_compute_target_ports or for the parameter hitachi_target_ports.
For Hitachi storage devices, a maximum of 255 host groups and 255 WWNs
can be registered for one port. When volumes are attached, the upper
limit on the number of WWNs that can be registered might be
unexpectedly exceeded.

For the feature "Port scheduler", when the cinder-volume service
starts, the Fibre Channel Zone Manager obtains the WWNs of active
compute nodes and of active VMs. When volumes are attached, the WWNs
are registered in a round-robin procedure, in the same order as the
order of ports specified for the parameter hitachi_compute_target_ports
or for the parameter hitachi_target_ports.

Implements: blueprint hitachi-vsp-port-scheduler
Change-Id: I29882b806f5c799995ce209d19d3297275e5527c
This commit is contained in:
Atsushi Kawai 2022-03-08 05:34:05 +00:00
parent 1678260539
commit a3256416ce
11 changed files with 397 additions and 105 deletions

View File

@ -334,6 +334,7 @@ def list_opts():
cinder_volume_drivers_fujitsu_eternus_dx_eternusdxcommon.
FJ_ETERNUS_DX_OPT_opts,
cinder_volume_drivers_hitachi_hbsdcommon.COMMON_VOLUME_OPTS,
cinder_volume_drivers_hitachi_hbsdcommon.COMMON_PORT_OPTS,
cinder_volume_drivers_hitachi_hbsdrest.REST_VOLUME_OPTS,
cinder_volume_drivers_hitachi_hbsdrestfc.FC_VOLUME_OPTS,
cinder_volume_drivers_hpe_hpe3parcommon.hpe3par_opts,

View File

@ -51,7 +51,7 @@ CONFIG_MAP = {
'port_id': 'CL1-A',
'host_grp_name': 'HBSD-0123456789abcdef',
'host_mode': 'LINUX/IRIX',
'host_wwn': '0123456789abcdef',
'host_wwn': ['0123456789abcdef', '0123456789abcdeg'],
'target_wwn': '1111111123456789',
'user_id': 'user',
'user_pass': 'password',
@ -63,21 +63,28 @@ CONFIG_MAP = {
# Dummy response for FC zoning device mapping
DEVICE_MAP = {
'fabric_name': {
'initiator_port_wwn_list': [CONFIG_MAP['host_wwn']],
'initiator_port_wwn_list': [CONFIG_MAP['host_wwn'][0]],
'target_port_wwn_list': [CONFIG_MAP['target_wwn']]}}
DEFAULT_CONNECTOR = {
'host': 'host',
'ip': CONFIG_MAP['my_ip'],
'wwpns': [CONFIG_MAP['host_wwn']],
'wwpns': [CONFIG_MAP['host_wwn'][0]],
'multipath': False,
}
DEFAULT_CONNECTOR_AIX = {
'os_type': 'aix',
DEVICE_MAP_MULTI_WWN = {
'fabric_name': {
'initiator_port_wwn_list': [
CONFIG_MAP['host_wwn'][0],
CONFIG_MAP['host_wwn'][1]
],
'target_port_wwn_list': [CONFIG_MAP['target_wwn']]}}
DEFAULT_CONNECTOR_MULTI_WWN = {
'host': 'host',
'ip': CONFIG_MAP['my_ip'],
'wwpns': [CONFIG_MAP['host_wwn']],
'wwpns': [CONFIG_MAP['host_wwn'][0], CONFIG_MAP['host_wwn'][1]],
'multipath': False,
}
@ -169,7 +176,7 @@ GET_HOST_WWNS_RESULT = {
"data": [
{
"hostGroupNumber": 0,
"hostWwn": CONFIG_MAP['host_wwn'],
"hostWwn": CONFIG_MAP['host_wwn'][0],
},
],
}
@ -328,10 +335,10 @@ def _brick_get_connector_properties(multipath=False, enforce_multipath=False):
return DEFAULT_CONNECTOR
def _brick_get_connector_properties_aix(
def _brick_get_connector_properties_multi_wwn(
multipath=False, enforce_multipath=False):
"""Return a predefined connector object."""
return DEFAULT_CONNECTOR_AIX
return DEFAULT_CONNECTOR_MULTI_WWN
def reduce_retrying_time(func):
@ -381,6 +388,14 @@ class FakeLookupService():
return DEVICE_MAP
class FakeLookupServiceMultiWwn():
    """Dummy FC zoning mapping lookup service class."""

    def get_device_mapping_from_network(self, initiator_wwns, target_wwns):
        """Return predefined FC zoning mapping."""
        # Ignores its arguments and always returns the two-initiator map,
        # emulating a fabric where both host WWNs are logged in.
        return DEVICE_MAP_MULTI_WWN
class FakeResponse():
def __init__(self, status_code, data=None, headers=None):
@ -448,6 +463,7 @@ class HBSDRESTFCDriverTest(test.TestCase):
self.configuration.hitachi_copy_speed = 3
self.configuration.hitachi_copy_check_interval = 3
self.configuration.hitachi_async_copy_check_interval = 10
self.configuration.hitachi_port_scheduler = False
self.configuration.san_login = CONFIG_MAP['user_id']
self.configuration.san_password = CONFIG_MAP['user_pass']
@ -598,13 +614,16 @@ class HBSDRESTFCDriverTest(test.TestCase):
@mock.patch.object(requests.Session, "request")
@mock.patch.object(
volume_utils, 'brick_get_connector_properties',
side_effect=_brick_get_connector_properties_aix)
def test_do_setup_create_hg_aix(
side_effect=_brick_get_connector_properties_multi_wwn)
def test_do_setup_create_hg_port_scheduler(
self, brick_get_connector_properties, request):
"""Normal case: The host group not exists in AIX."""
"""Normal case: The host group not exists with port scheduler."""
drv = hbsd_fc.HBSDFCDriver(
configuration=self.configuration)
self._setup_config()
self.configuration.hitachi_port_scheduler = True
self.configuration.hitachi_zoning_request = True
drv.common._lookup_service = FakeLookupServiceMultiWwn()
request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT),
FakeResponse(200, GET_PORTS_RESULT),
FakeResponse(200, NOTFOUND_RESULT),
@ -612,15 +631,14 @@ class HBSDRESTFCDriverTest(test.TestCase):
FakeResponse(200, NOTFOUND_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
drv.do_setup(None)
self.assertEqual(
{CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']},
drv.common.storage_info['wwns'])
self.assertEqual(1, brick_get_connector_properties.call_count)
self.assertEqual(8, request.call_count)
kargs1 = request.call_args_list[6][1]
self.assertEqual('AIX', kargs1['json']['hostMode'])
self.assertEqual(9, request.call_count)
# stop the Loopingcall within the do_setup treatment
self.driver.common.client.keep_session_loop.stop()
self.driver.common.client.keep_session_loop.wait()
@ -1174,6 +1192,7 @@ class HBSDRESTFCDriverTest(test.TestCase):
_get_oslo_driver_opts.return_value = []
ret = self.driver.get_driver_options()
actual = (hbsd_common.COMMON_VOLUME_OPTS +
hbsd_common.COMMON_PORT_OPTS +
hbsd_rest.REST_VOLUME_OPTS +
hbsd_rest_fc.FC_VOLUME_OPTS)
self.assertEqual(actual, ret)

View File

@ -64,14 +64,6 @@ DEFAULT_CONNECTOR = {
'multipath': False,
}
DEFAULT_CONNECTOR_AIX = {
'os_type': 'aix',
'host': 'host',
'ip': CONFIG_MAP['my_ip'],
'initiator': CONFIG_MAP['host_iscsi_name'],
'multipath': False,
}
CTXT = cinder_context.get_admin_context()
TEST_VOLUME = []
@ -274,12 +266,6 @@ def _brick_get_connector_properties(multipath=False, enforce_multipath=False):
return DEFAULT_CONNECTOR
def _brick_get_connector_properties_aix(
multipath=False, enforce_multipath=False):
"""Return a predefined connector object."""
return DEFAULT_CONNECTOR_AIX
class FakeResponse():
def __init__(self, status_code, data=None, headers=None):
@ -347,6 +333,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
self.configuration.hitachi_copy_speed = 3
self.configuration.hitachi_copy_check_interval = 3
self.configuration.hitachi_async_copy_check_interval = 10
self.configuration.hitachi_port_scheduler = False
self.configuration.san_login = CONFIG_MAP['user_id']
self.configuration.san_password = CONFIG_MAP['user_pass']
@ -502,39 +489,6 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
self.driver.common.client.keep_session_loop.stop()
self.driver.common.client.keep_session_loop.wait()
@mock.patch.object(requests.Session, "request")
@mock.patch.object(
volume_utils, 'brick_get_connector_properties',
side_effect=_brick_get_connector_properties_aix)
def test_do_setup_create_hg_aix(
self, brick_get_connector_properties, request):
"""Normal case: The host group not exists in AIX."""
drv = hbsd_iscsi.HBSDISCSIDriver(
configuration=self.configuration)
self._setup_config()
request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT),
FakeResponse(200, GET_PORTS_RESULT),
FakeResponse(200, GET_PORT_RESULT),
FakeResponse(200, NOTFOUND_RESULT),
FakeResponse(200, NOTFOUND_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
drv.do_setup(None)
self.assertEqual(
{CONFIG_MAP['port_id']:
'%(ip)s:%(port)s' % {
'ip': CONFIG_MAP['ipv4Address'],
'port': CONFIG_MAP['tcpPort']}},
drv.common.storage_info['portals'])
self.assertEqual(1, brick_get_connector_properties.call_count)
self.assertEqual(8, request.call_count)
kargs1 = request.call_args_list[6][1]
self.assertEqual('AIX', kargs1['json']['hostMode'])
# stop the Loopingcall within the do_setup treatment
self.driver.common.client.keep_session_loop.stop()
self.driver.common.client.keep_session_loop.wait()
@mock.patch.object(requests.Session, "request")
def test_extend_volume(self, request):
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),

View File

@ -106,8 +106,17 @@ COMMON_VOLUME_OPTS = [
'a copy pair deletion or data restoration.'),
]
COMMON_PORT_OPTS = [
cfg.BoolOpt(
'hitachi_port_scheduler',
default=False,
help='Enable port scheduling of WWNs to the configured ports so that '
'WWNs are registered to ports in a round-robin fashion.'),
]
CONF = cfg.CONF
CONF.register_opts(COMMON_VOLUME_OPTS, group=configuration.SHARED_CONF_GROUP)
CONF.register_opts(COMMON_PORT_OPTS, group=configuration.SHARED_CONF_GROUP)
LOG = logging.getLogger(__name__)
MSG = utils.HBSDMsg
@ -154,6 +163,7 @@ class HBSDCommon():
self.driver_info['param_prefix'] + '_storage_id',
self.driver_info['param_prefix'] + '_pool',
]
self.port_index = {}
def create_ldev(self, size):
"""Create an LDEV and return its LDEV number."""
@ -468,6 +478,23 @@ class HBSDCommon():
self.raise_error(msg)
return values
def check_param_fc(self):
    """Check FC-related parameter values and consistency among them."""
    if hasattr(
            self.conf,
            self.driver_info['param_prefix'] + '_port_scheduler'):
        self.check_opts(self.conf, COMMON_PORT_OPTS)
    # Port scheduling requires the driver to create host groups itself.
    # NOTE(review): the reads below use the hitachi_-prefixed options
    # directly, while the existence check above builds the name from
    # param_prefix — confirm this is safe for OEM prefixes.
    if (self.conf.hitachi_port_scheduler and
            not self.conf.hitachi_group_create):
        msg = utils.output_log(
            MSG.INVALID_PARAMETER,
            param=self.driver_info['param_prefix'] + '_port_scheduler')
        self.raise_error(msg)
    # The port scheduler depends on the FC Zone Manager lookup service.
    if (self._lookup_service is None and
            self.conf.hitachi_port_scheduler):
        msg = utils.output_log(MSG.ZONE_MANAGER_IS_NOT_AVAILABLE)
        self.raise_error(msg)
def check_param_iscsi(self):
"""Check iSCSI-related parameter values and consistency among them."""
if self.conf.use_chap_auth:
@ -505,6 +532,8 @@ class HBSDCommon():
if not self.conf.safe_get(opt):
msg = utils.output_log(MSG.INVALID_PARAMETER, param=opt)
self.raise_error(msg)
if self.storage_info['protocol'] == 'FC':
self.check_param_fc()
if self.storage_info['protocol'] == 'iSCSI':
self.check_param_iscsi()
@ -544,11 +573,33 @@ class HBSDCommon():
resource=self.driver_info['hba_id_type'])
self.raise_error(msg)
def set_device_map(self, targets, hba_ids, volume):
    """Return (device map, HBA IDs); the base class performs no lookup."""
    return None, hba_ids
def get_port_scheduler_param(self):
    """Return the configured port scheduler flag, or False if unset."""
    opt_name = self.driver_info['param_prefix'] + '_port_scheduler'
    if not hasattr(self.conf, opt_name):
        return False
    return self.conf.hitachi_port_scheduler
def create_target_by_port_scheduler(
        self, devmap, targets, connector, volume):
    """Create targets on scheduler-selected ports (driver-specific)."""
    raise NotImplementedError()
def create_target_to_storage(self, port, connector, hba_ids):
"""Create a host group or an iSCSI target on the specified port."""
raise NotImplementedError()
def set_target_mode(self, port, gid, connector):
def get_gid_from_targets(self, targets, port):
    """Return the group ID already assigned to *port*, or raise."""
    for candidate_port, candidate_gid in targets['list']:
        if candidate_port != port:
            continue
        return candidate_gid
    # No entry for this port: report the missing target and abort.
    msg = utils.output_log(MSG.NO_CONNECTED_TARGET)
    self.raise_error(msg)
def set_target_mode(self, port, gid):
    """Configure the target to meet the environment."""
    # Protocol-specific subclasses set the host mode (and options) here.
    raise NotImplementedError()
@ -560,35 +611,55 @@ class HBSDCommon():
"""Delete the host group or the iSCSI target from the port."""
raise NotImplementedError()
def _create_target(self, targets, port, connector, hba_ids):
def set_target_map_info(self, targets, hba_ids, port):
    """Record initiator-to-target mapping info; no-op in the base class."""
    pass
def create_target(self, targets, port, connector, hba_ids):
"""Create a host group or an iSCSI target on the storage port."""
target_name, gid = self.create_target_to_storage(
port, connector, hba_ids)
utils.output_log(MSG.OBJECT_CREATED, object='a target',
details='port: %(port)s, gid: %(gid)s, target_name: '
'%(target)s' %
{'port': port, 'gid': gid, 'target': target_name})
if port not in targets['info'] or not targets['info'][port]:
target_name, gid = self.create_target_to_storage(
port, connector, hba_ids)
utils.output_log(
MSG.OBJECT_CREATED,
object='a target',
details='port: %(port)s, gid: %(gid)s, target_name: '
'%(target)s' %
{'port': port, 'gid': gid, 'target': target_name})
else:
gid = self.get_gid_from_targets(targets, port)
try:
self.set_target_mode(port, gid, connector)
if port not in targets['info'] or not targets['info'][port]:
self.set_target_mode(port, gid)
self.set_hba_ids(port, gid, hba_ids)
except Exception:
with excutils.save_and_reraise_exception():
self.delete_target_from_storage(port, gid)
targets['info'][port] = True
targets['list'].append((port, gid))
if (port, gid) not in targets['list']:
targets['list'].append((port, gid))
self.set_target_map_info(targets, hba_ids, port)
def create_mapping_targets(self, targets, connector):
def create_mapping_targets(self, targets, connector, volume=None):
"""Create server-storage connection for all specified storage ports."""
active_hba_ids = []
hba_ids = self.get_hba_ids_from_connector(connector)
for port in targets['info'].keys():
if targets['info'][port]:
continue
try:
self._create_target(targets, port, connector, hba_ids)
except exception.VolumeDriverException:
utils.output_log(
self.driver_info['msg_id']['target'], port=port)
devmap, active_hba_ids = self.set_device_map(targets, hba_ids, volume)
if self.get_port_scheduler_param():
self.create_target_by_port_scheduler(
devmap, targets, connector, volume)
else:
for port in targets['info'].keys():
if targets['info'][port]:
continue
try:
self.create_target(
targets, port, connector, active_hba_ids)
except exception.VolumeDriverException:
utils.output_log(
self.driver_info['msg_id']['target'], port=port)
# When other threads created a host group at same time, need to
# re-find targets.
@ -596,6 +667,20 @@ class HBSDCommon():
self.find_targets_from_storage(
targets, connector, targets['info'].keys())
def get_port_index_to_be_used(self, ports, network_name):
    """Return the next round-robin index into *ports* for this fabric.

    A per-(storage ID, backend name, fabric name) counter is kept in
    ``self.port_index`` so successive calls cycle through the port list.

    :param ports: sequence of candidate port names
    :param network_name: fabric name used to key the counter
    :return: integer index into *ports*
    """
    backend_name = self.conf.safe_get('volume_backend_name')
    # NOTE(review): reads hitachi_storage_id directly rather than via
    # param_prefix — confirm this is intended for OEM prefixes.
    code = (
        str(self.conf.hitachi_storage_id) + backend_name + network_name)
    if code in self.port_index:
        # Advance the cursor, wrapping to 0 at the end of the list.
        if self.port_index[code] >= len(ports) - 1:
            self.port_index[code] = 0
        else:
            self.port_index[code] += 1
    else:
        # First request for this fabric: start from the first port.
        self.port_index[code] = 0
    return self.port_index[code]
def init_cinder_hosts(self, **kwargs):
"""Initialize server-storage connection."""
targets = kwargs.pop(
@ -725,7 +810,7 @@ class HBSDCommon():
return {
'driver_volume_type': self.driver_info['volume_type'],
'data': self.get_properties(targets, target_lun, connector),
}
}, targets['target_map']
def get_target_ports(self, connector):
"""Return a list of ports corresponding to the specified connector."""
@ -818,6 +903,9 @@ class HBSDCommon():
return filtered_tps
def clean_mapping_targets(self, targets):
    """Delete empty mapping targets from the storage (driver-specific)."""
    raise NotImplementedError()
def unmanage_snapshot(self, snapshot):
"""Output error message and raise NotImplementedError."""
utils.output_log(
@ -895,3 +983,7 @@ class HBSDCommon():
"""Raise a VolumeDriverException by driver busy message."""
message = _(utils.BUSY_MESSAGE)
raise exception.VolumeDriverException(message)
def is_controller(self, connector):
    """Return True if *connector* represents this controller node itself.

    Compares the connector's IP address with this host's configured
    ``my_ip``; connectors without an 'ip' key are never the controller.
    """
    # A boolean expression already yields True/False; no ternary needed.
    return 'ip' in connector and connector['ip'] == CONF.my_ip

View File

@ -70,6 +70,7 @@ class HBSDFCDriver(driver.FibreChannelDriver):
2.2.0 - Add maintenance parameters.
2.2.1 - Make the parameters name variable for supporting OEM storages.
2.2.2 - Add Target Port Assignment.
2.2.3 - Add port scheduler.
"""
@ -86,6 +87,7 @@ class HBSDFCDriver(driver.FibreChannelDriver):
super(HBSDFCDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(common.COMMON_VOLUME_OPTS)
self.configuration.append_config_values(common.COMMON_PORT_OPTS)
self.configuration.append_config_values(rest_fc.FC_VOLUME_OPTS)
os.environ['LANG'] = 'C'
self.common = self._init_common(self.configuration, kwargs.get('db'))
@ -101,6 +103,7 @@ class HBSDFCDriver(driver.FibreChannelDriver):
['driver_ssl_cert_verify', 'driver_ssl_cert_path',
'san_api_port', ]))
return (common.COMMON_VOLUME_OPTS +
common.COMMON_PORT_OPTS +
rest.REST_VOLUME_OPTS +
rest_fc.FC_VOLUME_OPTS +
additional_opts)

View File

@ -70,6 +70,7 @@ class HBSDISCSIDriver(driver.ISCSIDriver):
2.2.0 - Add maintenance parameters.
2.2.1 - Make the parameters name variable for supporting OEM storages.
2.2.2 - Add Target Port Assignment.
2.2.3 - Add port scheduler.
"""

View File

@ -568,7 +568,7 @@ class HBSDREST(common.HBSDCommon):
if (self.find_targets_from_storage(
targets, connector, target_ports) and
self.conf.hitachi_group_create):
self.create_mapping_targets(targets, connector)
self.create_mapping_targets(targets, connector, volume)
self.require_target_existed(targets)
@ -644,7 +644,7 @@ class HBSDREST(common.HBSDCommon):
{'port': port, 'gid': gid})
return result
def _clean_mapping_targets(self, targets):
def clean_mapping_targets(self, targets):
"""Delete the empty host group without LU."""
deleted_targets = []
for target in targets['list']:
@ -681,7 +681,7 @@ class HBSDREST(common.HBSDCommon):
self.unmap_ldev(unmap_targets, ldev)
if self.conf.hitachi_group_delete:
deleted_targets = self._clean_mapping_targets(unmap_targets)
deleted_targets = self.clean_mapping_targets(unmap_targets)
return deleted_targets
def find_all_mapped_targets_from_storage(self, targets, ldev):

View File

@ -16,10 +16,12 @@
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from cinder import exception
from cinder.volume import configuration
from cinder.volume.drivers.hitachi import hbsd_rest as rest
from cinder.volume.drivers.hitachi import hbsd_rest_api as rest_api
from cinder.volume.drivers.hitachi import hbsd_utils as utils
from cinder.zonemanager import utils as fczm_utils
@ -34,6 +36,8 @@ FC_VOLUME_OPTS = [
_FC_HMO_DISABLE_IO = 91
_MSG_EXCEED_HOST_GROUP_MAX = "could not find empty Host group ID for adding."
LOG = logging.getLogger(__name__)
MSG = utils.HBSDMsg
@ -69,10 +73,22 @@ class HBSDRESTFC(rest.HBSDREST):
if port not in set(target_ports + compute_target_ports):
continue
secure_fc_port = True
can_port_schedule = True
if hasattr(
self.conf,
self.driver_info['param_prefix'] + '_port_scheduler'):
port_scheduler_param = self.conf.hitachi_port_scheduler
else:
port_scheduler_param = False
if (port_data['portType'] not in ['FIBRE', 'FCoE'] or
not port_data['lunSecuritySetting']):
secure_fc_port = False
if not secure_fc_port:
elif (port in set(target_ports + compute_target_ports) and
port_scheduler_param and not (
port_data.get('fabricMode') and
port_data.get('portConnection') == 'PtoP')):
can_port_schedule = False
if not secure_fc_port or not can_port_schedule:
utils.output_log(
MSG.INVALID_PORT, port=port,
additional_info='portType: %s, lunSecuritySetting: %s, '
@ -84,10 +100,11 @@ class HBSDRESTFC(rest.HBSDREST):
if not secure_fc_port:
continue
wwn = port_data.get('wwn')
if target_ports and port in target_ports:
if target_ports and port in target_ports and can_port_schedule:
available_ports.append(port)
self.storage_info['wwns'][port] = wwn
if compute_target_ports and port in compute_target_ports:
if (compute_target_ports and port in compute_target_ports and
can_port_schedule):
available_compute_ports.append(port)
self.storage_info['wwns'][port] = wwn
@ -136,20 +153,21 @@ class HBSDRESTFC(rest.HBSDREST):
try:
self.client.add_hba_wwn(port, gid, wwn, no_log=True)
registered_wwns.append(wwn)
except exception.VolumeDriverException:
except exception.VolumeDriverException as ex:
utils.output_log(MSG.ADD_HBA_WWN_FAILED, port=port, gid=gid,
wwn=wwn)
if (self.get_port_scheduler_param() and
utils.safe_get_err_code(ex.kwargs.get('errobj'))
== rest_api.EXCEED_WWN_MAX):
raise ex
if not registered_wwns:
msg = utils.output_log(MSG.NO_HBA_WWN_ADDED_TO_HOST_GRP, port=port,
gid=gid)
self.raise_error(msg)
def set_target_mode(self, port, gid, connector):
def set_target_mode(self, port, gid):
"""Configure the host group to meet the environment."""
if connector.get('os_type', None) == 'aix':
body = {'hostMode': 'AIX'}
else:
body = {'hostMode': 'LINUX/IRIX'}
body = {'hostMode': 'LINUX/IRIX'}
if self.conf.hitachi_rest_disable_io_wait:
body['hostModeOptions'] = [_FC_HMO_DISABLE_IO]
if self.conf.hitachi_host_mode_options:
@ -240,16 +258,34 @@ class HBSDRESTFC(rest.HBSDREST):
pass
else:
not_found_count += 1
if self.get_port_scheduler_param():
"""
When port scheduler feature is enabled,
it is OK to find any mapped port. so:
- return 0, if any mapped port is found
- return port count, if no mapped port is found.
It is no case with both not_found_count and len(target_ports) are
zero, bcz it must be failed in param checker if any target ports
are not defined.
"""
return (not_found_count if not_found_count == len(target_ports)
else 0)
return not_found_count
def initialize_connection(self, volume, connector, is_snapshot=False):
"""Initialize connection between the server and the volume."""
conn_info = super(HBSDRESTFC, self).initialize_connection(
conn_info, map_info = super(HBSDRESTFC, self).initialize_connection(
volume, connector, is_snapshot)
if self.conf.hitachi_zoning_request:
init_targ_map = utils.build_initiator_target_map(
connector, conn_info['data']['target_wwn'],
self._lookup_service)
if (self.get_port_scheduler_param() and
not self.is_controller(connector)):
init_targ_map = map_info
else:
init_targ_map = utils.build_initiator_target_map(
connector, conn_info['data']['target_wwn'],
self._lookup_service)
if init_targ_map:
conn_info['data']['initiator_target_map'] = init_targ_map
fczm_utils.add_fc_zone(conn_info)
@ -284,3 +320,115 @@ class HBSDRESTFC(rest.HBSDREST):
for hostgroup in hostgroups:
wwpns.update(self._get_wwpns(port, hostgroup))
fake_connector['wwpns'] = list(wwpns)
def set_device_map(self, targets, hba_ids, volume):
    """Build the fabric device map and reduce HBA IDs to active WWNs.

    Returns a tuple ``(devmap, active_hba_ids)``.  When the port
    scheduler is disabled, no fabric lookup is performed and
    ``(None, hba_ids)`` is returned unchanged.  Raises via
    ``self.raise_error`` when the FC Zone Manager reports no active
    initiator WWN or no active target port.
    """
    active_hba_ids = []
    target_wwns = []
    active_target_wwns = []
    # The volume ID is only used to make error messages identifiable.
    vol_id = volume['id'] if volume and 'id' in volume.keys() else ""
    if not self.get_port_scheduler_param():
        return None, hba_ids
    # Collect the WWNs of every candidate storage port.
    for port in targets['info'].keys():
        target_wwns.append(self.storage_info['wwns'][port])
    # Ask the FC Zone Manager which initiator/target WWNs are visible.
    devmap = self._lookup_service.get_device_mapping_from_network(
        hba_ids, target_wwns)
    for fabric_name in devmap.keys():
        active_hba_ids.extend(
            devmap[fabric_name]['initiator_port_wwn_list'])
        active_target_wwns.extend(
            devmap[fabric_name]['target_port_wwn_list'])
    # De-duplicate (note: set() does not preserve order).
    active_hba_ids = list(set(active_hba_ids))
    if not active_hba_ids:
        msg = utils.output_log(MSG.NO_ACTIVE_WWN, wwn=', '.join(hba_ids),
                               volume=vol_id)
        self.raise_error(msg)
    active_target_wwns = list(set(active_target_wwns))
    if not active_target_wwns:
        # Build a readable "port, WWN: <port>, <wwn>" list for the log.
        port_wwns = ""
        for port in targets['info'].keys():
            if port_wwns:
                port_wwns += ", "
            port_wwns += ("port, WWN: " + port +
                          ", " + self.storage_info['wwns'][port])
        msg = utils.output_log(
            MSG.NO_PORT_WITH_ACTIVE_WWN, port_wwns=port_wwns,
            volume=vol_id)
        self.raise_error(msg)
    return devmap, active_hba_ids
def build_wwpn_groups(self, wwpns, connector):
    """Split *wwpns* into the groups registered together (size one each)."""
    group_size = 1
    groups = []
    for start in range(0, len(wwpns), group_size):
        groups.append(wwpns[start:start + group_size])
    return groups
def _create_target_to_any_port(
        self, targets, ports, connector, hba_ids, fabric_name):
    """Try to create a target on one of *ports*, chosen round-robin.

    At most ``len(ports)`` attempts are made.  A port whose host group
    or WWN capacity is exhausted is logged and skipped; any other
    error is re-raised.  If every port fails, an error is raised.
    """
    # ``port`` is deliberately unused: the loop only bounds the number
    # of attempts, while the round-robin index selects the actual port.
    for port in ports:
        index = self.get_port_index_to_be_used(ports, fabric_name)
        try:
            self.create_target(
                targets, ports[index], connector, hba_ids)
            return
        except exception.VolumeDriverException as ex:
            # Host group count reached this port's maximum.
            if ((utils.safe_get_message_id(ex.kwargs.get('errobj'))
                 == rest_api.MSGID_SPECIFIED_OBJECT_DOES_NOT_EXIST)
                    or (_MSG_EXCEED_HOST_GROUP_MAX
                        in utils.safe_get_message(ex.kwargs.get('errobj')))):
                utils.output_log(
                    MSG.HOST_GROUP_NUMBER_IS_MAXIMUM, port=ports[index])
            # Registered WWN count reached this port's maximum.
            elif (utils.safe_get_err_code(ex.kwargs.get('errobj'))
                  == rest_api.EXCEED_WWN_MAX):
                utils.output_log(
                    MSG.WWN_NUMBER_IS_MAXIMUM, port=ports[index],
                    wwn=", ".join(hba_ids))
            else:
                raise ex
    # Every candidate port was exhausted.
    msg = utils.output_log(
        MSG.HOST_GROUP_OR_WWN_IS_NOT_AVAILABLE, ports=', '.join(ports))
    self.raise_error(msg)
def create_target_by_port_scheduler(
        self, devmap, targets, connector, volume):
    """Register active WWNs to host groups, scheduling ports per fabric.

    For each fabric in *devmap*, the active initiator WWNs are split
    into groups and each group is registered on one of that fabric's
    active storage ports, selected in round-robin order.  On failure,
    any targets created so far are cleaned up before re-raising.
    """
    available_ports = []
    active_ports = []
    if not devmap:
        msg = utils.output_log(MSG.ZONE_MANAGER_IS_NOT_AVAILABLE)
        self.raise_error(msg)
    for fabric_name in devmap.keys():
        available_ports = []
        active_ports = []
        active_initiator_wwns = devmap[
            fabric_name]['initiator_port_wwn_list']
        wwpn_groups = self.build_wwpn_groups(
            active_initiator_wwns, connector)
        # Ports whose target WWN is visible on this fabric.
        for port, wwn in self.storage_info['wwns'].items():
            if wwn in devmap[fabric_name]['target_port_wwn_list']:
                available_ports.append(port)
        target_ports = self.get_target_ports(connector)
        filter_ports = self.filter_target_ports(target_ports, volume)
        # Keep only configured ports that are both filtered-in and
        # active on the fabric; warn about inactive ones.
        for port in target_ports:
            if port in available_ports and port in filter_ports:
                active_ports.append(port)
            elif port not in available_ports and port in filter_ports:
                utils.output_log(
                    MSG.INVALID_PORT_BY_ZONE_MANAGER, port=port)
        for wwpns in wwpn_groups:
            try:
                self._create_target_to_any_port(
                    targets, active_ports, connector, wwpns, fabric_name)
            except exception.VolumeDriverException:
                with excutils.save_and_reraise_exception():
                    self.clean_mapping_targets(targets)
def set_target_map_info(self, targets, hba_ids, port):
    """Map each HBA WWN to the target WWN of *port* in the target map."""
    port_wwn = self.storage_info['wwns'][port]
    for hba_id in hba_ids:
        targets['target_map'].update({hba_id: [port_wwn]})

View File

@ -116,13 +116,9 @@ class HBSDRESTISCSI(rest.HBSDREST):
"""Connect the specified HBA with the specified port."""
self.client.add_hba_iscsi(port, gid, hba_ids)
def set_target_mode(self, port, gid, connector):
def set_target_mode(self, port, gid):
"""Configure the iSCSI target to meet the environment."""
if connector.get('os_type', None) == 'aix':
host_mode = 'AIX'
else:
host_mode = 'LINUX/IRIX'
body = {'hostMode': host_mode,
body = {'hostMode': 'LINUX/IRIX',
'hostModeOptions': [_ISCSI_HMO_REPORT_FULL_PORTAL]}
if self.conf.hitachi_rest_disable_io_wait:
body['hostModeOptions'].append(_ISCSI_HMO_DISABLE_IO)
@ -204,6 +200,12 @@ class HBSDRESTISCSI(rest.HBSDREST):
not_found_count += 1
return not_found_count
def initialize_connection(self, volume, connector, is_snapshot=False):
    """Initialize connection between the server and the volume."""
    # The parent now returns (connection info, FC initiator-target map);
    # the map only applies to FC zoning, so iSCSI discards it.
    conn_info, map_info = super(HBSDRESTISCSI, self).initialize_connection(
        volume, connector, is_snapshot)
    return conn_info
def get_properties_iscsi(self, targets, multipath):
"""Return iSCSI-specific server-LDEV connection info."""
if not multipath:

View File

@ -25,7 +25,7 @@ from oslo_utils import units
from cinder import exception
VERSION = '2.2.2'
VERSION = '2.2.3'
CI_WIKI_NAME = 'Hitachi_VSP_CI'
PARAM_PREFIX = 'hitachi'
VENDOR_NAME = 'Hitachi'
@ -184,6 +184,21 @@ class HBSDMsg(enum.Enum):
'%(volume_type)s)',
'suffix': WARNING_SUFFIX,
}
HOST_GROUP_NUMBER_IS_MAXIMUM = {
'msg_id': 335,
'loglevel': base_logging.WARNING,
'msg': 'Failed to create the host group because the host group '
'maximum of the port is exceeded. (port: %(port)s)',
'suffix': WARNING_SUFFIX,
}
WWN_NUMBER_IS_MAXIMUM = {
'msg_id': 336,
'loglevel': base_logging.WARNING,
'msg': 'Failed to add the wwns to the host group port because the '
'WWN maximum of the port is exceeded. '
'(port: %(port)s, WWN: %(wwn)s)',
'suffix': WARNING_SUFFIX,
}
INVALID_PORT = {
'msg_id': 339,
'loglevel': base_logging.WARNING,
@ -191,6 +206,13 @@ class HBSDMsg(enum.Enum):
'invalid. (%(additional_info)s)',
'suffix': WARNING_SUFFIX,
}
INVALID_PORT_BY_ZONE_MANAGER = {
'msg_id': 340,
'loglevel': base_logging.WARNING,
'msg': 'Port %(port)s will not be used because it is not considered '
'to be active by the Fibre Channel Zone Manager.',
'suffix': WARNING_SUFFIX,
}
STORAGE_COMMAND_FAILED = {
'msg_id': 600,
'loglevel': base_logging.ERROR,
@ -427,6 +449,36 @@ class HBSDMsg(enum.Enum):
'%(group_type)s, volume: %(volume)s, snapshot: %(snapshot)s)',
'suffix': ERROR_SUFFIX,
}
NO_ACTIVE_WWN = {
'msg_id': 747,
'loglevel': base_logging.ERROR,
'msg': 'Failed to initialize volume connection because no active WWN '
'was found for the connector. (WWN: %(wwn)s, volume: %(volume)s'
')',
'suffix': ERROR_SUFFIX,
}
NO_PORT_WITH_ACTIVE_WWN = {
'msg_id': 748,
'loglevel': base_logging.ERROR,
'msg': 'Failed to initialize volume connection because no port with '
'an active WWN was found. (%(port_wwns)s, volume: %(volume)s)',
'suffix': ERROR_SUFFIX,
}
ZONE_MANAGER_IS_NOT_AVAILABLE = {
'msg_id': 749,
'loglevel': base_logging.ERROR,
'msg': 'The Fibre Channel Zone Manager is not available. The Fibre '
'Channel Zone Manager must be up and running when '
'port_scheduler parameter is set to True.',
'suffix': ERROR_SUFFIX,
}
HOST_GROUP_OR_WWN_IS_NOT_AVAILABLE = {
'msg_id': 750,
'loglevel': base_logging.ERROR,
'msg': 'Failed to initialize volume connection because no available '
'resource of host group or wwn was found. (ports: %(ports)s)',
'suffix': ERROR_SUFFIX,
}
def __init__(self, error_info):
"""Initialize Enum attributes."""
@ -514,6 +566,12 @@ def safe_get_message_id(errobj):
return errobj.get('messageId', '')
def safe_get_message(errobj):
    """Return the 'message' field of *errobj*, or '' when unavailable."""
    return errobj.get('message', '') if errobj else ''
def is_shared_connection(volume, connector):
"""Check if volume is multiattach to 1 node."""
connection_count = 0

View File

@ -0,0 +1,14 @@
---
features:
- |
Hitachi driver: Add a feature ``Port Scheduler``. This feature is enabled
when specifying ``True`` for the parameter ``hitachi_port_scheduler``.
When this feature is enabled and an attach request is received, the active
WWNs that are obtained by Fibre Channel Zone Manager will be distributed
and registered to the host groups of each port of the storage system.
To use this feature, specify ``True`` for both parameters
``hitachi_group_request`` and ``hitachi_rest_name_only_discovery``.
    If you specify ``False`` for ``hitachi_rest_name_only_discovery`` or
    leave it at its default value, attaching a volume will take a long
    time because the host groups are searched on all specified ports.
This feature is supported on Fibre Channel only.