Update to hacking 4.0.0

Update to hacking version 4.0.0 and clean up the issues the newer
checks flag: use assertIn/assertNotIn, assertIsInstance and
assertRaisesRegex in place of the older assertTrue/assertFalse/
assertRaisesRegexp forms, and pass arguments to LOG calls instead of
pre-formatting messages with str.format().

Change-Id: I0ec3005eef5df45c45eea406ee2102ce10b6239b
Author: Eric Harney
Date:   2021-01-11 11:15:50 -05:00
Parent: e28e1a2f41
Commit: 2ae735bff4

17 changed files with 43 additions and 46 deletions
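
Most of the hunks below make the same mechanical substitutions: assertTrue(x in y) becomes assertIn/assertNotIn, assertTrue(isinstance(a, B)) becomes assertIsInstance, and the deprecated assertRaisesRegexp alias becomes assertRaisesRegex. As a rough, self-contained sketch (illustrative only, not code from the Cinder tree), the two styles compare as follows; the specific assertions also report the offending values on failure instead of just "False is not true":

import unittest


class AssertionStyleExample(unittest.TestCase):
    """Illustrates the assertion updates applied throughout this change."""

    def test_membership(self):
        targets = ['iqn.example:a', 'iqn.example:b']
        # Old style, now flagged by the updated checks:
        #     self.assertTrue('iqn.example:a' in targets)
        # Preferred style:
        self.assertIn('iqn.example:a', targets)
        self.assertNotIn('iqn.example:c', targets)

    def test_type_and_exception(self):
        # assertIsInstance replaces assertTrue(isinstance(...)).
        self.assertIsInstance({}, dict)
        # assertRaisesRegexp is a deprecated alias of assertRaisesRegex.
        with self.assertRaisesRegex(ValueError, 'invalid literal'):
            int('not-a-number')


if __name__ == '__main__':
    unittest.main()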

@@ -1133,8 +1133,8 @@ class TestCinderManageCmd(test.TestCase):
{'type': str,
'help': 'Cluster to delete.',
'metavar': 'cluster-name'})
-self.assertTrue(expected_argument in
-cinder_manage.CONF.category.action_fn.args)
+self.assertIn(expected_argument,
+cinder_manage.CONF.category.action_fn.args)
self.assertTrue(hasattr(cinder_manage.CONF.category, 'cluster_name'))
get_admin_mock.assert_called_with()
get_cluster_mock.assert_called_with(get_admin_mock.return_value,

@@ -57,7 +57,7 @@ class TestInitializeConnectionSnapshot(powerflex.TestPowerFlexDriver):
self.assertEqual(self.snapshot_id,
props['data']['scaleIO_volume_id'])
# make sure QOS properties are set
-self.assertTrue('iopsLimit' in props['data'])
+self.assertIn('iopsLimit', props['data'])

def test_initialize_connection_with_size(self):
"""Test initializing when we know the snapshot size.
@@ -79,7 +79,7 @@ class TestInitializeConnectionSnapshot(powerflex.TestPowerFlexDriver):
self.assertEqual(self.snapshot_id,
props['data']['scaleIO_volume_id'])
# make sure QOS properties are set
-self.assertTrue('iopsLimit' in props['data'])
+self.assertIn('iopsLimit', props['data'])

def test_qos_specs(self):
"""Ensure QOS specs are honored if present."""

@@ -83,7 +83,7 @@ class TestRevertVolume(powerflex.TestPowerFlexDriver):
def test_revert_to_snapshot_replicated_volume(self):
self.volume_is_replicated_mock.return_value = True
-self.assertRaisesRegexp(
+self.assertRaisesRegex(
exception.InvalidVolume,
'Reverting replicated volume is not allowed.',
self.driver.revert_to_snapshot,
@@ -95,7 +95,7 @@ class TestRevertVolume(powerflex.TestPowerFlexDriver):
patched_volume.id = self.volume.id
patched_volume.size = 16
patched_volume.is_replicated.return_value = False
-self.assertRaisesRegexp(
+self.assertRaisesRegex(
exception.InvalidVolume,
('Volume %s size is not equal to snapshot %s size.' %
(self.volume.id, self.snapshot.id)),

@@ -1599,7 +1599,7 @@ class PowerMaxCommonTest(test.TestCase):
srp_record = self.common.get_attributes_from_cinder_config()
extra_specs = self.common._set_vmax_extra_specs(
self.data.vol_type_extra_specs, srp_record)
-self.assertTrue('storagetype:storagegrouptags' not in extra_specs)
+self.assertNotIn('storagetype:storagegrouptags', extra_specs)

def test_set_vmax_extra_specs_tags_set_correctly(self):
srp_record = self.common.get_attributes_from_cinder_config()

@@ -153,8 +153,8 @@ class TestVolumeAttachDetach(powerstore.TestPowerStoreDriver):
iqns, portals = self.iscsi_driver.adapter._get_iscsi_targets("A1")
self.assertTrue(len(iqns) == len(portals))
self.assertEqual(1, len(portals))
-self.assertFalse(
-"iqn.2020-07.com.dell:dellemc-powerstore-test-iqn-2" in iqns
+self.assertNotIn(
+"iqn.2020-07.com.dell:dellemc-powerstore-test-iqn-2", iqns
)

def test_get_iscsi_targets_filtered_no_matched_ports(self):

@@ -1847,8 +1847,8 @@ class ISCSIAdapterTest(test.TestCase):
self.assertListEqual([hlu, hlu], info['target_luns'])
self.assertListEqual(target_portals, info['target_portals'])
self.assertEqual(hlu, info['target_lun'])
-self.assertTrue(info['target_portal'] in target_portals)
-self.assertTrue(info['target_iqn'] in target_iqns)
+self.assertIn(info['target_portal'], target_portals)
+self.assertIn(info['target_iqn'], target_iqns)

@patch_for_iscsi_adapter
def test_initialize_connection_volume(self):

@@ -560,7 +560,7 @@ class TestClient(test_base.TestCase):
@res_mock.patch_client
def test_get_running_policy(self, client, mocked):
policy, is_new = client.get_running_policy()
-self.assertEqual(policy.state in ['Running', 'Measuring'], True)
+self.assertIn(policy.state, ['Running', 'Measuring'])
self.assertFalse(is_new)

@res_mock.patch_client

@@ -1444,7 +1444,7 @@ class DS8KProxyTest(test.TestCase):
lun = ds8kproxy.Lun(volume)
self.driver._create_lun_helper(lun)
pid, lss = lun.pool_lss_pair['source']
-self.assertTrue(lss in ['20', '21', '22', '23'])
+self.assertIn(lss, ('20', '21', '22', '23'))

def test_find_lss_for_volume_which_belongs_to_cg2(self):
"""find lss for volume, which is in CG having volumes."""

@@ -8639,28 +8639,28 @@ class StorwizeHelpersTestCase(test.TestCase):
state['available_iogrps'] = [0, 1, 2, 3]
iog = self.storwize_svc_common.select_io_group(state, opts, pool)
-self.assertTrue(iog in state['available_iogrps'])
+self.assertIn(iog, state['available_iogrps'])
self.assertEqual(0, iog)
opts['iogrp'] = '0'
state['available_iogrps'] = [0, 1, 2]
iog = self.storwize_svc_common.select_io_group(state, opts, pool)
-self.assertTrue(iog in state['available_iogrps'])
+self.assertIn(iog, state['available_iogrps'])
self.assertEqual(0, iog)
opts['iogrp'] = '1,2'
state['available_iogrps'] = [0, 2]
iog = self.storwize_svc_common.select_io_group(state, opts, pool)
-self.assertTrue(iog in state['available_iogrps'])
+self.assertIn(iog, state['available_iogrps'])
self.assertEqual(2, iog)
opts['iogrp'] = ' 0, 1, 2 '
state['available_iogrps'] = [0, 1, 2, 3]
iog = self.storwize_svc_common.select_io_group(state, opts, pool)
-self.assertTrue(iog in state['available_iogrps'])
+self.assertIn(iog, state['available_iogrps'])
# since vdisk count in all iogroups is same, it will pick the first
self.assertEqual(0, iog)
@@ -8668,7 +8668,7 @@ class StorwizeHelpersTestCase(test.TestCase):
state['available_iogrps'] = [0, 1, 2, 3]
iog = self.storwize_svc_common.select_io_group(state, opts, pool)
-self.assertTrue(iog in state['available_iogrps'])
+self.assertIn(iog, state['available_iogrps'])
self.assertEqual(1, iog)

@mock.patch.object(storwize_svc_common.StorwizeSSH, 'lsmdiskgrp')
@@ -8699,28 +8699,28 @@ class StorwizeHelpersTestCase(test.TestCase):
state['available_iogrps'] = [0, 1, 2, 3]
iog = self.storwize_svc_common.select_io_group(state, opts, pool)
-self.assertTrue(iog in state['available_iogrps'])
+self.assertIn(iog, state['available_iogrps'])
self.assertEqual(2, iog)
opts['iogrp'] = '0'
state['available_iogrps'] = [0, 1, 2]
iog = self.storwize_svc_common.select_io_group(state, opts, pool)
-self.assertTrue(iog in state['available_iogrps'])
+self.assertIn(iog, state['available_iogrps'])
self.assertEqual(0, iog)
opts['iogrp'] = '1,2'
state['available_iogrps'] = [0, 2]
iog = self.storwize_svc_common.select_io_group(state, opts, pool)
-self.assertTrue(iog in state['available_iogrps'])
+self.assertIn(iog, state['available_iogrps'])
self.assertEqual(2, iog)
opts['iogrp'] = ' 0, 1, 2 '
state['available_iogrps'] = [0, 1, 2, 3]
iog = self.storwize_svc_common.select_io_group(state, opts, pool)
-self.assertTrue(iog in state['available_iogrps'])
+self.assertIn(iog, state['available_iogrps'])
# since vdisk count in all iogroups is same, it will pick the first
self.assertEqual(0, iog)
@@ -8728,7 +8728,7 @@ class StorwizeHelpersTestCase(test.TestCase):
state['available_iogrps'] = [0, 1, 2, 3]
iog = self.storwize_svc_common.select_io_group(state, opts, pool)
-self.assertTrue(iog in state['available_iogrps'])
+self.assertIn(iog, state['available_iogrps'])
self.assertEqual(1, iog)

@mock.patch.object(storwize_svc_common.StorwizeHelpers,

@@ -155,28 +155,28 @@ class InStorageAssistantTestCase(test.TestCase):
state['available_iogrps'] = [0, 1, 2, 3]
iog = self.instorage_mcs_common.select_io_group(state, opts)
-self.assertTrue(iog in state['available_iogrps'])
+self.assertIn(iog, state['available_iogrps'])
self.assertEqual(2, iog)
opts['iogrp'] = '0'
state['available_iogrps'] = [0, 1, 2]
iog = self.instorage_mcs_common.select_io_group(state, opts)
-self.assertTrue(iog in state['available_iogrps'])
+self.assertIn(iog, state['available_iogrps'])
self.assertEqual(0, iog)
opts['iogrp'] = '1,2'
state['available_iogrps'] = [0, 2]
iog = self.instorage_mcs_common.select_io_group(state, opts)
-self.assertTrue(iog in state['available_iogrps'])
+self.assertIn(iog, state['available_iogrps'])
self.assertEqual(2, iog)
opts['iogrp'] = ' 0, 1, 2 '
state['available_iogrps'] = [0, 1, 2, 3]
iog = self.instorage_mcs_common.select_io_group(state, opts)
-self.assertTrue(iog in state['available_iogrps'])
+self.assertIn(iog, state['available_iogrps'])
# since vdisk count in all iogroups is same, it will pick the first
self.assertEqual(0, iog)
@@ -184,7 +184,7 @@ class InStorageAssistantTestCase(test.TestCase):
state['available_iogrps'] = [0, 1, 2, 3]
iog = self.instorage_mcs_common.select_io_group(state, opts)
-self.assertTrue(iog in state['available_iogrps'])
+self.assertIn(iog, state['available_iogrps'])
self.assertEqual(1, iog)

@@ -442,8 +442,8 @@ class NetAppApiElementTransTests(test.TestCase):
# No ordering is guaranteed for elements in this XML.
self.assertTrue(result_xml.startswith("<options>"), result_xml)
-self.assertTrue("<test1>abc</test1>" in result_xml, result_xml)
-self.assertTrue("<test2>abc</test2>" in result_xml, result_xml)
+self.assertIn("<test1>abc</test1>", result_xml)
+self.assertIn("<test2>abc</test2>", result_xml)
self.assertTrue(result_xml.rstrip().endswith("</options>"), result_xml)

def test_add_new_child(self):

@@ -742,7 +742,7 @@ class NetAppNfsDriverTestCase(test.TestCase):
self.assertEqual(model_update['replication_status'],
actual_model_update['replication_status'])
else:
-self.assertFalse('replication_status' in actual_model_update)
+self.assertNotIn('replication_status', actual_model_update)
self.driver._check_volume_type.assert_called_once_with(
volume, self.fake_nfs_export_1, test_file, {})

@@ -120,7 +120,7 @@ class RSDClientTestCase(test.TestCase):
rsd_client = rsd_driver.RSDClient.initialize(MOCK_URL, MOCK_USER,
MOCK_PASSWORD,
verify=True)
-self.assertTrue(isinstance(rsd_client, rsd_driver.RSDClient))
+self.assertIsInstance(rsd_client, rsd_driver.RSDClient)

def test_initialize_rsd_api_incorrect_version(self):
self.mock_rsd_lib._rsd_api_version = "2.3.0"
@@ -134,7 +134,7 @@ class RSDClientTestCase(test.TestCase):
rsd_client = rsd_driver.RSDClient.initialize(MOCK_URL, MOCK_USER,
MOCK_PASSWORD,
verify=True)
-self.assertTrue(isinstance(rsd_client, rsd_driver.RSDClient))
+self.assertIsInstance(rsd_client, rsd_driver.RSDClient)

def test_initialize_rsd_lib_incorrect_version(self):
self.mock_rsd_lib._redfish_version = "1.0.0"
@@ -148,7 +148,7 @@ class RSDClientTestCase(test.TestCase):
rsd_client = rsd_driver.RSDClient.initialize(MOCK_URL, MOCK_USER,
MOCK_PASSWORD,
verify=True)
-self.assertTrue(isinstance(rsd_client, rsd_driver.RSDClient))
+self.assertIsInstance(rsd_client, rsd_driver.RSDClient)

def test_initialize_invalid_credentials(self):
self.mock_rsd_lib_factory.side_effect = (

@@ -147,7 +147,7 @@ class TestCiscoFcZoneDriver(CiscoFcZoneDriverBaseTest, test.TestCase):
'session': 'none'}
get_active_zone_set_mock.return_value = _active_cfg_default
self.driver.add_connection('CISCO_FAB_1', _initiator_target_map)
-self.assertTrue(_zone_name in GlobalVars._zone_state)
+self.assertIn(_zone_name, GlobalVars._zone_state)

@mock.patch.object(driver.CiscoFCZoneDriver, 'get_zoning_status')
@mock.patch.object(driver.CiscoFCZoneDriver, 'get_active_zone_set')
@@ -161,7 +161,7 @@ class TestCiscoFcZoneDriver(CiscoFcZoneDriverBaseTest, test.TestCase):
'session': 'none'}
get_active_zone_set_mock.return_value = {}
self.driver.add_connection('CISCO_FAB_1', _initiator_target_map)
-self.assertTrue(_zone_name in GlobalVars._zone_state)
+self.assertIn(_zone_name, GlobalVars._zone_state)

def test_add_connection_for_invalid_fabric(self):
"""Test abnormal flows."""

@@ -387,10 +387,9 @@ class RBDISCSIDriver(rbd.RBDDriver):
target_portal = ips[0]
if netutils.is_valid_ipv6(target_portal):
-target_portal = "[{}]:{}".format(
-target_portal, "3260")
+target_portal = "[%s]:3260" % target_portal
else:
-target_portal = "{}:3260".format(target_portal)
+target_portal = "%s:3260" % target_portal
data = {
'driver_volume_type': 'iscsi',
@@ -424,8 +423,8 @@ class RBDISCSIDriver(rbd.RBDDriver):
if not found:
# we can delete the disk definition
-LOG.info("Deleteing volume definition in iscsi gateway for {}".
-format(lun_name))
+LOG.info("Deleting volume definition in iscsi gateway for %s",
+lun_name)
self.client.delete_disk(self.configuration.rbd_pool, volume.name,
preserve_image=True)
@@ -459,9 +458,7 @@ class RBDISCSIDriver(rbd.RBDDriver):
disks = self._get_disks()
lun_name = self._lun_name(volume.name)
if lun_name not in disks['disks']:
-LOG.debug("Volume {} not attached anywhere.".format(
-lun_name
-))
+LOG.debug("Volume %s not attached anywhere.", lun_name)
return
for target_iqn_tmp in iscsi_config['targets']:
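
The logging changes in this file follow the rule that log calls should hand arguments to the logger rather than pre-formatting the message: interpolation is then deferred until the record is actually emitted, and checkers such as flake8-logging-format (listed in test-requirements.txt below) flag the str.format() form. A minimal sketch with the standard logging module, mirroring the calls above:

import logging

LOG = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

lun_name = 'volume-1234'

# Flagged style: the string is formatted even though DEBUG is disabled here.
LOG.debug("Volume {} not attached anywhere.".format(lun_name))

# Preferred style: the logging framework interpolates only if the record
# is actually going to be emitted.
LOG.debug("Volume %s not attached anywhere.", lun_name)
LOG.info("Deleting volume definition in iscsi gateway for %s", lun_name)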

@@ -285,7 +285,7 @@ def create_tenant(driver, project_id):
try:
driver.api.tenants.create(name=name)
except dfs_sdk.exceptions.ApiConflictError:
-LOG.debug("Tenant {} already exists".format(name))
+LOG.debug("Tenant %s already exists", name)
return _format_tenant(name)

@@ -3,7 +3,7 @@
# process, which may cause wedges in the gate later.
# Install bounded pep8/pyflakes first, then let flake8 install
-hacking>=3.1.0,<3.2.0 # Apache-2.0
+hacking>=4.0.0,<4.1.0 # Apache-2.0
flake8-import-order # LGPLv3
flake8-logging-format>=0.6.0 # Apache-2.0
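
For completeness, the new specifier keeps the hacking dependency on the 4.0.x series only. A quick sanity check of the bound (this sketch uses the packaging library purely for illustration; it is an assumption, not something this change depends on):

from packaging.specifiers import SpecifierSet

spec = SpecifierSet(">=4.0.0,<4.1.0")
print("4.0.0" in spec)  # True: the new minimum
print("4.0.1" in spec)  # True: bugfix releases remain allowed
print("3.1.0" in spec)  # False: the old series is excluded
print("4.1.0" in spec)  # False: blocked by the upper bound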