Bump hacking version to 1.1.x

E731 errors are fixed, since the use of lambdas makes code
harder to debug.

Ignore E402 and W503/W504 since these don't make a
sensible case for us to change code.

This also requires fixing some E501 errors that should
have already been detected, but weren't.

E117 and E305 errors are disabled for the moment,
and will be fixed in a subsequent patch.

Pin pycodestyle to 2.5.0.

Change-Id: Ia06940cff7c3b71938eddb271d2cb8170e02e954
This commit is contained in:
Eric Harney 2019-04-15 11:07:12 -04:00
parent a0bbf1b3ff
commit 830490916b
26 changed files with 124 additions and 74 deletions

View File

@ -152,7 +152,7 @@ class ChunkedBackupDriver(driver.BackupDriver):
@abc.abstractmethod
def get_object_writer(self, container, object_name, extra_metadata=None):
"""Returns a writer object which stores the chunk data in backup repository.
"""Returns a writer object which stores the chunk data.
The object returned should be a context handler that can be used in a
"with" context.

View File

@ -253,5 +253,6 @@ def main():
'Please re-run using the --config-dir <dirname> option '
'with a valid cinder configuration directory.')
if __name__ == '__main__':
sys.exit(main())

View File

@ -187,7 +187,7 @@ class RequestContext(context.RequestContext):
)
def authorize(self, action, target=None, target_obj=None, fatal=True):
"""Verifies that the given action is valid on the target in this context.
"""Verify that the given action is valid on the target in this context.
:param action: string representing the action to be checked.
:param target: dictionary representing the object of the action

View File

@ -263,13 +263,14 @@ class DbQuotaDriver(object):
"""
# Filter resources
if has_sync:
sync_filt = lambda x: hasattr(x, 'sync')
else:
sync_filt = lambda x: not hasattr(x, 'sync')
def sync_filt(x, has_sync):
if has_sync:
return hasattr(x, 'sync')
else:
return not hasattr(x, 'sync')
desired = set(keys)
sub_resources = {k: v for k, v in resources.items()
if k in desired and sync_filt(v)}
if k in desired and sync_filt(v, has_sync)}
# Make sure we accounted for all of them...
if len(keys) != len(sub_resources):
@ -1237,6 +1238,7 @@ class GroupQuotaEngine(QuotaEngine):
def register_resources(self, resources):
raise NotImplementedError(_("Cannot register resources"))
QUOTAS = VolumeTypeQuotaEngine()
CGQUOTAS = CGQuotaEngine()
GROUP_QUOTAS = GroupQuotaEngine()

View File

@ -334,7 +334,8 @@ class TestCase(testtools.TestCase):
osprofiler should not run for unit tests.
"""
side_effect = lambda value: value
def side_effect(value):
return value
mock_decorator = mock.MagicMock(side_effect=side_effect)
p = mock.patch("osprofiler.profiler.trace_cls",
return_value=mock_decorator)
@ -486,7 +487,9 @@ class ModelsObjectComparatorMixin(object):
def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None,
msg=None):
obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys)
def obj_to_dict(o):
return self._dict_from_object(o, ignored_keys)
objs1 = map(obj_to_dict, objs1)
objs2 = list(map(obj_to_dict, objs2))
# We don't care about the order of the lists, as long as they are in

View File

@ -62,7 +62,7 @@ class NestedQuotasTest(functional_helpers._FunctionalTestBase):
over_quota_exception = client.OpenStackApiException413
def _create_project_hierarchy(self):
"""Sets up the nested hierarchy show below.
r"""Sets up the nested hierarchy show below.
+-----------+
| A |

View File

@ -342,36 +342,37 @@ class ParseLimitsTest(BaseLimitTestSuite):
def test_multiple_rules(self):
"""Test that parse_limits() handles multiple rules correctly."""
try:
l = limits.Limiter.parse_limits('(get, *, .*, 20, minute);'
'(PUT, /foo*, /foo.*, 10, hour);'
'(POST, /bar*, /bar.*, 5, second);'
'(Say, /derp*, /derp.*, 1, day)')
test_limits = limits.Limiter.parse_limits(
'(get, *, .*, 20, minute);'
'(PUT, /foo*, /foo.*, 10, hour);'
'(POST, /bar*, /bar.*, 5, second);'
'(Say, /derp*, /derp.*, 1, day)')
except ValueError as e:
self.assertFalse(six.text_type(e))
# Make sure the number of returned limits are correct
self.assertEqual(4, len(l))
self.assertEqual(4, len(test_limits))
# Check all the verbs...
expected = ['GET', 'PUT', 'POST', 'SAY']
self.assertEqual(expected, [t.verb for t in l])
self.assertEqual(expected, [t.verb for t in test_limits])
# ...the URIs...
expected = ['*', '/foo*', '/bar*', '/derp*']
self.assertEqual(expected, [t.uri for t in l])
self.assertEqual(expected, [t.uri for t in test_limits])
# ...the regexes...
expected = ['.*', '/foo.*', '/bar.*', '/derp.*']
self.assertEqual(expected, [t.regex for t in l])
self.assertEqual(expected, [t.regex for t in test_limits])
# ...the values...
expected = [20, 10, 5, 1]
self.assertEqual(expected, [t.value for t in l])
self.assertEqual(expected, [t.value for t in test_limits])
# ...and the units...
expected = [limits.PER_MINUTE, limits.PER_HOUR,
limits.PER_SECOND, limits.PER_DAY]
self.assertEqual(expected, [t.unit for t in l])
self.assertEqual(expected, [t.unit for t in test_limits])
class LimiterTest(BaseLimitTestSuite):

View File

@ -541,7 +541,8 @@ class BackupCephTestCase(test.TestCase):
mock.patch.object(self.service,
'_try_delete_base_image'):
with mock.patch.object(self.service, '_backup_metadata'):
with mock.patch.object(self.service, 'get_backup_snaps') as \
with mock.patch.object(self.service,
'get_backup_snaps') as \
mock_get_backup_snaps:
image = self.service.rbd.Image()
meta = linuxrbd.RBDImageMetadata(image,
@ -566,8 +567,9 @@ class BackupCephTestCase(test.TestCase):
with mock.patch.object(self.service, '_snap_exists'), \
mock.patch.object(self.service, '_get_backup_base_name') as \
mock_get_backup_base_name, mock.patch.object(
self.service, '_get_most_recent_snap') as mock_get_most_recent_snap, \
mock_get_backup_base_name, \
mock.patch.object(self.service, '_get_most_recent_snap') as \
mock_get_most_recent_snap, \
mock.patch.object(self.service, '_rbd_diff_transfer'):
mock_get_backup_base_name.return_value = backup_name
mock_get_most_recent_snap.return_value = (
@ -635,7 +637,8 @@ class BackupCephTestCase(test.TestCase):
= mock_rbd_diff_transfer_side_effect
with mock.patch.object(self.service, '_full_backup'), \
mock.patch.object(self.service, '_try_delete_base_image') as \
mock.patch.object(self.service,
'_try_delete_base_image') as \
mock_try_delete_base_image:
def mock_try_delete_base_image_side_effect(backup_id,
base_name):
@ -748,7 +751,8 @@ class BackupCephTestCase(test.TestCase):
mock_get_backup_snaps:
with mock.patch.object(self.service, '_rbd_diff_transfer') as \
mock_rbd_diff_transfer:
with mock.patch.object(self.service, '_get_backup_base_name') as \
with mock.patch.object(self.service,
'_get_backup_base_name') as \
mock_get_backup_base_name:
mock_get_backup_base_name.return_value = (
backup_name)
@ -790,7 +794,8 @@ class BackupCephTestCase(test.TestCase):
mock_get_backup_base_name:
with mock.patch.object(self.service, '_rbd_diff_transfer') as \
mock_rbd_diff_transfer:
with mock.patch.object(self.service, '_get_new_snap_name') as \
with mock.patch.object(self.service,
'_get_new_snap_name') as \
mock_get_new_snap_name:
mock_get_backup_base_name.return_value = (
backup_name)
@ -1346,7 +1351,8 @@ class BackupCephTestCase(test.TestCase):
with mock.patch.object(self.service, '_file_is_rbd',
return_value=False):
with mock.patch.object(self.service, '_full_backup'):
with mock.patch.object(self.service, 'delete_backup') as \
with mock.patch.object(self.service,
'delete_backup') as \
mock_delete:
self.assertRaises(exception.BackupOperationError,
self.service.backup, self.backup,

View File

@ -848,7 +848,7 @@ class BackupTestCase(BaseBackupTest):
mock_chown,
mock_backup_device,
mock_brick):
backup_service = lambda: None
backup_service = mock.Mock()
backup_service.backup = mock.Mock(
return_value=mock.sentinel.backup_update)
self.backup_mgr.service = lambda x: backup_service

View File

@ -141,7 +141,8 @@ class TestTgtAdmDriver(tf.TargetDriverFixture):
@test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX")
def test_create_iscsi_target(self):
with mock.patch('cinder.privsep.targets.tgt.tgtadm_show', return_value=('', '')),\
with mock.patch('cinder.privsep.targets.tgt.tgtadm_show',
return_value=('', '')),\
mock.patch.object(self.target, '_get_target',
side_effect=lambda x: 1),\
mock.patch('cinder.privsep.targets.tgt.tgtadmin_update',
@ -342,7 +343,8 @@ class TestTgtAdmDriver(tf.TargetDriverFixture):
self.testvol['name'] + ' 1',
'auth': 'CHAP QZJb P68e'}
with mock.patch('cinder.privsep.targets.tgt.tgtadm_show', return_value=('', '')),\
with mock.patch('cinder.privsep.targets.tgt.tgtadm_show',
return_value=('', '')),\
mock.patch.object(self.target, '_get_target',
side_effect=lambda x: 1),\
mock.patch.object(self.target, '_verify_backing_lun',
@ -390,7 +392,8 @@ class TestTgtAdmDriver(tf.TargetDriverFixture):
@test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX")
def test_create_iscsi_target_retry(self):
with mock.patch('cinder.privsep.targets.tgt.tgtadm_show', return_value=('', '')),\
with mock.patch('cinder.privsep.targets.tgt.tgtadm_show',
return_value=('', '')),\
mock.patch.object(self.target, '_get_target',
side_effect=[None, None, 1]) as get_target,\
mock.patch('cinder.privsep.targets.tgt.tgtadmin_update',

View File

@ -1201,7 +1201,7 @@ class TestCinderRtstoolCmd(test.TestCase):
with mock.patch.object(rtslib_fb, 'NetworkPortal') as network_portal, \
mock.patch.object(rtslib_fb, 'LUN') as lun, \
mock.patch.object(rtslib_fb, 'TPG') as tpg, \
mock.patch.object(rtslib_fb, 'FabricModule') as fabric_module, \
mock.patch.object(rtslib_fb, 'FabricModule') as fabric_mod, \
mock.patch.object(rtslib_fb, 'Target') as target, \
mock.patch.object(rtslib_fb, 'BlockStorageObject') as \
block_storage_object, \
@ -1210,7 +1210,7 @@ class TestCinderRtstoolCmd(test.TestCase):
rts_root.return_value = root_new
block_storage_object.return_value = mock.sentinel.so_new
target.return_value = mock.sentinel.target_new
fabric_module.return_value = mock.sentinel.fabric_new
fabric_mod.return_value = mock.sentinel.fabric_new
tpg_new = tpg.return_value
lun.return_value = mock.sentinel.lun_new
@ -1235,7 +1235,7 @@ class TestCinderRtstoolCmd(test.TestCase):
name=mock.sentinel.name, dev=mock.sentinel.backing_device)
target.assert_called_once_with(mock.sentinel.fabric_new,
mock.sentinel.name, 'create')
fabric_module.assert_called_once_with('iscsi')
fabric_mod.assert_called_once_with('iscsi')
tpg.assert_called_once_with(mock.sentinel.target_new,
mode='create')
tpg_new.set_attribute.assert_called_once_with('authentication',
@ -1261,7 +1261,7 @@ class TestCinderRtstoolCmd(test.TestCase):
with mock.patch.object(rtslib_fb, 'NetworkPortal') as network_portal, \
mock.patch.object(rtslib_fb, 'LUN') as lun, \
mock.patch.object(rtslib_fb, 'TPG') as tpg, \
mock.patch.object(rtslib_fb, 'FabricModule') as fabric_module, \
mock.patch.object(rtslib_fb, 'FabricModule') as fabric_mod, \
mock.patch.object(rtslib_fb, 'Target') as target, \
mock.patch.object(rtslib_fb, 'BlockStorageObject') as \
block_storage_object, \
@ -1270,7 +1270,7 @@ class TestCinderRtstoolCmd(test.TestCase):
rts_root.return_value = root_new
block_storage_object.return_value = mock.sentinel.so_new
target.return_value = mock.sentinel.target_new
fabric_module.return_value = mock.sentinel.fabric_new
fabric_mod.return_value = mock.sentinel.fabric_new
tpg_new = tpg.return_value
lun.return_value = mock.sentinel.lun_new
@ -1285,7 +1285,7 @@ class TestCinderRtstoolCmd(test.TestCase):
name=mock.sentinel.name, dev=mock.sentinel.backing_device)
target.assert_called_once_with(mock.sentinel.fabric_new,
mock.sentinel.name, 'create')
fabric_module.assert_called_once_with('iscsi')
fabric_mod.assert_called_once_with('iscsi')
tpg.assert_called_once_with(mock.sentinel.target_new,
mode='create')
tpg_new.set_attribute.assert_called_once_with('authentication',

View File

@ -16,7 +16,7 @@ import ddt
import textwrap
import mock
import pep8
import pycodestyle
from cinder.hacking import checks
from cinder import test
@ -130,16 +130,17 @@ class HackingTestCase(test.TestCase):
"msg = _('My message')",
"cinder.tests.unit/other_files5.py"))))
# We are patching pep8 so that only the check under test is actually
# installed.
@mock.patch('pep8._checks',
# We are patching pycodestyle/pep8 so that only the check under test is
# actually installed.
# TODO(eharney): don't patch private members of external libraries
@mock.patch('pycodestyle._checks',
{'physical_line': {}, 'logical_line': {}, 'tree': {}})
def _run_check(self, code, checker, filename=None):
pep8.register_check(checker)
pycodestyle.register_check(checker)
lines = textwrap.dedent(code).strip().splitlines(True)
checker = pep8.Checker(filename=filename, lines=lines)
checker = pycodestyle.Checker(filename=filename, lines=lines)
checker.check_all()
checker.report._deferred_print.sort()
return checker.report._deferred_print

View File

@ -185,7 +185,9 @@ class TestGroups(scaleio.TestScaleIODriver):
source_group=self.group, source_vols=self.volumes))
self.assertEqual(fields.GroupStatus.AVAILABLE,
result_model_update['status'])
get_pid = lambda snapshot: snapshot['provider_id']
def get_pid(snapshot):
return snapshot['provider_id']
volume_provider_list = list(map(get_pid, result_volumes_model_update))
self.assertListEqual(volume_provider_list, ['sid1', 'sid2'])
@ -210,7 +212,9 @@ class TestGroups(scaleio.TestScaleIODriver):
snapshots=self.snapshots))
self.assertEqual(fields.GroupStatus.AVAILABLE,
result_model_update['status'])
get_pid = lambda snapshot: snapshot['provider_id']
def get_pid(snapshot):
return snapshot['provider_id']
volume_provider_list = list(map(get_pid, result_volumes_model_update))
self.assertListEqual(volume_provider_list, ['sid1', 'sid2'])
@ -272,7 +276,9 @@ class TestGroups(scaleio.TestScaleIODriver):
result_model_update['status'])
self.assertTrue(all(snapshot['status'] == 'available' for snapshot in
result_snapshot_model_update))
get_pid = lambda snapshot: snapshot['provider_id']
def get_pid(snapshot):
return snapshot['provider_id']
snapshot_provider_list = list(map(get_pid,
result_snapshot_model_update))

View File

@ -510,7 +510,6 @@ class XtremIODriverISCSITestCase(BaseXtremIODriverTestCase):
self.data.test_snapshot)
self.assertTrue(delete.called)
# ##### Clone Volume #####
def test_clone_volume(self, req):
req.side_effect = xms_request

View File

@ -211,7 +211,7 @@ class TestCommonAdapter(test_base.TestCase):
vnx_common.do_create_cg_from_cgsnap(
cg_id, cg_host, volumes, cgsnap_id, snaps))
self.assertIsNone(model_update)
provider_location = re.findall('id\^12',
provider_location = re.findall(r'id\^12',
volume_updates[0]['provider_location'])
self.assertEqual(1, len(provider_location))
@ -270,7 +270,7 @@ class TestCommonAdapter(test_base.TestCase):
model_update, volume_updates = vnx_common.do_clone_cg(
cg_id, cg_host, volumes, src_cg_id, src_volumes)
self.assertIsNone(model_update)
provider_location = re.findall('id\^12',
provider_location = re.findall(r'id\^12',
volume_updates[0]['provider_location'])
self.assertEqual(1, len(provider_location))

View File

@ -2959,8 +2959,6 @@ port_speed!N/A
vol_name = kwargs['obj'].strip('\'\"')
site1_volume_info = self._volumes_list[vol_name]
site2_volume_info = self._volumes_list['site2' + vol_name]
site1_volume_fc_info = self._volumes_list['fcsite1' + vol_name]
site2_volume_fc_info = self._volumes_list['fcsite2' + vol_name]
del self._rcrelationship_list[self._volumes_list[vol_name]['RC_name']]
site1fcmap = None
@ -2977,10 +2975,8 @@ port_speed!N/A
if site1fcmap:
del self._fcmappings_list[site1fcmap['id']]
del site1_volume_fc_info
if site2fcmap:
del self._fcmappings_list[site2fcmap['id']]
del site2_volume_fc_info
del site2_volume_info
site1_volume_info['RC_name'] = ''

View File

@ -351,11 +351,11 @@ class NetAppApiElementTransTests(test.TestCase):
root = netapp_api.NaElement('root')
root['l'] = ['l1', 'l2']
root['t'] = ('t1', 't2')
l = root.get_child_by_name('l')
self.assertIsInstance(l, netapp_api.NaElement)
l_element = root.get_child_by_name('l')
self.assertIsInstance(l_element, netapp_api.NaElement)
t = root.get_child_by_name('t')
self.assertIsInstance(t, netapp_api.NaElement)
for le in l.get_children():
for le in l_element.get_children():
self.assertIn(le.get_name(), ['l1', 'l2'])
for te in t.get_children():
self.assertIn(te.get_name(), ['t1', 't2'])

View File

@ -21,6 +21,7 @@ import math
import mock
from oslo_utils import units
import six
import textwrap
from cinder import context
from cinder import exception
@ -157,9 +158,17 @@ class TestZFSSAISCSIDriver(test.TestCase):
'iqn.1-0.org.deb:01:d7, iqn.1-0.org.deb:01:d9'
self.configuration.zfssa_initiator_user = ''
self.configuration.zfssa_initiator_password = ''
self.configuration.zfssa_initiator_config = "{'test-init-grp1':[{'iqn':\
'iqn.1-0.org.deb:01:d7','user':'','password':''}],'test-init-grp\
2':[{'iqn':'iqn.1-0.org.deb:01:d9','user':'','password':''}]}"
self.configuration.zfssa_initiator_config = textwrap.dedent('''
{'test-init-grp1':
[{'iqn': 'iqn.1-0.org.deb:01:d7',
'user': '',
'password': ''}],
'test-init-grp2':
[{'iqn': 'iqn.1.0.org.deb:01:d9',
'user': '',
'password': ''}]
}
''')
self.configuration.zfssa_target_group = 'test-target-grp1'
self.configuration.zfssa_target_user = ''
self.configuration.zfssa_target_password = ''

View File

@ -104,7 +104,8 @@ zone_string_to_post_no_activate = "zonecfginfo=openstack_cfg "\
"&saveonly=true"
zone_string_to_post_invalid_request = "zonecfginfo=openstack_cfg "\
"openstack50060b0000c26604201900051ee8e32900000000000000000000000000;"\
"zone1;zone2 openstack50060b0000c26604201900051ee8e329000000000000000000000"\
"zone1;zone2 "\
"openstack50060b0000c26604201900051ee8e329000000000000000000000"\
"00000 50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29 "\
"zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 "\
"zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 &saveonly=true"

View File

@ -1765,9 +1765,12 @@ class ScaleIODriver(driver.VolumeDriver):
if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
raise NotImplementedError()
get_scaleio_snapshot_params = lambda snapshot: {
'volumeId': snapshot.volume['provider_id'],
'snapshotName': self._id_to_base64(snapshot['id'])}
def get_scaleio_snapshot_params(snapshot):
return {
'volumeId': snapshot.volume['provider_id'],
'snapshotName': self._id_to_base64(snapshot['id'])
}
snapshot_defs = list(map(get_scaleio_snapshot_params, snapshots))
r, response = self._snapshot_volume_group(snapshot_defs)
if r.status_code != http_client.OK and "errorCode" in response:
@ -1843,9 +1846,12 @@ class ScaleIODriver(driver.VolumeDriver):
if not volume_utils.is_group_a_cg_snapshot_type(group):
raise NotImplementedError()
get_scaleio_snapshot_params = lambda src_volume, trg_volume: {
'volumeId': src_volume['provider_id'],
'snapshotName': self._id_to_base64(trg_volume['id'])}
def get_scaleio_snapshot_params(src_volume, trg_volume):
return {
'volumeId': src_volume['provider_id'],
'snapshotName': self._id_to_base64(trg_volume['id'])
}
if group_snapshot and snapshots:
snapshot_defs = map(get_scaleio_snapshot_params,
snapshots,

View File

@ -219,12 +219,12 @@ class DrbdManageBaseDriver(driver.VolumeDriver):
if req:
if level:
l = level + ":" + key
lk = level + ":" + key
else:
l = key
lk = key
msg = _('DRBDmanage driver error: expected key "%s" '
'not in answer, wrong DRBDmanage version?') % l
'not in answer, wrong DRBDmanage version?') % lk
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
@ -848,6 +848,7 @@ class DrbdManageIscsiDriver(DrbdManageBaseDriver):
connector,
**kwargs)
# for backwards compatibility keep the old class name, too
DrbdManageDriver = DrbdManageIscsiDriver

View File

@ -159,7 +159,7 @@ class StorwizeSVCReplicationMetroMirror(
class StorwizeSVCReplicationGMCV(StorwizeSVCReplicationGlobalMirror):
"""Support for Storwize/SVC global mirror with change volumes mode replication.
"""Support for Storwize/SVC GMCV mode replication.
Global Mirror with Change Volumes(GMCV) provides asynchronous replication
based on point-in-time copies of data. The volumes in a GMCV relationship

View File

@ -97,7 +97,7 @@ class EntryCreateTask(flow_utils.CinderTask):
class ManageCastTask(flow_utils.CinderTask):
"""Performs a volume manage cast to the scheduler and to the volume manager.
"""Performs a volume manage cast to the scheduler and the volume manager.
This which will signal a transition of the api workflow to another child
and/or related workflow.

View File

@ -37,7 +37,7 @@ gitdb2==2.0.3
GitPython==2.1.8
google-api-python-client==1.4.2
greenlet==0.4.10
hacking==0.12.0
hacking==1.1.0
httplib2==0.9.1
idna==2.6
imagesize==1.0.0
@ -100,6 +100,7 @@ psycopg2==2.7
pyasn1-modules==0.2.1
pyasn1==0.4.2
pycadf==2.7.0
pycodestyle==2.5.0
pycparser==2.18
pyflakes==0.8.1
Pygments==2.2.0

View File

@ -3,7 +3,7 @@
# process, which may cause wedges in the gate later.
# Install bounded pep8/pyflakes first, then let flake8 install
hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0
hacking>=1.1.0,<1.2.0 # Apache-2.0
coverage!=4.4,>=4.0 # Apache-2.0
ddt>=1.2.1 # MIT
@ -11,6 +11,7 @@ fixtures>=3.0.0 # Apache-2.0/BSD
mock>=2.0.0 # BSD
os-api-ref>=1.4.0 # Apache-2.0
oslotest>=3.2.0 # Apache-2.0
pycodestyle==2.5.0 # MIT License
PyMySQL>=0.7.6 # MIT License
psycopg2>=2.7 # LGPL/ZPL
testtools>=2.2.0 # MIT

15
tox.ini
View File

@ -181,7 +181,20 @@ usedevelop = False
#
# E251 unexpected spaces around keyword / parameter equals
# reason: no improvement in readability
ignore = E251
#
# E402 module level import not at top of file
# reason: there are numerous places where we import modules
# later for legitimate reasons
#
# W503 line break before binary operator
# reason: pep8 itself is not sure about this one and
# reversed this rule in 2016
# W504 line break after binary operator
# reason: no agreement on this being universally
# preferable for our code. Disabled to keep checking
# tools from getting in our way with regards to this.
# E117/E305 - new rules, just disabled here temporarily.
ignore = E251,E402,W503,W504,E117,E305
# H904 Delay string interpolations at logging calls.
enable-extensions = H106,H203,H904
exclude = .git,.venv,.tox,dist,tools,doc/ext,*egg,build