Flake8: Fix E126 and E127
Fix these issues and enable E126 and E127: E126 continuation line over-indented for hanging indent E127 continuation line over-indented for visual indent Also fix a few occurrences in the touched files of: E128 continuation line under-indented for visual indent H405 multi line docstring summary not separated with an empty line Partial-Bug: #1333290 Change-Id: If822401fc7e1db49d595a0736a0dff8e00dfd217
This commit is contained in:
parent
290769f087
commit
9154f3e55b
|
@ -210,8 +210,8 @@ class ShareNetworkController(wsgi.Controller):
|
|||
return (usages[name]['reserved'] + usages[name]['in_use'])
|
||||
|
||||
if 'share_networks' in overs:
|
||||
msg = _("Quota exceeded for %(s_pid)s, tried to create"
|
||||
" share-network (%(d_consumed)d of %(d_quota)d "
|
||||
msg = _("Quota exceeded for %(s_pid)s, tried to create "
|
||||
"share-network (%(d_consumed)d of %(d_quota)d "
|
||||
"already consumed)")
|
||||
LOG.warn(msg % {'s_pid': context.project_id,
|
||||
'd_consumed': _consumed('share_networks'),
|
||||
|
|
|
@ -90,9 +90,11 @@ class ShareServerController(wsgi.Controller):
|
|||
s.share_network_name = s.share_network_id
|
||||
if search_opts:
|
||||
for k, v in six.iteritems(search_opts):
|
||||
share_servers = [s for s in share_servers if (hasattr(s, k) and
|
||||
s[k] == v or k == 'share_network' and v in
|
||||
[s.share_network['name'], s.share_network['id']])]
|
||||
share_servers = [s for s in share_servers if
|
||||
(hasattr(s, k) and
|
||||
s[k] == v or k == 'share_network' and
|
||||
v in [s.share_network['name'],
|
||||
s.share_network['id']])]
|
||||
return self._view_builder.build_share_servers(share_servers)
|
||||
|
||||
@wsgi.serializers(xml=ShareServerTemplate)
|
||||
|
|
|
@ -43,7 +43,5 @@ PING_PORTS = (
|
|||
("icmp", (-1, -1)),
|
||||
)
|
||||
|
||||
SERVICE_INSTANCE_SECGROUP_DATA = CIFS_PORTS + \
|
||||
NFS_PORTS + \
|
||||
SSH_PORTS + \
|
||||
PING_PORTS
|
||||
SERVICE_INSTANCE_SECGROUP_DATA = (
|
||||
CIFS_PORTS + NFS_PORTS + SSH_PORTS + PING_PORTS)
|
||||
|
|
|
@ -144,8 +144,8 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
|||
self.service_instance_manager = service_instance.\
|
||||
ServiceInstanceManager(self.db, self._helpers,
|
||||
driver_config=self.configuration)
|
||||
self.share_networks_servers = self.service_instance_manager.\
|
||||
share_networks_servers
|
||||
self.share_networks_servers = (
|
||||
self.service_instance_manager.share_networks_servers)
|
||||
self._setup_helpers()
|
||||
|
||||
def _setup_helpers(self):
|
||||
|
@ -207,9 +207,7 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
|||
LOG.debug('%s is not mounted' % share['name'])
|
||||
|
||||
def _get_mount_path(self, share):
|
||||
"""
|
||||
Returns the path, that will be used for mount device in service vm.
|
||||
"""
|
||||
"""Returns the path to use for mount device in service vm."""
|
||||
return os.path.join(self.configuration.share_mount_path, share['name'])
|
||||
|
||||
def _attach_volume(self, context, share, instance_id, volume):
|
||||
|
@ -262,9 +260,10 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
|||
|
||||
def _get_volume_snapshot(self, context, snapshot_id):
|
||||
"""Finds volume snaphots, associated to the specific share snaphots."""
|
||||
volume_snapshot_name = self.configuration.\
|
||||
volume_snapshot_name_template % snapshot_id
|
||||
volume_snapshot_list = self.volume_api.get_all_snapshots(context,
|
||||
volume_snapshot_name = (
|
||||
self.configuration.volume_snapshot_name_template % snapshot_id)
|
||||
volume_snapshot_list = self.volume_api.get_all_snapshots(
|
||||
context,
|
||||
{'display_name': volume_snapshot_name})
|
||||
volume_snapshot = None
|
||||
if len(volume_snapshot_list) == 1:
|
||||
|
@ -309,7 +308,9 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
|||
if snapshot:
|
||||
volume_snapshot = self._get_volume_snapshot(context,
|
||||
snapshot['id'])
|
||||
volume = self.volume_api.create(context, share['size'],
|
||||
volume = self.volume_api.create(
|
||||
context,
|
||||
share['size'],
|
||||
self.configuration.volume_name_template % share['id'], '',
|
||||
snapshot=volume_snapshot)
|
||||
|
||||
|
@ -322,7 +323,8 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
|||
time.sleep(1)
|
||||
volume = self.volume_api.get(context, volume['id'])
|
||||
else:
|
||||
raise exception.ManilaException(_('Volume have not been created '
|
||||
raise exception.ManilaException(
|
||||
_('Volume have not been created '
|
||||
'in %ss. Giving up') %
|
||||
self.configuration.max_time_to_create_volume)
|
||||
|
||||
|
@ -343,12 +345,14 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
|||
break
|
||||
time.sleep(1)
|
||||
else:
|
||||
raise exception.ManilaException(_('Volume have not been '
|
||||
raise exception.ManilaException(
|
||||
_('Volume have not been '
|
||||
'deleted in %ss. Giving up')
|
||||
% self.configuration.max_time_to_create_volume)
|
||||
|
||||
def get_share_stats(self, refresh=False):
|
||||
"""Get share status.
|
||||
|
||||
If 'refresh' is True, run update the stats first.
|
||||
"""
|
||||
if refresh:
|
||||
|
@ -419,10 +423,12 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
|||
raise exception.ManilaException(_('Failed to create volume '
|
||||
'snapshot'))
|
||||
time.sleep(1)
|
||||
volume_snapshot = self.volume_api.get_snapshot(self.admin_context,
|
||||
volume_snapshot = self.volume_api.get_snapshot(
|
||||
self.admin_context,
|
||||
volume_snapshot['id'])
|
||||
else:
|
||||
raise exception.ManilaException(_('Volume snapshot have not been '
|
||||
raise exception.ManilaException(
|
||||
_('Volume snapshot have not been '
|
||||
'created in %ss. Giving up') %
|
||||
self.configuration.max_time_to_create_volume)
|
||||
|
||||
|
@ -445,7 +451,8 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
|
|||
break
|
||||
time.sleep(1)
|
||||
else:
|
||||
raise exception.ManilaException(_('Volume snapshot have not been '
|
||||
raise exception.ManilaException(
|
||||
_('Volume snapshot have not been '
|
||||
'deleted in %ss. Giving up') %
|
||||
self.configuration.max_time_to_create_volume)
|
||||
|
||||
|
@ -558,7 +565,8 @@ class NFSHelper(NASHelperBase):
|
|||
def create_export(self, server, share_name, recreate=False):
|
||||
"""Create new export, delete old one if exists."""
|
||||
return ':'.join([server['ip'],
|
||||
os.path.join(self.configuration.share_mount_path, share_name)])
|
||||
os.path.join(
|
||||
self.configuration.share_mount_path, share_name)])
|
||||
|
||||
def init_helper(self, server):
|
||||
try:
|
||||
|
|
|
@ -231,7 +231,8 @@ class ServiceInstanceManager(object):
|
|||
sg = self.compute_api.security_group_create(
|
||||
context, name, description)
|
||||
for protocol, ports in constants.SERVICE_INSTANCE_SECGROUP_DATA:
|
||||
self.compute_api.security_group_rule_create(context,
|
||||
self.compute_api.security_group_rule_create(
|
||||
context,
|
||||
parent_group_id=sg.id,
|
||||
ip_protocol=protocol,
|
||||
from_port=ports[0],
|
||||
|
@ -270,7 +271,8 @@ class ServiceInstanceManager(object):
|
|||
break
|
||||
time.sleep(1)
|
||||
else:
|
||||
raise exception.ServiceInstanceException(_('Instance have not '
|
||||
raise exception.ServiceInstanceException(
|
||||
_('Instance have not '
|
||||
'been deleted in %ss. Giving up.') %
|
||||
self.max_time_to_build_instance)
|
||||
|
||||
|
@ -359,7 +361,8 @@ class ServiceInstanceManager(object):
|
|||
key_name, key_path = self._get_key(context)
|
||||
if not (self.get_config_option("service_instance_password") or
|
||||
key_name):
|
||||
raise exception.ServiceInstanceException(_('Neither service '
|
||||
raise exception.ServiceInstanceException(
|
||||
_('Neither service '
|
||||
'instance password nor key are available.'))
|
||||
|
||||
security_group = self._get_or_create_security_group(context)
|
||||
|
@ -373,7 +376,8 @@ class ServiceInstanceManager(object):
|
|||
self.neutron_api.delete_port(port_id)
|
||||
raise
|
||||
|
||||
service_instance = self.compute_api.server_create(context,
|
||||
service_instance = self.compute_api.server_create(
|
||||
context,
|
||||
name=instance_name,
|
||||
image=service_image_id,
|
||||
flavor=self.get_config_option("service_instance_flavor_id"),
|
||||
|
@ -389,7 +393,8 @@ class ServiceInstanceManager(object):
|
|||
_('Failed to build service instance.'))
|
||||
time.sleep(1)
|
||||
try:
|
||||
service_instance = self.compute_api.server_get(context,
|
||||
service_instance = self.compute_api.server_get(
|
||||
context,
|
||||
service_instance['id'])
|
||||
except exception.InstanceNotFound as e:
|
||||
LOG.debug(e)
|
||||
|
@ -402,7 +407,8 @@ class ServiceInstanceManager(object):
|
|||
LOG.debug("Adding security group "
|
||||
"'%s' to server '%s'." % (security_group.id,
|
||||
service_instance["id"]))
|
||||
self.compute_api.add_security_group_to_server(context,
|
||||
self.compute_api.add_security_group_to_server(
|
||||
context,
|
||||
service_instance["id"], security_group.id)
|
||||
|
||||
service_instance['ip'] = self._get_server_ip(service_instance)
|
||||
|
@ -556,7 +562,8 @@ class ServiceInstanceManager(object):
|
|||
except exception.ProcessExecutionError as e:
|
||||
msg = _('Unable to get host. %s') % e.stderr
|
||||
raise exception.ManilaException(msg)
|
||||
port = self.neutron_api.create_port(self.service_tenant_id,
|
||||
port = self.neutron_api.create_port(
|
||||
self.service_tenant_id,
|
||||
self.service_network_id,
|
||||
device_id='manila-share',
|
||||
device_owner='manila:share',
|
||||
|
|
|
@ -147,7 +147,10 @@ class AdminActionsTest(test.TestCase):
|
|||
# snapshot in 'error_deleting'
|
||||
share = db.share_create(self.admin_context, {})
|
||||
snapshot = db.share_snapshot_create(self.admin_context,
|
||||
{'status': 'error_deleting', 'share_id': share['id']})
|
||||
{
|
||||
'status': 'error_deleting',
|
||||
'share_id': share['id']
|
||||
})
|
||||
req = webob.Request.blank('/v1/fake/snapshots/%s/action' %
|
||||
snapshot['id'])
|
||||
req.method = 'POST'
|
||||
|
@ -167,7 +170,10 @@ class AdminActionsTest(test.TestCase):
|
|||
# snapshot in 'available'
|
||||
share = db.share_create(self.admin_context, {})
|
||||
snapshot = db.share_snapshot_create(self.admin_context,
|
||||
{'status': 'available', 'share_id': share['id']})
|
||||
{
|
||||
'status': 'available',
|
||||
'share_id': share['id']
|
||||
})
|
||||
req = webob.Request.blank('/v1/fake/snapshots/%s/action' %
|
||||
snapshot['id'])
|
||||
req.method = 'POST'
|
||||
|
|
|
@ -185,10 +185,12 @@ class HostStateTestCase(test.TestCase):
|
|||
fake_host = host_manager.HostState('host1')
|
||||
self.assertEqual(fake_host.free_capacity_gb, None)
|
||||
|
||||
share_capability = {'total_capacity_gb': 'infinite',
|
||||
share_capability = {
|
||||
'total_capacity_gb': 'infinite',
|
||||
'free_capacity_gb': 'unknown',
|
||||
'reserved_percentage': 0,
|
||||
'timestamp': None}
|
||||
'timestamp': None
|
||||
}
|
||||
|
||||
fake_host.update_from_share_capability(share_capability)
|
||||
self.assertEqual(fake_host.total_capacity_gb, 'infinite')
|
||||
|
@ -200,10 +202,12 @@ class HostStateTestCase(test.TestCase):
|
|||
free_capacity = 100
|
||||
fake_share = {'id': 'foo', 'size': share_size}
|
||||
|
||||
share_capability = {'total_capacity_gb': free_capacity * 2,
|
||||
share_capability = {
|
||||
'total_capacity_gb': free_capacity * 2,
|
||||
'free_capacity_gb': free_capacity,
|
||||
'reserved_percentage': 0,
|
||||
'timestamp': None}
|
||||
'timestamp': None
|
||||
}
|
||||
|
||||
fake_host.update_from_share_capability(share_capability)
|
||||
fake_host.consume_from_share(fake_share)
|
||||
|
@ -215,10 +219,12 @@ class HostStateTestCase(test.TestCase):
|
|||
share_size = 1000
|
||||
fake_share = {'id': 'foo', 'size': share_size}
|
||||
|
||||
share_capability = {'total_capacity_gb': 'infinite',
|
||||
share_capability = {
|
||||
'total_capacity_gb': 'infinite',
|
||||
'free_capacity_gb': 'infinite',
|
||||
'reserved_percentage': 0,
|
||||
'timestamp': None}
|
||||
'timestamp': None
|
||||
}
|
||||
|
||||
fake_host.update_from_share_capability(share_capability)
|
||||
fake_host.consume_from_share(fake_share)
|
||||
|
@ -230,10 +236,12 @@ class HostStateTestCase(test.TestCase):
|
|||
share_size = 1000
|
||||
fake_share = {'id': 'foo', 'size': share_size}
|
||||
|
||||
share_capability = {'total_capacity_gb': 'infinite',
|
||||
share_capability = {
|
||||
'total_capacity_gb': 'infinite',
|
||||
'free_capacity_gb': 'unknown',
|
||||
'reserved_percentage': 0,
|
||||
'timestamp': None}
|
||||
'timestamp': None
|
||||
}
|
||||
|
||||
fake_host.update_from_share_capability(share_capability)
|
||||
fake_host.consume_from_share(fake_share)
|
||||
|
|
|
@ -138,15 +138,14 @@ class CinderApiTestCase(test.TestCase):
|
|||
self.stubs.Set(self.cinderclient.volumes, 'begin_detaching',
|
||||
mock.Mock())
|
||||
self.api.begin_detaching(self.ctx, 'id1')
|
||||
self.cinderclient.volumes.begin_detaching.\
|
||||
assert_called_once_with('id1')
|
||||
self.cinderclient.volumes.begin_detaching.assert_called_once_with(
|
||||
'id1')
|
||||
|
||||
def test_roll_detaching(self):
|
||||
self.stubs.Set(self.cinderclient.volumes, 'roll_detaching',
|
||||
mock.Mock())
|
||||
self.api.roll_detaching(self.ctx, 'id1')
|
||||
self.cinderclient.volumes.roll_detaching.\
|
||||
assert_called_once_with('id1')
|
||||
self.cinderclient.volumes.roll_detaching.assert_called_once_with('id1')
|
||||
|
||||
def test_attach(self):
|
||||
self.stubs.Set(self.cinderclient.volumes, 'attach', mock.Mock())
|
||||
|
@ -208,5 +207,5 @@ class CinderApiTestCase(test.TestCase):
|
|||
self.stubs.Set(self.cinderclient.volume_snapshots,
|
||||
'delete', mock.Mock())
|
||||
self.api.delete_snapshot(self.ctx, 'id1')
|
||||
self.cinderclient.volume_snapshots.delete.\
|
||||
assert_called_once_with('id1')
|
||||
self.cinderclient.volume_snapshots.delete.assert_called_once_with(
|
||||
'id1')
|
||||
|
|
2
tox.ini
2
tox.ini
|
@ -49,6 +49,6 @@ commands = bash tools/lintstack.sh
|
|||
#
|
||||
# H904 wrap long lines in parentheses instead of a backslash
|
||||
# reason: removed in hacking (https://review.openstack.org/#/c/101701/)
|
||||
ignore = E126,E127,E128,H302,H404,H405,H501,H904,F841
|
||||
ignore = E128,H302,H404,H405,H501,H904,F841
|
||||
builtins = _
|
||||
exclude = .venv,.tox,dist,doc,openstack,*egg
|
||||
|
|
Loading…
Reference in New Issue