Flake8: Fix E126 and E127

Fix these issues and enable E126 and E127:
E126 continuation line over-indented for hanging indent
E127 continuation line over-indented for visual indent

Also fix a few occurrences of the following in the touched files:
E128 continuation line under-indented for visual indent
H405 multi line docstring summary not separated with an empty line

Partial-Bug: #1333290
Change-Id: If822401fc7e1db49d595a0736a0dff8e00dfd217
Andreas Jaeger 2014-08-22 14:02:55 +02:00 committed by Andreas Jaeger
parent 290769f087
commit 9154f3e55b
14 changed files with 170 additions and 142 deletions
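To make the codes above concrete, here is a small illustrative sketch of what each check flags and what the conforming layout looks like. The names (fn, x, broken_docstring, fixed_docstring) are hypothetical and do not come from the Manila tree.

# Illustrative example only; names are hypothetical.
def fn(a, b):
    return a, b

# E127: continuation line over-indented for visual indent
# (the second argument is pushed past the column of the first one).
x = fn(1,
          2)

# Conforming visual indent lines up with the first argument.
x = fn(1,
       2)

# E126: continuation line over-indented for hanging indent
# (more than one 4-space level after a bracket with nothing following it).
x = fn(
        1,
        2)

# Conforming hanging indent uses a single 4-space level.
x = fn(
    1,
    2)

# E128: continuation line under-indented for visual indent.
x = fn(1,
    2)

# H405: multi line docstring summary not separated with an empty line.
def broken_docstring():
    """Summary line that
    runs onto a second line with no blank line after it."""

# Conforming form: one-line summary, blank line, then the details.
def fixed_docstring():
    """Summary line.

    Longer description follows after the blank line.
    """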


@@ -61,7 +61,7 @@ class AdminController(wsgi.Controller):
raise exc.HTTPBadRequest(explanation="Must specify 'status'")
if update['status'] not in self.valid_status:
expl = "Invalid state. Valid states: " +\
', '.join(self.valid_status) + '.'
', '.join(self.valid_status) + '.'
raise exc.HTTPBadRequest(explanation=expl)
return update


@@ -136,8 +136,8 @@ class ShareNetworkController(wsgi.Controller):
networks = db_api.share_network_get_all(context)
else:
networks = db_api.share_network_get_all_by_project(
context,
context.project_id)
context,
context.project_id)
if search_opts:
for key, value in six.iteritems(search_opts):
@@ -210,12 +210,12 @@ class ShareNetworkController(wsgi.Controller):
return (usages[name]['reserved'] + usages[name]['in_use'])
if 'share_networks' in overs:
msg = _("Quota exceeded for %(s_pid)s, tried to create"
" share-network (%(d_consumed)d of %(d_quota)d "
"already consumed)")
msg = _("Quota exceeded for %(s_pid)s, tried to create "
"share-network (%(d_consumed)d of %(d_quota)d "
"already consumed)")
LOG.warn(msg % {'s_pid': context.project_id,
'd_consumed': _consumed('share_networks'),
'd_quota': quotas['share_networks']})
'd_consumed': _consumed('share_networks'),
'd_quota': quotas['share_networks']})
raise exception.ShareNetworksLimitExceeded(
allowed=quotas['share_networks'])
else:
@@ -251,9 +251,9 @@ class ShareNetworkController(wsgi.Controller):
raise exc.HTTPForbidden(explanation=msg)
try:
share_network = db_api.share_network_add_security_service(
context,
id,
data['security_service_id'])
context,
id,
data['security_service_id'])
except KeyError:
msg = "Malformed request body"
raise exc.HTTPBadRequest(explanation=msg)
@@ -272,9 +272,9 @@ class ShareNetworkController(wsgi.Controller):
policy.check_policy(context, RESOURCE_NAME, 'remove_security_service')
try:
share_network = db_api.share_network_remove_security_service(
context,
id,
data['security_service_id'])
context,
id,
data['security_service_id'])
except KeyError:
msg = "Malformed request body"
raise exc.HTTPBadRequest(explanation=msg)


@@ -90,9 +90,11 @@ class ShareServerController(wsgi.Controller):
s.share_network_name = s.share_network_id
if search_opts:
for k, v in six.iteritems(search_opts):
share_servers = [s for s in share_servers if (hasattr(s, k) and
s[k] == v or k == 'share_network' and v in
[s.share_network['name'], s.share_network['id']])]
share_servers = [s for s in share_servers if
(hasattr(s, k) and
s[k] == v or k == 'share_network' and
v in [s.share_network['name'],
s.share_network['id']])]
return self._view_builder.build_share_servers(share_servers)
@wsgi.serializers(xml=ShareServerTemplate)


@@ -43,7 +43,5 @@ PING_PORTS = (
("icmp", (-1, -1)),
)
SERVICE_INSTANCE_SECGROUP_DATA = CIFS_PORTS + \
NFS_PORTS + \
SSH_PORTS + \
PING_PORTS
SERVICE_INSTANCE_SECGROUP_DATA = (
CIFS_PORTS + NFS_PORTS + SSH_PORTS + PING_PORTS)


@@ -144,8 +144,8 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
self.service_instance_manager = service_instance.\
ServiceInstanceManager(self.db, self._helpers,
driver_config=self.configuration)
self.share_networks_servers = self.service_instance_manager.\
share_networks_servers
self.share_networks_servers = (
self.service_instance_manager.share_networks_servers)
self._setup_helpers()
def _setup_helpers(self):
@@ -207,9 +207,7 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
LOG.debug('%s is not mounted' % share['name'])
def _get_mount_path(self, share):
"""
Returns the path, that will be used for mount device in service vm.
"""
"""Returns the path to use for mount device in service vm."""
return os.path.join(self.configuration.share_mount_path, share['name'])
def _attach_volume(self, context, share, instance_id, volume):
@@ -262,16 +260,17 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
def _get_volume_snapshot(self, context, snapshot_id):
"""Finds volume snaphots, associated to the specific share snaphots."""
volume_snapshot_name = self.configuration.\
volume_snapshot_name_template % snapshot_id
volume_snapshot_list = self.volume_api.get_all_snapshots(context,
{'display_name': volume_snapshot_name})
volume_snapshot_name = (
self.configuration.volume_snapshot_name_template % snapshot_id)
volume_snapshot_list = self.volume_api.get_all_snapshots(
context,
{'display_name': volume_snapshot_name})
volume_snapshot = None
if len(volume_snapshot_list) == 1:
volume_snapshot = volume_snapshot_list[0]
elif len(volume_snapshot_list) > 1:
raise exception.ManilaException(
_('Error. Ambiguous volume snaphots'))
_('Error. Ambiguous volume snaphots'))
return volume_snapshot
def _detach_volume(self, context, share, server_details):
@@ -309,9 +308,11 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
if snapshot:
volume_snapshot = self._get_volume_snapshot(context,
snapshot['id'])
volume = self.volume_api.create(context, share['size'],
self.configuration.volume_name_template % share['id'], '',
snapshot=volume_snapshot)
volume = self.volume_api.create(
context,
share['size'],
self.configuration.volume_name_template % share['id'], '',
snapshot=volume_snapshot)
t = time.time()
while time.time() - t < self.configuration.max_time_to_create_volume:
@@ -322,9 +323,10 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
time.sleep(1)
volume = self.volume_api.get(context, volume['id'])
else:
raise exception.ManilaException(_('Volume have not been created '
'in %ss. Giving up') %
self.configuration.max_time_to_create_volume)
raise exception.ManilaException(
_('Volume have not been created '
'in %ss. Giving up') %
self.configuration.max_time_to_create_volume)
return volume
@@ -343,12 +345,14 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
break
time.sleep(1)
else:
raise exception.ManilaException(_('Volume have not been '
'deleted in %ss. Giving up')
% self.configuration.max_time_to_create_volume)
raise exception.ManilaException(
_('Volume have not been '
'deleted in %ss. Giving up')
% self.configuration.max_time_to_create_volume)
def get_share_stats(self, refresh=False):
"""Get share status.
If 'refresh' is True, run update the stats first.
"""
if refresh:
@@ -373,7 +377,7 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
data['total_capacity_gb'] = 'infinite'
data['free_capacity_gb'] = 'infinite'
data['reserved_percentage'] = (self.configuration.
reserved_share_percentage)
reserved_share_percentage)
data['QoS_support'] = False
self._stats = data
@@ -419,12 +423,14 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
raise exception.ManilaException(_('Failed to create volume '
'snapshot'))
time.sleep(1)
volume_snapshot = self.volume_api.get_snapshot(self.admin_context,
volume_snapshot['id'])
volume_snapshot = self.volume_api.get_snapshot(
self.admin_context,
volume_snapshot['id'])
else:
raise exception.ManilaException(_('Volume snapshot have not been '
'created in %ss. Giving up') %
self.configuration.max_time_to_create_volume)
raise exception.ManilaException(
_('Volume snapshot have not been '
'created in %ss. Giving up') %
self.configuration.max_time_to_create_volume)
@ensure_server
def delete_snapshot(self, context, snapshot, share_server=None):
@@ -445,9 +451,10 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
break
time.sleep(1)
else:
raise exception.ManilaException(_('Volume snapshot have not been '
'deleted in %ss. Giving up') %
self.configuration.max_time_to_create_volume)
raise exception.ManilaException(
_('Volume snapshot have not been '
'deleted in %ss. Giving up') %
self.configuration.max_time_to_create_volume)
@ensure_server
def ensure_share(self, context, share, share_server=None):
@@ -558,7 +565,8 @@ class NFSHelper(NASHelperBase):
def create_export(self, server, share_name, recreate=False):
"""Create new export, delete old one if exists."""
return ':'.join([server['ip'],
os.path.join(self.configuration.share_mount_path, share_name)])
os.path.join(
self.configuration.share_mount_path, share_name)])
def init_helper(self, server):
try:


@@ -100,8 +100,8 @@ class GlusterfsShareDriver(driver.ExecuteMixin, driver.ShareDriver):
"""Native mount the GlusterFS volume and tune it."""
super(GlusterfsShareDriver, self).do_setup(context)
self.gluster_address = GlusterAddress(
self._read_gluster_vol_from_config()
)
self._read_gluster_vol_from_config()
)
try:
self._execute('mount.glusterfs', check_exit_code=False)
except OSError as exc:
@@ -116,8 +116,8 @@ class GlusterfsShareDriver(driver.ExecuteMixin, driver.ShareDriver):
# exporting the whole volume must be prohibited
# to not to defeat access control
args, kw = self.gluster_address.make_gluster_args(
'volume', 'set', self.gluster_address.volume,
NFS_EXPORT_VOL, 'off')
'volume', 'set', self.gluster_address.volume,
NFS_EXPORT_VOL, 'off')
try:
self._execute(*args, **kw)
except exception.ProcessExecutionError as exc:
@@ -146,7 +146,7 @@ class GlusterfsShareDriver(driver.ExecuteMixin, driver.ShareDriver):
self.gluster_address.export)
else:
raise exception.GlusterfsException(
'Unable to mount Gluster volume'
'Unable to mount Gluster volume'
)
def _mount_gluster_vol(self, mount_path, ensure=False):
@@ -171,10 +171,10 @@ class GlusterfsShareDriver(driver.ExecuteMixin, driver.ShareDriver):
"""Get the export entries of shares in the GlusterFS volume."""
try:
args, kw = self.gluster_address.make_gluster_args(
'--xml',
'volume',
'info',
self.gluster_address.volume
'--xml',
'volume',
'info',
self.gluster_address.volume
)
out, err = self._execute(*args, **kw)
except exception.ProcessExecutionError as exc:
@@ -183,8 +183,8 @@ class GlusterfsShareDriver(driver.ExecuteMixin, driver.ShareDriver):
if not out:
raise exception.GlusterfsException(
'Empty answer from gluster command'
)
'Empty answer from gluster command'
)
vix = etree.fromstring(out)
if int(vix.find('./volInfo/volumes/count').text) != 1:
@@ -247,8 +247,8 @@ class GlusterfsShareDriver(driver.ExecuteMixin, driver.ShareDriver):
smp = os.stat(self._get_mount_point_for_gluster_vol())
if smpb.st_dev == smp.st_dev:
raise exception.GlusterfsException(
_("GlusterFS control mount is not available")
)
_("GlusterFS control mount is not available")
)
smpv = os.statvfs(self._get_mount_point_for_gluster_vol())
LOG.debug("Updating share stats")
@@ -339,12 +339,12 @@ class GlusterfsShareDriver(driver.ExecuteMixin, driver.ShareDriver):
export_dir_new = ",".join("/%s(%s)" % (d, "|".join(v))
for d, v in export_dir_dict.items())
args, kw = self.gluster_address.make_gluster_args(
'volume', 'set', self.gluster_address.volume,
NFS_EXPORT_DIR, export_dir_new)
'volume', 'set', self.gluster_address.volume,
NFS_EXPORT_DIR, export_dir_new)
else:
args, kw = self.gluster_address.make_gluster_args(
'volume', 'reset', self.gluster_address.volume,
NFS_EXPORT_DIR)
'volume', 'reset', self.gluster_address.volume,
NFS_EXPORT_DIR)
try:
self._execute(*args, **kw)
except exception.ProcessExecutionError as exc:


@@ -172,7 +172,7 @@ class ServiceInstanceManager(object):
'networks.'))
elif not networks:
return self.neutron_api.network_create(self.service_tenant_id,
service_network_name)['id']
service_network_name)['id']
else:
return networks[0]['id']
@@ -231,7 +231,8 @@ class ServiceInstanceManager(object):
sg = self.compute_api.security_group_create(
context, name, description)
for protocol, ports in constants.SERVICE_INSTANCE_SECGROUP_DATA:
self.compute_api.security_group_rule_create(context,
self.compute_api.security_group_rule_create(
context,
parent_group_id=sg.id,
ip_protocol=protocol,
from_port=ports[0],
@@ -270,8 +271,9 @@ class ServiceInstanceManager(object):
break
time.sleep(1)
else:
raise exception.ServiceInstanceException(_('Instance have not '
'been deleted in %ss. Giving up.') %
raise exception.ServiceInstanceException(
_('Instance have not '
'been deleted in %ss. Giving up.') %
self.max_time_to_build_instance)
def set_up_service_instance(self, context, instance_name, neutron_net_id,
@ -286,9 +288,9 @@ class ServiceInstanceManager(object):
:raises: exception.ServiceInstanceException
"""
server = self._create_service_instance(context,
instance_name,
neutron_net_id,
neutron_subnet_id)
instance_name,
neutron_net_id,
neutron_subnet_id)
return {'instance_id': server['id'],
'ip': server['ip'],
@@ -312,7 +314,7 @@ class ServiceInstanceManager(object):
path_to_public_key = os.path.expanduser(self.path_to_public_key)
path_to_private_key = os.path.expanduser(self.path_to_private_key)
if (not os.path.exists(path_to_public_key) or
not os.path.exists(path_to_private_key)):
not os.path.exists(path_to_private_key)):
return
keypair_name = self.get_config_option("manila_service_keypair_name")
keypairs = [k for k in self.compute_api.keypair_list(context)
@@ -348,7 +350,7 @@ class ServiceInstanceManager(object):
'image was found.'))
else:
raise exception.ServiceInstanceException(
_('Ambiguous image name.'))
_('Ambiguous image name.'))
def _create_service_instance(self, context, instance_name, neutron_net_id,
neutron_subnet_id):
@@ -359,8 +361,9 @@ class ServiceInstanceManager(object):
key_name, key_path = self._get_key(context)
if not (self.get_config_option("service_instance_password") or
key_name):
raise exception.ServiceInstanceException(_('Neither service '
'instance password nor key are available.'))
raise exception.ServiceInstanceException(
_('Neither service '
'instance password nor key are available.'))
security_group = self._get_or_create_security_group(context)
subnet_id, router_id, port_id = \
@@ -373,7 +376,8 @@ class ServiceInstanceManager(object):
self.neutron_api.delete_port(port_id)
raise
service_instance = self.compute_api.server_create(context,
service_instance = self.compute_api.server_create(
context,
name=instance_name,
image=service_image_id,
flavor=self.get_config_option("service_instance_flavor_id"),
@@ -386,23 +390,25 @@ class ServiceInstanceManager(object):
break
if service_instance['status'] == 'ERROR':
raise exception.ServiceInstanceException(
_('Failed to build service instance.'))
_('Failed to build service instance.'))
time.sleep(1)
try:
service_instance = self.compute_api.server_get(context,
service_instance['id'])
service_instance = self.compute_api.server_get(
context,
service_instance['id'])
except exception.InstanceNotFound as e:
LOG.debug(e)
else:
raise exception.ServiceInstanceException(
_('Instance have not been spawned in %ss. Giving up.') %
self.max_time_to_build_instance)
_('Instance have not been spawned in %ss. Giving up.') %
self.max_time_to_build_instance)
if security_group:
LOG.debug("Adding security group "
"'%s' to server '%s'." % (security_group.id,
service_instance["id"]))
self.compute_api.add_security_group_to_server(context,
self.compute_api.add_security_group_to_server(
context,
service_instance["id"], security_group.id)
service_instance['ip'] = self._get_server_ip(service_instance)
@@ -459,8 +465,8 @@ class ServiceInstanceManager(object):
raise
LOG.debug('Subnet %(subnet_id)s is already attached to the '
'router %(router_id)s.' %
{'subnet_id': service_subnet['id'],
'router_id': private_router['id']})
{'subnet_id': service_subnet['id'],
'router_id': private_router['id']})
port = self.neutron_api.create_port(self.service_tenant_id,
self.service_network_id,
@@ -475,20 +481,20 @@ class ServiceInstanceManager(object):
private_subnet = self.neutron_api.get_subnet(neutron_subnet_id)
if not private_subnet['gateway_ip']:
raise exception.ServiceInstanceException(
_('Subnet must have gateway.'))
_('Subnet must have gateway.'))
private_network_ports = [p for p in self.neutron_api.list_ports(
network_id=neutron_net_id)]
for p in private_network_ports:
fixed_ip = p['fixed_ips'][0]
if (fixed_ip['subnet_id'] == private_subnet['id'] and
fixed_ip['ip_address'] == private_subnet['gateway_ip']):
fixed_ip['ip_address'] == private_subnet['gateway_ip']):
private_subnet_gateway_port = p
break
else:
raise exception.ServiceInstanceException(
_('Subnet gateway is not attached the router.'))
_('Subnet gateway is not attached the router.'))
private_subnet_router = self.neutron_api.show_router(
private_subnet_gateway_port['device_id'])
private_subnet_gateway_port['device_id'])
return private_subnet_router
def _setup_connectivity_with_service_instances(self):
@@ -548,7 +554,7 @@ class ServiceInstanceManager(object):
list_ports(device_id='manila-share')]
if len(ports) > 1:
raise exception.ServiceInstanceException(
_('Error. Ambiguous service ports.'))
_('Error. Ambiguous service ports.'))
elif not ports:
try:
stdout, stderr = self._execute('hostname')
@@ -556,11 +562,12 @@ class ServiceInstanceManager(object):
except exception.ProcessExecutionError as e:
msg = _('Unable to get host. %s') % e.stderr
raise exception.ManilaException(msg)
port = self.neutron_api.create_port(self.service_tenant_id,
self.service_network_id,
device_id='manila-share',
device_owner='manila:share',
host_id=host)
port = self.neutron_api.create_port(
self.service_tenant_id,
self.service_network_id,
device_id='manila-share',
device_owner='manila:share',
host_id=host)
else:
port = ports[0]
return port
@@ -582,7 +589,7 @@ class ServiceInstanceManager(object):
if subnets:
port_fixed_ips.extend([dict(subnet_id=s) for s in subnets])
port = self.neutron_api.update_port_fixed_ips(
port['id'], {'fixed_ips': port_fixed_ips})
port['id'], {'fixed_ips': port_fixed_ips})
return port
@@ -616,8 +623,8 @@ class ServiceInstanceManager(object):
raise
LOG.debug('Subnet %(subnet_id)s is not attached to the '
'router %(router_id)s.' %
{'subnet_id': subnet_id,
'router_id': router_id})
{'subnet_id': subnet_id,
'router_id': router_id})
self.neutron_api.update_subnet(subnet_id, '')
@lockutils.synchronized("_get_all_service_subnets", external=True,


@@ -74,7 +74,7 @@ class AdminActionsTest(test.TestCase):
def test_reset_status_as_non_admin(self):
# current status is 'error'
share = db.share_create(context.get_admin_context(),
{'status': 'error'})
{'status': 'error'})
req = webob.Request.blank('/v1/fake/shares/%s/action' % share['id'])
req.method = 'POST'
req.headers['content-type'] = 'application/json'
@@ -147,7 +147,10 @@ class AdminActionsTest(test.TestCase):
# snapshot in 'error_deleting'
share = db.share_create(self.admin_context, {})
snapshot = db.share_snapshot_create(self.admin_context,
{'status': 'error_deleting', 'share_id': share['id']})
{
'status': 'error_deleting',
'share_id': share['id']
})
req = webob.Request.blank('/v1/fake/snapshots/%s/action' %
snapshot['id'])
req.method = 'POST'
@@ -167,7 +170,10 @@ class AdminActionsTest(test.TestCase):
# snapshot in 'available'
share = db.share_create(self.admin_context, {})
snapshot = db.share_snapshot_create(self.admin_context,
{'status': 'available', 'share_id': share['id']})
{
'status': 'available',
'share_id': share['id']
})
req = webob.Request.blank('/v1/fake/snapshots/%s/action' %
snapshot['id'])
req.method = 'POST'


@@ -137,7 +137,7 @@ class ShareNetworkAPITest(test.TestCase):
def test_delete_not_found(self):
share_nw = 'fake network id'
db_api.share_network_get.side_effect = exception.ShareNetworkNotFound(
share_network_id=share_nw)
share_network_id=share_nw)
self.assertRaises(webob_exc.HTTPNotFound,
self.controller.delete,
@@ -243,7 +243,7 @@ class ShareNetworkAPITest(test.TestCase):
def test_update_not_found(self):
share_nw = 'fake network id'
db_api.share_network_get.side_effect = exception.ShareNetworkNotFound(
share_network_id=share_nw)
share_network_id=share_nw)
self.assertRaises(webob_exc.HTTPNotFound,
self.controller.update,


@@ -161,9 +161,9 @@ class HostStateTestCase(test.TestCase):
self.assertEqual(fake_host.free_capacity_gb, None)
share_capability = {'total_capacity_gb': 1024,
'free_capacity_gb': 512,
'reserved_percentage': 0,
'timestamp': None}
'free_capacity_gb': 512,
'reserved_percentage': 0,
'timestamp': None}
fake_host.update_from_share_capability(share_capability)
self.assertEqual(fake_host.free_capacity_gb, 512)
@@ -173,9 +173,9 @@ class HostStateTestCase(test.TestCase):
self.assertEqual(fake_host.free_capacity_gb, None)
share_capability = {'total_capacity_gb': 'infinite',
'free_capacity_gb': 'infinite',
'reserved_percentage': 0,
'timestamp': None}
'free_capacity_gb': 'infinite',
'reserved_percentage': 0,
'timestamp': None}
fake_host.update_from_share_capability(share_capability)
self.assertEqual(fake_host.total_capacity_gb, 'infinite')
@@ -185,10 +185,12 @@ class HostStateTestCase(test.TestCase):
fake_host = host_manager.HostState('host1')
self.assertEqual(fake_host.free_capacity_gb, None)
share_capability = {'total_capacity_gb': 'infinite',
'free_capacity_gb': 'unknown',
'reserved_percentage': 0,
'timestamp': None}
share_capability = {
'total_capacity_gb': 'infinite',
'free_capacity_gb': 'unknown',
'reserved_percentage': 0,
'timestamp': None
}
fake_host.update_from_share_capability(share_capability)
self.assertEqual(fake_host.total_capacity_gb, 'infinite')
@@ -200,10 +202,12 @@ class HostStateTestCase(test.TestCase):
free_capacity = 100
fake_share = {'id': 'foo', 'size': share_size}
share_capability = {'total_capacity_gb': free_capacity * 2,
'free_capacity_gb': free_capacity,
'reserved_percentage': 0,
'timestamp': None}
share_capability = {
'total_capacity_gb': free_capacity * 2,
'free_capacity_gb': free_capacity,
'reserved_percentage': 0,
'timestamp': None
}
fake_host.update_from_share_capability(share_capability)
fake_host.consume_from_share(fake_share)
@@ -215,10 +219,12 @@ class HostStateTestCase(test.TestCase):
share_size = 1000
fake_share = {'id': 'foo', 'size': share_size}
share_capability = {'total_capacity_gb': 'infinite',
'free_capacity_gb': 'infinite',
'reserved_percentage': 0,
'timestamp': None}
share_capability = {
'total_capacity_gb': 'infinite',
'free_capacity_gb': 'infinite',
'reserved_percentage': 0,
'timestamp': None
}
fake_host.update_from_share_capability(share_capability)
fake_host.consume_from_share(fake_share)
@@ -230,10 +236,12 @@ class HostStateTestCase(test.TestCase):
share_size = 1000
fake_share = {'id': 'foo', 'size': share_size}
share_capability = {'total_capacity_gb': 'infinite',
'free_capacity_gb': 'unknown',
'reserved_percentage': 0,
'timestamp': None}
share_capability = {
'total_capacity_gb': 'infinite',
'free_capacity_gb': 'unknown',
'reserved_percentage': 0,
'timestamp': None
}
fake_host.update_from_share_capability(share_capability)
fake_host.consume_from_share(fake_share)


@@ -24,8 +24,8 @@ from manila import test
CONF = cfg.CONF
CONF.register_opt(cfg.StrOpt('conf_unittest',
default='foo',
help='for testing purposes only'))
default='foo',
help='for testing purposes only'))
class ConfigTestCase(test.TestCase):
@@ -54,11 +54,11 @@ class ConfigTestCase(test.TestCase):
def test_long_vs_short_flags(self):
CONF.clear()
CONF.register_cli_opt(cfg.StrOpt('duplicate_answer_long',
default='val',
help='desc'))
default='val',
help='desc'))
CONF.register_cli_opt(cfg.IntOpt('duplicate_answer',
default=50,
help='desc'))
default=50,
help='desc'))
argv = ['--duplicate_answer=60']
CONF(argv, default_config_files=[])


@@ -48,7 +48,7 @@ class ManilaExceptionTestCase(test.TestCase):
def test_error_msg(self):
self.assertEqual(six.text_type(exception.ManilaException('test')),
'test')
'test')
def test_default_error_msg_with_kwargs(self):
class FakeManilaException(exception.ManilaException):


@@ -50,7 +50,7 @@ class CinderApiTestCase(test.TestCase):
self.cinderclient = FakeCinderClient()
self.ctx = context.get_admin_context()
self.stubs.Set(cinder, 'cinderclient',
mock.Mock(return_value=self.cinderclient))
mock.Mock(return_value=self.cinderclient))
self.stubs.Set(cinder, '_untranslate_volume_summary_view',
lambda ctx, vol: vol)
self.stubs.Set(cinder, '_untranslate_snapshot_summary_view',
@@ -138,15 +138,14 @@ class CinderApiTestCase(test.TestCase):
self.stubs.Set(self.cinderclient.volumes, 'begin_detaching',
mock.Mock())
self.api.begin_detaching(self.ctx, 'id1')
self.cinderclient.volumes.begin_detaching.\
assert_called_once_with('id1')
self.cinderclient.volumes.begin_detaching.assert_called_once_with(
'id1')
def test_roll_detaching(self):
self.stubs.Set(self.cinderclient.volumes, 'roll_detaching',
mock.Mock())
self.api.roll_detaching(self.ctx, 'id1')
self.cinderclient.volumes.roll_detaching.\
assert_called_once_with('id1')
self.cinderclient.volumes.roll_detaching.assert_called_once_with('id1')
def test_attach(self):
self.stubs.Set(self.cinderclient.volumes, 'attach', mock.Mock())
@@ -208,5 +207,5 @@ class CinderApiTestCase(test.TestCase):
self.stubs.Set(self.cinderclient.volume_snapshots,
'delete', mock.Mock())
self.api.delete_snapshot(self.ctx, 'id1')
self.cinderclient.volume_snapshots.delete.\
assert_called_once_with('id1')
self.cinderclient.volume_snapshots.delete.assert_called_once_with(
'id1')


@@ -49,6 +49,6 @@ commands = bash tools/lintstack.sh
#
# H904 wrap long lines in parentheses instead of a backslash
# reason: removed in hacking (https://review.openstack.org/#/c/101701/)
ignore = E126,E127,E128,H302,H404,H405,H501,H904,F841
ignore = E128,H302,H404,H405,H501,H904,F841
builtins = _
exclude = .venv,.tox,dist,doc,openstack,*egg
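
With E126 and E127 removed from the ignore list, the pep8 job will now enforce them. As a rough local check (illustrative only: it assumes flake8 is installed, for example in the project's pep8 tox virtualenv, and is run from the repository root), one could limit a run to the newly enabled codes:

# Hypothetical helper, not part of this change: report only the newly
# enabled checks; a clean tree makes flake8 exit with status 0.
import subprocess

subprocess.check_call(['flake8', '--select=E126,E127', 'manila'])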