Attempt to fix gate about PEP8 errors
Due to a recent upgrade of pep8, we are facing some gate issues. This is an attempt to make the gate happy. Change-Id: I35a76e1288247cd5595d11703f9ca6ec1e22274b
This commit is contained in:
parent
f42e1703df
commit
7773f1eec1
|
@ -116,7 +116,7 @@ authentication_opts = [
|
|||
help=_('Name of the domain for the service user.')),
|
||||
cfg.StrOpt('service_project_domain', default='Default',
|
||||
help=_('Name of the domain for the service project.')),
|
||||
]
|
||||
]
|
||||
|
||||
revision_group = cfg.OptGroup('revision')
|
||||
revision_opts = [
|
||||
|
|
|
@ -124,13 +124,13 @@ def check_size_params(cluster, desired, min_size, max_size, strict):
|
|||
"current desired_capacity of the cluster.")
|
||||
|
||||
if max_size is not None:
|
||||
if (min_size is None and max_size >= 0
|
||||
and max_size < cluster.min_size):
|
||||
if (min_size is None and max_size >= 0 and
|
||||
max_size < cluster.min_size):
|
||||
return _("The specified max_size is less than the "
|
||||
"current min_size of the cluster.")
|
||||
|
||||
if (desired is None and max_size >= 0
|
||||
and max_size < cluster.desired_capacity):
|
||||
if (desired is None and max_size >= 0 and
|
||||
max_size < cluster.desired_capacity):
|
||||
return _("The specified max_size is less than the "
|
||||
"current desired_capacity of the cluster.")
|
||||
|
||||
|
|
|
@ -176,7 +176,7 @@ def random_name(length=8):
|
|||
|
||||
lead = random.choice(string.ascii_letters)
|
||||
tail = ''.join(random.choice(string.ascii_letters + string.digits)
|
||||
for i in range(length-1))
|
||||
for i in range(length - 1))
|
||||
return lead + tail
|
||||
|
||||
|
||||
|
|
|
@ -254,7 +254,7 @@ class LoadBalancerDriver(base.DriverBase):
|
|||
) % {'resource': resource, 'msg': six.text_type(ex)}
|
||||
LOG.exception(msg)
|
||||
event.warning(oslo_context.get_current(), self,
|
||||
resource.upper()+'_GET', 'ERROR', msg)
|
||||
resource.upper() + '_GET', 'ERROR', msg)
|
||||
return None
|
||||
net_name = net.name
|
||||
|
||||
|
|
|
@ -59,7 +59,7 @@ class NodeAction(base.Action):
|
|||
cluster = cluster_mod.Cluster.load(self.context,
|
||||
self.node.cluster_id)
|
||||
result = scaleutils.check_size_params(
|
||||
cluster, cluster.desired_capacity+1, None, None, True)
|
||||
cluster, cluster.desired_capacity + 1, None, None, True)
|
||||
|
||||
if result != '':
|
||||
return self.RES_ERROR, result
|
||||
|
@ -86,7 +86,7 @@ class NodeAction(base.Action):
|
|||
cluster = cluster_mod.Cluster.load(self.context,
|
||||
self.node.cluster_id)
|
||||
result = scaleutils.check_size_params(cluster,
|
||||
cluster.desired_capacity-1,
|
||||
cluster.desired_capacity - 1,
|
||||
None, None, True)
|
||||
if result != '':
|
||||
return self.RES_ERROR, result
|
||||
|
|
|
@ -147,7 +147,7 @@ class DeletionPolicy(base.Policy):
|
|||
if self.criteria == self.OLDEST_FIRST:
|
||||
candidates.append(sorted_list[i].id)
|
||||
else: # YOUNGEST_FIRST
|
||||
candidates.append(sorted_list[-1-i].id)
|
||||
candidates.append(sorted_list[-1 - i].id)
|
||||
return candidates
|
||||
|
||||
# Node profile based selection
|
||||
|
|
|
@ -159,7 +159,7 @@ class vSphereDRSPolicy(policy_base.Policy):
|
|||
|
||||
if data.get('group_id') is None:
|
||||
# to add into nova driver
|
||||
rule = placement_group.get('placement_rule', 'anti-affinity')
|
||||
rule = placement_group.get('placement_rule', 'anti-affinity')
|
||||
|
||||
try:
|
||||
server_group = nv_client.create_server_group(rule)
|
||||
|
|
|
@ -31,7 +31,7 @@ class ServerProfile(base.Profile):
|
|||
|
||||
KEYS = (
|
||||
CONTEXT, ADMIN_PASS, AUTO_DISK_CONFIG, AVAILABILITY_ZONE,
|
||||
BLOCK_DEVICE_MAPPING, BLOCK_DEVICE_MAPPING_V2,
|
||||
BLOCK_DEVICE_MAPPING, BLOCK_DEVICE_MAPPING_V2,
|
||||
CONFIG_DRIVE, FLAVOR, IMAGE, KEY_NAME, METADATA,
|
||||
NAME, NETWORKS, PERSONALITY, SECURITY_GROUPS,
|
||||
USER_DATA, SCHEDULER_HINTS,
|
||||
|
|
|
@ -12,7 +12,6 @@
|
|||
|
||||
import random
|
||||
import string
|
||||
import uuid
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_db import options
|
||||
|
@ -25,19 +24,6 @@ from senlin.engine import node as node_mod
|
|||
get_engine = db_api.get_engine
|
||||
|
||||
|
||||
class UUIDStub(object):
|
||||
def __init__(self, value):
|
||||
self.value = value
|
||||
|
||||
def __enter__(self):
|
||||
self.uuid4 = uuid.uuid4
|
||||
uuid_stub = lambda: self.value
|
||||
uuid.uuid4 = uuid_stub
|
||||
|
||||
def __exit__(self, *exc_info):
|
||||
uuid.uuid4 = self.uuid4
|
||||
|
||||
|
||||
def random_name():
|
||||
return ''.join(random.choice(string.ascii_uppercase)
|
||||
for x in range(10))
|
||||
|
|
|
@ -40,7 +40,7 @@ class TestHeatV1(base.SenlinTestCase):
|
|||
fake_params = {
|
||||
'disable_rollback': True,
|
||||
'stack_name': 'fake_stack',
|
||||
}
|
||||
}
|
||||
self.hc.stack_create(**fake_params)
|
||||
self.orch.create_stack.assert_called_once_with(**fake_params)
|
||||
|
||||
|
|
|
@ -421,7 +421,7 @@ class ClusterActionTest(base.SenlinTestCase):
|
|||
# assertions
|
||||
self.assertEqual(action.RES_OK, res_code)
|
||||
self.assertEqual('Cluster update completed.', res_msg)
|
||||
self.assertEqual(2, mock_action.call_count)
|
||||
self.assertEqual(2, mock_action.call_count)
|
||||
n_action_1.store.assert_called_once_with(action.context)
|
||||
n_action_2.store.assert_called_once_with(action.context)
|
||||
self.assertEqual(1, mock_dep.call_count)
|
||||
|
@ -483,7 +483,7 @@ class ClusterActionTest(base.SenlinTestCase):
|
|||
# assertions
|
||||
self.assertEqual(action.RES_TIMEOUT, res_code)
|
||||
self.assertEqual('Timeout', res_msg)
|
||||
self.assertEqual(1, mock_action.call_count)
|
||||
self.assertEqual(1, mock_action.call_count)
|
||||
n_action.store.assert_called_once_with(action.context)
|
||||
self.assertEqual(1, mock_dep.call_count)
|
||||
mock_update.assert_called_once_with(
|
||||
|
|
|
@ -513,7 +513,7 @@ class NodeActionTest(base.SenlinTestCase):
|
|||
mock_release.assert_called_once_with('FAKE_CLUSTER', 'ACTION_ID',
|
||||
lock.NODE_SCOPE)
|
||||
mock_acquire_node.assert_called_once_with(self.ctx, 'NODE_ID',
|
||||
'ACTION_ID', False)
|
||||
'ACTION_ID', False)
|
||||
mock_release_node.assert_called_once_with('NODE_ID', 'ACTION_ID')
|
||||
mock_check.assert_called_once_with('FAKE_CLUSTER', 'BEFORE')
|
||||
|
||||
|
|
|
@ -68,7 +68,7 @@ class TestNode(base.SenlinTestCase):
|
|||
'project': self.context.project,
|
||||
'name': 'node1',
|
||||
'role': 'test_node',
|
||||
}
|
||||
}
|
||||
return db_api.node_create(self.context, values)
|
||||
|
||||
def test_node_init(self):
|
||||
|
|
|
@ -35,7 +35,7 @@ class ScaleUtilsTest(base.SenlinTestCase):
|
|||
number = self.getUniqueInteger()
|
||||
res = su.calculate_desired(current, consts.CHANGE_IN_CAPACITY,
|
||||
number, None)
|
||||
self.assertEqual(current+number, res)
|
||||
self.assertEqual(current + number, res)
|
||||
|
||||
def test_calculate_desired_percentage_positive(self):
|
||||
# CHANGE_IN_PERCENTAGE, positive
|
||||
|
|
Loading…
Reference in New Issue