Pylint: fix trivial issues 1

Change-Id: Ie847c0c91f4de31d19e559ab6becaf0a3934a7d0
fix: invalid indentation, incorrect formatting, incorrect type checks
Related-bug: #1556791
Alexey Stepanov 2016-03-14 14:36:14 +03:00
parent e2ff35c592
commit 37009b912a
28 changed files with 104 additions and 96 deletions

View File

@@ -60,11 +60,10 @@ def debug(logger):
                 logger.debug(
                     "Done: {} with result: {}".format(func.__name__, result))
             except BaseException as e:
-                tb = traceback.format_exc()
                 logger.error(
                     '{func} raised: {exc!r}\n'
                     'Traceback: {tb!s}'.format(
-                        func=func.__name__, exc=e, tb=tb))
+                        func=func.__name__, exc=e, tb=traceback.format_exc()))
                 raise
             return result
         return wrapped
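
Why the change: the old code bound `traceback.format_exc()` to a single-use local `tb`; inlining the call into the format arguments drops the intermediate variable without changing behavior. A minimal self-contained sketch of the same pattern (the logger setup and `fail` function are illustrative, not from the commit):

    import functools
    import logging
    import traceback

    logging.basicConfig(level=logging.ERROR)
    logger = logging.getLogger(__name__)


    def debug(logger):
        def wrap(func):
            @functools.wraps(func)
            def wrapped(*args, **kwargs):
                try:
                    result = func(*args, **kwargs)
                except BaseException as e:
                    # format_exc() is called inline, so no throwaway
                    # 'tb' local is needed.
                    logger.error(
                        '{func} raised: {exc!r}\n'
                        'Traceback: {tb!s}'.format(
                            func=func.__name__, exc=e,
                            tb=traceback.format_exc()))
                    raise
                return result
            return wrapped
        return wrap


    @debug(logger)
    def fail():
        raise ValueError('boom')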

View File

@@ -496,9 +496,10 @@ def check_stats_on_collector(collector_remote, postgres_actions, master_uuid):
     # Check that important data (clusters number, nodes number, nodes roles,
     # user's email, used operation system, OpenStack stats) is saved correctly
     for stat_type in general_stats.keys():
-        assert_true(type(summ_stats[stat_type]) == general_stats[stat_type],
-                    "Installation structure in Collector's DB doesn't contain"
-                    "the following stats: {0}".format(stat_type))
+        assert_true(
+            isinstance(summ_stats[stat_type], general_stats[stat_type]),
+            "Installation structure in Collector's DB doesn't contain"
+            "the following stats: {0}".format(stat_type))

     real_clusters_number = int(postgres_actions.run_query(
         db='nailgun', query='select count(*) from clusters;'))
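
The type()-to-isinstance rewrite is pylint's unidiomatic-typecheck fix: an exact type comparison rejects subclasses, while isinstance accepts them (and takes a tuple of types). A quick illustration with throwaway classes:

    class Base(object):
        pass


    class Child(Base):
        pass


    obj = Child()
    print(type(obj) == Base)             # False: exact-type check ignores inheritance
    print(isinstance(obj, Base))         # True: subclasses pass
    print(isinstance(obj, (int, Base)))  # True: a tuple checks several types at once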
@@ -619,9 +619,10 @@ def check_stats_private_info(collector_remote, postgres_actions,
         'dns_domain': _settings['DNS_DOMAIN'],
         'dns_search': _settings['DNS_SEARCH'],
         'dns_upstream': _settings['DNS_UPSTREAM'],
-        'fuel_password': _settings['FUEL_ACCESS']['password'] if
-        _settings['FUEL_ACCESS']['password'] != 'admin'
-        else 'DefaultPasswordIsNotAcceptableForSearch',
+        'fuel_password': (
+            _settings['FUEL_ACCESS']['password']
+            if _settings['FUEL_ACCESS']['password'] != 'admin'
+            else 'DefaultPasswordIsNotAcceptableForSearch'),
         'nailgun_password': _settings['postgres']['nailgun_password'],
         'keystone_password': _settings['postgres']['keystone_password'],
         'ostf_password': _settings['postgres']['ostf_password'],
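
Wrapping the conditional expression in parentheses lets it break cleanly over several lines, with `if` and `else` leading each continuation instead of an `if` dangling at a line's end. A standalone rendering with an illustrative value:

    password = 'admin'
    fuel_password = (
        password
        if password != 'admin'
        else 'DefaultPasswordIsNotAcceptableForSearch')
    print(fuel_password)  # DefaultPasswordIsNotAcceptableForSearch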

View File

@@ -155,8 +155,9 @@ def upload_manifests(func):
         result = func(*args, **kwargs)
         try:
             if settings.UPLOAD_MANIFESTS:
-                logger.info("Uploading new manifests from %s" %
-                            settings.UPLOAD_MANIFESTS_PATH)
+                logger.info(
+                    "Uploading new manifests from "
+                    "{:s}".format(settings.UPLOAD_MANIFESTS_PATH))
                 environment = get_current_env(args)
                 if not environment:
                     logger.warning("Can't upload manifests: method of "
@@ -166,8 +167,9 @@ def upload_manifests(func):
                 remote.execute('rm -rf /etc/puppet/modules/*')
                 remote.upload(settings.UPLOAD_MANIFESTS_PATH,
                               '/etc/puppet/modules/')
-                logger.info("Copying new site.pp from %s" %
-                            settings.SITEPP_FOR_UPLOAD)
+                logger.info(
+                    "Copying new site.pp from "
+                    "{:s}".format(settings.SITEPP_FOR_UPLOAD))
                 remote.execute("cp %s /etc/puppet/manifests" %
                                settings.SITEPP_FOR_UPLOAD)
                 if settings.SYNC_DEPL_TASKS:
@@ -185,7 +187,7 @@ def update_rpm_packages(func):
     def wrapper(*args, **kwargs):
         result = func(*args, **kwargs)
         if not settings.UPDATE_FUEL:
-                return result
+            return result
         try:
             environment = get_current_env(args)
             if not environment:
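
These hunks swap %-interpolation for str.format() in logging calls. Worth noting: the logging module also supports lazy %-style arguments, which pylint's W1202 (logging-format-interpolation) tends to prefer, since the message is only built if the record is actually emitted. A sketch with an illustrative path, not a real settings value:

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)
    path = '/tmp/manifests'  # illustrative only

    # Style used by this commit: message pre-built via str.format().
    logger.info("Uploading new manifests from {:s}".format(path))

    # Lazy alternative: formatting is deferred to the logging framework.
    logger.info("Uploading new manifests from %s", path)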

View File

@@ -26,8 +26,7 @@ def check_hiera_resources(remote, file_name=None):
     assert_true('fine' in output, output)
     if not file_name:
         output_f = ''.join(remote.execute(
-            'if [ -r /etc/hiera.yaml ] ; then echo "passed" ; fi')[
-            'stdout'])
+            'if [ -r /etc/hiera.yaml ] ; then echo "passed" ; fi')['stdout'])
         assert_true('passed' in output_f, output_f)
     else:
         output_f = ''.join(remote.execute(

View File

@@ -14,6 +14,7 @@
 from devops.helpers.helpers import tcp_ping
 from devops.helpers.helpers import wait

 from fuelweb_test.helpers import os_actions
+import json

View File

@@ -19,6 +19,7 @@ import time
 from devops.error import TimeoutError
 from devops.helpers import helpers

 from fuelweb_test.helpers import common
+from fuelweb_test import logger

View File

@@ -418,13 +418,14 @@ def verify_fix_apply_step(apply_step):
                     value=apply_step[key],
                     valid=validation_schema[key]['values']))
         if 'data_type' in validation_schema[key].keys():
-            assert_true(type(apply_step[key]) is
-                        validation_schema[key]['data_type'],
-                        "Unexpected data type in patch apply scenario step: '"
-                        "{key}' is '{type}', but expecting '{expect}'.".format(
-                            key=key,
-                            type=type(apply_step[key]),
-                            expect=validation_schema[key]['data_type']))
+            assert_true(
+                isinstance(
+                    apply_step[key], validation_schema[key]['data_type']),
+                "Unexpected data type in patch apply scenario step: '"
+                "{key}' is '{type}', but expecting '{expect}'.".format(
+                    key=key,
+                    type=type(apply_step[key]),
+                    expect=validation_schema[key]['data_type']))


 def validate_fix_apply_step(apply_step, environment, slaves):
@@ -519,7 +520,7 @@ def validate_fix_apply_step(apply_step, environment, slaves):
 def get_errata(path, bug_id):
     scenario_path = '{0}/bugs/{1}/erratum.yaml'.format(path, bug_id)
     assert_true(os.path.exists(scenario_path),
-                "Erratum for bug #{0} is not found in '{0}' "
+                "Erratum for bug #{0} is not found in '{1}' "
                 "directory".format(bug_id, settings.PATCHING_APPLY_TESTS))
     with open(scenario_path) as f:
         return yaml.load(f.read())
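
The get_errata hunk fixes a real message bug: both fields used index {0}, so the directory argument never appeared in the assertion text. Repeating an index reuses the same argument, and surplus positional arguments are silently ignored (illustrative values below):

    bug_id = 1556791
    tests_dir = '/tmp/patching_tests'  # illustrative value

    print("Erratum for bug #{0} is not found in '{0}' "
          "directory".format(bug_id, tests_dir))
    # Erratum for bug #1556791 is not found in '1556791' directory

    print("Erratum for bug #{0} is not found in '{1}' "
          "directory".format(bug_id, tests_dir))
    # Erratum for bug #1556791 is not found in '/tmp/patching_tests' directory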

View File

@@ -230,9 +230,7 @@ class RallyDeployment(object):

     @property
     def is_deployment_exist(self):
-        if self.uuid is not None:
-            return True
-        return False
+        return self.uuid is not None

     def create_deployment(self):
         if self.is_deployment_exist:
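
The is_deployment_exist change is the usual collapse of `if cond: return True / return False` down to `return cond`; `self.uuid is not None` is already a bool. A runnable sketch with a stub constructor (the real class is more involved):

    class RallyDeployment(object):
        def __init__(self, uuid=None):
            self.uuid = uuid  # stub: stands in for the real deployment lookup

        @property
        def is_deployment_exist(self):
            return self.uuid is not None


    print(RallyDeployment('a1b2').is_deployment_exist)  # True
    print(RallyDeployment().is_deployment_exist)        # False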

View File

@@ -19,7 +19,7 @@ from fuelweb_test import settings
 def change_cluster_uca_config(cluster_attributes):
-    'Returns cluster attributes with UCA repo configuration.'
+    """Returns cluster attributes with UCA repo configuration."""

     # check attributes have uca options

View File

@@ -918,9 +918,10 @@ def get_ceph_partitions(ip, device, type="xfs"):
             type=type)
     )['stdout']
     if not ret:
-        logger.error("Partition not present! {partitions}: ".format(
-            SSHManager().check_call(ip=ip,
-                                    cmd="parted {device} print")))
+        logger.error(
+            "Partition not present! {partitions}: ".format(
+                partitions=SSHManager().check_call(
+                    ip=ip, cmd="parted {device} print")))
         raise Exception()
     logger.debug("Partitions: {part}".format(part=ret))
     return ret
@@ -936,9 +937,10 @@ def get_mongo_partitions(ip, device):
             size=re.escape('{print $4}'))
     )['stdout']
     if not ret:
-        logger.error("Partition not present! {partitions}: ".format(
-            SSHManager().check_call(ip=ip,
-                                    cmd="parted {device} print")))
+        logger.error(
+            "Partition not present! {partitions}: ".format(
+                partitions=SSHManager().check_call(
+                    ip=ip, cmd="parted {device} print")))
         raise Exception()
     logger.debug("Partitions: {part}".format(part=ret))
     return ret
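
Both partition hunks repair a latent crash: the old calls used a named field, {partitions}, but passed the value positionally, which raises KeyError the moment the error path runs. A named field must be supplied as a keyword argument:

    try:
        "Partition not present! {partitions}: ".format("parted output")
    except KeyError as e:
        print("KeyError:", e)  # KeyError: 'partitions'

    # Fixed form: the named field gets a keyword argument.
    print("Partition not present! {partitions}: ".format(
        partitions="parted output"))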

View File

@@ -15,8 +15,8 @@
 import re
 import subprocess
 import time
-from devops.error import TimeoutError

+from devops.error import TimeoutError
 from devops.helpers.helpers import _tcp_ping
 from devops.helpers.helpers import _wait
 from devops.helpers.helpers import wait
@@ -137,8 +137,7 @@ class EnvironmentModel(object):
                      .format(', '.join(sorted(nodes_names))))
             new_time = sync_time(self.d_env, nodes_names, skip_sync)
             for name in sorted(new_time):
-                logger.info("New time on '{0}' = {1}".format(name,
-                                                             new_time[name]))
+                logger.info("New time on '{0}' = {1}".format(name, new_time[name]))

     @logwrap
     def get_admin_node_ip(self):
@@ -340,15 +339,15 @@ class EnvironmentModel(object):
                     self.fuel_web.get_nailgun_node_by_devops_node(
                         node)['online'], timeout=60 * 6)
             except TimeoutError:
-                    raise TimeoutError(
-                        "Node {0} does not become online".format(node.name))
+                raise TimeoutError(
+                    "Node {0} does not become online".format(node.name))
         return True

     def revert_snapshot(self, name, skip_timesync=False):
         if not self.d_env.has_snapshot(name):
             return False

-        logger.info('We have snapshot with such name: %s' % name)
+        logger.info('We have snapshot with such name: {:s}'.format(name))

         logger.info("Reverting the snapshot '{0}' ....".format(name))
         self.d_env.revert(name)

View File

@@ -1470,7 +1470,7 @@ class FuelWebClient(object):
                     net.get('seg_type', '') == 'tun'):
                 result['private_tun'] = net
             elif (net['name'] == 'private' and
-                    net.get('seg_type', '') == 'gre'):
+                  net.get('seg_type', '') == 'gre'):
                 result['private_gre'] = net
             elif net['name'] == 'public':
                 result['public'] = net
@@ -1834,12 +1834,12 @@ class FuelWebClient(object):
                 'inet (?P<ip>\d+\.\d+\.\d+.\d+/\d+).*scope .* '
                 '{0}'.format(interface), ' '.join(ret['stdout']))
             if ip_search is None:
-                    logger.debug("Ip show output does not match in regex. "
-                                 "Current value is None. On node {0} in netns "
-                                 "{1} for interface {2}".format(node_name,
-                                                                namespace,
-                                                                interface))
-                    return None
+                logger.debug("Ip show output does not match in regex. "
+                             "Current value is None. On node {0} in netns "
+                             "{1} for interface {2}".format(node_name,
+                                                            namespace,
+                                                            interface))
+                return None
             return ip_search.group('ip')
         except DevopsCalledProcessError as err:
             logger.error(err)
@@ -2001,8 +2001,8 @@ class FuelWebClient(object):
     @logwrap
     def run_ceph_task(self, cluster_id, offline_nodes):
         ceph_id = [n['id'] for n in self.client.list_cluster_nodes(cluster_id)
-                   if 'ceph-osd'
-                   in n['roles'] and n['id'] not in offline_nodes]
+                   if 'ceph-osd' in n['roles'] and
+                   n['id'] not in offline_nodes]
         res = self.client.put_deployment_tasks_for_cluster(
             cluster_id, data=['top-role-ceph-osd'],
             node_id=str(ceph_id).strip('[]'))
@@ -2562,9 +2562,9 @@ class FuelWebClient(object):

     @logwrap
     def spawn_vms_wait(self, cluster_id, timeout=60 * 60, interval=30):
-            logger.info('Spawn VMs of a cluster %s', cluster_id)
-            task = self.client.spawn_vms(cluster_id)
-            self.assert_task_success(task, timeout=timeout, interval=interval)
+        logger.info('Spawn VMs of a cluster %s', cluster_id)
+        task = self.client.spawn_vms(cluster_id)
+        self.assert_task_success(task, timeout=timeout, interval=interval)

     @logwrap
     def get_all_ostf_set_names(self, cluster_id):

View File

@@ -322,8 +322,8 @@ class NailgunClient(object):
     def get_cluster_id(self, name):
         for cluster in self.list_clusters():
             if cluster["name"] == name:
-                logger.info('cluster name is %s' % name)
-                logger.info('cluster id is %s' % cluster["id"])
+                logger.info('Cluster name is {:s}'.format(name))
+                logger.info('Cluster id is {:d}'.format(cluster["id"]))
                 return cluster["id"]

     @logwrap

View File

@@ -181,7 +181,7 @@ class TestLmaCollectorPlugin(TestBasic):
         assert_true(
             self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
             msg)
-        logger.debug('%s plugin is installed' % plugin_name)
+        logger.debug('{:s} plugin is installed'.format(plugin_name))
         self.fuel_web.update_plugin_settings(
             cluster_id, plugin_name,
             plugin_version, plugin['options'])

View File

@@ -118,12 +118,13 @@ class TestLmaInfraAlertingPlugin(TestBasic):
         msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
         assert_true(self.fuel_web.check_plugin_exists(cluster_id, self._name),
                     msg)
-        logger.debug('%s (%s) plugin is installed' % (self._name,
-                                                      self._version))
-        self.fuel_wb.update_plugin_settings(cluster_id,
-                                            self._name,
-                                            self._version,
-                                            plugin_options)
+        logger.debug(
+            '{name:s} ({ver!s}) plugin is installed'
+            ''.format(name=self._name, ver=self._version))
+        self.fuel_web.update_plugin_settings(cluster_id,
+                                             self._name,
+                                             self._version,
+                                             plugin_options)

         return cluster_id

View File

@@ -95,7 +95,7 @@ class RebootPlugin(TestBasic):
         options = {'metadata/enabled': True}
         self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

-        logger.info('cluster is %s' % str(cluster_id))
+        logger.info('Cluster is {!s}'.format(cluster_id))

         self.fuel_web.update_nodes(
             cluster_id,
@@ -230,7 +230,7 @@ class RebootPlugin(TestBasic):
         options = {'metadata/enabled': True}
         self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

-        logger.info('cluster is %s' % str(cluster_id))
+        logger.info('Cluster is {!s}'.format(cluster_id))

         self.fuel_web.update_nodes(
             cluster_id,

View File

@@ -105,7 +105,7 @@ class VipReservation(TestBasic):
         options = {'metadata/enabled': True}
         self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

-        logger.info('cluster is %s' % str(cluster_id))
+        logger.info('Cluster is {!s}'.format(cluster_id))

         self.fuel_web.update_nodes(
             cluster_id,
@@ -236,7 +236,7 @@ class VipReservation(TestBasic):
         options = {'metadata/enabled': True}
         self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

-        logger.info('cluster is %s' % str(cluster_id))
+        logger.info('Cluster is {!s}'.format(cluster_id))

         self.fuel_web.update_nodes(
             cluster_id,
@@ -363,7 +363,7 @@ class VipReservation(TestBasic):
         options = {'metadata/enabled': True}
         self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

-        logger.info('cluster is %s' % str(cluster_id))
+        logger.info('Cluster is {!s}'.format(cluster_id))

         self.fuel_web.update_nodes(
             cluster_id,

View File

@@ -87,8 +87,8 @@ class TestAdminNode(TestBasic):
         assert_equal(len(astute_master), 1)
         astute_workers = filter(lambda x: 'astute worker' in x, ps_output)
         logger.info(
-            "Found %d astute worker processes: %s" %
-            (len(astute_workers), astute_workers))
+            "Found {len:d} astute worker processes: {workers!s}"
+            "".format(len=len(astute_workers), workers=astute_workers))
         assert_equal(True, len(astute_workers) > 1)

View File

@@ -691,8 +691,8 @@ class VmBackedWithCephMigrationBasic(TestBasic):
         logger.info("Check Ceph health is ok after migration")
         self.fuel_web.check_ceph_status(cluster_id)

-        logger.info("Server is now on host %s" %
-                    os.get_srv_host_name(new_srv))
+        logger.info(
+            "Server is now on host {:s}".format(os.get_srv_host_name(new_srv)))

         self.show_step(10)
@@ -779,8 +779,8 @@ class VmBackedWithCephMigrationBasic(TestBasic):
         logger.info("Check Ceph health is ok after migration")
         self.fuel_web.check_ceph_status(cluster_id)

-        logger.info("Server is now on host %s" %
-                    os.get_srv_host_name(new_srv))
+        logger.info(
+            "Server is now on host {:s}".format(os.get_srv_host_name(new_srv)))

         self.show_step(18)
         logger.info("Terminate migrated server")

View File

@@ -64,7 +64,7 @@ class OneNodeDeploy(TestBasic):
             name=self.__class__.__name__,
             mode=DEPLOYMENT_MODE,
         )
-        logger.info('cluster is %s' % str(cluster_id))
+        logger.info('Cluster is {!s}'.format(cluster_id))
         self.fuel_web.update_nodes(
             cluster_id,
             {'slave-01': ['controller']}

View File

@@ -707,11 +707,11 @@ class TestMultipleClusterNets(TestBasic):
             try:
                 wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
                     slave)['status'] == 'error', timeout=15 * 60)
-                logger.info('Node {} become error state'.format(slave.name,
-                                                                'error'))
+                logger.info(
+                    'Node {} changed state to error'.format(slave.name))
             except TimeoutError:
-                raise TimeoutError('Node {} not become '
-                                   'error state'.format(slave.name))
+                raise TimeoutError('Node {} not changed state to '
+                                   'error'.format(slave.name))

         self.show_step(4)
         logger.info('Rebooting nodes from custom nodegroup..')
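
The old logger.info call passed a second argument, 'error', that the single {} placeholder never consumed. str.format() ignores surplus positional arguments, so nothing crashed, but the dead argument is exactly what pylint's too-many-format-args check exists to catch:

    name = 'slave-05'  # illustrative node name
    print('Node {} become error state'.format(name, 'error'))  # extra arg silently dropped
    print('Node {} changed state to error'.format(name))       # fixed call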

View File

@@ -647,8 +647,8 @@ class TestHaFailoverBase(TestBasic):
                 remote.check_call(cmd_input)
                 remote.check_call(cmd_output)
             except:
-                logger.error('command failed to be executed'.format(
-                    p_d_ctrl.name))
+                logger.error(
+                    'Command {:s} failed to be executed'.format(p_d_ctrl.name))
                 raise
             finally:
                 remote.clear()
@@ -1181,9 +1181,12 @@ class TestHaFailoverBase(TestBasic):
                           status):
             for remote in ctrl_remotes:
                 pcs_nodes = _get_pcm_nodes(remote)
+                # TODO: FIXME: Rewrite using normal SSHManager and node name
+                node_name = ''.join(
+                    remote.execute('hostname -f')['stdout']).strip()
                 logger.debug(
                     "Status of pacemaker nodes on node {0}: {1}".
-                    format(node['name'], pcs_nodes))
+                    format(node_name, pcs_nodes))
                 if set(pcs_nodes_online) != set(pcs_nodes[status]):
                     return False
         return True

View File

@@ -368,11 +368,11 @@ class FailoverGroup1(TestBasic):

         self.show_step(5)
         with self.fuel_web.get_ssh_for_node('slave-04') as remote:
-                file_name = 'test_data'
-                result = remote.execute(
-                    'lvcreate -n test -L20G cinder')['exit_code']
-                assert_equal(result, 0, "The file {0} was not "
-                             "allocated".format(file_name))
+            file_name = 'test_data'
+            result = remote.execute(
+                'lvcreate -n test -L20G cinder')['exit_code']
+            assert_equal(result, 0, "The file {0} was not "
+                         "allocated".format(file_name))

         self.show_step(6)
         self.show_step(7)

View File

@@ -465,9 +465,9 @@ class TestNeutronFailoverBase(base_test_case.TestBasic):
         check_ping = ping.format(ip=floating_ip)
         err_msg = 'Instance with ip:{ip} is not reachable by ICMP.'
         with self.env.d_env.get_ssh_to_remote(_ip) as remote:
-                wait(lambda: remote.execute(check_ping)['exit_code'] == 0,
-                     timeout=120,
-                     timeout_msg=err_msg.format(ip=floating_ip))
+            wait(lambda: remote.execute(check_ping)['exit_code'] == 0,
+                 timeout=120,
+                 timeout_msg=err_msg.format(ip=floating_ip))

         # command for get original MTU for external bridge on one
         # of controllers

View File

@@ -504,7 +504,7 @@ def puppet_modules_mapping(modules):
     with open("gates_tests/helpers/puppet_module_mapping.yaml", "r") as f:
         mapping = yaml.load(f)

-    if modules and type(modules) is dict:
+    if modules and isinstance(modules, dict):
         all_modules = set([j for i in mapping.values() for j in i])
         logger.debug(
             "List of puppet modules covered by system_tests {}".format(
@@ -522,9 +522,9 @@ def puppet_modules_mapping(modules):
     # find test group which has better coverage of modules from review
     system_test = "bvt_2"
     max_intersection = 0
-    if not ("ceph" in modules and set(
-            ["roles/cinder.pp", "cinder", "openstack-cinder"]) & set(
-            modules)):
+    if not ("ceph" in modules and
+            {"roles/cinder.pp", "cinder", "openstack-cinder"} &
+            set(modules)):
         for test in mapping:
             test_intersection = len(
                 set(mapping[test]).intersection(set(modules)))
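
The second hunk replaces set([...]) around a list literal with a set literal, which skips building a throwaway list and reads better. Results are identical; the module list below is illustrative:

    modules = ["cinder", "keystone"]  # illustrative review content

    old_style = set(["roles/cinder.pp", "cinder", "openstack-cinder"]) & set(modules)
    new_style = {"roles/cinder.pp", "cinder", "openstack-cinder"} & set(modules)
    assert old_style == new_style
    print(new_style)  # intersection: {'cinder'}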

View File

@@ -46,8 +46,8 @@ class NailgunAgentGate(TestBasic):
         """
         if not settings.UPDATE_FUEL:
-            raise Exception("{} variable don't exist"
-                            .format(settings.UPDATE_FUEL))
+            raise Exception("{} variable doesn't exist"
+                            .format(settings.UPDATE_FUEL))
         self.show_step(1, initialize=True)
         self.env.revert_snapshot("ready")

View File

@@ -48,8 +48,8 @@ class Gate(TestBasic):
         """
         if not settings.UPDATE_FUEL:
-            raise Exception("{} variable don't exist"
-                            .format(settings.UPDATE_FUEL))
+            raise Exception(
+                "{} variable doesn't exist".format(settings.UPDATE_FUEL))
         self.show_step(1, initialize=True)
         self.env.revert_snapshot("ready")

View File

@@ -52,7 +52,7 @@ def collect_yamls(path):
     for r, d, f in os.walk(path):
         for one in f:
             if os.path.splitext(one)[1] in ('.yaml', '.yml'):
-                    ret.append(os.path.join(r, one))
+                ret.append(os.path.join(r, one))
     return ret
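
The collect_yamls fix is purely cosmetic: the over-indented append was still inside the `if`, so behavior never changed; the warning here is pylint's bad-indentation (W0311) about the inconsistent indentation step. A runnable rendering of the fixed helper:

    import os


    def collect_yamls(path):
        """Recursively collect *.yaml / *.yml files under path."""
        ret = []
        for r, d, f in os.walk(path):
            for one in f:
                if os.path.splitext(one)[1] in ('.yaml', '.yml'):
                    ret.append(os.path.join(r, one))  # consistent 4-space steps
        return ret


    print(collect_yamls('.'))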