Include flake8 execution in tox -e linters
This fixes several flake8 failures and integrates flake8 into linters tox environment. Rule W504 is disabled because at this moment there is no known way to avoid flapping between W504 and W503. This allows us to retire openstack-tox-pep8 job because the more generic openstack-tox-linters includes it. Still, developers will be able to conveniently run only pep8 if they want. Change-Id: I7da0f6f09a533dd1c4dc303029e8c587bc200f66
This commit is contained in:
parent
bac4e8aef3
commit
90d9c1bc41
|
@ -34,6 +34,7 @@ def handle_signal(signum, stack):
|
||||||
global terminate
|
global terminate
|
||||||
terminate = True
|
terminate = True
|
||||||
|
|
||||||
|
|
||||||
signal.signal(signal.SIGINT, handle_signal)
|
signal.signal(signal.SIGINT, handle_signal)
|
||||||
|
|
||||||
def run_iteration(_config, _cli_args, result_dir_ts, _logger, tools):
|
def run_iteration(_config, _cli_args, result_dir_ts, _logger, tools):
|
||||||
|
@ -204,5 +205,6 @@ def main():
|
||||||
_logger.info("Browbeat finished successfully, UUID: {}".format(browbeat.elastic.browbeat_uuid))
|
_logger.info("Browbeat finished successfully, UUID: {}".format(browbeat.elastic.browbeat_uuid))
|
||||||
sys.exit(0)
|
sys.exit(0)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
|
|
@ -179,6 +179,7 @@ class Elastic(object):
|
||||||
this function will iterate through all the data points, combining the iteration
|
this function will iterate through all the data points, combining the iteration
|
||||||
and rerun data points into a single 95%tile.
|
and rerun data points into a single 95%tile.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def summarize_results(self, data, combined):
|
def summarize_results(self, data, combined):
|
||||||
summary = {}
|
summary = {}
|
||||||
if combined:
|
if combined:
|
||||||
|
@ -240,6 +241,7 @@ class Elastic(object):
|
||||||
|
|
||||||
"""
|
"""
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def compare_rally_results(self, data, uuids, combined, metadata=None):
|
def compare_rally_results(self, data, uuids, combined, metadata=None):
|
||||||
missing = []
|
missing = []
|
||||||
if len(data) < 2:
|
if len(data) < 2:
|
||||||
|
@ -249,13 +251,13 @@ class Elastic(object):
|
||||||
self.logger.error("Not able to find UUID in data set")
|
self.logger.error("Not able to find UUID in data set")
|
||||||
return False
|
return False
|
||||||
if combined:
|
if combined:
|
||||||
print "+{}+".format("-" * (33 + 44 + 10 + 10 + 23))
|
print("+{}+".format("-" * (33 + 44 + 10 + 10 + 23)))
|
||||||
print "{0:33} | {1:40} | {2:10} | {3:10} | {4:13} ".format("Scenario",
|
print("{0:33} | {1:40} | {2:10} | {3:10} | {4:13} ".format("Scenario",
|
||||||
"Action",
|
"Action",
|
||||||
uuids[0][-8:],
|
uuids[0][-8:],
|
||||||
uuids[1][-8:],
|
uuids[1][-8:],
|
||||||
"% Difference")
|
"% Difference"))
|
||||||
print "+{}+".format("-" * (33 + 44 + 10 + 10 + 23))
|
print("+{}+".format("-" * (33 + 44 + 10 + 10 + 23)))
|
||||||
for scenario in data[uuids[0]]:
|
for scenario in data[uuids[0]]:
|
||||||
if scenario not in data[uuids[1]]:
|
if scenario not in data[uuids[1]]:
|
||||||
missing.append(scenario)
|
missing.append(scenario)
|
||||||
|
@ -268,23 +270,23 @@ class Elastic(object):
|
||||||
perf1 = data[uuids[1]][scenario][action]
|
perf1 = data[uuids[1]][scenario][action]
|
||||||
diff = numpy.diff(dset)[0] / numpy.abs(dset[:-1])[0] * 100
|
diff = numpy.diff(dset)[0] / numpy.abs(dset[:-1])[0] * 100
|
||||||
|
|
||||||
print "{0:33} | {1:40} | {2:10.3f} | {3:10.3f} | {4:13.3f}".format(scenario,
|
print("{0:33} | {1:40} | {2:10.3f} | {3:10.3f} | {4:13.3f}".format(scenario,
|
||||||
action,
|
action,
|
||||||
perf0,
|
perf0,
|
||||||
perf1,
|
perf1,
|
||||||
diff)
|
diff))
|
||||||
print "+{}+".format("-" * (33 + 44 + 10 + 10 + 26))
|
print("+{}+".format("-" * (33 + 44 + 10 + 10 + 26)))
|
||||||
else:
|
else:
|
||||||
print "+{}+".format("-" * (33 + 44 + 15 + 15 + 10 + 10 + 26))
|
print("+{}+".format("-" * (33 + 44 + 15 + 15 + 10 + 10 + 26)))
|
||||||
print "{0:33} | {1:40} | {2:15} | {3:15} | {4:10} | {5:10} | {6:23}".format(
|
print("{0:33} | {1:40} | {2:15} | {3:15} | {4:10} | {5:10} | {6:23}".format(
|
||||||
"Scenario",
|
"Scenario",
|
||||||
"Action",
|
"Action",
|
||||||
"times",
|
"times",
|
||||||
"concurrency",
|
"concurrency",
|
||||||
uuids[0][-8:],
|
uuids[0][-8:],
|
||||||
uuids[1][-8:],
|
uuids[1][-8:],
|
||||||
"% Difference")
|
"% Difference"))
|
||||||
print "+{}+".format("-" * (33 + 44 + 15 + 15 + 10 + 10 + 26))
|
print("+{}+".format("-" * (33 + 44 + 15 + 15 + 10 + 10 + 26)))
|
||||||
for scenario in data[uuids[0]]:
|
for scenario in data[uuids[0]]:
|
||||||
if scenario not in data[uuids[1]]:
|
if scenario not in data[uuids[1]]:
|
||||||
missing.append(scenario)
|
missing.append(scenario)
|
||||||
|
@ -314,38 +316,39 @@ class Elastic(object):
|
||||||
diff = numpy.diff(dset)[0] / numpy.abs(dset[:-1])[0] * 100
|
diff = numpy.diff(dset)[0] / numpy.abs(dset[:-1])[0] * 100
|
||||||
output = "{0:33} | {1:40} | {2:15} | {3:15} "
|
output = "{0:33} | {1:40} | {2:15} | {3:15} "
|
||||||
output += "| {4:10.3f} | {5:10.3f} | {6:13.3f}"
|
output += "| {4:10.3f} | {5:10.3f} | {6:13.3f}"
|
||||||
print output.format(scenario,
|
print(output.format(scenario,
|
||||||
action,
|
action,
|
||||||
times,
|
times,
|
||||||
concurrency,
|
concurrency,
|
||||||
perf0,
|
perf0,
|
||||||
perf1,
|
perf1,
|
||||||
diff)
|
diff))
|
||||||
print "+{}+".format("-" * (33 + 44 + 15 + 15 + 10 + 10 + 26))
|
print("+{}+".format("-" * (33 + 44 + 15 + 15 + 10 + 10 + 26)))
|
||||||
if metadata:
|
if metadata:
|
||||||
print "+{}+".format("-" * (40 + 20 + 20 + 33))
|
print("+{}+".format("-" * (40 + 20 + 20 + 33)))
|
||||||
print "{0:40} | {1:20} | {2:20} | {3:20}".format("UUID", "Version", "Build",
|
print("{0:40} | {1:20} | {2:20} | {3:20}".format("UUID", "Version", "Build",
|
||||||
"Number of runs")
|
"Number of runs"))
|
||||||
print "+{}+".format("-" * (40 + 20 + 20 + 33))
|
print("+{}+".format("-" * (40 + 20 + 20 + 33)))
|
||||||
for uuids in metadata:
|
for uuids in metadata:
|
||||||
print "{0:40} | {1:20} | {2:20} | {3:20}".format(uuids,
|
print("{0:40} | {1:20} | {2:20} | {3:20}".format(uuids,
|
||||||
metadata[uuids][
|
metadata[uuids][
|
||||||
'version'],
|
'version'],
|
||||||
metadata[uuids][
|
metadata[uuids][
|
||||||
'build'],
|
'build'],
|
||||||
metadata[uuids]['rerun'])
|
metadata[uuids]['rerun']))
|
||||||
|
|
||||||
print "+{}+".format("-" * (40 + 20 + 20 + 33))
|
print("+{}+".format("-" * (40 + 20 + 20 + 33)))
|
||||||
if len(missing) > 0:
|
if len(missing) > 0:
|
||||||
print "+-------------------------------------+"
|
print("+-------------------------------------+")
|
||||||
print "Missing Scenarios to compare results:"
|
print("Missing Scenarios to compare results:")
|
||||||
print "+-------------------------------------+"
|
print("+-------------------------------------+")
|
||||||
for scenario in missing:
|
for scenario in missing:
|
||||||
print " - {}".format(scenario)
|
print(" - {}".format(scenario))
|
||||||
|
|
||||||
"""
|
"""
|
||||||
returns a list of dicts that contain 95%tile performance data.
|
returns a list of dicts that contain 95%tile performance data.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def get_result_data(self, index, browbeat_uuid):
|
def get_result_data(self, index, browbeat_uuid):
|
||||||
results = []
|
results = []
|
||||||
data = []
|
data = []
|
||||||
|
@ -395,6 +398,7 @@ class Elastic(object):
|
||||||
Currently this function will only compare two uuids. I (rook) am not convinced it is worth
|
Currently this function will only compare two uuids. I (rook) am not convinced it is worth
|
||||||
the effort to engineer anything > 2.
|
the effort to engineer anything > 2.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def compare_metadata(self, index, role, uuids):
|
def compare_metadata(self, index, role, uuids):
|
||||||
meta = []
|
meta = []
|
||||||
for browbeat_uuid in uuids:
|
for browbeat_uuid in uuids:
|
||||||
|
@ -490,24 +494,24 @@ class Elastic(object):
|
||||||
"Host [{}] Service [{}] {} [{}]".format(
|
"Host [{}] Service [{}] {} [{}]".format(
|
||||||
uuids[1], host, service, options, key))
|
uuids[1], host, service, options, key))
|
||||||
|
|
||||||
print "+{}+".format("-" * (33 + 44 + 15 + 15 + 30 + 10 + 6))
|
print("+{}+".format("-" * (33 + 44 + 15 + 15 + 30 + 10 + 6)))
|
||||||
print "{0:25} | {1:15} | {2:30} | {3:23} | {4:40} | {5:40} ".format(
|
print("{0:25} | {1:15} | {2:30} | {3:23} | {4:40} | {5:40} ".format(
|
||||||
"Host",
|
"Host",
|
||||||
"Service",
|
"Service",
|
||||||
"Option",
|
"Option",
|
||||||
"Key",
|
"Key",
|
||||||
"Old Value",
|
"Old Value",
|
||||||
"New Value")
|
"New Value"))
|
||||||
print "+{}+".format("-" * (33 + 44 + 15 + 15 + 30 + 10 + 6))
|
print("+{}+".format("-" * (33 + 44 + 15 + 15 + 30 + 10 + 6)))
|
||||||
for difference in differences :
|
for difference in differences:
|
||||||
value = difference.split("|")
|
value = difference.split("|")
|
||||||
print "{0:25} | {1:15} | {2:30} | {3:23} | {4:40} | {5:40} ".format(value[0],
|
print("{0:25} | {1:15} | {2:30} | {3:23} | {4:40} | {5:40} ".format(value[0],
|
||||||
value[1],
|
value[1],
|
||||||
value[2],
|
value[2],
|
||||||
value[3],
|
value[3],
|
||||||
value[4],
|
value[4],
|
||||||
value[5])
|
value[5]))
|
||||||
print "+{}+".format("-" * (33 + 44 + 15 + 15 + 30 + 10 + 6))
|
print("+{}+".format("-" * (33 + 44 + 15 + 15 + 30 + 10 + 6)))
|
||||||
|
|
||||||
def scroll(self, search, sid, scroll_size):
|
def scroll(self, search, sid, scroll_size):
|
||||||
data = []
|
data = []
|
||||||
|
@ -528,6 +532,7 @@ class Elastic(object):
|
||||||
index, however, this is quite expensive, and it might be quicker to
|
index, however, this is quite expensive, and it might be quicker to
|
||||||
only look for errors for specific browbeat_uuids
|
only look for errors for specific browbeat_uuids
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def get_errors(self, index, browbeat_id):
|
def get_errors(self, index, browbeat_id):
|
||||||
self.logger.info("Making query against {}".format(index))
|
self.logger.info("Making query against {}".format(index))
|
||||||
page = self.es.search(
|
page = self.es.search(
|
||||||
|
|
|
@ -135,5 +135,6 @@ def main():
|
||||||
metadata.write_metadata_file(
|
metadata.write_metadata_file(
|
||||||
software_data, os.path.join(args.path, 'software-metadata.json'))
|
software_data, os.path.join(args.path, 'software-metadata.json'))
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
|
|
@ -73,7 +73,7 @@ class Rally(workloadbase.WorkloadBase):
|
||||||
return (from_time, to_time)
|
return (from_time, to_time)
|
||||||
|
|
||||||
def get_task_id(self, test_name):
|
def get_task_id(self, test_name):
|
||||||
cmd = "grep \"rally task report [a-z0-9\-]* --out\" {}.log | awk '{{print $4}}'".format(
|
cmd = "grep \"rally task report [a-z0-9\\-]* --out\" {}.log | awk '{{print $4}}'".format(
|
||||||
test_name)
|
test_name)
|
||||||
return self.tools.run_cmd(cmd)['stdout']
|
return self.tools.run_cmd(cmd)['stdout']
|
||||||
|
|
||||||
|
@ -147,7 +147,7 @@ class Rally(workloadbase.WorkloadBase):
|
||||||
iteration = 1
|
iteration = 1
|
||||||
workload_name = value
|
workload_name = value
|
||||||
if value.find('(') is not -1:
|
if value.find('(') is not -1:
|
||||||
iteration = re.findall('\d+', value)[0]
|
iteration = re.findall(r'\d+', value)[0]
|
||||||
workload_name = value.split('(')[0]
|
workload_name = value.split('(')[0]
|
||||||
error = {'action': workload_name.strip(),
|
error = {'action': workload_name.strip(),
|
||||||
'iteration': iteration,
|
'iteration': iteration,
|
||||||
|
@ -164,7 +164,7 @@ class Rally(workloadbase.WorkloadBase):
|
||||||
iteration = 1
|
iteration = 1
|
||||||
workload_name = workload
|
workload_name = workload
|
||||||
if workload.find('(') is not -1:
|
if workload.find('(') is not -1:
|
||||||
iteration = re.findall('\d+', workload)[0]
|
iteration = re.findall(r'\d+', workload)[0]
|
||||||
workload_name = workload.split('(')[0]
|
workload_name = workload.split('(')[0]
|
||||||
rally_stats = {'action': workload_name.strip(),
|
rally_stats = {'action': workload_name.strip(),
|
||||||
'iteration': iteration,
|
'iteration': iteration,
|
||||||
|
|
|
@ -383,8 +383,7 @@ class Shaker(workloadbase.WorkloadBase):
|
||||||
test_time = workload.get("time", 60)
|
test_time = workload.get("time", 60)
|
||||||
for interval in range(0, test_time + 9):
|
for interval in range(0, test_time + 9):
|
||||||
es_list.append(
|
es_list.append(
|
||||||
datetime.datetime.utcnow() +
|
datetime.datetime.utcnow() + datetime.timedelta(0, interval))
|
||||||
datetime.timedelta(0, interval))
|
|
||||||
|
|
||||||
rerun_range = range(self.config["browbeat"]["rerun"])
|
rerun_range = range(self.config["browbeat"]["rerun"])
|
||||||
if self.config["browbeat"]["rerun_type"] == "complete":
|
if self.config["browbeat"]["rerun_type"] == "complete":
|
||||||
|
|
|
@ -143,7 +143,7 @@ class Tools(object):
|
||||||
|
|
||||||
""" Capture per-workload results """
|
""" Capture per-workload results """
|
||||||
workload_results = {}
|
workload_results = {}
|
||||||
json = re.compile("\.json")
|
json = re.compile(r"\.json")
|
||||||
if len(results) > 0:
|
if len(results) > 0:
|
||||||
for path in results:
|
for path in results:
|
||||||
for regex in workloads:
|
for regex in workloads:
|
||||||
|
|
|
@ -244,9 +244,9 @@ class Yoda(workloadbase.WorkloadBase):
|
||||||
time.sleep(10)
|
time.sleep(10)
|
||||||
continue
|
continue
|
||||||
if node_obj is None:
|
if node_obj is None:
|
||||||
self.logger.error("Can't find node " + node +
|
self.logger.error(
|
||||||
" Which existed at the start of introspection \
|
"Can't find node %s which existed at the start of "
|
||||||
did you delete it manually?")
|
"introspection did you delete it manually?", node)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
# == works here for string comparison because they are in fact
|
# == works here for string comparison because they are in fact
|
||||||
|
@ -266,8 +266,7 @@ class Yoda(workloadbase.WorkloadBase):
|
||||||
node_obj.provision_state, results['nodes'][node_obj.id]["state_list"])
|
node_obj.provision_state, results['nodes'][node_obj.id]["state_list"])
|
||||||
|
|
||||||
times.append(
|
times.append(
|
||||||
(datetime.datetime.utcnow() -
|
(datetime.datetime.utcnow() - start_time).total_seconds())
|
||||||
start_time).total_seconds())
|
|
||||||
|
|
||||||
elif (datetime.datetime.utcnow() - start_time) > timeout:
|
elif (datetime.datetime.utcnow() - start_time) > timeout:
|
||||||
# return currently active node to the deque to be failed
|
# return currently active node to the deque to be failed
|
||||||
|
@ -533,8 +532,7 @@ class Yoda(workloadbase.WorkloadBase):
|
||||||
benchmark['timeout'], env_setup, conn)
|
benchmark['timeout'], env_setup, conn)
|
||||||
else:
|
else:
|
||||||
self.logger.error(
|
self.logger.error(
|
||||||
"Malformed YODA configuration for " +
|
"Malformed YODA configuration for " + benchmark['name'])
|
||||||
benchmark['name'])
|
|
||||||
exit(1)
|
exit(1)
|
||||||
|
|
||||||
self.get_stats()
|
self.get_stats()
|
||||||
|
@ -607,8 +605,7 @@ class Yoda(workloadbase.WorkloadBase):
|
||||||
benchmark)
|
benchmark)
|
||||||
|
|
||||||
results['total_time'] = (
|
results['total_time'] = (
|
||||||
datetime.datetime.utcnow() -
|
datetime.datetime.utcnow() - start_time).total_seconds()
|
||||||
start_time).total_seconds()
|
|
||||||
try:
|
try:
|
||||||
stack_status = conn.orchestration.find_stack("overcloud")
|
stack_status = conn.orchestration.find_stack("overcloud")
|
||||||
except exceptions.SDKException:
|
except exceptions.SDKException:
|
||||||
|
|
|
@ -12,11 +12,10 @@
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
sys.path.append(os.path.abspath('ansible'))
|
|
||||||
|
|
||||||
import pytest
|
import pytest
|
||||||
|
sys.path.append(os.path.abspath('ansible'))
|
||||||
|
import bootstrap # noqa
|
||||||
|
|
||||||
import bootstrap
|
|
||||||
|
|
||||||
def test_bootstrap_help(capsys):
|
def test_bootstrap_help(capsys):
|
||||||
"""Tests to see if bootstrap.py help text is correct and that it loads sample/tripleo plugins"""
|
"""Tests to see if bootstrap.py help text is correct and that it loads sample/tripleo plugins"""
|
||||||
|
|
7
tox.ini
7
tox.ini
|
@ -1,6 +1,6 @@
|
||||||
[tox]
|
[tox]
|
||||||
minversion = 2.0
|
minversion = 2.0
|
||||||
envlist = py27,py35,py36,pep8,linters
|
envlist = py27,py35,py36,linters
|
||||||
skipsdist = True
|
skipsdist = True
|
||||||
|
|
||||||
[testenv]
|
[testenv]
|
||||||
|
@ -12,8 +12,11 @@ deps = -r{toxinidir}/test-requirements.txt
|
||||||
commands = python setup.py test
|
commands = python setup.py test
|
||||||
|
|
||||||
[testenv:linters]
|
[testenv:linters]
|
||||||
|
# py3 linters are able to stop more than py2 ones
|
||||||
|
basepython = python3
|
||||||
whitelist_externals = bash
|
whitelist_externals = bash
|
||||||
commands =
|
commands =
|
||||||
|
{[testenv:pep8]commands}
|
||||||
bash -c "cd ansible; find . -type f -regex '.*.y[a]?ml' -print0 | xargs -t -n1 -0 \
|
bash -c "cd ansible; find . -type f -regex '.*.y[a]?ml' -print0 | xargs -t -n1 -0 \
|
||||||
ansible-lint \
|
ansible-lint \
|
||||||
-x ANSIBLE0012,ANSIBLE0006,ANSIBLE0007,ANSIBLE0016,ANSIBLE0019" \
|
-x ANSIBLE0012,ANSIBLE0006,ANSIBLE0007,ANSIBLE0016,ANSIBLE0019" \
|
||||||
|
@ -53,7 +56,7 @@ commands = oslo_debug_helper {posargs}
|
||||||
[flake8]
|
[flake8]
|
||||||
# E123, E125 skipped as they are invalid PEP-8.
|
# E123, E125 skipped as they are invalid PEP-8.
|
||||||
show-source = True
|
show-source = True
|
||||||
ignore = E123,E125,E226,E302,E41,E231,E203,H233,H306,H238,H236,H404,H405
|
ignore = E123,E125,E226,E302,E41,E231,E203,H233,H306,H238,H236,H404,H405,W504
|
||||||
max-line-length = 100
|
max-line-length = 100
|
||||||
builtins = _
|
builtins = _
|
||||||
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,ansible/*,.browbeat-venv,.perfkit-venv,.rally-venv,.shaker-venv
|
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,ansible/*,.browbeat-venv,.perfkit-venv,.rally-venv,.shaker-venv
|
||||||
|
|
Loading…
Reference in New Issue