Adding a Base Workload Class

Having the workload classes inherit from an abstract base class enforces the
methods each workload must implement and feeds the summarized report output
(a minimal usage sketch follows the commit metadata below).
+ Moving logger here
+ Implementing functionality to report max test time
+ Browbeat report in yaml format
+ Renaming workload base method
+ Removing an unnecessary method
+ Formatting methods in WorkloadBase
+ autopep8

Change-Id: I090a863b4b00068a48cf5d914c337e15fd5739f5
Author: Sindhur Malleni  2016-03-22 16:59:02 -04:00
parent 7584575895
commit f174b67e2b
5 changed files with 288 additions and 111 deletions
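
For orientation, here is a minimal sketch of the pattern this change enforces.
The "Dummy" workload and its run() method are hypothetical (assumed for
illustration only); every WorkloadBase hook it calls (update_tests,
update_total_tests, get_time_dict, and so on) is defined in lib/WorkloadBase.py
at the bottom of this diff.

from WorkloadBase import WorkloadBase
import logging
import time

class Dummy(WorkloadBase):
    """Hypothetical workload showing the hooks WorkloadBase expects."""

    def __init__(self, config):
        self.logger = logging.getLogger('browbeat.Dummy')
        self.config = config
        self.scenario_count = 0
        self.test_count = 0
        self.pass_count = 0
        self.error_count = 0

    # The four abstract methods keep per-workload (instance) counters.
    def update_scenarios(self):
        self.scenario_count += 1

    def update_tests(self):
        self.test_count += 1

    def update_pass_tests(self):
        self.pass_count += 1

    def update_fail_tests(self):
        self.error_count += 1

    def run(self):
        self.update_scenarios()
        self.update_total_scenarios()  # class-level counter for print_summary()
        self.update_tests()
        self.update_total_tests()
        from_time = time.time()
        # ... run the actual benchmark here ...
        to_time = time.time()
        self.update_pass_tests()
        self.update_total_pass_tests()
        # Record elapsed time and status for the YAML report card.
        self.get_time_dict(to_time, from_time, 'dummy-benchmark',
                           'dummy-test', self.__class__.__name__, "pass")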

browbeat.py

@ -2,10 +2,13 @@
from lib.PerfKit import PerfKit
from lib.Rally import Rally
from lib.Shaker import Shaker
from lib.WorkloadBase import WorkloadBase
import argparse
import logging
import sys
import yaml
import datetime
import os
from pykwalify import core as pykwalify_core
from pykwalify import errors as pykwalify_errors
@ -14,14 +17,13 @@ _config_file = 'browbeat-config.yaml'
debug_log_file = 'log/debug.log'
def _load_config(path, _logger):
try:
stream = open(path, 'r')
except IOError:
_logger.error("Configuration file {} passed is missing".format(path))
exit(1)
config=yaml.load(stream)
config = yaml.load(stream)
stream.close()
validate_yaml(config, _logger)
return config
@ -56,10 +58,14 @@ def _run_workload_provider(provider, config):
def main():
parser = argparse.ArgumentParser(
description="Browbeat Performance and Scale testing for Openstack")
parser.add_argument('-s', '--setup', nargs='?', default=_config_file,
parser.add_argument(
'-s',
'--setup',
nargs='?',
default=_config_file,
help='Provide Browbeat YAML configuration file. Default is ./{}'.format(_config_file))
parser.add_argument('workloads', nargs='*', help='Browbeat workload(s). Takes a space separated'
' list of workloads ({}) or \"all\"'.format(', '.join(_workload_opts)))
' list of workloads ({}) or \"all\"'.format(', '.join(_workload_opts)))
parser.add_argument('--debug', action='store_true', help='Enable Debug messages')
_cli_args = parser.parse_args()
@ -96,6 +102,8 @@ def main():
_logger.error("If you meant 'all' use: './browbeat.py all' or './browbeat.py'")
exit(1)
else:
time_stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
_logger.info("Browbeat test suite kicked off")
_logger.info("Running workload(s): {}".format(','.join(_cli_args.workloads)))
for wkld_provider in _cli_args.workloads:
if wkld_provider in _config:
@ -103,9 +111,14 @@ def main():
_run_workload_provider(wkld_provider, _config)
else:
_logger.warning("{} is not enabled in {}".format(wkld_provider,
_cli_args.setup))
_cli_args.setup))
else:
_logger.error("{} is missing in {}".format(wkld_provider, _cli_args.setup))
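        # Write this run's YAML report card and print the totals accumulated by the workload classes.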
result_dir = _config['browbeat']['results']
WorkloadBase.print_report(result_dir, time_stamp)
_logger.info("Saved browbeat result summary to {}".format(
os.path.join(result_dir, time_stamp + '.' + 'report')))
WorkloadBase.print_summary()
if __name__ == '__main__':
sys.exit(main())

lib/PerfKit.py

@ -1,6 +1,7 @@
from Connmon import Connmon
from Tools import Tools
from Grafana import Grafana
from WorkloadBase import WorkloadBase
import glob
import logging
import datetime
@ -9,8 +10,7 @@ import shutil
import subprocess
import time
class PerfKit:
class PerfKit(WorkloadBase):
def __init__(self, config):
self.logger = logging.getLogger('browbeat.PerfKit')
@ -21,14 +21,27 @@ class PerfKit:
self.grafana = Grafana(self.config)
self.test_count = 0
self.scenario_count = 0
self.pass_count = 0
def _log_details(self):
self.logger.info(
"Current number of scenarios executed: {}".format(self.scenario_count))
self.logger.info(
"Current number of test(s) executed: {}".format(self.test_count))
self.logger.info(
"Current number of test failures: {}".format(self.error_count))
"Current number of Perfkit scenarios executed: {}".format(
self.scenario_count))
self.logger.info("Current number of Perfkit test(s) executed: {}".format(self.test_count))
self.logger.info("Current number of Perfkit test(s) succeeded: {}".format(self.pass_count))
self.logger.info("Current number of Perfkit test failures: {}".format(self.error_count))
def update_tests(self):
self.test_count += 1
def update_pass_tests(self):
self.pass_count += 1
def update_fail_tests(self):
self.error_count += 1
def update_scenarios(self):
self.scenario_count += 1
def run_benchmark(self, benchmark_config, result_dir, test_name, cloud_type="OpenStack"):
self.logger.debug("--------------------------------")
@ -67,9 +80,10 @@ class PerfKit:
self.logger.info("Running Perfkit Command: {}".format(cmd))
stdout_file = open("{}/pkb.stdout.log".format(result_dir), 'w')
stderr_file = open("{}/pkb.stderr.log".format(result_dir), 'w')
process = subprocess.Popen(
cmd, shell=True, stdout=stdout_file, stderr=stderr_file)
from_time = time.time()
process = subprocess.Popen(cmd, shell=True, stdout=stdout_file, stderr=stderr_file)
process.communicate()
to_time = time.time()
if 'sleep_after' in self.config['perfkit']:
time.sleep(self.config['perfkit']['sleep_after'])
to_ts = int(time.time() * 1000)
@ -81,17 +95,37 @@ class PerfKit:
self.connmon.move_connmon_results(result_dir, test_name)
self.connmon.connmon_graphs(result_dir, test_name)
except:
self.logger.error(
"Connmon Result data missing, Connmon never started")
self.logger.error("Connmon Result data missing, Connmon never started")
workload = self.__class__.__name__
new_test_name = test_name.split('-')
new_test_name = new_test_name[2:]
new_test_name = '-'.join(new_test_name)
# Determine success
try:
with open("{}/pkb.stderr.log".format(result_dir), 'r') as stderr:
if any('SUCCEEDED' in line for line in stderr):
self.logger.info("Benchmark completed.")
self.update_pass_tests()
self.update_total_pass_tests()
self.get_time_dict(
to_time,
from_time,
benchmark_config['benchmarks'],
new_test_name,
workload,
"pass")
else:
self.logger.error("Benchmark failed.")
self.error_count += 1
self.update_fail_tests()
self.update_total_fail_tests()
self.get_time_dict(
to_time,
from_time,
benchmark_config['benchmarks'],
new_test_name,
workload,
"fail")
except IOError:
self.logger.error(
"File missing: {}/pkb.stderr.log".format(result_dir))
@ -117,13 +151,16 @@ class PerfKit:
for benchmark in benchmarks:
if benchmark['enabled']:
self.logger.info("Benchmark: {}".format(benchmark['name']))
self.scenario_count += 1
self.update_scenarios()
self.update_total_scenarios()
for run in range(self.config['browbeat']['rerun']):
self.test_count += 1
self.update_tests()
self.update_total_tests()
result_dir = self.tools.create_results_dir(
self.config['browbeat']['results'], time_stamp, benchmark['name'], run)
test_name = "{}-{}-{}".format(time_stamp,
benchmark['name'], run)
test_name = "{}-{}-{}".format(time_stamp, benchmark['name'], run)
workload = self.__class__.__name__
self.workload_logger(result_dir, workload)
self.run_benchmark(benchmark, result_dir, test_name)
self._log_details()
else:

lib/Rally.py

@ -2,6 +2,7 @@ from Connmon import Connmon
from Tools import Tools
from collections import OrderedDict
from Grafana import Grafana
from WorkloadBase import WorkloadBase
import datetime
import glob
import logging
@ -10,20 +11,20 @@ import shutil
import subprocess
import time
class Rally(WorkloadBase):
class Rally:
def __init__(self, config):
def __init__(self, config, hosts=None):
self.logger = logging.getLogger('browbeat.Rally')
self.config = config
self.tools = Tools(self.config)
self.connmon = Connmon(self.config)
self.grafana = Grafana(self.config)
self.error_count = 0
self.pass_count = 0
self.test_count = 0
self.scenario_count = 0
def run_scenario(self, task_file, scenario_args, result_dir, test_name):
def run_scenario(self, task_file, scenario_args, result_dir, test_name, benchmark):
self.logger.debug("--------------------------------")
self.logger.debug("task_file: {}".format(task_file))
self.logger.debug("scenario_args: {}".format(scenario_args))
@ -45,38 +46,31 @@ class Rally:
if len(plugins) > 0:
plugin_string = "--plugin-paths {}".format(",".join(plugins))
cmd = "source {}; ".format(self.config['rally']['venv'])
cmd += "rally {} task start {} --task-args \'{}\' 2>&1 | tee {}.log".format(plugin_string,
task_file, task_args, test_name)
cmd += "rally {} task start {} --task-args \'{}\' 2>&1 | tee {}.log".format(
plugin_string, task_file, task_args, test_name)
from_time = time.time()
self.tools.run_cmd(cmd)
to_time = time.time()
if 'sleep_after' in self.config['rally']:
time.sleep(self.config['rally']['sleep_after'])
to_ts = int(time.time() * 1000)
return (from_time, to_time)
self.grafana.print_dashboard_url(from_ts, to_ts, test_name)
self.grafana.log_snapshot_playbook_cmd(
from_ts, to_ts, result_dir, test_name)
self.grafana.run_playbook(from_ts, to_ts, result_dir, test_name)
def workload_logger(self, result_dir):
base = result_dir.split('/')
if not os.path.isfile("{}/{}/browbeat-rally-run.log".format(base[0], base[1])):
file = logging.FileHandler(
"{}/{}/browbeat-rally-run.log".format(base[0], base[1]))
file.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)5s - %(message)s')
file.setFormatter(formatter)
self.logger.addHandler(file)
return None
def update_tests(self):
self.test_count += 1
def get_test_count(self):
return self.test_count
def update_pass_tests(self):
self.pass_count += 1
def get_error_count(self):
return self.error_count
def update_fail_tests(self):
self.error_count += 1
def get_scenario_count(self):
return self.scenario_count
def update_scenarios(self):
self.scenario_count += 1
def get_task_id(self, test_name):
cmd = "grep \"rally task results\" {}.log | awk '{{print $4}}'".format(
@ -84,12 +78,12 @@ class Rally:
return self.tools.run_cmd(cmd)
def _get_details(self):
self.logger.info("Current number of scenarios executed:{}".format(
self.get_scenario_count()))
self.logger.info(
"Current number of test(s) executed:{}".format(self.get_test_count()))
self.logger.info("Current number of test failures:{}".format(
self.get_error_count()))
"Current number of Rally scenarios executed:{}".format(
self.scenario_count))
self.logger.info("Current number of Rally tests executed:{}".format(self.test_count))
self.logger.info("Current number of Rally tests passed:{}".format(self.pass_count))
self.logger.info("Current number of Rally test failures:{}".format(self.error_count))
def gen_scenario_html(self, task_ids, test_name):
all_task_ids = ' '.join(task_ids)
@ -114,7 +108,6 @@ class Rally:
for benchmark in benchmarks:
if benchmark['enabled']:
self.logger.info("Benchmark: {}".format(benchmark['name']))
scenarios = benchmark['scenarios']
def_concurrencies = benchmark['concurrency']
def_times = benchmark['times']
@ -123,7 +116,8 @@ class Rally:
self.logger.debug("Default Times: {}".format(def_times))
for scenario in scenarios:
if scenario['enabled']:
self.scenario_count += 1
self.update_scenarios()
self.update_total_scenarios()
scenario_name = scenario['name']
scenario_file = scenario['file']
self.logger.info(
@ -142,9 +136,9 @@ class Rally:
self.config['browbeat'][
'results'], time_stamp, benchmark['name'],
scenario_name)
self.logger.debug(
"Created result directory: {}".format(result_dir))
self.workload_logger(result_dir)
self.logger.debug("Created result directory: {}".format(result_dir))
workload = self.__class__.__name__
self.workload_logger(result_dir, workload)
# Override concurrency/times
if 'concurrency' in scenario:
@ -160,9 +154,10 @@ class Rally:
for run in range(self.config['browbeat']['rerun']):
if run not in results:
results[run] = []
self.test_count += 1
test_name = "{}-browbeat-{}-{}-iteration-{}".format(time_stamp,
scenario_name, concurrency, run)
self.update_tests()
self.update_total_tests()
test_name = "{}-browbeat-{}-{}-iteration-{}".format(
time_stamp, scenario_name, concurrency, run)
if not result_dir:
self.logger.error(
@ -173,8 +168,9 @@ class Rally:
if self.config['connmon']['enabled']:
self.connmon.start_connmon()
self.run_scenario(
scenario_file, scenario, result_dir, test_name)
from_time,to_time = self.run_scenario(
scenario_file, scenario, result_dir, test_name,
benchmark['name'])
# Stop connmon at end of rally task
if self.config['connmon']['enabled']:
@ -184,26 +180,39 @@ class Rally:
result_dir, test_name)
except:
self.logger.error(
"Connmon Result data missing, Connmon never started")
"Connmon Result data missing, \
Connmon never started")
return False
self.connmon.connmon_graphs(
result_dir, test_name)
self.connmon.connmon_graphs(result_dir, test_name)
new_test_name = test_name.split('-')
new_test_name = new_test_name[3:]
new_test_name = "-".join(new_test_name)
# Find task id (if task succeeded in
# running)
task_id = self.get_task_id(test_name)
if task_id:
self.logger.info(
"Generating Rally HTML for task_id : {}".format(task_id))
"Generating Rally HTML for task_id : {}".
format(task_id))
self.gen_scenario_html(
[task_id], test_name)
self.gen_scenario_json(
task_id, test_name)
results[run].append(task_id)
self.update_pass_tests()
self.update_total_pass_tests()
self.get_time_dict(
to_time, from_time, benchmark['name'], new_test_name,
workload, "pass")
else:
self.logger.error(
"Cannot find task_id")
self.error_count += 1
self.logger.error("Cannot find task_id")
self.update_fail_tests()
self.update_total_fail_tests()
self.get_time_dict(
to_time, from_time, benchmark['name'], new_test_name,
workload, "fail")
for data in glob.glob("./{}*".format(test_name)):
shutil.move(data, result_dir)

lib/Shaker.py

@ -1,5 +1,6 @@
from Tools import Tools
from Grafana import Grafana
from WorkloadBase import WorkloadBase
import yaml
import logging
import datetime
@ -7,17 +8,17 @@ import os
import json
import time
class Shaker:
class Shaker(WorkloadBase):
def __init__(self, config):
self.logger = logging.getLogger('browbeat.Shaker')
self.config = config
self.tools = Tools(self.config)
self.grafana = Grafana(self.config)
self.fail_scenarios = 0
self.pass_scenarios = 0
self.scenarios_count = 0
self.error_count = 0
self.pass_count = 0
self.test_count = 0
self.scenario_count = 0
def shaker_checks(self):
cmd = "source /home/stack/overcloudrc; glance image-list | grep -w shaker-image"
@ -28,21 +29,27 @@ class Shaker:
self.logger.info("Shaker image is built, continuing")
def get_stats(self):
self.logger.info(
"Current number of scenarios executed: {}".format(self.scenarios_count))
self.logger.info(
"Current number of scenarios passed: {}".format(self.pass_scenarios))
self.logger.info(
"Current number of scenarios failed: {}".format(self.fail_scenarios))
self.logger.info("Current number of Shaker tests executed: {}".format(self.test_count))
self.logger.info("Current number of Shaker tests passed: {}".format(self.pass_count))
self.logger.info("Current number of Shaker tests failed: {}".format(self.error_count))
def final_stats(self, total):
self.logger.info("Total scenarios enabled by user: {}".format(total))
self.logger.info(
"Total number of scenarios executed: {}".format(self.scenarios_count))
self.logger.info(
"Total number of scenarios passed: {}".format(self.pass_scenarios))
self.logger.info(
"Total number of scenarios failed: {}".format(self.fail_scenarios))
self.logger.info("Total Shaker scenarios enabled by user: {}".format(total))
self.logger.info("Total number of Shaker tests executed: {}".format(self.test_count))
self.logger.info("Total number of Shaker tests passed: {}".format(self.pass_count))
self.logger.info("Total number of Shaker tests failed: {}".format(self.error_count))
def update_tests(self):
self.test_count += 1
def update_pass_tests(self):
self.pass_count += 1
def update_fail_tests(self):
self.error_count += 1
def update_scenarios(self):
self.scenario_count += 1
def set_scenario(self, scenario):
fname = scenario['file']
@ -88,27 +95,50 @@ class Shaker:
uuidlist.append(key)
return uuidlist
def result_check(self, result_dir, test_name, scenario):
outputfile = os.path.join(result_dir, test_name + "." + "json")
def result_check(self, result_dir, test_name, scenario, to_time, from_time):
outputfile = os.path.join(result_dir, test_name + "." + "json")
error = False
with open(outputfile) as data_file:
data = json.load(data_file)
uuidlist = self.get_uuidlist(data)
workload = self.__class__.__name__
new_test_name = test_name.split('-')
new_test_name = new_test_name[3:]
new_test_name = '-'.join(new_test_name)
for uuid in uuidlist:
if data['records'][uuid]['status'] != "ok":
error = True
if error:
self.logger.error("Failed scenario: {}".format(scenario['name']))
self.logger.error("saved log to: {}.log".format(
os.path.join(result_dir, test_name)))
self.fail_scenarios += 1
self.logger.error("Failed Test: {}".format(scenario['name']))
self.logger.error("saved log to: {}.log".format(os.path.join(result_dir, test_name)))
self.update_fail_tests()
self.update_total_fail_tests()
self.get_time_dict(
to_time,
from_time,
scenario['name'],
new_test_name,
workload,
"fail")
else:
self.logger.info("Completed Scenario: {}".format(scenario['name']))
self.logger.info("Saved report to: {}".format(
os.path.join(result_dir, test_name + "." + "html")))
self.logger.info("saved log to: {}.log".format(
os.path.join(result_dir, test_name)))
self.pass_scenarios += 1
self.logger.info("Completed Test: {}".format(scenario['name']))
self.logger.info("Saved report to: {}".format(os.path.join(result_dir, test_name + "." + "html")))
self.logger.info("saved log to: {}.log".format(os.path.join(result_dir, test_name)))
self.update_pass_tests()
self.update_total_pass_tests()
self.get_time_dict(
to_time,
from_time,
scenario['name'],
new_test_name,
workload,
"pass")
def run_scenario(self, scenario, result_dir, test_name):
filename = scenario['file']
@ -120,18 +150,29 @@ class Shaker:
timeout = self.config['shaker']['join_timeout']
cmd_1 = (
"source {}/bin/activate; source /home/stack/overcloudrc").format(venv)
cmd_2 = ("shaker --server-endpoint {0}:{1} --flavor-name {2} --scenario {3}"
" --os-region-name {7} --agent-join-timeout {6}"
" --report {4}/{5}.html --output {4}/{5}.json"
" --debug > {4}/{5}.log 2>&1").format(server_endpoint,
port_no, flavor, filename, result_dir, test_name, timeout, shaker_region)
cmd_2 = (
"shaker --server-endpoint {0}:{1} --flavor-name {2} --scenario {3}"
" --os-region-name {7} --agent-join-timeout {6}"
" --report {4}/{5}.html --output {4}/{5}.json"
" --debug > {4}/{5}.log 2>&1").format(
server_endpoint,
port_no,
flavor,
filename,
result_dir,
test_name,
timeout,
shaker_region)
cmd = ("{}; {}").format(cmd_1, cmd_2)
from_ts = int(time.time() * 1000)
if 'sleep_before' in self.config['shaker']:
time.sleep(self.config['shaker']['sleep_before'])
from_time = time.time()
self.tools.run_cmd(cmd)
self.scenarios_count += 1
self.result_check(result_dir, test_name, scenario)
to_time = time.time()
self.update_tests()
self.update_total_tests()
self.result_check(result_dir, test_name, scenario, to_time, from_time)
if 'sleep_after' in self.config['shaker']:
time.sleep(self.config['shaker']['sleep_after'])
to_ts = int(time.time() * 1000)
@ -148,19 +189,20 @@ class Shaker:
scenarios = self.config.get('shaker')['scenarios']
self.shaker_checks()
scen_length = len(scenarios)
scen_enabled = 0
if scen_length > 0:
for scenario in scenarios:
if scenario['enabled']:
scen_enabled += 1
self.update_scenarios()
self.update_total_scenarios()
self.logger.info("Scenario: {}".format(scenario['name']))
self.set_scenario(scenario)
self.logger.debug("Set Scenario File: {}".format(
scenario['file']))
result_dir = self.tools.create_results_dir(
self.config['browbeat'][
'results'], time_stamp, "shaker",
self.config['browbeat']['results'], time_stamp, "shaker",
scenario['name'])
workload = self.__class__.__name__
self.workload_logger(result_dir, workload)
time_stamp1 = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
test_name = "{}-browbeat-{}-{}".format(time_stamp1,
"shaker", scenario['name'])
@ -168,8 +210,9 @@ class Shaker:
self.get_stats()
else:
self.logger.info(
"Skipping {} as scenario enabled: false".format(scenario['name']))
self.final_stats(scen_enabled)
"Skipping {} as scenario enabled: false".format(
scenario['name']))
self.final_stats(self.scenario_count)
else:
self.logger.error(
"Configuration file contains no shaker scenarios")

lib/WorkloadBase.py (new file, 75 lines)

@ -0,0 +1,75 @@
from abc import ABCMeta, abstractmethod
import os
import logging
import yaml
import collections
class WorkloadBase:
__metaclass__ = ABCMeta
success = 0
failure = 0
total_tests = 0
total_scenarios = 0
browbeat = {}
@abstractmethod
def update_scenarios(self):
pass
@abstractmethod
def update_tests(self):
pass
@abstractmethod
def update_pass_tests(self):
pass
@abstractmethod
def update_fail_tests(self):
pass
def update_total_scenarios(self):
WorkloadBase.total_scenarios += 1
def update_total_tests(self):
WorkloadBase.total_tests += 1
def update_total_pass_tests(self):
WorkloadBase.success += 1
def update_total_fail_tests(self):
WorkloadBase.failure += 1
def workload_logger(self, result_dir, workload):
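        """Attach a browbeat-<workload>-run.log debug file handler in the
        top-level results/<time_stamp> directory; added only once, when the
        log file does not exist yet."""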
base = result_dir.split('/')
if not os.path.isfile("{}/{}/browbeat-{}-run.log".format(base[0], base[1], workload)):
file = logging.FileHandler(
"{}/{}/browbeat-{}-run.log".format(base[0], base[1], workload))
file.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)5s - %(message)s')
file.setFormatter(formatter)
self.logger.addHandler(file)
return None
def get_time_dict(self, to_time, from_time, benchmark, test_name, workload, status):
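        """Record one test's elapsed time and pass/fail status under
        browbeat[workload][benchmark]['tests']; print_report() later dumps
        this structure as the YAML report card."""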
time_diff = (to_time - from_time)
if workload not in WorkloadBase.browbeat:
WorkloadBase.browbeat[workload] = {}
if benchmark not in WorkloadBase.browbeat[workload]:
WorkloadBase.browbeat[workload][benchmark] = {}
if 'tests' not in WorkloadBase.browbeat[workload][benchmark]:
WorkloadBase.browbeat[workload][benchmark]['tests'] = []
WorkloadBase.browbeat[workload][benchmark]['tests'].append(
{'Test name': test_name, 'Time': time_diff, 'status': status})
@staticmethod
def print_report(result_dir, time_stamp):
with open(os.path.join(result_dir, time_stamp + '.' + 'report'), 'w') as yaml_file:
yaml_file.write("Browbeat Report Card\n")
yaml_file.write(yaml.dump(WorkloadBase.browbeat, default_flow_style=False))
@staticmethod
def print_summary():
print("Total scenarios executed:{}".format(WorkloadBase.total_scenarios))
print("Total tests executed:{}".format(WorkloadBase.total_tests))
print("Total tests passed:{}".format(WorkloadBase.success))
print("Total tests failed:{}".format(WorkloadBase.failure))
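
For reference, here is a sketch of the output these two methods produce. The
workloads, benchmark names, test names, and timings are purely illustrative;
only the structure follows get_time_dict, print_report, and print_summary
above. The first block is the contents of the <result_dir>/<time_stamp>.report
file; the last four lines are printed to stdout.

Browbeat Report Card
Rally:
  authenticate:
    tests:
    - Test name: authenticate-16-iteration-0
      Time: 87.3
      status: pass
Shaker:
  l2:
    tests:
    - Test name: shaker-l2
      Time: 412.9
      status: fail

Total scenarios executed:2
Total tests executed:2
Total tests passed:1
Total tests failed:1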