Merge "Adding elastic methods to query results"

This commit is contained in:
Zuul 2018-07-19 14:53:49 +00:00 committed by Gerrit Code Review
commit 005ee423ab
5 changed files with 496 additions and 48 deletions

View File

@@ -86,7 +86,13 @@ def main():
'-p', '--postprocess', dest="path", help="Path to process, ie results/20171130-191420/")
parser.add_argument(
'-c', '--compare', help="Compare metadata", dest="compare", choices=['software-metadata'])
parser.add_argument(
'-q', '--query', help="Query Rally Results", dest="query", action='store_true')
parser.add_argument('-u', '--uuid', help="UUIDs to pass", dest="uuids", nargs=2)
parser.add_argument('-g', '--get_uuid', help="Get UUIDs of results stored in Elasticsearch", dest="get_uuids",
action='store_true')
parser.add_argument('--combined', help="Aggregate over times and \
concurrency, syntax use --combined <anything>", dest="combined")
_cli_args = parser.parse_args()
_logger = logging.getLogger('browbeat')
@@ -112,10 +118,23 @@ def main():
_config = load_browbeat_config(_cli_args.setup)
tools = browbeat.tools.Tools(_config)
if _cli_args.get_uuids:
es = browbeat.elastic.Elastic(_config, "BrowbeatCLI")
data = es.get_results("browbeat-*")
exit(0)
# Query Results
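# get_result_data pulls the raw result documents for the two UUIDs,
# summarize_results collapses them into 95th-percentile values, and
# compare_rally_results prints the side-by-side comparison table.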
if _cli_args.query:
es = browbeat.elastic.Elastic(_config, "BrowbeatCLI")
data, metadata = es.get_result_data("browbeat-rally-*", _cli_args.uuids)
summary = es.summarize_results(data, bool(_cli_args.combined))
es.compare_rally_results(summary, _cli_args.uuids, bool(_cli_args.combined), metadata)
exit(0)
# Browbeat compare
if _cli_args.compare == "software-metadata":
es = browbeat.elastic.Elastic(_config, "BrowbeatCLI")
es.compare_metadata("_all", 'controller', _cli_args.uuids)
es.compare_metadata("browbeat-rally-*", 'controller', _cli_args.uuids)
exit(0)
if _cli_args.compare:

View File

@@ -37,7 +37,7 @@ def load_browbeat_config(path):
# Validate per-workloads
for workload in browbeat_config["workloads"]:
_validate_yaml(workload["type"], workload)
_logger.info("Workload {} validated as {}".format(workload["name"], workload["type"]))
_logger.debug("Workload {} validated as {}".format(workload["name"], workload["type"]))
return browbeat_config

View File

@@ -14,6 +14,7 @@ from collections import deque
import datetime
import json
import logging
import numpy
import os
import re
import sys
@@ -28,7 +29,13 @@ browbeat_uuid = uuid.uuid4()
class Elastic(object):
- def __init__(self, config, workload, tool="browbeat", cache_size=1000, max_cache_time=10):
+ def __init__(
+     self,
+     config,
+     workload,
+     tool="browbeat",
+     cache_size=1000,
+     max_cache_time=10):
self.config = config
self.cache = deque()
self.max_cache_size = cache_size
@@ -102,16 +109,17 @@ class Elastic(object):
retry = 2
for i in range(retry):
try:
- to_upload = helpers.parallel_bulk(self.es,
-                                   self.cache_insertable_iterable())
+ to_upload = helpers.parallel_bulk(
+     self.es, self.cache_insertable_iterable())
counter = 0
num_items = len(self.cache)
for item in to_upload:
self.logger.debug("{} of {} Elastic objects uploaded".format(num_items,
counter))
self.logger.debug(
"{} of {} Elastic objects uploaded".format(
num_items, counter))
counter = counter + 1
output = "Pushed {} items to Elasticsearch to index {}".format(num_items,
self.index)
output = "Pushed {} items to Elasticsearch to index {}".format(
num_items, self.index)
output += " and browbeat UUID {}".format(str(browbeat_uuid))
self.logger.info(output)
self.cache = deque()
@@ -124,8 +132,11 @@ class Elastic(object):
self.logger.error("Exception: {}".format(Err))
time.sleep(10)
if i == (retry - 1):
self.logger.error("Pushing Data to Elasticsearch failed in spite of retry,"
" dumping JSON for {} cached items".format(len(self.cache)))
self.logger.error(
"Pushing Data to Elasticsearch failed in spite of retry,"
" dumping JSON for {} cached items".format(
len(
self.cache)))
for item in self.cache:
filename = item['test_name'] + '-' + item['identifier']
filename += '-elastic' + '.' + 'json'
@@ -138,8 +149,9 @@ class Elastic(object):
indent=4,
sort_keys=True)
self.logger.info("Saved Elasticsearch consumable result JSON to {}".
format(elastic_file))
self.logger.info(
"Saved Elasticsearch consumable result JSON to {}". format(
elastic_file))
self.cache = deque()
self.last_upload = datetime.datetime.utcnow()
return False
@@ -161,6 +173,214 @@ class Elastic(object):
self.logger.error("UUID {} wasn't found".format(browbeat_uuid))
return False
"""
summarize_results
this function will iterate through all the data points, combining the iteration
and rerun data points into a single 95%tile.
"""
def summarize_results(self, data, combined):
summary = {}
if combined:
if len(data) > 1:
for result in data:
if result['browbeat_uuid'] not in summary:
summary[result['browbeat_uuid']] = {}
if result['scenario'] not in summary[result['browbeat_uuid']]:
summary[result['browbeat_uuid']][result['scenario']] = {}
if result['action'] not in summary[
result['browbeat_uuid']][
result['scenario']]:
summary[result['browbeat_uuid']][
result['scenario']][result['action']] = []
summary[result['browbeat_uuid']][result['scenario']][
result['action']].append(result['performance'])
else:
if len(data) > 1:
for result in data:
if result['browbeat_uuid'] not in summary:
summary[result['browbeat_uuid']] = {}
if result['scenario'] not in summary[result['browbeat_uuid']]:
summary[result['browbeat_uuid']][result['scenario']] = {}
if result['time'] not in summary[result['browbeat_uuid']][result['scenario']]:
summary[result['browbeat_uuid']][result['scenario']][result['time']] = {}
if result['concurrency'] not in summary[result['browbeat_uuid']][
result['scenario']][result['time']]:
summary[result['browbeat_uuid']][result['scenario']][result['time']][
result['concurrency']] = {}
if result['action'] not in summary[
result['browbeat_uuid']][
result['scenario']][
result['time']][
result['concurrency']]:
summary[result['browbeat_uuid']][
result['scenario']][result['time']][result['concurrency']][
result['action']] = []
summary[result['browbeat_uuid']][result['scenario']][result['time']][
result['concurrency']][
result['action']].append(result['performance'])
if len(summary) > 0 and combined:
for uuids in summary:
for scenario in summary[uuids]:
for action in summary[uuids][scenario]:
summary[uuids][scenario][action] = numpy.percentile(
summary[uuids][scenario][action], 95)
elif len(summary) > 0 and not combined:
for uuids in summary:
for scenario in summary[uuids]:
for times in summary[uuids][scenario]:
for concurrency in summary[uuids][scenario][times]:
for action in summary[uuids][scenario][times][concurrency]:
summary[uuids][scenario][times][
concurrency][action] = numpy.percentile(
summary[uuids][scenario][times][concurrency][action], 95)
else:
return False
return summary
"""
"""
def compare_rally_results(self, data, uuids, combined, metadata=None):
missing = []
if len(data) < 2:
self.logger.error("Not enough data to compare")
return False
if (uuids[0] not in data) or (uuids[1] not in data):
self.logger.error("Not able to find UUID in data set")
return False
if combined:
print "+{}+".format("-" * (33 + 44 + 10 + 10 + 23))
print "{0:33} | {1:40} | {2:10} | {3:10} | {4:13} ".format("Scenario",
"Action",
uuids[0][-8:],
uuids[1][-8:],
"% Difference")
print "+{}+".format("-" * (33 + 44 + 10 + 10 + 23))
for scenario in data[uuids[0]]:
if scenario not in data[uuids[1]]:
missing.append(scenario)
continue
else:
for action in data[uuids[0]][scenario]:
dset = [data[uuids[0]][scenario][action],
data[uuids[1]][scenario][action]]
perf0 = data[uuids[0]][scenario][action]
perf1 = data[uuids[1]][scenario][action]
diff = numpy.diff(dset)[0] / numpy.abs(dset[:-1])[0] * 100
print "{0:33} | {1:40} | {2:10.3f} | {3:10.3f} | {4:13.3f}".format(scenario,
action,
perf0,
perf1,
diff)
print "+{}+".format("-" * (33 + 44 + 10 + 10 + 26))
else:
print "+{}+".format("-" * (33 + 44 + 15 + 15 + 10 + 10 + 26))
print "{0:33} | {1:40} | {2:15} | {3:15} | {4:10} | {5:10} | {6:23}".format(
"Scenario",
"Action",
"times",
"concurrency",
uuids[0][-8:],
uuids[1][-8:],
"% Difference")
print "+{}+".format("-" * (33 + 44 + 15 + 15 + 10 + 10 + 26))
for scenario in data[uuids[0]]:
if scenario not in data[uuids[1]]:
missing.append(scenario)
continue
else:
for times in data[uuids[0]][scenario]:
if times not in data[uuids[1]][scenario]:
continue
for concurrency in data[uuids[0]][scenario][times]:
if concurrency not in data[uuids[1]][scenario][times]:
# TODO: report concurrency values missing from the second run
continue
else:
for action in data[uuids[0]][scenario][times][concurrency]:
if action not in data[uuids[1]][scenario][times][concurrency]:
# TODO: report actions missing from the second run
continue
else:
dset = [data[uuids[0]][scenario][times][
concurrency][action],
data[uuids[1]][scenario][times][
concurrency][action]]
perf0 = data[uuids[0]][scenario][times][
concurrency][action]
perf1 = data[uuids[1]][scenario][times][
concurrency][action]
diff = numpy.diff(dset)[0] / numpy.abs(dset[:-1])[0] * 100
output = "{0:33} | {1:40} | {2:15} | {3:15} "
output += "| {4:10.3f} | {5:10.3f} | {6:13.3f}"
print output.format(scenario,
action,
times,
concurrency,
perf0,
perf1,
diff)
print "+{}+".format("-" * (33 + 44 + 15 + 15 + 10 + 10 + 26))
if metadata:
print "+{}+".format("-" * (40 + 20 + 20 + 33))
print "{0:40} | {1:20} | {2:20} | {3:20}".format("UUID", "Version", "Build",
"Number of runs")
print "+{}+".format("-" * (40 + 20 + 20 + 33))
for uuids in metadata:
print "{0:40} | {1:20} | {2:20} | {3:20}".format(uuids,
metadata[uuids][
'version'],
metadata[uuids][
'build'],
metadata[uuids]['rerun'])
print "+{}+".format("-" * (40 + 20 + 20 + 33))
if len(missing) > 0:
print "+-------------------------------------+"
print "Missing Scenarios to compare results:"
print "+-------------------------------------+"
for scenario in missing:
print " - {}".format(scenario)
"""
returns a list of dicts that contain 95th-percentile performance data.
"""
def get_result_data(self, index, browbeat_uuid):
results = []
data = []
metadata = {}
if len(browbeat_uuid) < 1:
self.logger.error("No UUIDs supplied to calculate values for")
return [], {}
for uuids in browbeat_uuid:
results.append(self.query_uuid(index, uuids))
for result in results:
for value in result:
if value['_source']['browbeat_uuid'] not in metadata:
metadata[value['_source']['browbeat_uuid']] = {}
if 'version' in value['_source']:
metadata[
value['_source']['browbeat_uuid']] = {
'version': value['_source']['version']['osp_series'],
'build': value['_source']['version']['build'],
'rerun': value['_source']['browbeat_config']['browbeat']['rerun']}
data.append({
'browbeat_uuid': value['_source']['browbeat_uuid'],
'scenario': value['_source']['scenario'],
'action': value['_source']['action'],
'time': value['_source']['rally_setup']['kw']['runner']['times'],
'concurrency': value['_source']['rally_setup']['kw']['runner'][
'concurrency'],
'iteration': value['_source']['iteration'],
'rerun': value['_source']['browbeat_rerun'],
'performance': numpy.percentile(value['_source']['raw'], 95)
})
if len(data) < 1:
return [], {}
else:
return data, metadata
def get_version_metadata(self, index, browbeat_uuid):
version = {}
results = self.query_uuid(index, browbeat_uuid)
@@ -175,7 +395,6 @@ class Elastic(object):
Currently this function will only compare two uuids. I (rook) am not convinced it is worth
the effort to engineer anything > 2.
"""
def compare_metadata(self, index, role, uuids):
meta = []
for browbeat_uuid in uuids:
@@ -189,14 +408,6 @@ class Elastic(object):
else:
return False
version_metadata = self.get_version_metadata(index, browbeat_uuid)
- if version_metadata:
-     self.logger.info(
-         "\nUUID: {}\nVersion: {}\nBuild: {}".format(
-             browbeat_uuid,
-             version_metadata['osp_series'],
-             version_metadata['build']))
ignore = [
"connection",
"admin_url",
@@ -234,10 +445,15 @@ class Elastic(object):
"telemetry_secret",
"heat_metadata_server_url",
"heat_waitcondition_server_url",
"catalog_info",
"gather_conf_path",
"exec_dirs",
"transport_url"]
if len(meta) < 2:
self.logger.error("Unable to compare data-sets")
return False
differences = []
for host in meta[0]:
if host not in meta[1]:
self.logger.error("Deployment differs: "
@@ -246,7 +462,7 @@ class Elastic(object):
for service in meta[0][host]:
for options in meta[0][host][service].keys():
if options not in meta[1][host][service]:
- self.logger.error(
+ self.logger.debug(
"UUID {} "
"- Missing Option : "
"Host [{}] Service [{}] {}".format(
@@ -261,31 +477,97 @@ class Elastic(object):
new_value = meta[1][host][
service][options][key]
if value != new_value:
- self.logger.info(
-     "Difference found : "
-     "Host [{}] Service [{}] Section {} {} [{}]\n"
-     "New Value: {}\nOld Value: {}".format(
-         host,
-         service,
-         options,
-         key,
-         meta[0][host][service][options][key],
-         value,
-         new_value))
+ differences.append("{}|{}|{}|{}|{}|{}".format(
+     host,
+     service,
+     options,
+     key,
+     value,
+     new_value))
else:
self.logger.error(
"UUID {} - Missing Value : "
"Host [{}] Service [{}] {} [{}]".format(
uuids[1], host, service, options, key))
print "+{}+".format("-" * (33 + 44 + 15 + 15 + 30 + 10 + 6))
print "{0:25} | {1:15} | {2:30} | {3:23} | {4:40} | {5:40} ".format(
"Host",
"Service",
"Option",
"Key",
"Old Value",
"New Value")
print "+{}+".format("-" * (33 + 44 + 15 + 15 + 30 + 10 + 6))
for difference in differences:
value = difference.split("|")
print "{0:25} | {1:15} | {2:30} | {3:23} | {4:40} | {5:40} ".format(value[0],
value[1],
value[2],
value[3],
value[4],
value[5])
print "+{}+".format("-" * (33 + 44 + 15 + 15 + 30 + 10 + 6))
def scroll(self, search, sid, scroll_size):
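# Walk an Elasticsearch scroll cursor: collect the hits from the current
# page, then keep requesting the next page with es.scroll() until an empty
# page signals the end of the result set.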
data = []
if scroll_size < 1:
self.logger.info("Nothing to scroll through")
return data
while (scroll_size > 0):
self.logger.info("Scrolling through Browbeat {} documents...".format(scroll_size))
for x in range(0, len(search['hits']['hits'])):
data.append(search['hits']['hits'][x])
search = self.es.scroll(scroll_id=sid, scroll='2m')
sid = search['_scroll_id']
scroll_size = len(search['hits']['hits'])
return data
"""
get_errors - was intended to capture all the errors across the entire
index; however, this is quite expensive, and it might be quicker to
only look for errors for specific browbeat_uuids
"""
def get_errors(self, index, browbeat_id):
self.logger.info("Making query against {}".format(index))
page = self.es.search(
index=index,
doc_type='error',
scroll='2m',
size=5000,
body={"query": {"browbeat_uuid": browbeat_id}})
sid = page['_scroll_id']
scroll_size = page['hits']['total']
return self.scroll(page, sid, scroll_size)
def get_results(self, index, browbeat_uuid):
body = {
"query": {
"bool": {
"should": [
{
"term": {
"browbeat_uuid": browbeat_uuid
}}]}}}
self.logger.info("Making query against {}".format(index))
page = self.es.search(
index=index,
doc_type='result',
scroll='1m',
size=1000,
body=body,
request_timeout=240)
sid = page['_scroll_id']
scroll_size = page['hits']['total']
self.logger.info("Searching through ES for uuid: {}".format(browbeat_uuid))
return self.scroll(page, sid, scroll_size)
def query_uuid(self, index, browbeat_uuid):
body = {'query': {"match": {"browbeat_uuid": {
"query": browbeat_uuid, "type": "phrase"}}}}
results = self.es.search(index=index, doc_type='result', body=body)
if len(results['hits']['hits']) > 0:
return results['hits']['hits']
results = self.get_results(index, browbeat_uuid)
if len(results) > 0:
return results
else:
self.logger.info("No results found for uuid : {}".format(browbeat_uuid))
return False
def index_result(self,

View File

@@ -254,13 +254,159 @@ Real world use-case, we had two builds in our CI that used the exact same DLRN h
same DLRN hash, the only difference could be how things were configured. Using this new code, we could quickly identify
the difference -- TripleO enabled l3_ha.
Below is an example output of comparing metadata:
::
[rocketship:browbeat] jtaleric:browbeat$ python browbeat.py --compare software-metadata --uuid "3fc2f149-7091-4e16-855a-60738849af17" "6738eed7-c8dd-4747-abde-47c996975a57"
2017-05-25 02:34:47,230 - browbeat.Tools - INFO - Validating the configuration file passed by the user
2017-05-25 02:34:47,311 - browbeat.Tools - INFO - Validation successful
2017-05-25 02:34:47,311 - browbeat.Elastic - INFO - Querying Elastic : index [_all] : role [controller] : uuid [3fc2f149-7091-4e16-855a-60738849af17]
2017-05-25 02:34:55,684 - browbeat.Elastic - INFO - Querying Elastic : index [_all] : role [controller] : uuid [6738eed7-c8dd-4747-abde-47c996975a57]
2017-05-25 02:35:01,165 - browbeat.Elastic - INFO - Difference found : Host [overcloud-controller-2] Service [neutron] l3_ha [False]
2017-05-25 02:35:01,168 - browbeat.Elastic - INFO - Difference found : Host [overcloud-controller-1] Service [neutron] l3_ha [False]
2017-05-25 02:35:01,172 - browbeat.Elastic - INFO - Difference found : Host [overcloud-controller-0] Service [neutron] l3_ha [False]
+-------------------------------------------------------------------------------------------------------------------------------------+
Host | Service | Option | Key | Old Value | New Value
+-------------------------------------------------------------------------------------------------------------------------------------+
overcloud-controller-2 | nova | conductor | workers | 0 | 12
overcloud-controller-2 | nova | DEFAULT | metadata_workers | 0 | 12
overcloud-controller-2 | nova | DEFAULT | my_ip | 172.16.0.23 | 172.16.0.16
overcloud-controller-2 | nova | DEFAULT | enabled_apis | osapi_compute,metadata | metadata
overcloud-controller-2 | nova | DEFAULT | osapi_compute_workers | 0 | 12
overcloud-controller-2 | nova | neutron | region_name | RegionOne | regionOne
overcloud-controller-2 | neutron-plugin | ovs | local_ip | 172.17.0.11 | 172.17.0.16
overcloud-controller-2 | neutron-plugin | securitygroup | firewall_driver | openvswitch | iptables_hybrid
overcloud-controller-2 | heat | DEFAULT | num_engine_workers | 0 | 16
overcloud-controller-2 | keystone | admin_workers | processes | 32 |
overcloud-controller-2 | keystone | admin_workers | threads | 1 |
overcloud-controller-2 | keystone | eventlet_server | admin_workers | 8 | 12
overcloud-controller-2 | keystone | eventlet_server | public_workers | 8 | 12
overcloud-controller-2 | keystone | oslo_messaging_notifications | driver | messaging | messagingv2
overcloud-controller-2 | keystone | main_workers | processes | 32 |
overcloud-controller-2 | keystone | main_workers | threads | 1 |
overcloud-controller-2 | keystone | token | provider | uuid | fernet
overcloud-controller-2 | rabbitmq | DEFAULT | file | 65436 |
overcloud-controller-2 | mysql | DEFAULT | max | 4096 |
overcloud-controller-2 | cinder | DEFAULT | exec_dirs | /sbin,/usr/sbin,/bin,/usr/bin | /sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/usr/lpp/mmfs/bin
overcloud-controller-2 | cinder | DEFAULT | osapi_volume_workers | 32 | 12
overcloud-controller-2 | glance | DEFAULT | bind_port | 9191 | 9292
overcloud-controller-2 | glance | DEFAULT | workers | 32 | 12
overcloud-controller-2 | glance | DEFAULT | log_file | /var/log/glance/registry.log | /var/log/glance/cache.log
overcloud-controller-2 | glance | ref1 | auth_version | 2 | 3
overcloud-controller-2 | glance | glance_store | stores | glance.store.http.Store,glance.store.swift.Store | http,swift
overcloud-controller-2 | glance | glance_store | os_region_name | RegionOne | regionOne
overcloud-controller-2 | gnocchi | metricd | workers | 8 | 12
overcloud-controller-2 | gnocchi | storage | swift_auth_version | 2 | 3
overcloud-controller-2 | neutron | DEFAULT | global_physnet_mtu | 1496 | 1500
overcloud-controller-2 | neutron | DEFAULT | rpc_workers | 32 | 12
overcloud-controller-2 | neutron | DEFAULT | api_workers | 32 | 12
overcloud-controller-1 | nova | conductor | workers | 0 | 12
overcloud-controller-1 | nova | DEFAULT | metadata_workers | 0 | 12
overcloud-controller-1 | nova | DEFAULT | my_ip | 172.16.0.11 | 172.16.0.23
overcloud-controller-1 | nova | DEFAULT | enabled_apis | osapi_compute,metadata | metadata
overcloud-controller-1 | nova | DEFAULT | osapi_compute_workers | 0 | 12
overcloud-controller-1 | nova | neutron | region_name | RegionOne | regionOne
overcloud-controller-1 | neutron-plugin | ovs | local_ip | 172.17.0.15 | 172.17.0.11
overcloud-controller-1 | neutron-plugin | securitygroup | firewall_driver | openvswitch | iptables_hybrid
overcloud-controller-1 | heat | DEFAULT | num_engine_workers | 0 | 16
overcloud-controller-1 | keystone | admin_workers | processes | 32 |
overcloud-controller-1 | keystone | admin_workers | threads | 1 |
overcloud-controller-1 | keystone | eventlet_server | admin_workers | 8 | 12
overcloud-controller-1 | keystone | eventlet_server | public_workers | 8 | 12
overcloud-controller-1 | keystone | oslo_messaging_notifications | driver | messaging | messagingv2
overcloud-controller-1 | keystone | main_workers | processes | 32 |
overcloud-controller-1 | keystone | main_workers | threads | 1 |
overcloud-controller-1 | keystone | token | provider | uuid | fernet
overcloud-controller-1 | rabbitmq | DEFAULT | file | 65436 |
overcloud-controller-1 | mysql | DEFAULT | max | 4096 |
overcloud-controller-1 | cinder | DEFAULT | exec_dirs | /sbin,/usr/sbin,/bin,/usr/bin | /sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/usr/lpp/mmfs/bin
overcloud-controller-1 | cinder | DEFAULT | osapi_volume_workers | 32 | 12
overcloud-controller-1 | glance | DEFAULT | bind_port | 9191 | 9292
overcloud-controller-1 | glance | DEFAULT | workers | 32 | 12
overcloud-controller-1 | glance | DEFAULT | log_file | /var/log/glance/registry.log | /var/log/glance/cache.log
overcloud-controller-1 | glance | ref1 | auth_version | 2 | 3
overcloud-controller-1 | glance | glance_store | stores | glance.store.http.Store,glance.store.swift.Store | http,swift
overcloud-controller-1 | glance | glance_store | os_region_name | RegionOne | regionOne
overcloud-controller-1 | gnocchi | metricd | workers | 8 | 12
overcloud-controller-1 | gnocchi | storage | swift_auth_version | 2 | 3
overcloud-controller-1 | neutron | DEFAULT | global_physnet_mtu | 1496 | 1500
overcloud-controller-1 | neutron | DEFAULT | rpc_workers | 32 | 12
overcloud-controller-1 | neutron | DEFAULT | api_workers | 32 | 12
overcloud-controller-0 | nova | conductor | workers | 0 | 12
overcloud-controller-0 | nova | DEFAULT | metadata_workers | 0 | 12
overcloud-controller-0 | nova | DEFAULT | my_ip | 172.16.0.15 | 172.16.0.10
overcloud-controller-0 | nova | DEFAULT | enabled_apis | osapi_compute,metadata | metadata
overcloud-controller-0 | nova | DEFAULT | osapi_compute_workers | 0 | 12
overcloud-controller-0 | nova | neutron | region_name | RegionOne | regionOne
overcloud-controller-0 | neutron-plugin | ovs | local_ip | 172.17.0.10 | 172.17.0.18
overcloud-controller-0 | neutron-plugin | securitygroup | firewall_driver | openvswitch | iptables_hybrid
overcloud-controller-0 | heat | DEFAULT | num_engine_workers | 0 | 16
overcloud-controller-0 | keystone | admin_workers | processes | 32 |
overcloud-controller-0 | keystone | admin_workers | threads | 1 |
overcloud-controller-0 | keystone | eventlet_server | admin_workers | 8 | 12
overcloud-controller-0 | keystone | eventlet_server | public_workers | 8 | 12
overcloud-controller-0 | keystone | oslo_messaging_notifications | driver | messaging | messagingv2
overcloud-controller-0 | keystone | main_workers | processes | 32 |
overcloud-controller-0 | keystone | main_workers | threads | 1 |
overcloud-controller-0 | keystone | token | provider | uuid | fernet
overcloud-controller-0 | rabbitmq | DEFAULT | file | 65436 |
overcloud-controller-0 | mysql | DEFAULT | max | 4096 |
overcloud-controller-0 | cinder | DEFAULT | exec_dirs | /sbin,/usr/sbin,/bin,/usr/bin | /sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/usr/lpp/mmfs/bin
overcloud-controller-0 | cinder | DEFAULT | osapi_volume_workers | 32 | 12
overcloud-controller-0 | glance | DEFAULT | bind_port | 9191 | 9292
overcloud-controller-0 | glance | DEFAULT | workers | 32 | 12
overcloud-controller-0 | glance | DEFAULT | log_file | /var/log/glance/registry.log | /var/log/glance/cache.log
overcloud-controller-0 | glance | ref1 | auth_version | 2 | 3
overcloud-controller-0 | glance | glance_store | stores | glance.store.http.Store,glance.store.swift.Store | http,swift
overcloud-controller-0 | glance | glance_store | os_region_name | RegionOne | regionOne
overcloud-controller-0 | gnocchi | metricd | workers | 8 | 12
overcloud-controller-0 | gnocchi | storage | swift_auth_version | 2 | 3
overcloud-controller-0 | neutron | DEFAULT | global_physnet_mtu | 1496 | 1500
overcloud-controller-0 | neutron | DEFAULT | rpc_workers | 32 | 12
overcloud-controller-0 | neutron | DEFAULT | api_workers | 32 | 12
+-------------------------------------------------------------------------------------------------------------------------------------+
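
Internally the comparison is a walk over two nested host/service/option/key
dictionaries, recording every key whose value differs between the runs. A
minimal sketch of the idea (``old`` and ``new`` are hypothetical stand-ins for
the per-UUID metadata documents, not Browbeat's exact structures)::

    def diff_metadata(old, new):
        """Yield (host, service, option, key, old_value, new_value)."""
        for host in old:
            if host not in new:
                continue  # host missing from the second run; deployments differ
            for service in old[host]:
                for option in old[host][service]:
                    if option not in new[host][service]:
                        continue  # option missing from the second run
                    for key, old_value in old[host][service][option].items():
                        new_value = new[host][service][option].get(key)
                        if new_value is not None and old_value != new_value:
                            yield host, service, option, key, old_value, new_value
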
Compare performance of two different runs
------------------------------------------
Using the CLI, the user can determine run-to-run performance differences. This is a good tool for spot-checking the performance of an OpenStack
release.
To use:
::
$ python browbeat.py -q -u browbeat_uuid1 browbeat_uuid2
Example output from running this CLI command
::
python browbeat.py -q -u 6b50b6f7-acae-445a-ac53-78200b5ba58c 938dc451-d881-4f28-a6cb-ad502b177f3b
2018-07-13 14:38:49,516 - browbeat.config - INFO - Config bs.yaml validated
2018-07-13 14:38:49,646 - browbeat.elastic - INFO - Making query against browbeat-rally-*
2018-07-13 14:38:54,292 - browbeat.elastic - INFO - Searching through ES for uuid: 6b50b6f7-acae-445a-ac53-78200b5ba58c
2018-07-13 14:38:54,293 - browbeat.elastic - INFO - Scrolling through Browbeat 336 documents...
2018-07-13 14:38:54,432 - browbeat.elastic - INFO - Making query against browbeat-rally-*
2018-07-13 14:38:54,983 - browbeat.elastic - INFO - Searching through ES for uuid: 938dc451-d881-4f28-a6cb-ad502b177f3b
2018-07-13 14:38:54,983 - browbeat.elastic - INFO - Scrolling through Browbeat 22 documents...
+---------------------------------------------------------------------------------------------------------------------------------------------------------+
Scenario | Action | concurrency | times | 0b5ba58c | 2b177f3b | % Difference
+---------------------------------------------------------------------------------------------------------------------------------------------------------+
create-list-router | neutron.create_router | 500 | 32 | 19.940 | 15.656 | -21.483
create-list-router | neutron.list_routers | 500 | 32 | 2.588 | 2.086 | -19.410
create-list-router | neutron.create_network | 500 | 32 | 3.294 | 2.366 | -28.177
create-list-router | neutron.create_subnet | 500 | 32 | 4.282 | 2.866 | -33.075
create-list-router | neutron.add_interface_router | 500 | 32 | 12.741 | 10.324 | -18.973
create-list-port | neutron.list_ports | 500 | 32 | 52.627 | 43.448 | -17.442
create-list-port | neutron.create_network | 500 | 32 | 4.025 | 2.771 | -31.165
create-list-port | neutron.create_port | 500 | 32 | 19.458 | 5.412 | -72.189
create-list-security-group | neutron.create_security_group | 500 | 32 | 3.244 | 2.708 | -16.514
create-list-security-group | neutron.list_security_groups | 500 | 32 | 6.837 | 5.720 | -16.339
create-list-subnet | neutron.create_subnet | 500 | 32 | 11.366 | 4.809 | -57.689
create-list-subnet | neutron.create_network | 500 | 32 | 6.432 | 4.286 | -33.368
create-list-subnet | neutron.list_subnets | 500 | 32 | 10.627 | 7.522 | -29.221
create-list-network | neutron.list_networks | 500 | 32 | 15.154 | 13.073 | -13.736
create-list-network | neutron.create_network | 500 | 32 | 10.200 | 6.595 | -35.347
+---------------------------------------------------------------------------------------------------------------------------------------------------------+
+-----------------------------------------------------------------------------------------------------------------+
UUID | Version | Build | Number of runs
+-----------------------------------------------------------------------------------------------------------------+
938dc451-d881-4f28-a6cb-ad502b177f3b | queens | 2018-03-20.2 | 1
6b50b6f7-acae-445a-ac53-78200b5ba58c | ocata | 2017-XX-XX.X | 3
+-----------------------------------------------------------------------------------------------------------------+
We can see from the output above that we also provide the user with some metadata regarding the two runs, such as the version, the build, and the number of runs each UUID
contained.
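
For reference, the ``% Difference`` column is the relative change from the
first UUID to the second, computed with ``numpy`` exactly as in
``compare_rally_results`` above; the sample numbers below are taken from the
``neutron.create_router`` row::

    import numpy

    dset = [19.940, 15.656]  # 95th-percentile durations for uuid1, uuid2
    diff = numpy.diff(dset)[0] / numpy.abs(dset[:-1])[0] * 100
    # (15.656 - 19.940) / 19.940 * 100 ~= -21.48; the table shows -21.483
    # because it starts from the unrounded percentiles.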

View File

@ -1,4 +1,5 @@
ansible==2.4.1
numpy
elasticsearch
grafyaml==0.0.7
openstacksdk