Merge "Browbeat Workloads - Rally/UPerf"

This commit is contained in:
Jenkins 2017-08-08 20:01:08 +00:00 committed by Gerrit Code Review
commit fdf31ed15a
12 changed files with 634 additions and 0 deletions

View File

@ -15,6 +15,7 @@
- shaker
- flavors
- images
- { role: workloads, when: install_browbeat_workloads}
environment: "{{proxy_env}}"
- hosts: controller, compute

View File

@ -55,6 +55,32 @@ supported_distro: ((ansible_distribution == "CentOS" && ansible_distribution_maj
# iptables file - RHEL (/etc/sysconfig/iptables) CentOS (/etc/sysconfig/iptables-config)
iptables_file: /etc/sysconfig/iptables
########################################
# Browbeat Workloads
########################################
# Install Browbeat workloads
install_browbeat_workloads: false
# Network ID which has external access
browbeat_network:
# For Pbench Repos - Provide the internal RPM URL
pbench_internal_url:
# Browbeat Rally workloads
browbeat_workloads:
linpack:
name: browbeat-linpack
src: linpack-user.file
dest: "{{ browbeat_path }}/linpack-user.file"
image: centos7
uperf:
name: browbeat-uperf
src: pbench-uperf-user.file
dest: "{{ browbeat_path }}/pbench-uperf-user.file"
image: centos7
########################################
# Other Install Configuration Items
########################################

View File

@ -0,0 +1,31 @@
---
# Prepare Browbeat workload guest images (Rally/UPerf) in the overcloud:
# upload the user-data files, boot one builder guest per workload, wait for
# cloud-init to report completion on the console, then snapshot each guest
# into Glance as a public image.

- name: Check browbeat_network
  # group_vars/all.yml declares "browbeat_network:" with an empty value, so
  # the variable is always *defined*; also fail when it is empty/None.
  fail: msg="browbeat_network needs to be set"
  when: browbeat_network is not defined or not browbeat_network

- name: Copy userdata files
  template:
    src: "{{ browbeat_workloads[item].src }}"
    dest: "{{ browbeat_workloads[item].dest }}"
  with_items: "{{ browbeat_workloads }}"

- name: Build images
  # Boot the builder guest and capture its server id from the CLI table.
  shell: source {{ overcloudrc }} ; openstack server create --wait --flavor m1.small --image {{ browbeat_workloads[item].image }} --nic net-id={{ browbeat_network }} --user-data {{ browbeat_workloads[item].dest }} {{ browbeat_workloads[item].name }} | egrep '\sid\s' | awk '{print $4}'
  register: workload_ids
  with_items: "{{ browbeat_workloads }}"

- name: Check status of images
  # Poll each builder guest's console log until the user-data script prints
  # "Browbeat workload installed".
  shell: source {{ overcloudrc }} ; nova console-log {{ item.stdout }}
  register: guest_output
  until: guest_output.stdout.find("Browbeat workload installed") != -1
  retries: 10
  with_items: "{{ workload_ids.results }}"

- name: Copy prepared workload guest into Glance
  shell: source {{ overcloudrc }} ; openstack server image create --wait --name {{ browbeat_workloads[item].name }} {{ browbeat_workloads[item].name }}
  with_items: "{{ browbeat_workloads }}"

- name: Update visibility
  shell: source {{ overcloudrc }} ; openstack image set {{ browbeat_workloads[item].name }} --public
  with_items: "{{ browbeat_workloads }}"

View File

@ -0,0 +1,24 @@
#!/bin/bash
# Cloud-init user-data: prepare a guest with the Intel Linpack benchmark
# under /opt/linpack.  The final echo is polled for via "nova console-log"
# by the installer to detect completion.

# NOTE: "sudo echo foo > file" performs the redirection as the calling
# user, not root -- pipe through "sudo tee" so the write runs privileged.
echo "nameserver {{ dns_server }}" | sudo tee /etc/resolv.conf
if [ $? -gt 0 ]
then
    exit 1
fi
sudo curl -O http://registrationcenter-download.intel.com/akdlm/irc_nas/9752/l_mklb_p_2017.3.018.tgz
sudo tar -xvzf l_mklb_p_2017.3.018.tgz
sudo mkdir /opt/linpack
sudo cp l_mklb_p_2017.3.018/benchmarks_2017/linux/mkl/benchmarks/linpack/* /opt/linpack
if [ $? -gt 0 ]
then
    exit 1
fi
# Allow for root access
sudo sed -i 's/disable_root: 1/disable_root: 0/g' /etc/cloud/cloud.cfg
grep disable_root /etc/cloud/cloud.cfg
if [ $? -gt 0 ]
then
    exit 1
fi
echo "Browbeat workload installed"

View File

@ -0,0 +1,29 @@
#!/bin/bash
# Cloud-init user-data: prepare a guest with pbench-uperf.  The final echo
# is polled for via "nova console-log" by the installer.

# NOTE: "sudo cmd > file" redirects as the calling user -- use "sudo tee"
# so the writes to /etc actually run privileged.
echo "nameserver {{ dns_server }}" | sudo tee /etc/resolv.conf
sudo wget -O /etc/yum.repos.d/pbench.repo "{{ pbench_internal_url }}"
# Append the external COPR repo definition (tee -a writes as root).
# The duplicate skip_if_unavailable entry from the original stanza was
# dropped; one numeric entry is kept.
sudo tee -a /etc/yum.repos.d/pbench.repo > /dev/null << EOF
# Template file to be used with ansible playbook "pbench-repo.yml"
###########################################################################
# External COPR repo
[copr-pbench]
name=Copr repo for pbench owned by ndokos
baseurl=https://copr-be.cloud.fedoraproject.org/results/ndokos/pbench/epel-7-x86_64/
gpgcheck=1
gpgkey=https://copr-be.cloud.fedoraproject.org/results/ndokos/pbench/pubkey.gpg
enabled=1
enabled_metadata=1
skip_if_unavailable=1
EOF
cat /etc/yum.repos.d/pbench.repo
sudo yum clean all
sudo yum install -y pbench-agent
sudo yum install -y pbench-agent-internal
sudo yum install -y pbench-sysstat
sudo yum install -y pbench-uperf
# Allow for root access
sudo sed -i 's/disable_root: 1/disable_root: 0/g' /etc/cloud/cloud.cfg
grep disable_root /etc/cloud/cloud.cfg
# Only report success if the benchmark package actually installed, so the
# installer's console-log check cannot pass on a broken guest.
if ! rpm -q pbench-uperf > /dev/null 2>&1
then
    exit 1
fi
echo "Browbeat workload installed"

View File

@ -0,0 +1,83 @@
# Complete set of Workload Benchmarks
# NOTE(review): indentation reconstructed to the standard browbeat config
# layout (the diff view stripped leading whitespace); also fixes the
# "results :" space-before-colon.
browbeat:
  results: results/
  rerun: 1
  cloud_name: openstack
elasticsearch:
  enabled: false
  regather: false
  host: 1.1.1.1
  port: 9200
  metadata_files:
    - name: hardware-metadata
      file: metadata/hardware-metadata.json
    - name: environment-metadata
      file: metadata/environment-metadata.json
    - name: software-metadata
      file: metadata/software-metadata.json
    - name: version
      file: metadata/version.json
ansible:
  ssh_config: ansible/ssh-config
  hosts: ansible/hosts
  adjust:
    keystone_token: ansible/browbeat/adjustment-keystone-token.yml
    neutron_l3: ansible/browbeat/adjustment-l3.yml
    nova_db: ansible/browbeat/adjustment-db.yml
    workers: ansible/browbeat/adjustment-workers.yml
  grafana_snapshot: ansible/browbeat/snapshot-general-performance-dashboard.yml
  metadata: ansible/gather/site.yml
connmon:
  enabled: false
  sudo: true
grafana:
  enabled: true
  grafana_ip: 1.1.1.1
  grafana_port: 3000
  dashboards:
    - openstack-general-system-performance
  snapshot:
    enabled: false
    snapshot_compute: false
rally:
  enabled: true
  sleep_before: 5
  sleep_after: 5
  venv: /home/stack/rally-venv/bin/activate
  plugins:
    - workloads: rally/rally-plugins/workloads
  benchmarks:
    - name: workloads
      enabled: true
      concurrency:
        - 1
      times: 1
      scenarios:
        # browbeat-linpack / browbeat-uperf images are built by the
        # workloads install role; net_id must be filled in by the user.
        - name: linpack
          enabled: true
          image_name: browbeat-linpack
          external_network:
          net_id:
          flavor_name: m1.small
          file: rally/rally-plugins/workloads/linpack.yml
        - name: uperf
          enabled: true
          user: root
          password: None
          image_name: browbeat-uperf
          external_network:
          net_id:
          flavor_name: m1.small
          test_name: test-uperf
          protocols: 'tcp'
          send_results: true
          num_pairs: 1
          instances: 1
          samples: 1
          test_types: 'stream'
          message_sizes: '64'
          cloudname: test-cloud
          elastic_host:
          elastic_port: 9200
          file: rally/rally-plugins/workloads/pbench-uperf.yml

View File

@ -56,6 +56,30 @@ On the Undercloud
internet. Some useful documentation can be found at:
https://access.redhat.com/documentation/en/red-hat-openstack-platform/11/single/networking-guide/
(Optional) Install Browbeat instance workloads
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Browbeat instance workloads are orchestrated Rally plugins that ship with Browbeat.
We currently support a handful of workloads:
- Pbench-Uperf - Networking throughput / RR test
- Linpack - Microbenchmark for CPU load
To enable installation of the Browbeat workloads set install_browbeat_workloads: true in
ansible/install/group_vars/all.yml.
It is also required to provide the neutron network id of a private network which
has external access. To set this, edit ansible/install/group_vars/all.yml and
provide the network id for the browbeat_network:
This work can either be done prior to installation of Browbeat, or after Browbeat
has been installed. To skip directly to this task execute:
::
$ ansible-playbook -i hosts install/browbeat.yml --start-at-task "Check browbeat_network"
...
(Optional) Install Collectd
~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -550,6 +574,7 @@ Graphite dashboard included and it is recommended to install collectd on your
monitoring host such that you can see if you hit resource issues with your
monitoring host.
Install ELK Host (ElasticSearch/LogStash/Kibana)
-------------------------------------------------

View File

@ -0,0 +1,25 @@
#!/bin/sh
# Run the Intel Linpack benchmark and emit average/max GFLOPS as JSON
# (consumed by the rally VMTasks.boot_runcommand_delete scenario).

# Location of Linpack binary
LINPACK='/opt/linpack/xlinpack_xeon64'
# Location to create linpack dat file
LINPACK_DAT='linpack.dat'
# One OpenMP thread per CPU.
NUM_CPU=$(grep -c processor /proc/cpuinfo)
export OMP_NUM_THREADS=$NUM_CPU
# Generate the input file: 1 test, problem size 10514, leading
# dimension 20016, 2 runs, 4 KB alignment.
{
    echo "Sample Intel(R) LINPACK data file (from lininput_xeon64)"
    echo "Intel(R) LINPACK data"
    echo "1 # number of tests"
    echo "10514 # problem sizes"
    echo "20016 # leading dimensions"
    echo "2 # times to run a test "
    echo "4 # alignment values (in KBytes)"
} > ${LINPACK_DAT}
# The summary row after "Average" carries the leading dimension (20016);
# columns 4 and 5 are the average and maximal GFLOPS.
OUTPUT=$(sudo ${LINPACK} < ${LINPACK_DAT} | grep -A 1 Average | grep 20016)
AVERAGE=$(echo $OUTPUT | awk '{print $4}')
MAX=$(echo $OUTPUT | awk '{print $5}')
echo "{
  \"average_gflops\": $AVERAGE,
  \"max_gflops\": $MAX
}"

View File

@ -0,0 +1,46 @@
# Rally job template for the Browbeat Linpack workload.  The Jinja
# defaults below let browbeat invoke the template with only a subset of
# arguments; username defaults to the CentOS cloud-image user.
{% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
{% set sla_max_failure = sla_max_failure or 0 %}
{% set sla_max_seconds = sla_max_seconds or 60 %}
{% set times = times or 1 %}
{% set concurrency = concurrency or 1 %}
{% set username = username or 'centos' %}
---
# Boots a browbeat-linpack guest, pushes linpack.sh over ssh, runs it,
# and deletes the guest.  NOTE(review): nesting below reflects the diff
# view; the file follows the standard rally task layout
# (args/runner/context/sla per scenario entry).
VMTasks.boot_runcommand_delete:
-
args:
image:
name: '{{image_name}}'
flavor:
name: '{{flavor_name}}'
external:
name: '{{external_network}}'
command:
remote_path: "./linpack.sh"
local_path: "rally/rally-plugins/workloads/linpack.sh"
username: '{{username}}'
nics:
- net-id: '{{net_id}}'
runner:
concurrency: {{concurrency}}
times: {{times}}
type: "constant"
# Unlimited neutron/nova quotas for the single test tenant.
context:
users:
tenants: 1
users_per_tenant: 1
quotas:
neutron:
network: -1
port: -1
router: -1
subnet: -1
nova:
instances: -1
cores: -1
ram: -1
sla:
max_avg_duration: {{sla_max_avg_duration}}
max_seconds_per_iteration: {{sla_max_seconds}}
failure_rate:
max: {{sla_max_failure}}

Binary file not shown.

After

Width:  |  Height:  |  Size: 53 KiB

View File

@ -0,0 +1,281 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rally.task import scenario
from rally.plugins.openstack.scenarios.vm import utils as vm_utils
from rally.plugins.openstack.scenarios.neutron import utils as neutron_utils
from rally.task import types
from rally.task import validation
from rally.common import sshutils
import time
import StringIO
import csv
import json
import datetime
import logging
from Elastic import Elastic
LOG = logging.getLogger(__name__)
class BrowbeatPlugin(neutron_utils.NeutronScenario,
                     vm_utils.VMScenario,
                     scenario.Scenario):
    """Rally scenario for the Browbeat pbench-uperf workload.

    Boots a jump host with a floating IP, builds server/client guest
    pairs on the tenant network, drives pbench-uperf from the jump host
    and (optionally) ships the JSON results to ElasticSearch.
    """

    def build_jump_host(
            self,
            external,
            image,
            flavor,
            user,
            password=None,
            **kwargs):
        """Boot a guest with a floating IP and prepare it as a jump host.

        :param external: external network dict (expects key 'name')
        :param image: glance image for the jump host
        :param flavor: nova flavor for the jump host
        :param user: ssh user on the guest image
        :param password: optional ssh password
        :returns: (ssh connection, floating-ip dict, server object)
        """
        keyname = self.context["user"]["keypair"]["name"]
        jump_host, jump_host_ip = self._boot_server_with_fip(
            image,
            flavor,
            use_floating_ip=True,
            floating_network=external['name'],
            key_name=keyname,
            **kwargs)
        # Wait for ping before attempting ssh.
        self._wait_for_ping(jump_host_ip['ip'])
        # Open SSH Connection
        jump_ssh = sshutils.SSH(user, jump_host_ip['ip'], 22,
                                self.context["user"]["keypair"]["private"],
                                password)
        # Check for connectivity
        self._wait_for_ssh(jump_ssh)
        # Install the tenant private key so the jump host can reach the
        # guests, which have no floating IPs of their own.
        self._run_command_over_ssh(jump_ssh, {'remote_path': "rm -rf ~/.ssh"})
        self._run_command_over_ssh(jump_ssh, {'remote_path': "mkdir ~/.ssh"})
        jump_ssh.run(
            "cat > ~/.ssh/id_rsa",
            stdin=self.context["user"]["keypair"]["private"])
        jump_ssh.execute("chmod 0600 ~/.ssh/id_rsa")
        return jump_ssh, jump_host_ip, jump_host

    def create_guest_pairs(
            self,
            jump_ssh,
            num_pairs,
            image,
            flavor,
            user,
            zones=None,
            **kwargs):
        """Boot num_pairs server/client guests and wait until reachable.

        :param jump_ssh: ssh connection to the jump host
        :param num_pairs: number of server/client pairs to boot
        :param zones: optional dict with 'server'/'client' availability
            zones; the string 'None' (or a missing dict) means "no zone"
        :returns: (client_ips, server_ips), or False on failure
        """
        _servers = []
        _clients = []
        # Launch Guests
        network_name = None
        # Bugfix: was "num_pairs is 1" -- identity, not equality; also
        # guard zones, whose default is None.
        if num_pairs == 1:
            if zones and zones['server'] != 'None':
                kwargs['availability_zone'] = zones['server']
            server = self._boot_server(
                image,
                flavor,
                key_name=self.context["user"]["keypair"]["name"],
                **kwargs)
            if zones and zones['client'] != 'None':
                kwargs['availability_zone'] = zones['client']
            client = self._boot_server(
                image,
                flavor,
                key_name=self.context["user"]["keypair"]["name"],
                **kwargs)
            # Use whichever network the server attached to for addressing.
            for net in server.addresses:
                network_name = net
                break
            if network_name is None:
                return False
            # IP Addresses
            _servers.append(
                str(server.addresses[network_name][0]["addr"]))
            _clients.append(
                str(client.addresses[network_name][0]["addr"]))
        else:
            for i in range(num_pairs):
                if zones and zones['server'] != 'None':
                    kwargs['availability_zone'] = zones['server']
                server = self._boot_server(
                    image,
                    flavor,
                    key_name=self.context["user"]["keypair"]["name"],
                    **kwargs)
                if zones and zones['client'] != 'None':
                    kwargs['availability_zone'] = zones['client']
                client = self._boot_server(
                    image,
                    flavor,
                    key_name=self.context["user"]["keypair"]["name"],
                    **kwargs)
                if network_name is None:
                    # IP Addresses
                    for net in server.addresses:
                        network_name = net
                        break
                    if network_name is None:
                        return False
                _servers.append(
                    str(server.addresses[network_name][0]["addr"]))
                _clients.append(
                    str(client.addresses[network_name][0]["addr"]))
        # Check status of guest
        # NOTE(review): ready flips True as soon as *any* guest answers,
        # matching the original behavior -- confirm whether all guests
        # should be required before returning.
        ready = False
        retry = 10
        while (not ready):
            for sip in _servers + _clients:
                cmd = "ssh -o StrictHostKeyChecking=no {}@{} /bin/true".format(
                    user, sip)
                s1_exitcode, s1_stdout, s1_stderr = jump_ssh.execute(cmd)
                if retry < 1:
                    LOG.error(
                        "Error : Issue reaching {} the guests through the Jump host".format(sip))
                    return False
                # Bugfix: was "s1_exitcode is 0".
                if s1_exitcode == 0:
                    LOG.info("Server: {} ready".format(sip))
                    ready = True
                else:
                    LOG.info(
                        "Error reaching server: {} error {}".format(
                            sip, s1_stderr))
                    retry = retry - 1
                    time.sleep(10)
        return _clients, _servers

    @types.convert(image={"type": "glance_image"},
                   flavor={"type": "nova_flavor"})
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["nova", "neutron", "cinder"],
                                 "keypair": {}, "allow_ssh": {}})
    def pbench_uperf(
            self,
            image,
            flavor,
            user,
            test_types,
            protocols,
            samples,
            test_name,
            external=None,
            send_results=True,
            num_pairs=1,
            password="",
            network_id=None,
            zones=None,
            message_sizes=None,
            instances=None,
            elastic_host=None,
            elastic_port=None,
            cloudname=None,
            **kwargs):
        """Run pbench-uperf between guest pairs and report Gbps stats.

        Creates (or reuses) a tenant network, boots a jump host plus the
        guest pairs, registers pbench tools, executes pbench-uperf, then
        parses result.csv into a rally StatsTable and optionally indexes
        result.json into ElasticSearch.
        :returns: False on failure to collect results
        """
        pbench_path = "/opt/pbench-agent"
        pbench_results = "/var/lib/pbench-agent"
        # Create env
        if not network_id:
            router = self._create_router({}, external_gw=external)
            network = self._create_network({})
            subnet = self._create_subnet(network, {})
            kwargs["nics"] = [{'net-id': network['network']['id']}]
            self._add_interface_router(subnet['subnet'], router['router'])
        else:
            kwargs["nics"] = [{'net-id': network_id}]
        jump_ssh, jump_host_ip, jump_host = self.build_jump_host(
            external, image, flavor, user, **kwargs)
        _clients, _servers = self.create_guest_pairs(
            jump_ssh, num_pairs, image, flavor, user, zones, **kwargs)
        # Register pbench across FIP
        for sip in _servers + _clients:
            cmd = "{}/util-scripts/pbench-register-tool-set --remote={}".format(
                pbench_path, sip)
            exitcode, stdout, stderr = jump_ssh.execute(cmd)
        # Start uperf against private address
        uperf = "{}/bench-scripts/pbench-uperf --clients={} --servers={} --samples={}".format(
            pbench_path, ','.join(_clients), ','.join(_servers), samples)
        uperf += " --test-types={} --protocols={} --config={}".format(
            test_types,
            protocols,
            test_name)
        if message_sizes is not None:
            uperf += " --message-sizes={}".format(message_sizes)
        if instances is not None:
            uperf += " --instances={}".format(instances)
        # Execute pbench-uperf
        # execute returns, exitcode,stdout,stderr
        LOG.info("Starting Rally - PBench UPerf")
        uperf_exitcode, stdout_uperf, stderr = jump_ssh.execute(uperf)
        # Prepare results
        cmd = "cat {}/uperf_{}*/result.csv".format(pbench_results, test_name)
        exitcode, stdout, stderr = jump_ssh.execute(cmd)
        # Bugfix: was "exitcode is 1" (identity, not equality).
        if exitcode == 1:
            return False
        if send_results:
            # Bugfix: was "uperf_exitcode is not 1".
            if uperf_exitcode != 1:
                cmd = "cat {}/uperf_{}*/result.json".format(
                    pbench_results, test_name)
                LOG.info("Running command : {}".format(cmd))
                exitcode, stdout_json, stderr = jump_ssh.execute(cmd)
                LOG.info("Result: {}".format(stderr))
                es_ts = datetime.datetime.utcnow()
                config = {
                    'elasticsearch': {
                        'host': elastic_host,
                        'port': elastic_port},
                    'browbeat': {
                        'cloud_name': cloudname,
                        'timestamp': es_ts,
                        'num_pairs': num_pairs}}
                elastic = Elastic(config, 'pbench')
                json_result = StringIO.StringIO(stdout_json)
                json_data = json.load(json_result)
                for iteration in json_data:
                    elastic.index_result(iteration, test_name, 'results/')
            else:
                LOG.error("Error with PBench Results")
        # Parse results
        # Drop the csv header row, then keep rows wide enough to index:
        # bugfix -- was "len(row) >= 1", which crashed on short rows since
        # row[1..3] are read below.
        result = StringIO.StringIO('\n'.join(stdout.split('\n')[1:]))
        creader = csv.reader(result)
        report = []
        for row in creader:
            if len(row) >= 4:
                report.append(["aggregate.{}".format(row[1]), float(row[2])])
                report.append(["single.{}".format(row[1]), float(row[3])])
        if len(report) > 0:
            self.add_output(
                additive={"title": "PBench UPerf Stats",
                          "description": "PBench UPerf Scenario",
                          "chart_plugin": "StatsTable",
                          "axis_label": "Gbps",
                          "label": "Gbps",
                          "data": report})
        cmd = "{}/util-scripts/pbench-move-results".format(pbench_path)
        self._run_command_over_ssh(jump_ssh, {"remote_path": cmd})

View File

@ -0,0 +1,63 @@
{# Rally job template for the Browbeat pbench-uperf workload.  The Jinja
   defaults below let browbeat invoke the template with only a subset of
   arguments. #}
{% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
{% set sla_max_failure = sla_max_failure or 0 %}
{% set sla_max_seconds = sla_max_seconds or 60 %}
{% set times = times or 1 %}
{% set num_pairs = num_pairs or 1 %}
{% set instances = instances or 1 %}
{% set password = password or 'None' %}
{% set protocols = protocols or 'tcp' %}
{% set message_sizes = message_sizes or '64,1024,16384' %}
{# Bugfix: these previously read only the misspelled "hypervsior_*" names,
   so a correctly spelled template argument was silently ignored.  The old
   misspelling is still honored as a fallback for compatibility. #}
{% set hypervisor_server = hypervisor_server or hypervsior_server or 'None' %}
{% set hypervisor_client = hypervisor_client or hypervsior_client or 'None' %}
---
BrowbeatPlugin.pbench_uperf:
  -
    args:
      image:
        name: '{{image_name}}'
      flavor:
        name: '{{flavor_name}}'
      zones:
        server: '{{hypervisor_server}}'
        client: '{{hypervisor_client}}'
      external:
        name: '{{external_network}}'
      user: '{{user}}'
      password: '{{password}}'
      num_pairs: {{num_pairs}}
      network_id: '{{net_id}}'
      test_types: '{{test_types}}'
      protocols: '{{protocols}}'
      samples: '{{samples}}'
      message_sizes: '{{message_sizes}}'
      instances: '{{instances}}'
      test_name: '{{test_name}}'
      send_results: {{send_results}}
      cloudname: '{{cloudname}}'
      elastic_host: '{{elastic_host}}'
      elastic_port: '{{elastic_port}}'
    runner:
      concurrency: 1
      times: 1
      type: "constant"
    context:
      users:
        tenants: 1
        users_per_tenant: 1
      quotas:
        neutron:
          network: -1
          port: -1
          router: -1
          subnet: -1
        nova:
          instances: -1
          cores: -1
          ram: -1
    sla:
      max_avg_duration: {{sla_max_avg_duration}}
      max_seconds_per_iteration: {{sla_max_seconds}}
      failure_rate:
        max: {{sla_max_failure}}