Bugfixes and tuning for YODA in CI

* Fixes an indentation error that caused YODA to exit unexpectedly
  in some cases (see the sketch after this list)

* Reduces the length of the YODA CI benchmark to try to get
  the runtime below 12h

* Removes the last vestiges of the yoda venv; it's no longer
  required since all venvs are escaped by yoda console commands
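
A minimal sketch of the kind of indentation slip the first bullet refers
to; the function and variable names here are illustrative stand-ins, not
the actual Browbeat code:

    import sys
    import time

    def wait_for_stack_delete(stack_is_gone, timeout=1800):
        # Poll until the stack is gone or the timeout expires.
        wait_time = 0
        while not stack_is_gone():
            time.sleep(10)
            wait_time += 10
            if wait_time > timeout:
                print("Overcloud stack delete failed")
                sys.exit(1)  # correct: fires only after the timeout is exceeded
            # sys.exit(1)    # buggy variant: dedented one level, it fires on the
            #                # first pass through the loop regardless of the timeout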

Change-Id: I13c3c4b814eacdd6d0eb4e702177866eabb51754
jkilpatr 2017-07-27 09:16:28 -04:00
parent b2f2d2065b
commit d0b449a34c
4 changed files with 8 additions and 78 deletions


@@ -4,6 +4,7 @@ browbeat:
results : results/
rerun: 1
cloud_name: {{ browbeat_cloud_name }}
overcloud_credentials: /home/stack/overcloudrc
elasticsearch:
enabled: {{ elastic_enabled }}
host: {{ elastic_host }}
@@ -44,7 +45,6 @@ yoda:
enabled: true
instackenv: "/home/stack/instackenv.json"
stackrc: "/home/stack/stackrc"
venv: "/home/stack/yoda-venv/bin/activate"
benchmarks:
- name: introspect-{{ overcloud_size }}-batch-5
type: introspection
@@ -59,58 +59,6 @@ yoda:
method: bulk
times: 3
timeout: 1800
- name: No-HA-Max-Compute-{{ overcloud_size }}-full-deploy
type: overcloud
ntp_server: pool.ntp.org
timeout: 600
templates:
- ""
enabled: true
step: 5
keep_stack: false
times: 1
cloud:
- node: "compute"
start_scale: 1
end_scale: {{ overcloud_size | int - 1 }}
- node: "control"
start_scale: 1
end_scale: 1
- name: No-HA-Max-Compute-{{ overcloud_size }}-stack-update
type: overcloud
ntp_server: pool.ntp.org
timeout: 600
templates:
- ""
instackenv: "/home/stack/instackenv.json"
enabled: true
step: 5
keep_stack: true
times: 1
cloud:
- node: "compute"
start_scale: 1
end_scale: {{ overcloud_size | int - 1 }}
- node: "control"
start_scale: 1
end_scale: 1
- name: HA-Max-Compute-{{ overcloud_size }}-full-deploy
type: overcloud
ntp_server: pool.ntp.org
timeout: 600
templates:
- ""
enabled: true
step: 5
keep_stack: false
times: 1
cloud:
- node: "compute"
start_scale: 1
end_scale: {{ overcloud_size | int - 3 }}
- node: "control"
start_scale: 3
end_scale: 3
- name: HA-Max-Compute-{{ overcloud_size }}-stack-update
type: overcloud
ntp_server: pool.ntp.org
@@ -118,29 +66,12 @@ yoda:
templates:
- ""
enabled: true
step: 5
step: 1
keep_stack: true
times: 1
cloud:
- node: "compute"
start_scale: 1
end_scale: {{ overcloud_size | int - 3 }}
- node: "control"
start_scale: 3
end_scale: 3
- name: HA-Max-Compute-{{ overcloud_size }}-stack-update
type: overcloud
ntp_server: pool.ntp.org
timeout: 600
templates:
- ""
enabled: true
step: 5
keep_stack: true
times: 1
cloud:
- node: "compute"
start_scale: 1
start_scale: {{ overcloud_size | int - 3 }}
end_scale: {{ overcloud_size | int - 3 }}
- node: "control"
start_scale: 3


@@ -44,7 +44,6 @@ yoda:
enabled: true
instackenv: "/home/stack/instackenv.json"
stackrc: "/home/stack/stackrc"
venv: "/home/stack/yoda-venv/bin/activate"
benchmarks:
- name: introspect-{{ overcloud_size }}-10-individual-batch-2
type: introspection


@@ -325,7 +325,7 @@ class Yoda(WorkloadBase.WorkloadBase):
wait_time += 10
if wait_time > timeout:
self.logger.error("Overcloud stack delete failed")
exit(1)
exit(1)
except exceptions.SDKException:
# Recursion is probably the wrong way to handle this
self.logger.error("Heat failure during overcloud delete, retrying")
@@ -436,8 +436,11 @@ class Yoda(WorkloadBase.WorkloadBase):
# even if nodes are never pingable
rentry['ping_time'] = -1
condition = 'private' in node.addresses
if condition:
ping = self.tools.is_pingable(node.addresses['private'])
else:
ping = False
condition = condition and 'pingable_at' not in rentry
ping = self.tools.is_pingable(node.addresses['private'])
condition = condition and ping
if condition:
ping_time = datetime.datetime.utcnow()
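
The hunk above also reworks the ping check so that the node's address is
only looked up and pinged when a 'private' entry actually exists; nodes
without one are simply treated as not pingable. A simplified
reconstruction of the new control flow (the node, rentry and is_pingable
arguments stand in for the real Browbeat objects and helper):

    def should_record_ping_time(node, rentry, is_pingable):
        # Mirror of the rewritten condition logic in Yoda.py above.
        condition = 'private' in node.addresses
        if condition:
            # Only look up and ping the address when the key exists.
            ping = is_pingable(node.addresses['private'])
        else:
            ping = False
        condition = condition and 'pingable_at' not in rentry
        condition = condition and ping
        return condition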


@@ -300,9 +300,6 @@ mapping:
stackrc:
type: str
required: True
venv:
type: str
required: True
benchmarks:
type: seq
required: True