Feature commit #6 to support running fio

1. Support progression runs on FIO;

Change-Id: Id0b29328fc7198982515849db17487b8b9716a94
Yichen Wang 2016-01-26 20:57:27 -08:00
parent 5597be46a4
commit 3c3a2f44eb
7 changed files with 113 additions and 23 deletions

View File

@@ -31,7 +31,7 @@ import redis
#
# This version must be incremented if the interface changes or if new features
# are added to the agent VM
__version__ = '5'
__version__ = '6'
# TODO(Logging on Agent)
@@ -117,10 +117,12 @@ class KB_Instance(object):
# Init volume
@staticmethod
def init_volume(size):
cmd = 'mkfs.xfs /dev/vdb && '
cmd = 'if [ ! -e /mnt/volume ]; then\n'
cmd += 'mkfs.xfs /dev/vdb && '
cmd += 'mkdir -p /mnt/volume && '
cmd += 'mount /dev/vdb /mnt/volume && '
cmd += 'dd if=/dev/zero of=/mnt/volume/kb_storage_test.bin bs=%s count=1' % size
cmd += 'dd if=/dev/zero of=/mnt/volume/kb_storage_test.bin bs=%s count=1\n' % size
cmd += 'fi'
return cmd
# Run fio
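
For reference, the reworked init_volume guards the whole sequence so the mkfs/mount/dd runs only once per VM; below is a standalone sketch of the command string it assembles, using an assumed size of '1GB' (the storage runner passes volume_size + 'GB' as the parameter):

def init_volume(size):
    # Mirrors the command string built in the hunk above.
    cmd = 'if [ ! -e /mnt/volume ]; then\n'
    cmd += 'mkfs.xfs /dev/vdb && '
    cmd += 'mkdir -p /mnt/volume && '
    cmd += 'mount /dev/vdb /mnt/volume && '
    cmd += 'dd if=/dev/zero of=/mnt/volume/kb_storage_test.bin bs=%s count=1\n' % size
    cmd += 'fi'
    return cmd

# Prints a three-line shell snippet: the existence guard, the chained
# mkfs/mkdir/mount/dd commands, and the closing 'fi'.
print(init_volume('1GB'))
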

View File

@@ -92,7 +92,12 @@ class BaseCompute(object):
time.sleep(2)
def attach_vol(self):
self.novaclient.volumes.create_server_volume(self.instance.id, self.vol.id)
for _ in range(10):
try:
self.novaclient.volumes.create_server_volume(self.instance.id, self.vol.id)
break
except Exception:
time.sleep(1)
def get_server_list(self):
servers_list = self.novaclient.servers.list()
@@ -105,7 +110,8 @@
self.instance = None
def detach_vol(self):
self.novaclient.volumes.delete_server_volume(self.instance.id, self.vol.id)
if self.instance and self.vol:
self.novaclient.volumes.delete_server_volume(self.instance.id, self.vol.id)
def find_image(self, image_name):
"""

View File

@@ -107,8 +107,9 @@ client:
vm_start: 1
# The stepping for the VM count for each stage
vm_step: 1
# The stop condition, it is used for KloudBuster to determine when to
# stop the progression, and do the cleanup if needed. It defines as:
# The stop condition for HTTP benchmarking. KloudBuster uses it to determine
# when to stop the progression and to do the cleanup if needed.
# It is defined as:
# [number_of_socket_errs, percentile_of_requests_not_timeout(%%)]
#
# e.g. [50, 99.99] means, KloudBuster will continue the progression run
@@ -123,7 +124,25 @@ client:
# (2) The percentile of requests must be one of the values in the list below:
# [50, 75, 90, 99, 99.9, 99.99, 99.999]
# (3) Set the percentile to 0 to disable HTTP request timeout checks;
stop_limit: [50, 0]
http_stop_limit: [50, 0]
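
As a reading aid only (this helper is not part of the commit, and its names are illustrative), the [socket errors, percentile] pair can be interpreted as:

def http_should_stop(stop_limit, socket_errors, pct_requests_not_timeout):
    max_socket_errors, required_pct = stop_limit      # e.g. [50, 99.99]
    if socket_errors > max_socket_errors:
        return True                                   # too many socket errors
    if required_pct and pct_requests_not_timeout < required_pct:
        return True                                   # timeout check enabled and percentile not met
    return False                                      # otherwise keep progressing
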
# The stop condition for storage benchmarking. KloudBuster uses it to
# determine when to stop the progression and to do the cleanup if needed.
# For random read and random write, this value is the allowed percentage of
# degradation in per-VM IOPS; for sequential read and sequential write, it
# is the allowed percentage of degradation in per-VM throughput.
#
# Assuming the IOPS or throughput per VM is roughly constant, the aggregate
# numbers are expected to grow with the VM count. Once the capacity of the
# storage backend is reached, the overall performance starts to degrade.
#
# e.g. In randread or randwrite mode, the last run with 10 VMs measured
# 1000 IOPS, i.e. 100 IOPS/VM, and the current run with 11 VMs measured
# 880 IOPS, i.e. 80 IOPS/VM; this data set shows a 20% degradation.
# KloudBuster will continue the progression run as long as the degradation
# percentage stays within (less than or equal to) the limit defined below.
storage_stop_limit: 20
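
For clarity, here is a minimal standalone sketch of the check this limit implies, mirroring the per-VM arithmetic the new storage progression loop performs later in this commit (function and argument names are illustrative, not part of the code):

def storage_should_stop(last_total, last_vm_count, cur_total, cur_vm_count, limit):
    # Normalize both runs to a per-VM figure before comparing.
    last_per_vm = float(last_total) / last_vm_count            # e.g. 1000 / 10 = 100
    cur_per_vm = float(cur_total) / cur_vm_count               # e.g.  880 / 11 =  80
    degradation = (last_per_vm - cur_per_vm) * 100 / last_per_vm  # e.g. 20 (%)
    return degradation > limit                                 # stop once the limit is exceeded

# With the example above: 20% degradation against a limit of 20 -> continue.
print(storage_should_stop(1000, 10, 880, 11, 20))  # False
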
# Assign floating IP for every client side test VM
# Default: no floating IP (only assign internal fixed IP)
@@ -218,12 +237,12 @@ client:
runtime: 30
block_size: '64k'
iodepth: 64
rate: '60m'
rate: '60M'
- mode: 'write'
runtime: 30
block_size: '64k'
iodepth: 64
rate: '60m'
rate: '60M'
# Volume size in GB for each VM; set to 0 to disable volume creation
volume_size: 0

View File

@@ -60,13 +60,6 @@ class KBRunner(object):
self.report_chan_name = "kloudbuster_report"
self.message_queue = deque()
def header_formatter(self, stage, vm_count):
conns = vm_count * self.config.http_tool_configs.connections
rate_limit = vm_count * self.config.http_tool_configs.rate_limit
msg = "Stage %d: %d VM(s), %d Connections, %d Expected RPS" %\
(stage, vm_count, conns, rate_limit)
return msg
def msg_handler(self):
for message in self.pubsub.listen():
if message['data'] == "STOP":

View File

@@ -18,9 +18,6 @@ from kb_runner_base import KBException
from kb_runner_base import KBRunner
import log as logging
# A set of warned VM version mismatches
vm_version_mismatches = set()
LOG = logging.getLogger(__name__)
class KBSetStaticRouteException(KBException):
@@ -40,6 +37,13 @@ class KBRunner_HTTP(KBRunner):
def __init__(self, client_list, config, expected_agent_version, single_cloud=True):
KBRunner.__init__(self, client_list, config, expected_agent_version, single_cloud)
def header_formatter(self, stage, vm_count):
conns = vm_count * self.config.http_tool_configs.connections
rate_limit = vm_count * self.config.http_tool_configs.rate_limit
msg = "Stage %d: %d VM(s), %d Connections, %d Expected RPS" %\
(stage, vm_count, conns, rate_limit)
return msg
def setup_static_route(self, active_range, timeout=30):
func = {'cmd': 'setup_static_route', 'active_range': active_range}
self.send_cmd('EXEC', 'http', func)
@@ -117,7 +121,7 @@ class KBRunner_HTTP(KBRunner):
self.tool_result = {}
start = self.config.progression.vm_start
step = self.config.progression.vm_step
limit = self.config.progression.stop_limit
limit = self.config.progression.http_stop_limit
timeout = self.config.http_tool_configs.timeout
vm_list = self.full_client_dict.keys()
vm_list.sort(cmp=lambda x, y: cmp(int(x[x.rfind('I') + 1:]), int(y[y.rfind('I') + 1:])))

View File

@@ -31,6 +31,20 @@ class KBRunner_Storage(KBRunner):
def __init__(self, client_list, config, expected_agent_version):
KBRunner.__init__(self, client_list, config, expected_agent_version, single_cloud=True)
def header_formatter(self, stage, vm_count):
rr_iops = vm_count * self.config.storage_tool_configs[0].rate_iops
rw_iops = vm_count * self.config.storage_tool_configs[1].rate_iops
sr_tp = self.config.storage_tool_configs[2].rate.upper()
ex_unit = sr_tp[-1] if sr_tp[-1] in ['K', 'M', 'G', 'T'] else None
sr_tp = (str(vm_count * int(sr_tp[:-1])) + ex_unit) if ex_unit else vm_count * int(sr_tp)
sw_tp = self.config.storage_tool_configs[3].rate.upper()
ex_unit = sw_tp[-1] if sw_tp[-1] in ['K', 'M', 'G', 'T'] else None
sw_tp = (str(vm_count * int(sw_tp[:-1])) + ex_unit) if ex_unit else vm_count * int(sw_tp)
msg = "Stage %d: %d VM(s), %d/%d(r/w) Expected IOPS, %sB/%sB(r/w) Expected Throughput" %\
(stage, vm_count, rr_iops, rw_iops, sr_tp, sw_tp)
return msg
def init_volume(self, active_range, timeout=30):
func = {'cmd': 'init_volume', 'active_range': active_range,
'parameter': str(self.config.volume_size) + 'GB'}
@@ -96,8 +110,58 @@ class KBRunner_Storage(KBRunner):
self.wait_for_vm_up()
if self.config.progression.enabled:
# TODO(Implement progression runs)
pass
self.tool_result = {}
self.last_result = None
start = self.config.progression.vm_start
step = self.config.progression.vm_step
limit = self.config.progression.storage_stop_limit
vm_list = self.full_client_dict.keys()
vm_list.sort(cmp=lambda x, y: cmp(int(x[x.rfind('I') + 1:]), int(y[y.rfind('I') + 1:])))
self.client_dict = {}
cur_stage = 1
while True:
tc_flag = False
cur_vm_count = len(self.client_dict)
target_vm_count = start + (cur_stage - 1) * step
if target_vm_count > len(self.full_client_dict):
break
for idx in xrange(cur_vm_count, target_vm_count):
self.client_dict[vm_list[idx]] = self.full_client_dict[vm_list[idx]]
description = "-- %s --" % self.header_formatter(cur_stage, len(self.client_dict))
LOG.info(description)
self.single_run(active_range=[0, target_vm_count - 1], test_only=test_only)
LOG.info('-- Stage %s: %s --' % (cur_stage, str(self.tool_result)))
cur_stage += 1
if self.tool_result and self.last_result:
for idx, cur_tc in enumerate(self.config.storage_tool_configs):
if cur_tc['mode'] in ['randread', 'read']:
last_iops = self.last_result[idx]['read_iops'] / cur_vm_count
last_bw = self.last_result[idx]['read_bw'] / cur_vm_count
cur_iops = self.tool_result[idx]['read_iops'] / target_vm_count
cur_bw = self.tool_result[idx]['read_bw'] / target_vm_count
else:
last_iops = self.last_result[idx]['write_iops'] / cur_vm_count
last_bw = self.last_result[idx]['write_bw'] / cur_vm_count
cur_iops = self.tool_result[idx]['write_iops'] / target_vm_count
cur_bw = self.tool_result[idx]['write_bw'] / target_vm_count
degrade_iops = (last_iops - cur_iops) * 100 / last_iops
degrade_bw = (last_bw - cur_bw) * 100 / last_bw
if ((cur_tc['mode'] in ['randread', 'randwrite'] and degrade_iops > limit)
or (cur_tc['mode'] in ['read', 'write'] and degrade_bw > limit)):
LOG.warning('KloudBuster is stopping the iteration because the result '
'reaches the stop limit.')
tc_flag = True
break
if tc_flag:
break
self.last_result = self.tool_result
yield self.tool_result
else:
self.single_run(test_only=test_only)
yield self.tool_result

View File

@@ -637,10 +637,12 @@ class KloudBuster(object):
total_vm = self.get_tenant_vm_count(self.server_cfg)
server_quota = {}
server_quota['gigabytes'] = total_vm * self.server_cfg['flavor']['disk']
server_quota['volumes'] = total_vm
client_quota = {}
total_vm = total_vm * self.server_cfg['number_tenants']
client_quota['gigabytes'] = total_vm * self.client_cfg['flavor']['disk'] + 20
client_quota['volumes'] = total_vm
return [server_quota, client_quota]