Replace deprecated LOG.warn with LOG.warning
LOG.warn is deprecated. It is still used in a few places. Updated these to the non-deprecated LOG.warning. Change-Id: I375a86cf06a6353cb03602e5fa7051a8f8909e12 Closes-Bug: #1508442
This commit is contained in:
parent
781f59015b
commit
3cec41522c
|
@ -180,7 +180,7 @@ class ConfigController(object):
|
||||||
kb_config.topo_cfg, kb_config.tenants_list)
|
kb_config.topo_cfg, kb_config.tenants_list)
|
||||||
kb_session.kloudbuster.fp_logfile = open(logfile_name)
|
kb_session.kloudbuster.fp_logfile = open(logfile_name)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.warn(traceback.format_exc())
|
LOG.warning(traceback.format_exc())
|
||||||
kb_session.kb_status = 'ERROR'
|
kb_session.kb_status = 'ERROR'
|
||||||
response.status = 400
|
response.status = 400
|
||||||
response.text = u"Cannot initialize KloudBuster instance."
|
response.text = u"Cannot initialize KloudBuster instance."
|
||||||
|
|
|
@ -60,7 +60,7 @@ class KBController(object):
|
||||||
kb_session.kloudbuster.stage()
|
kb_session.kloudbuster.stage()
|
||||||
kb_session.kb_status = 'STAGED'
|
kb_session.kb_status = 'STAGED'
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.warn(traceback.format_exc())
|
LOG.warning(traceback.format_exc())
|
||||||
kb_session.kb_status = 'ERROR'
|
kb_session.kb_status = 'ERROR'
|
||||||
|
|
||||||
def kb_run_test_thread_handler(self, session_id):
|
def kb_run_test_thread_handler(self, session_id):
|
||||||
|
@ -73,7 +73,7 @@ class KBController(object):
|
||||||
kb_session.first_run = False
|
kb_session.first_run = False
|
||||||
kb_session.kb_status = 'STAGED'
|
kb_session.kb_status = 'STAGED'
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.warn(traceback.format_exc())
|
LOG.warning(traceback.format_exc())
|
||||||
kb_session.kb_status = 'ERROR'
|
kb_session.kb_status = 'ERROR'
|
||||||
|
|
||||||
def kb_cleanup_thread_handler(self, session_id):
|
def kb_cleanup_thread_handler(self, session_id):
|
||||||
|
@ -197,7 +197,7 @@ class KBController(object):
|
||||||
try:
|
try:
|
||||||
kb_session.kloudbuster.stop_test()
|
kb_session.kloudbuster.stop_test()
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.warn(traceback.format_exc())
|
LOG.warning(traceback.format_exc())
|
||||||
kb_session.kb_status = 'ERROR'
|
kb_session.kb_status = 'ERROR'
|
||||||
|
|
||||||
return "OK!"
|
return "OK!"
|
||||||
|
|
|
@ -98,7 +98,7 @@ class Credentials(object):
|
||||||
# just check that they are present
|
# just check that they are present
|
||||||
for varname in ['OS_USERNAME', 'OS_AUTH_URL', 'OS_TENANT_NAME']:
|
for varname in ['OS_USERNAME', 'OS_AUTH_URL', 'OS_TENANT_NAME']:
|
||||||
if varname not in os.environ:
|
if varname not in os.environ:
|
||||||
LOG.warn("%s is missing" % varname)
|
LOG.warning("%s is missing" % varname)
|
||||||
success = False
|
success = False
|
||||||
if success:
|
if success:
|
||||||
self.rc_username = os.environ['OS_USERNAME']
|
self.rc_username = os.environ['OS_USERNAME']
|
||||||
|
|
|
@ -94,8 +94,8 @@ class KBConfig(object):
|
||||||
self.config_scale['public_key_file'] = pub_key
|
self.config_scale['public_key_file'] = pub_key
|
||||||
LOG.info('Using %s as public key for all VMs' % (pub_key))
|
LOG.info('Using %s as public key for all VMs' % (pub_key))
|
||||||
else:
|
else:
|
||||||
LOG.warn('No public key is found or specified to instantiate VMs. '
|
LOG.warning('No public key is found or specified to instantiate VMs. '
|
||||||
'You will not be able to access the VMs spawned by KloudBuster.')
|
'You will not be able to access the VMs spawned by KloudBuster.')
|
||||||
|
|
||||||
if self.storage_mode and not self.config_scale.client['volume_size']:
|
if self.storage_mode and not self.config_scale.client['volume_size']:
|
||||||
LOG.error('You have to specify a volumn size in order to run '
|
LOG.error('You have to specify a volumn size in order to run '
|
||||||
|
|
|
@ -206,9 +206,9 @@ class KBRunner(object):
|
||||||
and (self.expected_agent_version not in vm_version_mismatches):
|
and (self.expected_agent_version not in vm_version_mismatches):
|
||||||
# only warn once for each unexpected VM version
|
# only warn once for each unexpected VM version
|
||||||
vm_version_mismatches.add(self.expected_agent_version)
|
vm_version_mismatches.add(self.expected_agent_version)
|
||||||
LOG.warn("The VM image you are running (%s) is not the expected version (%s) "
|
LOG.warning("The VM image you are running (%s) is not the expected version (%s) "
|
||||||
"this may cause some incompatibilities" %
|
"this may cause some incompatibilities" %
|
||||||
(self.agent_version, self.expected_agent_version))
|
(self.agent_version, self.expected_agent_version))
|
||||||
|
|
||||||
def gen_host_stats(self):
|
def gen_host_stats(self):
|
||||||
self.host_stats = {}
|
self.host_stats = {}
|
||||||
|
|
|
@ -62,8 +62,8 @@ class KBRunner_HTTP(KBRunner):
|
||||||
timeout = self.config.http_tool_configs.duration + 30
|
timeout = self.config.http_tool_configs.duration + 30
|
||||||
cnt_pending = self.polling_vms(timeout)[2]
|
cnt_pending = self.polling_vms(timeout)[2]
|
||||||
if cnt_pending != 0:
|
if cnt_pending != 0:
|
||||||
LOG.warn("Testing VMs are not returning results within grace period, "
|
LOG.warning("Testing VMs are not returning results within grace period, "
|
||||||
"summary shown below may not be accurate!")
|
"summary shown below may not be accurate!")
|
||||||
|
|
||||||
# Parse the results from HTTP benchmarking tool
|
# Parse the results from HTTP benchmarking tool
|
||||||
for key, instance in self.client_dict.items():
|
for key, instance in self.client_dict.items():
|
||||||
|
@ -136,10 +136,10 @@ class KBRunner_HTTP(KBRunner):
|
||||||
if limit[1] in pert_dict.keys():
|
if limit[1] in pert_dict.keys():
|
||||||
timeout_at_percentile = pert_dict[limit[1]] // 1000000
|
timeout_at_percentile = pert_dict[limit[1]] // 1000000
|
||||||
elif limit[1] != 0:
|
elif limit[1] != 0:
|
||||||
LOG.warn('Percentile %s%% is not a standard statistic point.' % limit[1])
|
LOG.warning('Percentile %s%% is not a standard statistic point.' % limit[1])
|
||||||
if err > limit[0] or timeout_at_percentile > timeout:
|
if err > limit[0] or timeout_at_percentile > timeout:
|
||||||
LOG.warn('KloudBuster is stopping the iteration because the result '
|
LOG.warning('KloudBuster is stopping the iteration because the result '
|
||||||
'reaches the stop limit.')
|
'reaches the stop limit.')
|
||||||
break
|
break
|
||||||
|
|
||||||
for idx in xrange(cur_vm_count, target_vm_count):
|
for idx in xrange(cur_vm_count, target_vm_count):
|
||||||
|
|
|
@ -47,8 +47,8 @@ class KBRunner_Storage(KBRunner):
|
||||||
timeout = self.config.storage_tool_configs.runtime + 30
|
timeout = self.config.storage_tool_configs.runtime + 30
|
||||||
cnt_pending = self.polling_vms(timeout)[2]
|
cnt_pending = self.polling_vms(timeout)[2]
|
||||||
if cnt_pending != 0:
|
if cnt_pending != 0:
|
||||||
LOG.warn("Testing VMs are not returning results within grace period, "
|
LOG.warning("Testing VMs are not returning results within grace period, "
|
||||||
"summary shown below may not be accurate!")
|
"summary shown below may not be accurate!")
|
||||||
|
|
||||||
# Parse the results from storage benchmarking tool
|
# Parse the results from storage benchmarking tool
|
||||||
for key, instance in self.client_dict.items():
|
for key, instance in self.client_dict.items():
|
||||||
|
|
|
@ -232,7 +232,7 @@ class KloudBuster(object):
|
||||||
self.storage_mode = storage_mode
|
self.storage_mode = storage_mode
|
||||||
if topology and tenants_list:
|
if topology and tenants_list:
|
||||||
self.topology = None
|
self.topology = None
|
||||||
LOG.warn("REUSING MODE: Topology configs will be ignored.")
|
LOG.warning("REUSING MODE: Topology configs will be ignored.")
|
||||||
else:
|
else:
|
||||||
self.topology = topology
|
self.topology = topology
|
||||||
if tenants_list:
|
if tenants_list:
|
||||||
|
@ -241,8 +241,8 @@ class KloudBuster(object):
|
||||||
[{'name': tenants_list['tenant_name'], 'user': tenants_list['server_user']}]
|
[{'name': tenants_list['tenant_name'], 'user': tenants_list['server_user']}]
|
||||||
self.tenants_list['client'] =\
|
self.tenants_list['client'] =\
|
||||||
[{'name': tenants_list['tenant_name'], 'user': tenants_list['client_user']}]
|
[{'name': tenants_list['tenant_name'], 'user': tenants_list['client_user']}]
|
||||||
LOG.warn("REUSING MODE: The quotas will not be adjusted automatically.")
|
LOG.warning("REUSING MODE: The quotas will not be adjusted automatically.")
|
||||||
LOG.warn("REUSING MODE: The flavor configs will be ignored.")
|
LOG.warning("REUSING MODE: The flavor configs will be ignored.")
|
||||||
else:
|
else:
|
||||||
self.tenants_list = {'server': None, 'client': None}
|
self.tenants_list = {'server': None, 'client': None}
|
||||||
# TODO(check on same auth_url instead)
|
# TODO(check on same auth_url instead)
|
||||||
|
@ -540,7 +540,7 @@ class KloudBuster(object):
|
||||||
except Exception:
|
except Exception:
|
||||||
traceback.print_exc()
|
traceback.print_exc()
|
||||||
if not cleanup_flag:
|
if not cleanup_flag:
|
||||||
LOG.warn('Some resources in server cloud are not cleaned up properly.')
|
LOG.warning('Some resources in server cloud are not cleaned up properly.')
|
||||||
KBResLogger.dump_and_save('svr', self.kloud.res_logger.resource_list)
|
KBResLogger.dump_and_save('svr', self.kloud.res_logger.resource_list)
|
||||||
|
|
||||||
cleanup_flag = False
|
cleanup_flag = False
|
||||||
|
@ -549,7 +549,7 @@ class KloudBuster(object):
|
||||||
except Exception:
|
except Exception:
|
||||||
traceback.print_exc()
|
traceback.print_exc()
|
||||||
if not cleanup_flag:
|
if not cleanup_flag:
|
||||||
LOG.warn('Some resources in client cloud are not cleaned up properly.')
|
LOG.warning('Some resources in client cloud are not cleaned up properly.')
|
||||||
KBResLogger.dump_and_save('clt', self.testing_kloud.res_logger.resource_list)
|
KBResLogger.dump_and_save('clt', self.testing_kloud.res_logger.resource_list)
|
||||||
|
|
||||||
# Set the kloud to None
|
# Set the kloud to None
|
||||||
|
|
|
@ -96,8 +96,8 @@ class User(object):
|
||||||
if exc.http_status != 409:
|
if exc.http_status != 409:
|
||||||
raise exc
|
raise exc
|
||||||
# Try to repair keystone by removing that user
|
# Try to repair keystone by removing that user
|
||||||
LOG.warn("User creation failed due to stale user with same name: " +
|
LOG.warning("User creation failed due to stale user with same name: " +
|
||||||
self.user_name)
|
self.user_name)
|
||||||
user = self.tenant.kloud.keystone.users.find(name=self.user_name)
|
user = self.tenant.kloud.keystone.users.find(name=self.user_name)
|
||||||
LOG.info("Deleting stale user with name: " + self.user_name)
|
LOG.info("Deleting stale user with name: " + self.user_name)
|
||||||
self.tenant.kloud.keystone.users.delete(user)
|
self.tenant.kloud.keystone.users.delete(user)
|
||||||
|
|
|
@ -125,8 +125,8 @@ class WrkTool(PerfTool):
|
||||||
all_res['latency_stats'].sort()
|
all_res['latency_stats'].sort()
|
||||||
|
|
||||||
if err_flag:
|
if err_flag:
|
||||||
LOG.warn('Unable to find latency_stats from the result dictionary, this '
|
LOG.warning('Unable to find latency_stats from the result dictionary, this '
|
||||||
'may indicate that the test application on VM exited abnormally.')
|
'may indicate that the test application on VM exited abnormally.')
|
||||||
|
|
||||||
return all_res
|
return all_res
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue