Rename 'self.savanna' to 'self.sahara' in integration tests

This is a simple change that renames the member holding the
sahara client in the integration test code.
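
Only the attribute name changes at each call site. As a before/after
sketch (the call shown is one of the sites touched by this change):

    # before
    data = self.savanna.clusters.get(cluster_id)
    # after
    data = self.sahara.clusters.get(cluster_id)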

Partial-Implements: blueprint savanna-renaming-service
Change-Id: Ib9acc489805e613af97d3d0272a3d25ff28e61c6
Trevor McKay 2014-03-18 17:26:27 -04:00
parent 499213e7de
commit b28c7703a3
6 changed files with 31 additions and 31 deletions


@@ -63,7 +63,7 @@ class ITestCase(unittest2.TestCase):
self.common_config.SAVANNA_HOST, self.common_config.SAVANNA_PORT
)
-self.savanna = savanna_client.Client(
+self.sahara = savanna_client.Client(
self.common_config.SAVANNA_API_VERSION,
username=self.common_config.OS_USERNAME,
api_key=self.common_config.OS_PASSWORD,
@@ -113,7 +113,7 @@ class ITestCase(unittest2.TestCase):
node_processes, node_configs,
volumes_per_node=0, volume_size=0,
floating_ip_pool=None):
-data = self.savanna.node_group_templates.create(
+data = self.sahara.node_group_templates.create(
name, plugin_config.PLUGIN_NAME, plugin_config.HADOOP_VERSION,
self.flavor_id, description, volumes_per_node, volume_size,
node_processes, node_configs, floating_ip_pool)
@@ -127,7 +127,7 @@ class ITestCase(unittest2.TestCase):
for key, value in node_group.items():
if value is None:
del node_group[key]
-data = self.savanna.cluster_templates.create(
+data = self.sahara.cluster_templates.create(
name, plugin_config.PLUGIN_NAME, plugin_config.HADOOP_VERSION,
description, cluster_configs, node_groups, anti_affinity, net_id)
cluster_template_id = data.id
@@ -138,7 +138,7 @@ class ITestCase(unittest2.TestCase):
node_groups=None, anti_affinity=None,
net_id=None, is_transient=False):
self.cluster_id = None
-data = self.savanna.clusters.create(
+data = self.sahara.clusters.create(
self.common_config.CLUSTER_NAME + '-' + plugin_config.PLUGIN_NAME,
plugin_config.PLUGIN_NAME, plugin_config.HADOOP_VERSION,
cluster_template_id, plugin_config.IMAGE_ID, is_transient,
@@ -196,7 +196,7 @@ class ITestCase(unittest2.TestCase):
#---------Helper methods for cluster info obtaining and its processing---------
def poll_cluster_state(self, cluster_id):
-data = self.savanna.clusters.get(cluster_id)
+data = self.sahara.clusters.get(cluster_id)
timeout = self.common_config.CLUSTER_CREATION_TIMEOUT * 60
while str(data.status) != 'Active':
if str(data.status) == 'Error':
@@ -207,13 +207,13 @@ class ITestCase(unittest2.TestCase):
'within %d minutes.'
% self.common_config.CLUSTER_CREATION_TIMEOUT
)
-data = self.savanna.clusters.get(cluster_id)
+data = self.sahara.clusters.get(cluster_id)
time.sleep(10)
timeout -= 10
return str(data.status)
def get_cluster_node_ip_list_with_node_processes(self, cluster_id):
-data = self.savanna.clusters.get(cluster_id)
+data = self.sahara.clusters.get(cluster_id)
node_groups = data.node_groups
node_ip_list_with_node_processes = {}
for node_group in node_groups:
@@ -526,12 +526,12 @@ class ITestCase(unittest2.TestCase):
node_group_template_id_list=None):
if not self.common_config.RETAIN_CLUSTER_AFTER_TEST:
if cluster_id:
-self.savanna.clusters.delete(cluster_id)
+self.sahara.clusters.delete(cluster_id)
if cluster_template_id:
-self.savanna.cluster_templates.delete(cluster_template_id)
+self.sahara.cluster_templates.delete(cluster_template_id)
if node_group_template_id_list:
for node_group_template_id in node_group_template_id_list:
-self.savanna.node_group_templates.delete(
+self.sahara.node_group_templates.delete(
node_group_template_id
)


@@ -18,7 +18,7 @@ from sahara.tests.integration.tests import base
class CinderVolumeTest(base.ITestCase):
def _get_node_list_with_volumes(self, cluster_info):
-data = self.savanna.clusters.get(cluster_info['cluster_id'])
+data = self.sahara.clusters.get(cluster_info['cluster_id'])
node_groups = data.node_groups
node_list_with_volumes = []
for node_group in node_groups:


@@ -118,7 +118,7 @@ class ClusterConfigTest(base.ITestCase):
message='Test for cluster configs was skipped.')
def cluster_config_testing(self, cluster_info):
cluster_id = cluster_info['cluster_id']
-data = self.savanna.clusters.get(cluster_id)
+data = self.sahara.clusters.get(cluster_id)
self._compare_configs(
{'Enable Swift': True}, data.cluster_configs['general']
)


@@ -26,24 +26,24 @@ from sahara.utils import edp
class EDPTest(base.ITestCase):
def _create_data_source(self, name, data_type, url, description=''):
-return self.savanna.data_sources.create(
+return self.sahara.data_sources.create(
name, description, data_type, url, self.common_config.OS_USERNAME,
self.common_config.OS_PASSWORD).id
def _create_job_binary_internals(self, name, data):
-return self.savanna.job_binary_internals.create(name, data).id
+return self.sahara.job_binary_internals.create(name, data).id
def _create_job_binary(self, name, url):
-return self.savanna.job_binaries.create(name, url,
-description='', extra={}).id
+return self.sahara.job_binaries.create(name, url,
+description='', extra={}).id
def _create_job(self, name, job_type, mains, libs):
-return self.savanna.jobs.create(name, job_type, mains, libs,
-description='').id
+return self.sahara.jobs.create(name, job_type, mains, libs,
+description='').id
def _await_job_execution(self, job):
timeout = self.common_config.JOB_LAUNCH_TIMEOUT * 60
-status = self.savanna.job_executions.get(job.id).info['status']
+status = self.sahara.job_executions.get(job.id).info['status']
while status != 'SUCCEEDED':
if status == 'KILLED':
self.fail('Job status == \'KILLED\'.')
@@ -52,7 +52,7 @@ class EDPTest(base.ITestCase):
'Job did not return to \'SUCCEEDED\' status within '
'%d minute(s).' % self.common_config.JOB_LAUNCH_TIMEOUT
)
-status = self.savanna.job_executions.get(job.id).info['status']
+status = self.sahara.job_executions.get(job.id).info['status']
time.sleep(10)
timeout -= 10
@@ -77,19 +77,19 @@ class EDPTest(base.ITestCase):
def _delete_job(self, execution_job, job_id, job_binary_list,
job_binary_internal_list, input_id, output_id):
if execution_job:
-self.savanna.job_executions.delete(execution_job.id)
+self.sahara.job_executions.delete(execution_job.id)
if job_id:
-self.savanna.jobs.delete(job_id)
+self.sahara.jobs.delete(job_id)
if job_binary_list:
for job_binary_id in job_binary_list:
-self.savanna.job_binaries.delete(job_binary_id)
+self.sahara.job_binaries.delete(job_binary_id)
if job_binary_internal_list:
for internal_id in job_binary_internal_list:
-self.savanna.job_binary_internals.delete(internal_id)
+self.sahara.job_binary_internals.delete(internal_id)
if input_id:
-self.savanna.data_sources.delete(input_id)
+self.sahara.data_sources.delete(input_id)
if output_id:
-self.savanna.data_sources.delete(output_id)
+self.sahara.data_sources.delete(output_id)
def _add_swift_configs(self, configs):
swift_user = "fs.swift.service.sahara.username"
@@ -170,7 +170,7 @@ class EDPTest(base.ITestCase):
configs["args"] = [swift_input_url,
swift_output_url]
-job_execution = self.savanna.job_executions.create(
+job_execution = self.sahara.job_executions.create(
job_id, self.cluster_id, input_id, output_id,
configs=configs)


@@ -48,7 +48,7 @@ class MapReduceTest(base.ITestCase):
)
def _transfer_helper_script_to_nodes(self, cluster_info):
-data = self.savanna.clusters.get(cluster_info['cluster_id'])
+data = self.sahara.clusters.get(cluster_info['cluster_id'])
node_groups = data.node_groups
for node_group in node_groups:
if node_group['volumes_per_node'] != 0:


@@ -20,7 +20,7 @@ from sahara.tests.integration.tests import base
class ScalingTest(base.ITestCase):
def _change_node_info_while_ng_adding(self, ngt_id, count, cluster_info):
cluster_info['node_info']['node_count'] += count
-node_processes = self.savanna.node_group_templates.get(
+node_processes = self.sahara.node_group_templates.get(
ngt_id).node_processes
if cluster_info['plugin_config'].PROCESS_NAMES['tt'] in node_processes:
cluster_info['node_info']['tasktracker_count'] += count
@@ -28,7 +28,7 @@ class ScalingTest(base.ITestCase):
cluster_info['node_info']['datanode_count'] += count
def _change_node_info_while_ng_resizing(self, name, count, cluster_info):
-node_groups = self.savanna.clusters.get(
+node_groups = self.sahara.clusters.get(
cluster_info['cluster_id']).node_groups
for node_group in node_groups:
if node_group['name'] == name:
@@ -87,7 +87,7 @@ class ScalingTest(base.ITestCase):
self._change_node_info_while_ng_adding(
node_group_id, node_group_size, cluster_info
)
-self.savanna.clusters.scale(cluster_info['cluster_id'], scale_body)
+self.sahara.clusters.scale(cluster_info['cluster_id'], scale_body)
self.poll_cluster_state(cluster_info['cluster_id'])
new_node_ip_list = self.get_cluster_node_ip_list_with_node_processes(
cluster_info['cluster_id']