Use default plugin instead of fake

This patch refactors the API tests' default templates and other
useful plugin functions into a reusable common module, so that
the other tests no longer have to rely on the fake plugin.

Change-Id: Ic7cdcf6681ba5f449cd1736444b1ac33a73f596f
Raissa Sarmento 2016-09-08 10:25:40 -03:00
parent b2237d096e
commit d70d4cc010
11 changed files with 381 additions and 309 deletions
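As a minimal sketch of the intended usage (names match the new
plugin_utils module in the diff below; the client object and the
Tempest configuration are assumed to be already set up):

from sahara_tempest_plugin.common import plugin_utils

# Pick the plugin to test against ('fake' is preferred when enabled).
plugin_name = plugin_utils.get_default_plugin()
# Version resolution needs the plugin details fetched from the API.
plugin = client.get_plugin(plugin_name)['plugin']
version = plugin_utils.get_default_version(plugin)
# Build request bodies for the chosen plugin/version pair.
node_group_template = plugin_utils.get_node_group_template('worker1', version)
cluster_template = plugin_utils.get_cluster_template(default_version=version)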


@ -0,0 +1,8 @@
---
prelude: >
  Tests no longer depend on the fake plugin to run.
other:
  - Adapt the Sahara tests code to stop relying solely on the
    fake plugin and use the default plugin available instead.
    Note, however, that the fake plugin is still preferred
    when it is available.
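
The selection order described above can be illustrated with a
standalone rendition of the new get_default_plugin helper (the
pick_plugin name and the sample plugin lists are hypothetical):

def pick_plugin(enabled_plugins,
                known=('fake', 'vanilla', 'hdp', 'spark', 'cdh')):
    # Mirrors get_default_plugin: nothing enabled means no plugin.
    if not enabled_plugins:
        return None
    # The fake plugin wins whenever it is enabled (lighter to run).
    if 'fake' in enabled_plugins:
        return 'fake'
    # Otherwise take the first enabled plugin with known templates.
    for plugin in enabled_plugins:
        if plugin in known:
            return plugin
    return ''

pick_plugin(['vanilla', 'fake'])  # -> 'fake'
pick_plugin(['cdh', 'mapr'])      # -> 'cdh'
pick_plugin([])                   # -> None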


@ -0,0 +1,318 @@
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import copy
from tempest import config
CONF = config.CONF
"""Default templates.
There should always be at least a master1 and a worker1 node
group template."""
BASE_VANILLA_DESC = {
'NODES': {
'master1': {
'count': 1,
'node_processes': ['namenode', 'resourcemanager',
'hiveserver']
},
'master2': {
'count': 1,
'node_processes': ['oozie', 'historyserver',
'secondarynamenode']
},
'worker1': {
'count': 1,
'node_processes': ['datanode', 'nodemanager'],
'node_configs': {
'MapReduce': {
'yarn.app.mapreduce.am.resource.mb': 256,
'yarn.app.mapreduce.am.command-opts': '-Xmx256m'
},
'YARN': {
'yarn.scheduler.minimum-allocation-mb': 256,
'yarn.scheduler.maximum-allocation-mb': 1024,
'yarn.nodemanager.vmem-check-enabled': False
}
}
}
},
'cluster_configs': {
'HDFS': {
'dfs.replication': 1
}
}
}
BASE_SPARK_DESC = {
'NODES': {
'master1': {
'count': 1,
'node_processes': ['namenode', 'master']
},
'worker1': {
'count': 1,
'node_processes': ['datanode', 'slave']
}
},
'cluster_configs': {
'HDFS': {
'dfs.replication': 1
}
}
}
BASE_CDH_DESC = {
'NODES': {
'master1': {
'count': 1,
'node_processes': ['CLOUDERA_MANAGER']
},
'master2': {
'count': 1,
'node_processes': ['HDFS_NAMENODE',
'YARN_RESOURCEMANAGER']
},
'master3': {
'count': 1,
'node_processes': ['OOZIE_SERVER', 'YARN_JOBHISTORY',
'HDFS_SECONDARYNAMENODE',
'HIVE_METASTORE', 'HIVE_SERVER2']
},
'worker1': {
'count': 1,
'node_processes': ['YARN_NODEMANAGER', 'HDFS_DATANODE']
}
},
'cluster_configs': {
'HDFS': {
'dfs_replication': 1
}
}
}
DEFAULT_TEMPLATES = {
'fake': OrderedDict([
('0.1', {
'NODES': {
'master1': {
'count': 1,
'node_processes': ['namenode', 'jobtracker']
},
'worker1': {
'count': 1,
'node_processes': ['datanode', 'tasktracker'],
}
}
})
]),
'vanilla': OrderedDict([
('2.6.0', copy.deepcopy(BASE_VANILLA_DESC)),
('2.7.1', copy.deepcopy(BASE_VANILLA_DESC)),
('1.2.1', {
'NODES': {
'master1': {
'count': 1,
'node_processes': ['namenode', 'jobtracker']
},
'worker1': {
'count': 1,
'node_processes': ['datanode', 'tasktracker'],
'node_configs': {
'HDFS': {
'Data Node Heap Size': 1024
},
'MapReduce': {
'Task Tracker Heap Size': 1024
}
}
}
},
'cluster_configs': {
'HDFS': {
'dfs.replication': 1
},
'MapReduce': {
'mapred.map.tasks.speculative.execution': False,
'mapred.child.java.opts': '-Xmx500m'
},
'general': {
'Enable Swift': False
}
}
})
]),
'hdp': OrderedDict([
('2.0.6', {
'NODES': {
'master1': {
'count': 1,
'node_processes': ['NAMENODE', 'SECONDARY_NAMENODE',
'ZOOKEEPER_SERVER', 'AMBARI_SERVER',
'HISTORYSERVER', 'RESOURCEMANAGER',
'GANGLIA_SERVER', 'NAGIOS_SERVER',
'OOZIE_SERVER']
},
'worker1': {
'count': 1,
'node_processes': ['HDFS_CLIENT', 'DATANODE',
'YARN_CLIENT', 'ZOOKEEPER_CLIENT',
'MAPREDUCE2_CLIENT', 'NODEMANAGER',
'PIG', 'OOZIE_CLIENT']
}
},
'cluster_configs': {
'HDFS': {
'dfs.replication': 1
}
}
})
]),
'spark': OrderedDict([
('1.0.0', copy.deepcopy(BASE_SPARK_DESC)),
('1.3.1', copy.deepcopy(BASE_SPARK_DESC))
]),
'cdh': OrderedDict([
('5.4.0', copy.deepcopy(BASE_CDH_DESC)),
('5.3.0', copy.deepcopy(BASE_CDH_DESC)),
('5', copy.deepcopy(BASE_CDH_DESC))
])
}
def get_plugin_data(plugin_name, plugin_version):
return DEFAULT_TEMPLATES[plugin_name][plugin_version]
def get_default_plugin():
"""Returns the default plugin used for testing."""
enabled_plugins = CONF.data_processing_feature_enabled.plugins
if len(enabled_plugins) == 0:
return None
# NOTE(raissa): if the fake plugin is available, use it first.
# This is to reduce the load and should be removed
# once the fake plugin is no longer needed.
if 'fake' in enabled_plugins:
return 'fake'
for plugin in enabled_plugins:
if plugin in DEFAULT_TEMPLATES.keys():
break
else:
plugin = ''
return plugin
def get_default_version(plugin):
"""Returns the default plugin version used for testing.
This is gathered separately from the plugin to allow
the plugin name to be used in skip_checks. This method
is meant to be invoked from resource_setup instead,
where API calls and exceptions are allowed.
"""
default_plugin_name = get_default_plugin()
if not (plugin and default_plugin_name):
return None
for version in DEFAULT_TEMPLATES[default_plugin_name].keys():
if version in plugin['versions']:
break
else:
version = None
return version
def get_node_group_template(nodegroup='worker1',
default_version=None,
floating_ip_pool=None):
"""Returns a node group template for the default plugin."""
try:
flavor = CONF.compute.flavor_ref
default_plugin_name = get_default_plugin()
plugin_data = (
get_plugin_data(default_plugin_name, default_version)
)
nodegroup_data = plugin_data['NODES'][nodegroup]
node_group_template = {
'description': 'Test node group template',
'plugin_name': default_plugin_name,
'hadoop_version': default_version,
'node_processes': nodegroup_data['node_processes'],
'flavor_id': flavor,
'floating_ip_pool': floating_ip_pool,
'node_configs': nodegroup_data.get('node_configs', {})
}
return node_group_template
except (IndexError, KeyError):
return None
def get_cluster_template(node_group_template_ids=None,
default_version=None):
"""Returns a cluster template for the default plugin.
node_group_template_ids contains the type and ID of pre-defined
node group templates that have to be used in the cluster template
(instead of dynamically defining them with 'node_processes').
"""
flavor = CONF.compute.flavor_ref
default_plugin_name = get_default_plugin()
if node_group_template_ids is None:
node_group_template_ids = {}
try:
plugin_data = (
get_plugin_data(default_plugin_name, default_version)
)
all_node_groups = []
for ng_name, ng_data in plugin_data['NODES'].items():
node_group = {
'name': '%s-node' % (ng_name),
'flavor_id': flavor,
'count': ng_data['count']
}
if ng_name in node_group_template_ids.keys():
# node group already defined, use it
node_group['node_group_template_id'] = (
node_group_template_ids[ng_name]
)
else:
# node_processes list defined on-the-fly
node_group['node_processes'] = ng_data['node_processes']
if 'node_configs' in ng_data:
node_group['node_configs'] = ng_data['node_configs']
all_node_groups.append(node_group)
cluster_template = {
'description': 'Test cluster template',
'plugin_name': default_plugin_name,
'hadoop_version': default_version,
'cluster_configs': plugin_data.get('cluster_configs', {}),
'node_groups': all_node_groups,
}
return cluster_template
except (IndexError, KeyError):
return None
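
To show the shape of the data these builders produce, here is a
hypothetical call for the vanilla 2.7.1 entry above ('public' is an
assumed floating IP pool name, and vanilla is assumed to be the
configured default plugin):

plugin_utils.get_node_group_template('worker1', '2.7.1', 'public')
# returns roughly:
# {'description': 'Test node group template',
#  'plugin_name': 'vanilla',
#  'hadoop_version': '2.7.1',
#  'node_processes': ['datanode', 'nodemanager'],
#  'flavor_id': CONF.compute.flavor_ref,
#  'floating_ip_pool': 'public',
#  'node_configs': {'MapReduce': {...}, 'YARN': {...}}}

# A pre-created template can be reused by ID instead of being
# defined inline ('worker1' maps to a hypothetical template ID):
plugin_utils.get_cluster_template({'worker1': 'some-template-id'},
                                  '2.7.1')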


@ -12,180 +12,17 @@
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import copy
import six
from tempest import config
from tempest import exceptions
from tempest.lib import exceptions as lib_exc
import tempest.test
from sahara_tempest_plugin import clients
from sahara_tempest_plugin.common import plugin_utils
CONF = config.CONF
"""Default templates.
There should always be at least a master1 and a worker1 node
group template."""
BASE_VANILLA_DESC = {
'NODES': {
'master1': {
'count': 1,
'node_processes': ['namenode', 'resourcemanager',
'hiveserver']
},
'master2': {
'count': 1,
'node_processes': ['oozie', 'historyserver',
'secondarynamenode']
},
'worker1': {
'count': 1,
'node_processes': ['datanode', 'nodemanager'],
'node_configs': {
'MapReduce': {
'yarn.app.mapreduce.am.resource.mb': 256,
'yarn.app.mapreduce.am.command-opts': '-Xmx256m'
},
'YARN': {
'yarn.scheduler.minimum-allocation-mb': 256,
'yarn.scheduler.maximum-allocation-mb': 1024,
'yarn.nodemanager.vmem-check-enabled': False
}
}
}
},
'cluster_configs': {
'HDFS': {
'dfs.replication': 1
}
}
}
BASE_SPARK_DESC = {
'NODES': {
'master1': {
'count': 1,
'node_processes': ['namenode', 'master']
},
'worker1': {
'count': 1,
'node_processes': ['datanode', 'slave']
}
},
'cluster_configs': {
'HDFS': {
'dfs.replication': 1
}
}
}
BASE_CDH_DESC = {
'NODES': {
'master1': {
'count': 1,
'node_processes': ['CLOUDERA_MANAGER']
},
'master2': {
'count': 1,
'node_processes': ['HDFS_NAMENODE',
'YARN_RESOURCEMANAGER']
},
'master3': {
'count': 1,
'node_processes': ['OOZIE_SERVER', 'YARN_JOBHISTORY',
'HDFS_SECONDARYNAMENODE',
'HIVE_METASTORE', 'HIVE_SERVER2']
},
'worker1': {
'count': 1,
'node_processes': ['YARN_NODEMANAGER', 'HDFS_DATANODE']
}
},
'cluster_configs': {
'HDFS': {
'dfs_replication': 1
}
}
}
DEFAULT_TEMPLATES = {
'vanilla': OrderedDict([
('2.6.0', copy.deepcopy(BASE_VANILLA_DESC)),
('2.7.1', copy.deepcopy(BASE_VANILLA_DESC)),
('1.2.1', {
'NODES': {
'master1': {
'count': 1,
'node_processes': ['namenode', 'jobtracker']
},
'worker1': {
'count': 1,
'node_processes': ['datanode', 'tasktracker'],
'node_configs': {
'HDFS': {
'Data Node Heap Size': 1024
},
'MapReduce': {
'Task Tracker Heap Size': 1024
}
}
}
},
'cluster_configs': {
'HDFS': {
'dfs.replication': 1
},
'MapReduce': {
'mapred.map.tasks.speculative.execution': False,
'mapred.child.java.opts': '-Xmx500m'
},
'general': {
'Enable Swift': False
}
}
})
]),
'hdp': OrderedDict([
('2.0.6', {
'NODES': {
'master1': {
'count': 1,
'node_processes': ['NAMENODE', 'SECONDARY_NAMENODE',
'ZOOKEEPER_SERVER', 'AMBARI_SERVER',
'HISTORYSERVER', 'RESOURCEMANAGER',
'GANGLIA_SERVER', 'NAGIOS_SERVER',
'OOZIE_SERVER']
},
'worker1': {
'count': 1,
'node_processes': ['HDFS_CLIENT', 'DATANODE',
'YARN_CLIENT', 'ZOOKEEPER_CLIENT',
'MAPREDUCE2_CLIENT', 'NODEMANAGER',
'PIG', 'OOZIE_CLIENT']
}
},
'cluster_configs': {
'HDFS': {
'dfs.replication': 1
}
}
})
]),
'spark': OrderedDict([
('1.0.0', copy.deepcopy(BASE_SPARK_DESC)),
('1.3.1', copy.deepcopy(BASE_SPARK_DESC))
]),
'cdh': OrderedDict([
('5.4.0', copy.deepcopy(BASE_CDH_DESC)),
('5.3.0', copy.deepcopy(BASE_CDH_DESC)),
('5', copy.deepcopy(BASE_CDH_DESC))
]),
}
class BaseDataProcessingTest(tempest.test.BaseTestCase):
@ -198,7 +35,7 @@ class BaseDataProcessingTest(tempest.test.BaseTestCase):
super(BaseDataProcessingTest, cls).skip_checks()
if not CONF.service_available.sahara:
raise cls.skipException('Sahara support is required')
cls.default_plugin = cls._get_default_plugin()
cls.default_plugin = plugin_utils.get_default_plugin()
@classmethod
def setup_clients(cls):
@ -209,11 +46,14 @@ class BaseDataProcessingTest(tempest.test.BaseTestCase):
def resource_setup(cls):
super(BaseDataProcessingTest, cls).resource_setup()
cls.default_version = cls._get_default_version()
plugin = None
if cls.default_plugin:
plugin = cls.client.get_plugin(cls.default_plugin)['plugin']
cls.default_version = plugin_utils.get_default_version(plugin)
if cls.default_plugin is not None and cls.default_version is None:
raise exceptions.InvalidConfiguration(
message="No known Sahara plugin version was found")
cls.flavor_ref = CONF.compute.flavor_ref
# add lists for watched resources
cls._node_group_templates = []
@ -349,59 +189,11 @@ class BaseDataProcessingTest(tempest.test.BaseTestCase):
return resp_body
@classmethod
def _get_default_plugin(cls):
"""Returns the default plugin used for testing."""
if len(CONF.data_processing_feature_enabled.plugins) == 0:
return None
for plugin in CONF.data_processing_feature_enabled.plugins:
if plugin in DEFAULT_TEMPLATES.keys():
break
else:
plugin = ''
return plugin
@classmethod
def _get_default_version(cls):
"""Returns the default plugin version used for testing.
This is gathered separately from the plugin to allow
the usage of plugin name in skip_checks. This method is
rather invoked into resource_setup, which allows API calls
and exceptions.
"""
if not cls.default_plugin:
return None
plugin = cls.client.get_plugin(cls.default_plugin)['plugin']
for version in DEFAULT_TEMPLATES[cls.default_plugin].keys():
if version in plugin['versions']:
break
else:
version = None
return version
@classmethod
def get_node_group_template(cls, nodegroup='worker1'):
"""Returns a node group template for the default plugin."""
try:
plugin_data = (
DEFAULT_TEMPLATES[cls.default_plugin][cls.default_version]
)
nodegroup_data = plugin_data['NODES'][nodegroup]
node_group_template = {
'description': 'Test node group template',
'plugin_name': cls.default_plugin,
'hadoop_version': cls.default_version,
'node_processes': nodegroup_data['node_processes'],
'flavor_id': cls.flavor_ref,
'node_configs': nodegroup_data.get('node_configs', {}),
}
return node_group_template
except (IndexError, KeyError):
return None
return plugin_utils.get_node_group_template(nodegroup,
cls.default_version)
@classmethod
def get_cluster_template(cls, node_group_template_ids=None):
@ -411,42 +203,8 @@ class BaseDataProcessingTest(tempest.test.BaseTestCase):
node group templates that have to be used in the cluster template
(instead of dynamically defining them with 'node_processes').
"""
if node_group_template_ids is None:
node_group_template_ids = {}
try:
plugin_data = (
DEFAULT_TEMPLATES[cls.default_plugin][cls.default_version]
)
all_node_groups = []
for ng_name, ng_data in six.iteritems(plugin_data['NODES']):
node_group = {
'name': '%s-node' % (ng_name),
'flavor_id': cls.flavor_ref,
'count': ng_data['count']
}
if ng_name in node_group_template_ids.keys():
# node group already defined, use it
node_group['node_group_template_id'] = (
node_group_template_ids[ng_name]
)
else:
# node_processes list defined on-the-fly
node_group['node_processes'] = ng_data['node_processes']
if 'node_configs' in ng_data:
node_group['node_configs'] = ng_data['node_configs']
all_node_groups.append(node_group)
cluster_template = {
'description': 'Test cluster template',
'plugin_name': cls.default_plugin,
'hadoop_version': cls.default_version,
'cluster_configs': plugin_data.get('cluster_configs', {}),
'node_groups': all_node_groups,
}
return cluster_template
except (IndexError, KeyError):
return None
return plugin_utils.get_cluster_template(node_group_template_ids,
cls.default_version)
@classmethod
def wait_for_resource_deletion(cls, resource_id, get_resource):


@ -18,6 +18,8 @@ from tempest.lib.cli import base
from tempest.test import BaseTestCase
from tempest.lib import exceptions as exc
from sahara_tempest_plugin.common import plugin_utils
DEL_RESULT = '''\
{} "{}" has been removed successfully.
'''
@ -81,15 +83,13 @@ class ClientTestBase(base.ClientTestBase):
# have the first letter capitalized.
self.assertEqual(delete_cmd.lower(), result.lower())
def find_fake_plugin(self):
found_plugin = None
def get_default_plugin(self):
plugins = self.listing_result('plugin list')
default_plugin_name = plugin_utils.get_default_plugin()
for plugin in plugins:
if plugin['Name'] == 'fake':
found_plugin = plugin
if found_plugin is None:
raise self.skipException('No available plugins for testing')
return found_plugin
if plugin['Name'] == default_plugin_name:
return plugin
raise self.skipException('No available plugins for testing')
def find_id_of_pool(self):
floating_pool_list = self.neutron('floatingip-list')


@ -25,7 +25,6 @@ class SaharaClusterTemplateCLITest(base.ClientTestBase):
])
def openstack_cluster_template_create(self, ng_master, ng_worker):
self.find_fake_plugin()
cluster_template_name = data_utils.rand_name('cl-tmp')
flag = ("%(ct_name)s %(ngm)s %(ngw)s "
% {'ngw': ''.join([ng_worker, ':3']),


@ -48,7 +48,10 @@ class SaharaImageCLITest(base.ClientTestBase):
])
def openstack_image_tags_add(self, image_name):
flag = ''.join([image_name, ' --tags fake 0.1'])
plugin = self.get_default_plugin()
flag = '%s --tags %s %s' % (image_name,
plugin['Name'],
plugin['Versions'])
self.assertTableStruct(
self.listing_result(''.join(['image tags add ', flag])), [
'Field',


@ -26,7 +26,7 @@ class SaharaNodeGroupCLITest(base.ClientTestBase):
])
def openstack_node_group_template_create(self, ng_type, flavor_id):
plugin = self.find_fake_plugin()
plugin = self.get_default_plugin()
id_net_pool = self.find_id_of_pool()
node_group_name = data_utils.rand_name(ng_type)
flags = ("%(ngt_name)s %(plugin)s %(plugin-version)s "


@ -24,6 +24,7 @@ from tempest import exceptions as tempest_exc
from tempest.lib import exceptions
from tempest.scenario import manager
from sahara_tempest_plugin.common import plugin_utils
TEMPEST_CONF = config.CONF
@ -78,36 +79,23 @@ class BaseDataProcessingTest(manager.ScenarioTest):
cls.test_image_id = cls.get_image_id(test_image_name)
cls.worker_template = {
'description': 'Test node group template',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': [
'datanode',
'tasktracker'
],
'flavor_id': TEMPEST_CONF.compute.flavor_ref,
'floating_ip_pool': cls.floating_ip_pool
}
default_plugin = cls.get_plugin()
plugin_dict = default_plugin.to_dict()
default_version = plugin_utils.get_default_version(plugin_dict)
cls.master_template = {
'description': 'Test node group template',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': [
'namenode',
'jobtracker'
],
'flavor_id': TEMPEST_CONF.compute.flavor_ref,
'floating_ip_pool': cls.floating_ip_pool,
'auto_security_group': True
}
cls.worker_template = (
plugin_utils.get_node_group_template('worker1',
default_version,
cls.floating_ip_pool))
cls.cluster_template = {
'description': 'Test cluster template',
'plugin_name': 'fake',
'hadoop_version': '0.1'
}
cls.master_template = (
plugin_utils.get_node_group_template('master1',
default_version,
cls.floating_ip_pool))
cls.cluster_template = (
plugin_utils.get_cluster_template(
default_version=default_version))
cls.swift_data_source_with_creds = {
'url': 'swift://sahara-container/input-source',
@ -166,6 +154,15 @@ class BaseDataProcessingTest(manager.ScenarioTest):
raise exceptions.NotFound('Image \'%s\' not found in the image list.'
% (image_name))
@classmethod
def get_plugin(cls):
plugins = cls.client.plugins.list()
plugin_name = plugin_utils.get_default_plugin()
for plugin in plugins:
if plugin.name == plugin_name:
return plugin
raise exceptions.NotFound('No available plugins for testing')
def create_node_group_template(self, name, **kwargs):
resp_body = self.client.node_group_templates.create(


@ -18,9 +18,6 @@ from tempest.lib.common.utils import data_utils
from sahara_tempest_plugin.tests.clients import base
TEMPEST_CONF = config.CONF
class ClusterTemplateTest(base.BaseDataProcessingTest):
def _check_create_cluster_template(self):
ng_template_name = data_utils.rand_name('sahara-ng-template')
@ -28,19 +25,10 @@ class ClusterTemplateTest(base.BaseDataProcessingTest):
**self.worker_template)
full_cluster_template = self.cluster_template.copy()
full_cluster_template['node_groups'] = [
{
'name': 'master-node',
'flavor_id': TEMPEST_CONF.compute.flavor_ref,
'node_processes': ['namenode'],
'count': 1
},
{
'name': 'worker-node',
'node_group_template_id': ng_template.id,
'count': 3
}
]
# The 'node_groups' field in the response body
# has some extra info that post body does not have.
del self.cluster_template['node_groups']
template_name = data_utils.rand_name('sahara-cluster-template')


@ -20,7 +20,8 @@ class PluginsTest(base.BaseDataProcessingTest):
def _check_plugins_list(self):
plugins = self.client.plugins.list()
plugins_names = [plugin.name for plugin in plugins]
self.assertIn('fake', plugins_names)
plugin = self.get_plugin()
self.assertIn(plugin.name, plugins_names)
return plugins_names