Test templates: add the ocata/ dir, remove liberty/ etc

The current set matches the jobs tested on the Sahara CI on the
current master, which is going to become Ocata.
Remove the directory with Liberty templates (Liberty has been EOL
for a while).
Also remove mapr-5.0.0.mrv2.yaml.mako in the main test directory
(it is going to be removed completely in newer versions).
Finally, fix the unit tests which depended on the test templates
for Liberty (use Newton for now).

Change-Id: I61cae34c7ce3b7de22502f4ebb66464df9164647
Luigi Toscano 2017-01-31 16:52:15 +01:00
parent dabfeb562f
commit 52ea7a6e03
13 changed files with 169 additions and 206 deletions
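
How the new directory is consumed: the scenario runner selects test templates per release via the -r/--release flag. Below is a minimal sketch of pointing it at the new ocata/ templates, assuming the ocata/ directory is selectable exactly the way newton is in the unit tests at the end of this diff (all other flags and values mirror those tests; the credentials and endpoint are the test placeholders):

import sys

from sahara_tests.scenario import runner

# Sketch only: same invocation shape as test_default_templates below,
# with the release switched to the new ocata/ directory (assumption).
sys.argv = ['sahara_tests/scenario/runner.py',
            '-V',
            'sahara_tests/unit/scenario/templatevars_complete.ini',
            '-p', 'spark', '-v', '1.6.0', '-r', 'ocata',
            '--os-username', 'demo', '--os-password', 'demopwd',
            '--os-project-name', 'demo',
            '--os-auth-url', 'http://127.0.0.1:5000/v2']
runner.main()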

View File

@@ -0,0 +1,15 @@
---
prelude: >
Ocata test templates are now available, while Liberty
test templates have been removed.
features:
- A folder with test templates for Ocata has been created
and initialized starting from the templates in the main
directory, following the status of the jobs tested on
the Sahara CI.
deprecations:
- The Liberty-specific test templates have been removed.
This means that starting from this release Liberty is
no longer supported (it has been EOL for a while).
- The MapR 5.0.0 test template has been removed from
the master branch as well.

View File

@@ -1,54 +0,0 @@
<%page args="is_proxy_gateway='true', use_auto_security_group='true', ci_flavor_id='m1.small'"/>
clusters:
- plugin_name: hdp
plugin_version: 2.0.6
image: ${hdp_206_image}
node_group_templates:
- name: master
flavor: ${ci_flavor_id}
node_processes:
- AMBARI_SERVER
- GANGLIA_SERVER
- HISTORYSERVER
- NAGIOS_SERVER
- NAMENODE
- OOZIE_SERVER
- RESOURCEMANAGER
- SECONDARY_NAMENODE
- ZOOKEEPER_SERVER
auto_security_group: ${use_auto_security_group}
is_proxy_gateway: ${is_proxy_gateway}
- name: worker
flavor: ${ci_flavor_id}
node_processes:
- DATANODE
- HDFS_CLIENT
- MAPREDUCE2_CLIENT
- NODEMANAGER
- OOZIE_CLIENT
- PIG
- YARN_CLIENT
- ZOOKEEPER_CLIENT
volumes_per_node: 2
volumes_size: 2
auto_security_group: ${use_auto_security_group}
cluster_template:
name: hdp206
node_group_templates:
master: 1
worker: 3
cluster_configs:
YARN:
yarn.log-aggregation-enable: false
cluster:
name: ${cluster_name}
scaling:
- operation: add
node_group: worker
size: 1
edp_jobs_flow:
- pig_job
- mapreduce_job
- mapreduce_streaming_job
- java_job

View File

@@ -1,54 +0,0 @@
<%page args="is_proxy_gateway='true', use_auto_security_group='true', ci_flavor_id='m1.small'"/>
clusters:
- plugin_name: vanilla
plugin_version: 2.7.1
image: ${vanilla_271_image}
node_group_templates:
- name: worker
flavor: ${ci_flavor_id}
node_processes:
- datanode
- nodemanager
volumes_per_node: 2
volumes_size: 2
auto_security_group: ${use_auto_security_group}
node_configs:
&ng_configs
MapReduce:
yarn.app.mapreduce.am.resource.mb: 256
yarn.app.mapreduce.am.command-opts: -Xmx256m
YARN:
yarn.scheduler.minimum-allocation-mb: 256
yarn.scheduler.maximum-allocation-mb: 1024
yarn.nodemanager.vmem-check-enabled: false
- name: master
flavor: ${ci_flavor_id}
node_processes:
- oozie
- historyserver
- resourcemanager
- namenode
auto_security_group: ${use_auto_security_group}
is_proxy_gateway: ${is_proxy_gateway}
cluster_template:
name: transient
node_group_templates:
master: 1
worker: 3
cluster_configs:
HDFS:
dfs.replication: 1
MapReduce:
mapreduce.tasktracker.map.tasks.maximum: 16
mapreduce.tasktracker.reduce.tasks.maximum: 16
YARN:
yarn.resourcemanager.scheduler.class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler
cluster:
name: ${cluster_name}
is_transient: true
scenario:
- run_jobs
- transient
edp_jobs_flow: pig_job

View File

@@ -1,56 +0,0 @@
<%page args="use_auto_security_group='true', mapr_master_flavor_id='mapr.master', mapr_worker_flavor_id='mapr.worker'"/>
clusters:
- plugin_name: mapr
plugin_version: 5.0.0.mrv2
image: ${mapr_500mrv2_image}
node_group_templates:
- name: master
flavor:
name: ${mapr_master_flavor_id}
vcpus: 4
ram: 8192
root_disk: 80
ephemeral_disk: 40
node_processes:
- Metrics
- Webserver
- ZooKeeper
- HTTPFS
- Oozie
- FileServer
- CLDB
- Flume
- Hue
- NodeManager
- HistoryServer
- ResourceManager
- HiveServer2
- HiveMetastore
- Sqoop2-Client
- Sqoop2-Server
auto_security_group: ${use_auto_security_group}
- name: worker
flavor:
name: ${mapr_worker_flavor_id}
vcpus: 2
ram: 4096
root_disk: 40
ephemeral_disk: 40
node_processes:
- NodeManager
- FileServer
auto_security_group: ${use_auto_security_group}
cluster_template:
name: mapr500mrv2
node_group_templates:
master: 1
worker: 1
cluster:
name: ${cluster_name}
scaling:
- operation: add
node_group: worker
size: 1
edp_jobs_flow:
- mapr

View File

@@ -2,7 +2,7 @@
clusters:
- plugin_name: ambari
plugin_version: '2.3'
plugin_version: '2.4'
image: ${ambari_22_image}
node_group_templates:
- name: master
@@ -58,8 +58,10 @@ clusters:
timeout: 30
cluster:
name: ${cluster_name}
scenario:
- run_jobs
scaling:
- operation: add
node_group: worker
size: 1
edp_jobs_flow:
- java_job
- spark_pi

View File

@@ -1,9 +1,9 @@
<%page args="is_proxy_gateway='true', use_auto_security_group='true', ci_flavor_id='m1.small', medium_flavor_id='m1.medium', large_flavor_id='m1.large'"/>
<%page args="is_proxy_gateway='true', use_auto_security_group='true', ci_flavor_id='m1.small', large_flavor_id='m1.large'"/>
clusters:
- plugin_name: cdh
plugin_version: 5.4.0
image: ${cdh_540_image}
plugin_version: 5.7.0
image: ${cdh_570_image}
node_group_templates:
- name: worker-dn
flavor: ${ci_flavor_id}
@@ -39,7 +39,7 @@ clusters:
is_proxy_gateway: ${is_proxy_gateway}
auto_security_group: ${use_auto_security_group}
- name: master-core
flavor: ${medium_flavor_id}
flavor: ${large_flavor_id}
node_processes:
- HDFS_NAMENODE
- YARN_RESOURCEMANAGER
@@ -48,7 +48,7 @@ clusters:
- ZOOKEEPER_SERVER
auto_security_group: ${use_auto_security_group}
- name: master-additional
flavor: ${medium_flavor_id}
flavor: ${large_flavor_id}
node_processes:
- OOZIE_SERVER
- YARN_JOBHISTORY
@@ -58,8 +58,15 @@ clusters:
- HIVE_SERVER2
- SPARK_YARN_HISTORY_SERVER
auto_security_group: ${use_auto_security_group}
# In 5.7 the defaults of the following configs are too large,
# restrict them to save memory for scenario testing.
node_configs:
HIVEMETASTORE:
hive_metastore_java_heapsize: 2147483648
HIVESERVER:
hiveserver2_java_heapsize: 2147483648
cluster_template:
name: cdh540
name: cdh570
node_group_templates:
manager: 1
master-core: 1

View File

@@ -0,0 +1,90 @@
<%page args="is_proxy_gateway='true', use_auto_security_group='true', ci_flavor_id='m1.small', large_flavor_id='m1.large'"/>
clusters:
- plugin_name: cdh
plugin_version: 5.9.0
image: ${cdh_590_image}
node_group_templates:
- name: worker-dn
flavor: ${ci_flavor_id}
node_processes:
- HDFS_DATANODE
volumes_per_node: 2
volumes_size: 2
auto_security_group: ${use_auto_security_group}
node_configs:
&ng_configs
DATANODE:
dfs_datanode_du_reserved: 0
- name: worker-nm
flavor: ${ci_flavor_id}
node_processes:
- YARN_NODEMANAGER
auto_security_group: ${use_auto_security_group}
- name: worker-nm-dn
flavor: ${ci_flavor_id}
node_processes:
- YARN_NODEMANAGER
- HDFS_DATANODE
volumes_per_node: 2
volumes_size: 2
auto_security_group: ${use_auto_security_group}
node_configs:
*ng_configs
- name: manager
flavor: ${large_flavor_id}
node_processes:
- CLOUDERA_MANAGER
- KMS
is_proxy_gateway: ${is_proxy_gateway}
auto_security_group: ${use_auto_security_group}
- name: master-core
flavor: ${large_flavor_id}
node_processes:
- HDFS_NAMENODE
- YARN_RESOURCEMANAGER
- SENTRY_SERVER
- YARN_NODEMANAGER
- ZOOKEEPER_SERVER
auto_security_group: ${use_auto_security_group}
- name: master-additional
flavor: ${large_flavor_id}
node_processes:
- OOZIE_SERVER
- YARN_JOBHISTORY
- YARN_NODEMANAGER
- HDFS_SECONDARYNAMENODE
- HIVE_METASTORE
- HIVE_SERVER2
- SPARK_YARN_HISTORY_SERVER
auto_security_group: ${use_auto_security_group}
# In 5.9 the defaults of the following configs are too large,
# restrict them to save memory for scenario testing.
node_configs:
HIVEMETASTORE:
hive_metastore_java_heapsize: 2147483648
HIVESERVER:
hiveserver2_java_heapsize: 2147483648
cluster_template:
name: cdh590
node_group_templates:
manager: 1
master-core: 1
master-additional: 1
worker-nm-dn: 1
worker-nm: 1
worker-dn: 1
cluster_configs:
HDFS:
dfs_replication: 1
cluster:
name: ${cluster_name}
scenario:
- run_jobs
- sentry
edp_jobs_flow:
- pig_job
- mapreduce_job
- mapreduce_streaming_job
- java_job
- spark_wordcount
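
A note on the template just added: the &ng_configs anchor attached to worker-dn's node_configs is referenced again as *ng_configs under worker-nm-dn, so both node groups share a single node_configs mapping. A minimal standalone sketch of that YAML mechanism with PyYAML (illustration only, not the runner's actual loading code):

import yaml

snippet = """
worker-dn:
  node_configs: &ng_configs
    DATANODE:
      dfs_datanode_du_reserved: 0
worker-nm-dn:
  node_configs: *ng_configs
"""
# The alias resolves to the very same mapping defined at the anchor.
data = yaml.safe_load(snippet)
assert data['worker-nm-dn']['node_configs'] is data['worker-dn']['node_configs']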

View File

@@ -2,8 +2,8 @@
clusters:
- plugin_name: mapr
plugin_version: 5.0.0.mrv2
image: ${mapr_500mrv2_image}
plugin_version: 5.2.0.mrv2
image: ${mapr_520mrv2_image}
node_group_templates:
- name: master
flavor:
@@ -42,7 +42,7 @@ clusters:
- FileServer
auto_security_group: ${use_auto_security_group}
cluster_template:
name: mapr500mrv2
name: mapr520mrv2
node_group_templates:
master: 1
worker: 1

View File

@@ -2,8 +2,8 @@
clusters:
- plugin_name: spark
plugin_version: 1.3.1
image: ${spark_131_image}
plugin_version: 1.6.0
image: ${spark_160_image}
node_group_templates:
- name: master
flavor: ${ci_flavor_id}
@@ -19,7 +19,7 @@ clusters:
- slave
auto_security_group: ${use_auto_security_group}
cluster_template:
name: spark131
name: spark160
node_group_templates:
master: 1
worker: 1

View File

@@ -1,34 +1,37 @@
<%page args="is_proxy_gateway='true', use_auto_security_group='true', ci_flavor_id='m1.small'"/>
<%page args="is_proxy_gateway='true', use_auto_security_group='true', ci_flavor_id='m1.small', medium_flavor_id='m1.medium'"/>
clusters:
- plugin_name: fake
plugin_version: "0.1"
image: ${fake_plugin_image}
- plugin_name: storm
plugin_version: 1.0.1
image: ${storm_101_image}
node_group_templates:
- name: worker
flavor: ${ci_flavor_id}
node_processes:
- datanode
- tasktracker
volumes_per_node: 2
volumes_size: 2
auto_security_group: ${use_auto_security_group}
- name: master
flavor: ${ci_flavor_id}
node_processes:
- jobtracker
- namenode
- nimbus
auto_security_group: ${use_auto_security_group}
is_proxy_gateway: ${is_proxy_gateway}
- name: worker
flavor: ${ci_flavor_id}
node_processes:
- supervisor
auto_security_group: ${use_auto_security_group}
- name: zookeeper
flavor: ${medium_flavor_id}
node_processes:
- zookeeper
auto_security_group: ${use_auto_security_group}
cluster_template:
name: fake01
name: storm101
node_group_templates:
master: 1
worker: 1
zookeeper: 1
cluster:
name: ${cluster_name}
scaling:
- operation: add
node_group: worker
size: 1
edp_jobs_flow: pig_job
scenario:
- scale

View File

@@ -25,13 +25,14 @@ clusters:
volumes_per_node: 2
volumes_size: 2
auto_security_group: ${use_auto_security_group}
- name: master-rm-nn-hvs
- name: master-rm-nn-hvs-sp
flavor: ${ci_flavor_id}
node_processes:
- namenode
- resourcemanager
- hiveserver
- nodemanager
- spark history server
auto_security_group: ${use_auto_security_group}
- name: master-oo-hs-sn
flavor: ${ci_flavor_id}
@@ -45,7 +46,7 @@ clusters:
cluster_template:
name: vanilla271
node_group_templates:
master-rm-nn-hvs: 1
master-rm-nn-hvs-sp: 1
master-oo-hs-sn: 1
worker-dn-nm: 2
worker-dn: 1
@@ -77,3 +78,4 @@ clusters:
- mapreduce_streaming_job
- java_job
- hive_job
- spark_wordcount

View File

@@ -4,8 +4,8 @@ network_private_name: private
network_public_name: public
vanilla_26_image: centos_sahara_vanilla_hadoop_2_6_latest
vanilla_271_image: vanilla271
spark_131_image: spark
cdh_540_image: cdh540
spark_160_image: spark
cdh_550_image: cdh550
cluster_name: cluster
ci_flavor_id: '2'
medium_flavor_id: '3'
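
These variables fill the ${...} placeholders in the .yaml.mako templates above. A minimal sketch of the substitution step with plain Mako (the real runner also applies the <%page/> argument defaults and any --args overrides on top):

from mako.template import Template

# Hypothetical one-line excerpt in the style of the scenario templates.
line = Template('image: ${spark_160_image}')
print(line.render(spark_160_image='spark'))  # -> image: spark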

View File

@@ -375,11 +375,11 @@ class RunnerUnitTest(testtools.TestCase):
@mock.patch('subprocess.Popen',
return_value=_create_subprocess_communicate_mock())
@mock.patch('sys.exit', return_value=None)
def test_default_templates_kilo(self, mock_sys, mock_sub, mock_validate):
def test_default_templates(self, mock_sys, mock_sub, mock_validate):
sys.argv = ['sahara_tests/scenario/runner.py',
'-V',
'sahara_tests/unit/scenario/templatevars_complete.ini',
'-p', 'spark', '-v', '1.3.1', '-r', 'liberty',
'-p', 'spark', '-v', '1.6.0', '-r', 'newton',
'--os-username', 'demo', '--os-password', 'demopwd',
'--os-project-name', 'demo',
'--os-auth-url', 'http://127.0.0.1:5000/v2']
@@ -387,7 +387,7 @@ class RunnerUnitTest(testtools.TestCase):
self.assertEqual('spark',
mock_validate.call_args[0][0]['clusters'][0][
'plugin_name'])
self.assertEqual('1.3.1',
self.assertEqual('1.6.0',
mock_validate.call_args[0][0]['clusters'][0][
'plugin_version'])
@@ -398,7 +398,7 @@ class RunnerUnitTest(testtools.TestCase):
sys.argv = ['sahara_tests/scenario/runner.py',
'-V',
'sahara_tests/unit/scenario/templatevars_complete.ini',
'-p', 'spark', '-v', '1.3.1', '--release', 'liberty',
'-p', 'spark', '-v', '1.6.0', '--release', 'newton',
'--count', '4',
'--os-username', 'demo', '--os-password', 'demopwd',
'--os-project-name', 'demo',
@@ -412,14 +412,15 @@ class RunnerUnitTest(testtools.TestCase):
sys.argv = ['sahara_tests/scenario/runner.py',
'-V',
'sahara_tests/unit/scenario/templatevars_complete.ini',
'sahara_tests/scenario/defaults/liberty',
'sahara_tests/scenario/defaults/newton',
'sahara_tests/scenario/defaults/edp.yaml.mako',
'--os-username', 'demo', '--os-password', 'demopwd',
'--os-project-name', 'demo',
'--os-auth-url', 'http://127.0.0.1:5000/v2', '--args',
'ambari_22_image:ambari', 'fake_plugin_image:fake',
'mapr_500mrv2_image:mapr', 'spark_131_image:spark',
'hdp_206_image:hdp']
'mapr_510mrv2_image:mapr', 'mapr_520mrv2_image:mapr',
'cdh_570_image:cdh', 'spark_160_image:spark',
'storm_101_image:storm']
runner.main()
@mock.patch('sahara_tests.scenario.validation.validate')