Prepare Pike-friendly release: templates, requirements

Add Pike templates; remove the Mitaka templates and a few other
outdated ones from the master directory.
Sync with newer Tempest requirements (some changes in the Tempest
plugin require them).

Change-Id: I43c181f886522d02605222ae3d8005efb5d5861a
Luigi Toscano 2017-08-30 14:45:10 +02:00
parent 5aee39c1fc
commit e638bcc351
15 changed files with 232 additions and 236 deletions
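
The templates touched by this commit are Mako-templated YAML files: the <%page args="..."> header declares per-template parameters with defaults, and ${...} placeholders (cluster name, image names, flavors) are filled in at run time by the scenario runner in sahara-tests. The sketch below is illustrative only: the file path, image name and cluster name are invented placeholders, and the real runner (the sahara-scenario command) also handles variable files and validation. It simply shows how such a template can be rendered and loaded as YAML.

    # Illustrative sketch, not part of this commit: render a scenario template
    # and parse the resulting YAML.  Path and variable values are placeholders.
    import yaml
    from mako.template import Template

    template = Template(filename="pike/vanilla-2.7.1.yaml.mako")  # hypothetical path
    rendered = template.render(
        cluster_name="scenario-test",             # fills ${cluster_name}
        vanilla_271_image="vanilla-2.7.1-image",  # fills ${vanilla_271_image}
    )
    config = yaml.safe_load(rendered)
    print(config["clusters"][0]["plugin_name"],
          config["clusters"][0]["plugin_version"])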

View File

@@ -0,0 +1,12 @@
---
prelude: >
Pike test templates are now available, while Mitaka
test templates have been removed.
features:
- A folder with scenario templates for Pike was added.
It is a subset of the templates in the main directory.
- Some requirements have been raised (especially Tempest).
deprecations:
- The Mitaka-specific job templates have been removed.
This means that starting from this release Mitaka is
not supported (it has been EOL for a while).

View File

@@ -13,7 +13,7 @@ oslo.serialization>=1.10.0 # Apache-2.0
oslo.utils>=3.5.0 # Apache-2.0
oslotest>=1.10.0 # Apache-2.0
os-client-config>=1.13.1 # Apache-2.0
os-testr>=0.4.1 # Apache-2.0
os-testr>=0.8.0 # Apache-2.0
paramiko>=1.16.0 # LGPL
python-glanceclient>=2.0.0 # Apache-2.0
python-novaclient!=2.33.0,>=2.29.0 # Apache-2.0
@@ -22,6 +22,6 @@ python-swiftclient>=2.2.0 # Apache-2.0
python-neutronclient>=4.2.0 # Apache-2.0
rfc3986>=0.2.0 # Apache-2.0
six>=1.9.0 # MIT
tempest>=12.2.0 # Apache-2.0
tempest>=16.0.0 # Apache-2.0
testrepository>=0.0.18 # Apache-2.0/BSD
testtools>=1.4.0 # MIT
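
For context only, and not part of the diff: the raised minimums above (os-testr>=0.8.0, tempest>=16.0.0) can be sanity-checked against an already-installed environment with pkg_resources, as in the small sketch below. The requirement strings are copied from the file; the check itself is illustrative.

    # Illustrative only: check that an installed environment satisfies the
    # raised minimums from this change.
    import pkg_resources

    for requirement in ("os-testr>=0.8.0", "tempest>=16.0.0"):
        try:
            pkg_resources.require(requirement)
            print(requirement, "-> satisfied")
        except (pkg_resources.DistributionNotFound,
                pkg_resources.VersionConflict) as exc:
            print(requirement, "-> NOT satisfied:", exc)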

View File

@@ -1,34 +0,0 @@
<%page args="is_proxy_gateway='true', use_auto_security_group='true', ci_flavor_id='m1.small'"/>
clusters:
- plugin_name: fake
plugin_version: "0.1"
image: ${fake_plugin_image}
node_group_templates:
- name: worker
flavor: ${ci_flavor_id}
node_processes:
- datanode
- tasktracker
volumes_per_node: 2
volumes_size: 2
auto_security_group: ${use_auto_security_group}
- name: master
flavor: ${ci_flavor_id}
node_processes:
- jobtracker
- namenode
auto_security_group: ${use_auto_security_group}
is_proxy_gateway: ${is_proxy_gateway}
cluster_template:
name: fake01
node_group_templates:
master: 1
worker: 1
cluster:
name: ${cluster_name}
scaling:
- operation: add
node_group: worker
size: 1
edp_jobs_flow: pig_job

View File

@@ -1,56 +0,0 @@
<%page args="use_auto_security_group='true', mapr_master_flavor_id='mapr.master', mapr_worker_flavor_id='mapr.worker'"/>
clusters:
- plugin_name: mapr
plugin_version: 5.0.0.mrv2
image: ${mapr_500mrv2_image}
node_group_templates:
- name: master
flavor:
name: ${mapr_master_flavor_id}
vcpus: 4
ram: 8192
root_disk: 80
ephemeral_disk: 40
node_processes:
- Metrics
- Webserver
- ZooKeeper
- HTTPFS
- Oozie
- FileServer
- CLDB
- Flume
- Hue
- NodeManager
- HistoryServer
- ResourceManager
- HiveServer2
- HiveMetastore
- Sqoop2-Client
- Sqoop2-Server
auto_security_group: ${use_auto_security_group}
- name: worker
flavor:
name: ${mapr_worker_flavor_id}
vcpus: 2
ram: 4096
root_disk: 40
ephemeral_disk: 40
node_processes:
- NodeManager
- FileServer
auto_security_group: ${use_auto_security_group}
cluster_template:
name: mapr500mrv2
node_group_templates:
master: 1
worker: 1
cluster:
name: ${cluster_name}
scaling:
- operation: add
node_group: worker
size: 1
edp_jobs_flow:
- mapr

View File

@@ -1,56 +0,0 @@
<%page args="use_auto_security_group='true', mapr_master_flavor_id='mapr.master', mapr_worker_flavor_id='mapr.worker'"/>
clusters:
- plugin_name: mapr
plugin_version: 5.1.0.mrv2
image: ${mapr_510mrv2_image}
node_group_templates:
- name: master
flavor:
name: ${mapr_master_flavor_id}
vcpus: 4
ram: 8192
root_disk: 80
ephemeral_disk: 40
node_processes:
- Metrics
- Webserver
- ZooKeeper
- HTTPFS
- Oozie
- FileServer
- CLDB
- Flume
- Hue
- NodeManager
- HistoryServer
- ResourceManager
- HiveServer2
- HiveMetastore
- Sqoop2-Client
- Sqoop2-Server
auto_security_group: ${use_auto_security_group}
- name: worker
flavor:
name: ${mapr_worker_flavor_id}
vcpus: 2
ram: 4096
root_disk: 40
ephemeral_disk: 40
node_processes:
- NodeManager
- FileServer
auto_security_group: ${use_auto_security_group}
cluster_template:
name: mapr510mrv2
node_group_templates:
master: 1
worker: 1
cluster:
name: ${cluster_name}
scaling:
- operation: add
node_group: worker
size: 1
edp_jobs_flow:
- mapr

View File

@@ -1,54 +0,0 @@
<%page args="is_proxy_gateway='true', use_auto_security_group='true', ci_flavor_id='m1.small'"/>
clusters:
- plugin_name: vanilla
plugin_version: 2.7.1
image: ${vanilla_271_image}
node_group_templates:
- name: worker
flavor: ${ci_flavor_id}
node_processes:
- datanode
- nodemanager
volumes_per_node: 2
volumes_size: 2
auto_security_group: ${use_auto_security_group}
node_configs:
&ng_configs
MapReduce:
yarn.app.mapreduce.am.resource.mb: 256
yarn.app.mapreduce.am.command-opts: -Xmx256m
YARN:
yarn.scheduler.minimum-allocation-mb: 256
yarn.scheduler.maximum-allocation-mb: 1024
yarn.nodemanager.vmem-check-enabled: false
- name: master
flavor: ${ci_flavor_id}
node_processes:
- oozie
- historyserver
- resourcemanager
- namenode
auto_security_group: ${use_auto_security_group}
is_proxy_gateway: ${is_proxy_gateway}
cluster_template:
name: transient
node_group_templates:
master: 1
worker: 3
cluster_configs:
HDFS:
dfs.replication: 1
MapReduce:
mapreduce.tasktracker.map.tasks.maximum: 16
mapreduce.tasktracker.reduce.tasks.maximum: 16
YARN:
yarn.resourcemanager.scheduler.class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler
cluster:
name: ${cluster_name}
is_transient: true
scenario:
- run_jobs
- transient
edp_jobs_flow: pig_job

View File

@@ -2,7 +2,7 @@
clusters:
- plugin_name: ambari
plugin_version: '2.3'
plugin_version: '2.4'
image: ${ambari_22_image}
node_group_templates:
- name: master
@@ -50,7 +50,7 @@ clusters:
- type: Spark
main_lib:
type: database
source: etc/edp-examples/edp-spark/spark-kafka-example.jar
source: edp-examples/edp-spark/spark-kafka-example.jar
args:
- '{zookeeper_list}'
- '{topic}'
@@ -58,8 +58,10 @@
timeout: 30
cluster:
name: ${cluster_name}
scenario:
- run_jobs
scaling:
- operation: add
node_group: worker
size: 1
edp_jobs_flow:
- java_job
- spark_pi

View File

@@ -0,0 +1,90 @@
<%page args="is_proxy_gateway='true', use_auto_security_group='true', ci_flavor_id='m1.small', large_flavor_id='m1.large'"/>
clusters:
- plugin_name: cdh
plugin_version: 5.11.0
image: ${cdh_5110_image}
node_group_templates:
- name: worker-dn
flavor: ${ci_flavor_id}
node_processes:
- HDFS_DATANODE
volumes_per_node: 2
volumes_size: 2
auto_security_group: ${use_auto_security_group}
node_configs:
&ng_configs
DATANODE:
dfs_datanode_du_reserved: 0
- name: worker-nm
flavor: ${ci_flavor_id}
node_processes:
- YARN_NODEMANAGER
auto_security_group: ${use_auto_security_group}
- name: worker-nm-dn
flavor: ${ci_flavor_id}
node_processes:
- YARN_NODEMANAGER
- HDFS_DATANODE
volumes_per_node: 2
volumes_size: 2
auto_security_group: ${use_auto_security_group}
node_configs:
*ng_configs
- name: manager
flavor: ${large_flavor_id}
node_processes:
- CLOUDERA_MANAGER
- KMS
is_proxy_gateway: ${is_proxy_gateway}
auto_security_group: ${use_auto_security_group}
- name: master-core
flavor: ${large_flavor_id}
node_processes:
- HDFS_NAMENODE
- YARN_RESOURCEMANAGER
- SENTRY_SERVER
- YARN_NODEMANAGER
- ZOOKEEPER_SERVER
auto_security_group: ${use_auto_security_group}
- name: master-additional
flavor: ${large_flavor_id}
node_processes:
- OOZIE_SERVER
- YARN_JOBHISTORY
- YARN_NODEMANAGER
- HDFS_SECONDARYNAMENODE
- HIVE_METASTORE
- HIVE_SERVER2
- SPARK_YARN_HISTORY_SERVER
auto_security_group: ${use_auto_security_group}
# In 5.11 the defaults of following configs are too large,
# restrict them to save memory for scenario testing.
node_configs:
HIVEMETASTORE:
hive_metastore_java_heapsize: 2147483648
HIVESERVER:
hiveserver2_java_heapsize: 2147483648
cluster_template:
name: cdh5110
node_group_templates:
manager: 1
master-core: 1
master-additional: 1
worker-nm-dn: 1
worker-nm: 1
worker-dn: 1
cluster_configs:
HDFS:
dfs_replication: 1
cluster:
name: ${cluster_name}
scenario:
- run_jobs
- sentry
edp_jobs_flow:
- pig_job
- mapreduce_job
- mapreduce_streaming_job
- java_job
- spark_wordcount

View File

@@ -1,9 +1,9 @@
<%page args="is_proxy_gateway='true', use_auto_security_group='true', ci_flavor_id='m1.small', medium_flavor_id='m1.medium', large_flavor_id='m1.large'"/>
<%page args="is_proxy_gateway='true', use_auto_security_group='true', ci_flavor_id='m1.small', large_flavor_id='m1.large'"/>
clusters:
- plugin_name: cdh
plugin_version: 5.5.0
image: ${cdh_550_image}
plugin_version: 5.7.0
image: ${cdh_570_image}
node_group_templates:
- name: worker-dn
flavor: ${ci_flavor_id}
@@ -39,7 +39,7 @@ clusters:
is_proxy_gateway: ${is_proxy_gateway}
auto_security_group: ${use_auto_security_group}
- name: master-core
flavor: ${medium_flavor_id}
flavor: ${large_flavor_id}
node_processes:
- HDFS_NAMENODE
- YARN_RESOURCEMANAGER
@@ -48,7 +48,7 @@ clusters:
- ZOOKEEPER_SERVER
auto_security_group: ${use_auto_security_group}
- name: master-additional
flavor: ${medium_flavor_id}
flavor: ${large_flavor_id}
node_processes:
- OOZIE_SERVER
- YARN_JOBHISTORY
@@ -58,7 +58,7 @@ clusters:
- HIVE_SERVER2
- SPARK_YARN_HISTORY_SERVER
auto_security_group: ${use_auto_security_group}
# In 5.5 the defaults of following configs are too large,
# In 5.7 the defaults of following configs are too large,
# restrict them to save memory for scenario testing.
node_configs:
HIVEMETASTORE:
@@ -66,7 +66,7 @@ clusters:
HIVESERVER:
hiveserver2_java_heapsize: 2147483648
cluster_template:
name: cdh540
name: cdh570
node_group_templates:
manager: 1
master-core: 1

View File

@@ -0,0 +1,90 @@
<%page args="is_proxy_gateway='true', use_auto_security_group='true', ci_flavor_id='m1.small', large_flavor_id='m1.large'"/>
clusters:
- plugin_name: cdh
plugin_version: 5.9.0
image: ${cdh_590_image}
node_group_templates:
- name: worker-dn
flavor: ${ci_flavor_id}
node_processes:
- HDFS_DATANODE
volumes_per_node: 2
volumes_size: 2
auto_security_group: ${use_auto_security_group}
node_configs:
&ng_configs
DATANODE:
dfs_datanode_du_reserved: 0
- name: worker-nm
flavor: ${ci_flavor_id}
node_processes:
- YARN_NODEMANAGER
auto_security_group: ${use_auto_security_group}
- name: worker-nm-dn
flavor: ${ci_flavor_id}
node_processes:
- YARN_NODEMANAGER
- HDFS_DATANODE
volumes_per_node: 2
volumes_size: 2
auto_security_group: ${use_auto_security_group}
node_configs:
*ng_configs
- name: manager
flavor: ${large_flavor_id}
node_processes:
- CLOUDERA_MANAGER
- KMS
is_proxy_gateway: ${is_proxy_gateway}
auto_security_group: ${use_auto_security_group}
- name: master-core
flavor: ${large_flavor_id}
node_processes:
- HDFS_NAMENODE
- YARN_RESOURCEMANAGER
- SENTRY_SERVER
- YARN_NODEMANAGER
- ZOOKEEPER_SERVER
auto_security_group: ${use_auto_security_group}
- name: master-additional
flavor: ${large_flavor_id}
node_processes:
- OOZIE_SERVER
- YARN_JOBHISTORY
- YARN_NODEMANAGER
- HDFS_SECONDARYNAMENODE
- HIVE_METASTORE
- HIVE_SERVER2
- SPARK_YARN_HISTORY_SERVER
auto_security_group: ${use_auto_security_group}
# In 5.9 the defaults of following configs are too large,
# restrict them to save memory for scenario testing.
node_configs:
HIVEMETASTORE:
hive_metastore_java_heapsize: 2147483648
HIVESERVER:
hiveserver2_java_heapsize: 2147483648
cluster_template:
name: cdh590
node_group_templates:
manager: 1
master-core: 1
master-additional: 1
worker-nm-dn: 1
worker-nm: 1
worker-dn: 1
cluster_configs:
HDFS:
dfs_replication: 1
cluster:
name: ${cluster_name}
scenario:
- run_jobs
- sentry
edp_jobs_flow:
- pig_job
- mapreduce_job
- mapreduce_streaming_job
- java_job
- spark_wordcount

View File

@@ -2,8 +2,8 @@
clusters:
- plugin_name: mapr
plugin_version: 5.1.0.mrv2
image: ${mapr_510mrv2_image}
plugin_version: 5.2.0.mrv2
image: ${mapr_520mrv2_image}
node_group_templates:
- name: master
flavor:
@@ -42,7 +42,7 @@ clusters:
- FileServer
auto_security_group: ${use_auto_security_group}
cluster_template:
name: mapr510mrv2
name: mapr520mrv2
node_group_templates:
master: 1
worker: 1

View File

@@ -2,8 +2,8 @@
clusters:
- plugin_name: spark
plugin_version: 1.3.1
image: ${spark_131_image}
plugin_version: 2.1.0
image: ${spark_160_image}
node_group_templates:
- name: master
flavor: ${ci_flavor_id}
@@ -19,7 +19,7 @@ clusters:
- slave
auto_security_group: ${use_auto_security_group}
cluster_template:
name: spark131
name: spark210
node_group_templates:
master: 1
worker: 1

View File

@@ -1,37 +1,37 @@
<%page args="is_proxy_gateway='true', use_auto_security_group='true', ci_flavor_id='m1.small'"/>
<%page args="is_proxy_gateway='true', use_auto_security_group='true', ci_flavor_id='m1.small', medium_flavor_id='m1.medium'"/>
clusters:
- plugin_name: spark
plugin_version: 1.3.1
image: ${spark_131_image}
- plugin_name: storm
plugin_version: 1.1.0
image: ${storm_110_image}
node_group_templates:
- name: master
flavor: ${ci_flavor_id}
node_processes:
- master
- namenode
- nimbus
auto_security_group: ${use_auto_security_group}
is_proxy_gateway: ${is_proxy_gateway}
- name: worker
flavor: ${ci_flavor_id}
node_processes:
- datanode
- slave
- supervisor
auto_security_group: ${use_auto_security_group}
- name: zookeeper
flavor: ${medium_flavor_id}
node_processes:
- zookeeper
auto_security_group: ${use_auto_security_group}
cluster_template:
name: spark131
name: storm110
node_group_templates:
master: 1
worker: 1
cluster_configs:
HDFS:
dfs.replication: 1
zookeeper: 1
cluster:
name: ${cluster_name}
scaling:
- operation: add
node_group: worker
size: 1
edp_jobs_flow:
- spark_pi
- spark_wordcount
scenario:
- scale

View File

@@ -25,13 +25,14 @@ clusters:
volumes_per_node: 2
volumes_size: 2
auto_security_group: ${use_auto_security_group}
- name: master-rm-nn-hvs
- name: master-rm-nn-hvs-sp
flavor: ${ci_flavor_id}
node_processes:
- namenode
- resourcemanager
- hiveserver
- nodemanager
- spark history server
auto_security_group: ${use_auto_security_group}
- name: master-oo-hs-sn
flavor: ${ci_flavor_id}
@@ -45,7 +46,7 @@
cluster_template:
name: vanilla271
node_group_templates:
master-rm-nn-hvs: 1
master-rm-nn-hvs-sp: 1
master-oo-hs-sn: 1
worker-dn-nm: 2
worker-dn: 1
@@ -77,3 +78,4 @@ clusters:
- mapreduce_streaming_job
- java_job
- hive_job
- spark_wordcount