From 4ebba66236c943ec36a0e7c6f6e583038bb321c7 Mon Sep 17 00:00:00 2001
From: Evgeny Sikachev
Date: Sun, 2 Oct 2016 22:58:35 +0300
Subject: [PATCH] Added folder with defaults for newton

Added default yamls for newton release

Change-Id: I97a91e2acd29b91df50414a4009d1a8ae1131102
---
 .../defaults/newton/ambari-2.4.yaml.mako      | 67 ++++++++++++++
 .../defaults/newton/cdh-5.7.0.yaml.mako       | 90 +++++++++++++++++++
 .../defaults/newton/mapr-5.1.0.mrv2.yaml.mako | 54 +++++++++++
 .../defaults/newton/mapr-5.2.0.mrv2.yaml.mako | 54 +++++++++++
 .../defaults/newton/spark-1.6.0.yaml.mako     | 37 ++++++++
 .../defaults/newton/storm-1.0.1.yaml.mako     | 37 ++++++++
 .../defaults/newton/vanilla-2.7.1.yaml.mako   | 81 +++++++++++++++++
 7 files changed, 420 insertions(+)
 create mode 100644 sahara_tests/scenario/defaults/newton/ambari-2.4.yaml.mako
 create mode 100644 sahara_tests/scenario/defaults/newton/cdh-5.7.0.yaml.mako
 create mode 100644 sahara_tests/scenario/defaults/newton/mapr-5.1.0.mrv2.yaml.mako
 create mode 100644 sahara_tests/scenario/defaults/newton/mapr-5.2.0.mrv2.yaml.mako
 create mode 100644 sahara_tests/scenario/defaults/newton/spark-1.6.0.yaml.mako
 create mode 100644 sahara_tests/scenario/defaults/newton/storm-1.0.1.yaml.mako
 create mode 100644 sahara_tests/scenario/defaults/newton/vanilla-2.7.1.yaml.mako

diff --git a/sahara_tests/scenario/defaults/newton/ambari-2.4.yaml.mako b/sahara_tests/scenario/defaults/newton/ambari-2.4.yaml.mako
new file mode 100644
index 00000000..883ad644
--- /dev/null
+++ b/sahara_tests/scenario/defaults/newton/ambari-2.4.yaml.mako
@@ -0,0 +1,67 @@
+<%page args="use_auto_security_group='true'"/>
+
+clusters:
+  - plugin_name: ambari
+    plugin_version: '2.4'
+    image: ${ambari_2_2_image}
+    node_group_templates:
+      - name: master
+        flavor: ${medium_flavor_id}
+        node_processes:
+          - Ambari
+          - MapReduce History Server
+          - Spark History Server
+          - NameNode
+          - ResourceManager
+          - SecondaryNameNode
+          - YARN Timeline Server
+          - ZooKeeper
+          - Kafka Broker
+        auto_security_group: ${use_auto_security_group}
+      - name: master-edp
+        flavor: ${ci_flavor_id}
+        node_processes:
+          - Hive Metastore
+          - HiveServer
+          - Oozie
+        auto_security_group: ${use_auto_security_group}
+      - name: worker
+        flavor: ${ci_flavor_id}
+        node_processes:
+          - DataNode
+          - NodeManager
+        volumes_per_node: 2
+        volumes_size: 2
+        auto_security_group: ${use_auto_security_group}
+    cluster_template:
+      name: ambari21
+      node_group_templates:
+        master: 1
+        master-edp: 1
+        worker: 3
+      cluster_configs:
+        HDFS:
+          dfs.datanode.du.reserved: 0
+    custom_checks:
+      check_kafka:
+        zookeeper_process: ZooKeeper
+        kafka_process: Kafka Broker
+        spark_flow:
+          - type: Spark
+            main_lib:
+              type: database
+              source: edp-examples/edp-spark/spark-kafka-example.jar
+            args:
+              - '{zookeeper_list}'
+              - '{topic}'
+              - '{timeout}'
+            timeout: 30
+    cluster:
+      name: ${cluster_name}
+    scaling:
+      - operation: add
+        node_group: worker
+        size: 1
+    edp_jobs_flow:
+      - java_job
+      - spark_pi
diff --git a/sahara_tests/scenario/defaults/newton/cdh-5.7.0.yaml.mako b/sahara_tests/scenario/defaults/newton/cdh-5.7.0.yaml.mako
new file mode 100644
index 00000000..746aa63b
--- /dev/null
+++ b/sahara_tests/scenario/defaults/newton/cdh-5.7.0.yaml.mako
@@ -0,0 +1,90 @@
+<%page args="is_proxy_gateway='true', use_auto_security_group='true'"/>
+
+clusters:
+  - plugin_name: cdh
+    plugin_version: 5.7.0
+    image: ${cdh_5_7_0_image}
+    node_group_templates:
+      - name: worker-dn
+        flavor: ${ci_flavor_id}
+        node_processes:
+          - HDFS_DATANODE
+        volumes_per_node: 2
+        volumes_size: 2
+        auto_security_group: ${use_auto_security_group}
+        node_configs:
+          &ng_configs
+          DATANODE:
+            dfs_datanode_du_reserved: 0
+      - name: worker-nm
+        flavor: ${ci_flavor_id}
+        node_processes:
+          - YARN_NODEMANAGER
+        auto_security_group: ${use_auto_security_group}
+      - name: worker-nm-dn
+        flavor: ${ci_flavor_id}
+        node_processes:
+          - YARN_NODEMANAGER
+          - HDFS_DATANODE
+        volumes_per_node: 2
+        volumes_size: 2
+        auto_security_group: ${use_auto_security_group}
+        node_configs:
+          *ng_configs
+      - name: manager
+        flavor: ${large_flavor_id}
+        node_processes:
+          - CLOUDERA_MANAGER
+          - KMS
+        is_proxy_gateway: ${is_proxy_gateway}
+        auto_security_group: ${use_auto_security_group}
+      - name: master-core
+        flavor: ${large_flavor_id}
+        node_processes:
+          - HDFS_NAMENODE
+          - YARN_RESOURCEMANAGER
+          - SENTRY_SERVER
+          - YARN_NODEMANAGER
+          - ZOOKEEPER_SERVER
+        auto_security_group: ${use_auto_security_group}
+      - name: master-additional
+        flavor: ${large_flavor_id}
+        node_processes:
+          - OOZIE_SERVER
+          - YARN_JOBHISTORY
+          - YARN_NODEMANAGER
+          - HDFS_SECONDARYNAMENODE
+          - HIVE_METASTORE
+          - HIVE_SERVER2
+          - SPARK_YARN_HISTORY_SERVER
+        auto_security_group: ${use_auto_security_group}
+        # In 5.7 the defaults of following configs are too large,
+        # restrict them to save memory for scenario testing.
+        node_configs:
+          HIVEMETASTORE:
+            hive_metastore_java_heapsize: 2147483648
+          HIVESERVER:
+            hiveserver2_java_heapsize: 2147483648
+    cluster_template:
+      name: cdh570
+      node_group_templates:
+        manager: 1
+        master-core: 1
+        master-additional: 1
+        worker-nm-dn: 1
+        worker-nm: 1
+        worker-dn: 1
+      cluster_configs:
+        HDFS:
+          dfs_replication: 1
+    cluster:
+      name: ${cluster_name}
+    scenario:
+      - run_jobs
+      - sentry
+    edp_jobs_flow:
+      - pig_job
+      - mapreduce_job
+      - mapreduce_streaming_job
+      - java_job
+      - spark_wordcount
diff --git a/sahara_tests/scenario/defaults/newton/mapr-5.1.0.mrv2.yaml.mako b/sahara_tests/scenario/defaults/newton/mapr-5.1.0.mrv2.yaml.mako
new file mode 100644
index 00000000..db2facdb
--- /dev/null
+++ b/sahara_tests/scenario/defaults/newton/mapr-5.1.0.mrv2.yaml.mako
@@ -0,0 +1,54 @@
+<%page args="use_auto_security_group='true'"/>
+
+clusters:
+  - plugin_name: mapr
+    plugin_version: 5.1.0.mrv2
+    image: ${mapr_510mrv2_image}
+    node_group_templates:
+      - name: master
+        flavor:
+          vcpus: 4
+          ram: 8192
+          root_disk: 80
+          ephemeral_disk: 40
+        node_processes:
+          - Metrics
+          - Webserver
+          - ZooKeeper
+          - HTTPFS
+          - Oozie
+          - FileServer
+          - CLDB
+          - Flume
+          - Hue
+          - NodeManager
+          - HistoryServer
+          - ResourceManager
+          - HiveServer2
+          - HiveMetastore
+          - Sqoop2-Client
+          - Sqoop2-Server
+        auto_security_group: ${use_auto_security_group}
+      - name: worker
+        flavor:
+          vcpus: 2
+          ram: 4096
+          root_disk: 40
+          ephemeral_disk: 40
+        node_processes:
+          - NodeManager
+          - FileServer
+        auto_security_group: ${use_auto_security_group}
+    cluster_template:
+      name: mapr510mrv2
+      node_group_templates:
+        master: 1
+        worker: 1
+    cluster:
+      name: ${cluster_name}
+    scaling:
+      - operation: add
+        node_group: worker
+        size: 1
+    edp_jobs_flow:
+      - mapr
diff --git a/sahara_tests/scenario/defaults/newton/mapr-5.2.0.mrv2.yaml.mako b/sahara_tests/scenario/defaults/newton/mapr-5.2.0.mrv2.yaml.mako
new file mode 100644
index 00000000..46721907
--- /dev/null
+++ b/sahara_tests/scenario/defaults/newton/mapr-5.2.0.mrv2.yaml.mako
@@ -0,0 +1,54 @@
+<%page args="use_auto_security_group='true'"/>
+
+clusters:
+  - plugin_name: mapr
+    plugin_version: 5.2.0.mrv2
+    image: ${mapr_520mrv2_image}
+    node_group_templates:
+      - name: master
+        flavor:
+          vcpus: 4
+          ram: 8192
+          root_disk: 80
+          ephemeral_disk: 40
+        node_processes:
+          - Metrics
+          - Webserver
+          - ZooKeeper
+          - HTTPFS
+          - Oozie
+          - FileServer
+          - CLDB
+          - Flume
+          - Hue
+          - NodeManager
+          - HistoryServer
+          - ResourceManager
+          - HiveServer2
+          - HiveMetastore
+          - Sqoop2-Client
+          - Sqoop2-Server
+        auto_security_group: ${use_auto_security_group}
+      - name: worker
+        flavor:
+          vcpus: 2
+          ram: 4096
+          root_disk: 40
+          ephemeral_disk: 40
+        node_processes:
+          - NodeManager
+          - FileServer
+        auto_security_group: ${use_auto_security_group}
+    cluster_template:
+      name: mapr520mrv2
+      node_group_templates:
+        master: 1
+        worker: 1
+    cluster:
+      name: ${cluster_name}
+    scaling:
+      - operation: add
+        node_group: worker
+        size: 1
+    edp_jobs_flow:
+      - mapr
diff --git a/sahara_tests/scenario/defaults/newton/spark-1.6.0.yaml.mako b/sahara_tests/scenario/defaults/newton/spark-1.6.0.yaml.mako
new file mode 100644
index 00000000..d94da806
--- /dev/null
+++ b/sahara_tests/scenario/defaults/newton/spark-1.6.0.yaml.mako
@@ -0,0 +1,37 @@
+<%page args="is_proxy_gateway='true', use_auto_security_group='true'"/>
+
+clusters:
+  - plugin_name: spark
+    plugin_version: 1.6.0
+    image: ${spark_1_6_image}
+    node_group_templates:
+      - name: master
+        flavor: ${ci_flavor_id}
+        node_processes:
+          - master
+          - namenode
+        auto_security_group: ${use_auto_security_group}
+        is_proxy_gateway: ${is_proxy_gateway}
+      - name: worker
+        flavor: ${ci_flavor_id}
+        node_processes:
+          - datanode
+          - slave
+        auto_security_group: ${use_auto_security_group}
+    cluster_template:
+      name: spark160
+      node_group_templates:
+        master: 1
+        worker: 1
+      cluster_configs:
+        HDFS:
+          dfs.replication: 1
+    cluster:
+      name: ${cluster_name}
+    scaling:
+      - operation: add
+        node_group: worker
+        size: 1
+    edp_jobs_flow:
+      - spark_pi
+      - spark_wordcount
diff --git a/sahara_tests/scenario/defaults/newton/storm-1.0.1.yaml.mako b/sahara_tests/scenario/defaults/newton/storm-1.0.1.yaml.mako
new file mode 100644
index 00000000..a146ee48
--- /dev/null
+++ b/sahara_tests/scenario/defaults/newton/storm-1.0.1.yaml.mako
@@ -0,0 +1,37 @@
+<%page args="is_proxy_gateway='true', use_auto_security_group='true'"/>
+
+clusters:
+  - plugin_name: storm
+    plugin_version: 1.0.1
+    image: ${storm_1_0_image}
+    node_group_templates:
+      - name: master
+        flavor: ${ci_flavor_id}
+        node_processes:
+          - nimbus
+        auto_security_group: ${use_auto_security_group}
+        is_proxy_gateway: ${is_proxy_gateway}
+      - name: worker
+        flavor: ${ci_flavor_id}
+        node_processes:
+          - supervisor
+        auto_security_group: ${use_auto_security_group}
+      - name: zookeeper
+        flavor: ${medium_flavor_id}
+        node_processes:
+          - zookeeper
+        auto_security_group: ${use_auto_security_group}
+    cluster_template:
+      name: storm101
+      node_group_templates:
+        master: 1
+        worker: 1
+        zookeeper: 1
+    cluster:
+      name: ${cluster_name}
+    scaling:
+      - operation: add
+        node_group: worker
+        size: 1
+    scenario:
+      - scale
diff --git a/sahara_tests/scenario/defaults/newton/vanilla-2.7.1.yaml.mako b/sahara_tests/scenario/defaults/newton/vanilla-2.7.1.yaml.mako
new file mode 100644
index 00000000..771a787a
--- /dev/null
+++ b/sahara_tests/scenario/defaults/newton/vanilla-2.7.1.yaml.mako
@@ -0,0 +1,81 @@
+<%page args="is_proxy_gateway='true', use_auto_security_group='true'"/>
+
+clusters:
+  - plugin_name: vanilla
+    plugin_version: 2.7.1
+    image: ${vanilla_two_seven_one_image}
+    node_group_templates:
+      - name: worker-dn-nm
+        flavor: ${ci_flavor_id}
+        node_processes:
+          - datanode
+          - nodemanager
+        volumes_per_node: 2
+        volumes_size: 2
+        auto_security_group: ${use_auto_security_group}
+      - name: worker-nm
+        flavor: ${ci_flavor_id}
+        node_processes:
+          - nodemanager
+        auto_security_group: ${use_auto_security_group}
+      - name: worker-dn
+        flavor: ${ci_flavor_id}
+        node_processes:
+          - datanode
+        volumes_per_node: 2
+        volumes_size: 2
+        auto_security_group: ${use_auto_security_group}
+      - name: master-rm-nn-hvs-sp
+        flavor: ${ci_flavor_id}
+        node_processes:
+          - namenode
+          - resourcemanager
+          - hiveserver
+          - nodemanager
+          - spark history server
+        auto_security_group: ${use_auto_security_group}
+      - name: master-oo-hs-sn
+        flavor: ${ci_flavor_id}
+        node_processes:
+          - oozie
+          - historyserver
+          - secondarynamenode
+          - nodemanager
+        auto_security_group: ${use_auto_security_group}
+        is_proxy_gateway: ${is_proxy_gateway}
+    cluster_template:
+      name: vanilla271
+      node_group_templates:
+        master-rm-nn-hvs-sp: 1
+        master-oo-hs-sn: 1
+        worker-dn-nm: 2
+        worker-dn: 1
+        worker-nm: 1
+      cluster_configs:
+        HDFS:
+          dfs.replication: 1
+    cluster:
+      name: ${cluster_name}
+    scaling:
+      - operation: resize
+        node_group: worker-dn-nm
+        size: 1
+      - operation: resize
+        node_group: worker-dn
+        size: 0
+      - operation: resize
+        node_group: worker-nm
+        size: 0
+      - operation: add
+        node_group: worker-dn
+        size: 1
+      - operation: add
+        node_group: worker-nm
+        size: 2
+    edp_jobs_flow:
+      - pig_job
+      - mapreduce_job
+      - mapreduce_streaming_job
+      - java_job
+      - hive_job
+      - spark_wordcount