From 6b5f0d0b1b7d8620d98f054225b22279b99150c4 Mon Sep 17 00:00:00 2001
From: Nikita Konovalov
Date: Wed, 17 Jun 2015 15:00:51 +0300
Subject: [PATCH] Drop Vanilla Hadoop 1

Dropping support for the unpopular and unused Hadoop v1 in the Vanilla
plugin.

Partially-implements bp: drop-hadoop-1

Change-Id: I0a322fb7b8db50941c4854f45077fe6232e2c766
---
 MANIFEST.in | 3 -
 doc/source/userdoc/vanilla_plugin.rst | 22 +-
 .../sahara-ci/vanilla-1.2.1.yaml.mako | 73 -
 .../vanilla/v1_2_1/cluster.json | 24 -
 .../vanilla/v1_2_1/master.json | 11 -
 .../vanilla/v1_2_1/secondary-master.json | 11 -
 .../vanilla/v1_2_1/worker.json | 11 -
 sahara/plugins/vanilla/v1_2_1/__init__.py | 0
 .../plugins/vanilla/v1_2_1/config_helper.py | 500 -----
 sahara/plugins/vanilla/v1_2_1/edp_engine.py | 43 -
 sahara/plugins/vanilla/v1_2_1/mysql_helper.py | 45 -
 sahara/plugins/vanilla/v1_2_1/oozie_helper.py | 62 -
 .../vanilla/v1_2_1/resources/README.rst | 27 -
 .../vanilla/v1_2_1/resources/core-default.xml | 632 ------
 .../v1_2_1/resources/create_hive_db.sql | 9 -
 .../v1_2_1/resources/create_oozie_db.sql | 4 -
 .../vanilla/v1_2_1/resources/hdfs-default.xml | 709 ------
 .../vanilla/v1_2_1/resources/hive-default.xml | 1873 ----------------
 .../v1_2_1/resources/mapred-default.xml | 1328 ------------
 .../v1_2_1/resources/oozie-default.xml | 1929 -----------------
 .../vanilla/v1_2_1/resources/topology.sh | 21 -
 sahara/plugins/vanilla/v1_2_1/run_scripts.py | 123 --
 sahara/plugins/vanilla/v1_2_1/scaling.py | 98 -
 .../plugins/vanilla/v1_2_1/versionhandler.py | 596 -----
 .../workflow_creator/workflow_factory.py | 4 +-
 sahara/tests/scenario/README.rst | 4 +-
 sahara/tests/unit/db/templates/test_update.py | 7 -
 .../tests/unit/plugins/general/test_utils.py | 2 +-
 .../tests/unit/plugins/vanilla/test_utils.py | 57 +-
 .../unit/plugins/vanilla/v1_2_1/__init__.py | 0
 .../vanilla/v1_2_1/test_dfsadmin_parsing.py | 54 -
 .../plugins/vanilla/v1_2_1/test_edp_engine.py | 96 -
 .../plugins/vanilla/v1_2_1/test_plugin.py | 312 ---
 .../vanilla/v1_2_1/test_run_scripts.py | 42 -
 .../tests/unit/service/edp/edp_test_utils.py | 2 +-
 .../service/edp/test_job_possible_configs.py | 9 +-
 .../tests/unit/service/heat/test_templates.py | 3 +-
 .../validation/edp/test_job_executor.py | 8 +-
 .../test_cluster_create_validation.py | 76 +-
 .../test_cluster_scaling_validation.py | 10 +-
 ...test_cluster_template_create_validation.py | 28 +-
 ...test_cluster_template_update_validation.py | 2 +-
 .../test_ng_template_validation_create.py | 56 +-
 .../test_ng_template_validation_update.py | 8 +-
 sahara/tests/unit/service/validation/utils.py | 20 +-
 sahara/tests/unit/utils/test_api_validator.py | 18 +-
 46 files changed, 144 insertions(+), 8828 deletions(-)
 delete mode 100644 etc/scenario/sahara-ci/vanilla-1.2.1.yaml.mako
 delete mode 100644 sahara/plugins/default_templates/vanilla/v1_2_1/cluster.json
 delete mode 100644 sahara/plugins/default_templates/vanilla/v1_2_1/master.json
 delete mode 100644 sahara/plugins/default_templates/vanilla/v1_2_1/secondary-master.json
 delete mode 100644 sahara/plugins/default_templates/vanilla/v1_2_1/worker.json
 delete mode 100644 sahara/plugins/vanilla/v1_2_1/__init__.py
 delete mode 100644 sahara/plugins/vanilla/v1_2_1/config_helper.py
 delete mode 100644 sahara/plugins/vanilla/v1_2_1/edp_engine.py
 delete mode 100644 sahara/plugins/vanilla/v1_2_1/mysql_helper.py
 delete mode 100644 sahara/plugins/vanilla/v1_2_1/oozie_helper.py
 delete mode 100644 sahara/plugins/vanilla/v1_2_1/resources/README.rst
 delete mode 100644 sahara/plugins/vanilla/v1_2_1/resources/core-default.xml
 delete mode 100644 sahara/plugins/vanilla/v1_2_1/resources/create_hive_db.sql
 delete mode 100644 sahara/plugins/vanilla/v1_2_1/resources/create_oozie_db.sql
 delete mode 100644 sahara/plugins/vanilla/v1_2_1/resources/hdfs-default.xml
 delete mode 100644 sahara/plugins/vanilla/v1_2_1/resources/hive-default.xml
 delete mode 100644 sahara/plugins/vanilla/v1_2_1/resources/mapred-default.xml
 delete mode 100644 sahara/plugins/vanilla/v1_2_1/resources/oozie-default.xml
 delete mode 100755 sahara/plugins/vanilla/v1_2_1/resources/topology.sh
 delete mode 100644 sahara/plugins/vanilla/v1_2_1/run_scripts.py
 delete mode 100644 sahara/plugins/vanilla/v1_2_1/scaling.py
 delete mode 100644 sahara/plugins/vanilla/v1_2_1/versionhandler.py
 delete mode 100644 sahara/tests/unit/plugins/vanilla/v1_2_1/__init__.py
 delete mode 100644 sahara/tests/unit/plugins/vanilla/v1_2_1/test_dfsadmin_parsing.py
 delete mode 100644 sahara/tests/unit/plugins/vanilla/v1_2_1/test_edp_engine.py
 delete mode 100644 sahara/tests/unit/plugins/vanilla/v1_2_1/test_plugin.py
 delete mode 100644 sahara/tests/unit/plugins/vanilla/v1_2_1/test_run_scripts.py

diff --git a/MANIFEST.in b/MANIFEST.in
index 11fff5ae31..e5dfa141e5 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -31,9 +31,6 @@ include sahara/plugins/cdh/v5_4_0/resources/*.sql
 include sahara/plugins/vanilla/hadoop2/resources/*.sh
 include sahara/plugins/vanilla/hadoop2/resources/*.sql
 include sahara/plugins/vanilla/hadoop2/resources/*.template
-include sahara/plugins/vanilla/v1_2_1/resources/*.sh
-include sahara/plugins/vanilla/v1_2_1/resources/*.sql
-include sahara/plugins/vanilla/v1_2_1/resources/*.xml
 include sahara/plugins/vanilla/v2_6_0/resources/*.xml
 include sahara/plugins/vanilla/v2_7_1/resources/*.xml
 include sahara/plugins/hdp/versions/version_1_3_2/resources/*.template
diff --git a/doc/source/userdoc/vanilla_plugin.rst b/doc/source/userdoc/vanilla_plugin.rst
index bd5b5f344f..cdfe4160fd 100644
--- a/doc/source/userdoc/vanilla_plugin.rst
+++ b/doc/source/userdoc/vanilla_plugin.rst
@@ -5,19 +5,13 @@ The vanilla plugin is a reference implementation which allows users to
 operate a cluster with Apache Hadoop.
 
 For cluster provisioning prepared images should be used. They already have
-Apache Hadoop 1.2.1 and Apache Hadoop 2.7.1 installed.
+Apache Hadoop 2.7.1 installed.
 You may build images by yourself using :doc:`vanilla_imagebuilder` or you
 could download prepared images from
 http://sahara-files.mirantis.com/images/upstream/liberty/
 
-Keep in mind that if you want to use the Swift Integration feature
-( :doc:`features`),
-Hadoop 1.2.1 must be patched with an implementation of Swift File System.
-For more information about patching required by the Swift Integration feature
-see :doc:`hadoop-swift`.
-
 Vanilla plugin requires an image to be tagged in Sahara Image Registry with
-two tags: 'vanilla' and '<hadoop version>' (e.g. '1.2.1').
+two tags: 'vanilla' and '<hadoop version>' (e.g. '2.7.1').
 
 The default username specified for these images is different for each
 distribution:
@@ -48,18 +42,6 @@ the cluster topology requested by user is verified for consistency.
Currently there are the following limitations in cluster topology for Vanilla plugin: -For Vanilla Hadoop version 1.X.X: - - + Cluster must contain exactly one namenode - + Cluster can contain at most one jobtracker - + Cluster can contain at most one secondary namenode - + Cluster can contain at most one oozie and this process is also required - for EDP - + Cluster can't contain oozie without jobtracker - + Cluster can't have tasktracker nodes if it doesn't have jobtracker - + Cluster can't have hive node if it doesn't have jobtracker. - + Cluster can have at most one hive node. - For Vanilla Hadoop version 2.X.X: + Cluster must contain exactly one namenode diff --git a/etc/scenario/sahara-ci/vanilla-1.2.1.yaml.mako b/etc/scenario/sahara-ci/vanilla-1.2.1.yaml.mako deleted file mode 100644 index eee1780d8b..0000000000 --- a/etc/scenario/sahara-ci/vanilla-1.2.1.yaml.mako +++ /dev/null @@ -1,73 +0,0 @@ -clusters: - - plugin_name: vanilla - plugin_version: 1.2.1 - image: ${vanilla_image} - node_group_templates: - - name: worker-tt-dn - flavor: ${ci_flavor_id} - node_processes: - - datanode - - tasktracker - volumes_per_node: 2 - volumes_size: 2 - auto_security_group: true - - name: worker-tt - flavor: ${ci_flavor_id} - node_processes: - - tasktracker - auto_security_group: true - - name: worker-dn - flavor: ${ci_flavor_id} - node_processes: - - datanode - volumes_per_node: 2 - volumes_size: 2 - auto_security_group: true - - name: master-jt-nn - flavor: ${ci_flavor_id} - node_processes: - - namenode - - jobtracker - auto_security_group: true - - name: master-sec-nn-oz - flavor: ${ci_flavor_id} - node_processes: - - oozie - - secondarynamenode - auto_security_group: true - - cluster_template: - name: vanilla121 - node_group_templates: - master-sec-nn-oz: 1 - master-jt-nn: 1 - worker-tt: 1 - worker-tt-dn: 2 - worker-dn: 1 - cluster_configs: - HDFS: - dfs.replication: 1 - MapReduce: - mapred.map.tasks.speculative.execution: False - mapred.child.java.opts: -Xmx512m - general: - 'Enable Swift': True - cluster: - name: ${cluster_name} - scaling: - - operation: resize - node_group: worker-tt-dn - size: 1 - - operation: resize - node_group: worker-dn - size: 0 - - operation: resize - node_group: worker-tt - size: 0 - - operation: add - node_group: worker-tt - size: 1 - - operation: add - node_group: worker-dn - size: 1 - edp_jobs_flow: hadoop_1 diff --git a/sahara/plugins/default_templates/vanilla/v1_2_1/cluster.json b/sahara/plugins/default_templates/vanilla/v1_2_1/cluster.json deleted file mode 100644 index 004fceaae5..0000000000 --- a/sahara/plugins/default_templates/vanilla/v1_2_1/cluster.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "plugin_name": "vanilla", - "hadoop_version": "1.2.1", - "node_groups": [ - { - "name": "worker", - "count": 3, - "node_group_template_id": "{vanilla-121-default-worker}" - }, - { - "name": "secondary-master", - "count": 1, - "node_group_template_id": "{vanilla-121-default-secondary-master}" - }, - { - "name": "master", - "count": 1, - "node_group_template_id": "{vanilla-121-default-master}" - } - ], - "name": "vanilla-121-default-cluster", - "neutron_management_network": "{neutron_management_network}", - "cluster_configs": {} -} diff --git a/sahara/plugins/default_templates/vanilla/v1_2_1/master.json b/sahara/plugins/default_templates/vanilla/v1_2_1/master.json deleted file mode 100644 index 0feb9716cd..0000000000 --- a/sahara/plugins/default_templates/vanilla/v1_2_1/master.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "plugin_name": "vanilla", - "hadoop_version": "1.2.1", - 
"node_processes": [ - "namenode", - "jobtracker" - ], - "name": "vanilla-121-default-master", - "floating_ip_pool": "{floating_ip_pool}", - "flavor_id": "{flavor_id}" -} diff --git a/sahara/plugins/default_templates/vanilla/v1_2_1/secondary-master.json b/sahara/plugins/default_templates/vanilla/v1_2_1/secondary-master.json deleted file mode 100644 index 18552309ea..0000000000 --- a/sahara/plugins/default_templates/vanilla/v1_2_1/secondary-master.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "plugin_name": "vanilla", - "hadoop_version": "1.2.1", - "node_processes": [ - "secondarynamenode", - "oozie" - ], - "name": "vanilla-121-default-secondary-master", - "floating_ip_pool": "{floating_ip_pool}", - "flavor_id": "{flavor_id}" -} diff --git a/sahara/plugins/default_templates/vanilla/v1_2_1/worker.json b/sahara/plugins/default_templates/vanilla/v1_2_1/worker.json deleted file mode 100644 index 58ae960d10..0000000000 --- a/sahara/plugins/default_templates/vanilla/v1_2_1/worker.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "plugin_name": "vanilla", - "hadoop_version": "1.2.1", - "node_processes": [ - "tasktracker", - "datanode" - ], - "name": "vanilla-121-default-worker", - "floating_ip_pool": "{floating_ip_pool}", - "flavor_id": "{flavor_id}" -} diff --git a/sahara/plugins/vanilla/v1_2_1/__init__.py b/sahara/plugins/vanilla/v1_2_1/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/sahara/plugins/vanilla/v1_2_1/config_helper.py b/sahara/plugins/vanilla/v1_2_1/config_helper.py deleted file mode 100644 index d777d7297c..0000000000 --- a/sahara/plugins/vanilla/v1_2_1/config_helper.py +++ /dev/null @@ -1,500 +0,0 @@ -# Copyright (c) 2013 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_config import cfg -from oslo_log import log as logging -import six - -from sahara import conductor as c -from sahara import context -from sahara import exceptions as ex -from sahara.i18n import _ -from sahara.i18n import _LW -from sahara.plugins import provisioning as p -from sahara.plugins import utils -from sahara.plugins.vanilla import utils as vu -from sahara.plugins.vanilla.v1_2_1 import mysql_helper as m_h -from sahara.plugins.vanilla.v1_2_1 import oozie_helper as o_h -from sahara.swift import swift_helper as swift -from sahara.topology import topology_helper as topology -from sahara.utils import crypto -from sahara.utils import types as types -from sahara.utils import xmlutils as x - - -conductor = c.API -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - -CORE_DEFAULT = x.load_hadoop_xml_defaults( - 'plugins/vanilla/v1_2_1/resources/core-default.xml') - -HDFS_DEFAULT = x.load_hadoop_xml_defaults( - 'plugins/vanilla/v1_2_1/resources/hdfs-default.xml') - -MAPRED_DEFAULT = x.load_hadoop_xml_defaults( - 'plugins/vanilla/v1_2_1/resources/mapred-default.xml') - -HIVE_DEFAULT = x.load_hadoop_xml_defaults( - 'plugins/vanilla/v1_2_1/resources/hive-default.xml') - -# Append Oozie configs fore core-site.xml -CORE_DEFAULT += o_h.OOZIE_CORE_DEFAULT - -XML_CONFS = { - "HDFS": [CORE_DEFAULT, HDFS_DEFAULT], - "MapReduce": [MAPRED_DEFAULT], - "JobFlow": [o_h.OOZIE_DEFAULT], - "Hive": [HIVE_DEFAULT] -} - -ENV_CONFS = { - "MapReduce": { - 'Job Tracker Heap Size': 'HADOOP_JOBTRACKER_OPTS=\\"-Xmx%sm\\"', - 'Task Tracker Heap Size': 'HADOOP_TASKTRACKER_OPTS=\\"-Xmx%sm\\"' - }, - "HDFS": { - 'Name Node Heap Size': 'HADOOP_NAMENODE_OPTS=\\"-Xmx%sm\\"', - 'Secondary Name Node Heap Size': 'HADOOP_SECONDARYNAMENODE_OPTS=' - '\\"-Xmx%sm\\"', - 'Data Node Heap Size': 'HADOOP_DATANODE_OPTS=\\"-Xmx%sm\\"' - }, - "JobFlow": { - 'Oozie Heap Size': 'CATALINA_OPTS -Xmx%sm' - } -} - -ENABLE_SWIFT = p.Config('Enable Swift', 'general', 'cluster', - config_type="bool", priority=1, - default_value=True, is_optional=True) - -ENABLE_DATA_LOCALITY = p.Config('Enable Data Locality', 'general', 'cluster', - config_type="bool", priority=1, - default_value=True, is_optional=True) - -ENABLE_MYSQL = p.Config('Enable MySQL', 'general', 'cluster', - config_type="bool", priority=1, - default_value=True, is_optional=True) - -# Default set to 1 day, which is the default Keystone token -# expiration time. After the token is expired we can't continue -# scaling anyway. 
-DECOMMISSIONING_TIMEOUT = p.Config('Decommissioning Timeout', 'general', - 'cluster', config_type='int', priority=1, - default_value=86400, is_optional=True, - description='Timeout for datanode' - ' decommissioning operation' - ' during scaling, in seconds') - -DATANODES_STARTUP_TIMEOUT = p.Config( - 'Datanodes startup timeout', 'general', 'cluster', config_type='int', - priority=1, default_value=10800, is_optional=True, - description='Timeout for datanodes startup, in seconds') - - -HIDDEN_CONFS = ['fs.default.name', 'dfs.name.dir', 'dfs.data.dir', - 'mapred.job.tracker', 'mapred.system.dir', 'mapred.local.dir', - 'hadoop.proxyuser.hadoop.hosts', - 'hadoop.proxyuser.hadoop.groups'] - -CLUSTER_WIDE_CONFS = ['dfs.block.size', 'dfs.permissions', 'dfs.replication', - 'dfs.replication.min', 'dfs.replication.max', - 'io.file.buffer.size', 'mapreduce.job.counters.max', - 'mapred.output.compress', 'io.compression.codecs', - 'mapred.output.compression.codec', - 'mapred.output.compression.type', - 'mapred.compress.map.output', - 'mapred.map.output.compression.codec'] - -PRIORITY_1_CONFS = ['dfs.datanode.du.reserved', - 'dfs.datanode.failed.volumes.tolerated', - 'dfs.datanode.max.xcievers', 'dfs.datanode.handler.count', - 'dfs.namenode.handler.count', 'mapred.child.java.opts', - 'mapred.jobtracker.maxtasks.per.job', - 'mapred.job.tracker.handler.count', - 'mapred.map.child.java.opts', - 'mapred.reduce.child.java.opts', - 'io.sort.mb', 'mapred.tasktracker.map.tasks.maximum', - 'mapred.tasktracker.reduce.tasks.maximum'] - -# for now we have not so many cluster-wide configs -# lets consider all of them having high priority -PRIORITY_1_CONFS += CLUSTER_WIDE_CONFS - - -def _initialise_configs(): - configs = [] - for service, config_lists in six.iteritems(XML_CONFS): - for config_list in config_lists: - for config in config_list: - if config['name'] not in HIDDEN_CONFS: - cfg = p.Config(config['name'], service, "node", - is_optional=True, config_type="string", - default_value=str(config['value']), - description=config['description']) - if cfg.default_value in ["true", "false"]: - cfg.config_type = "bool" - cfg.default_value = (cfg.default_value == 'true') - elif types.is_int(cfg.default_value): - cfg.config_type = "int" - cfg.default_value = int(cfg.default_value) - if config['name'] in CLUSTER_WIDE_CONFS: - cfg.scope = 'cluster' - if config['name'] in PRIORITY_1_CONFS: - cfg.priority = 1 - configs.append(cfg) - - for service, config_items in six.iteritems(ENV_CONFS): - for name, param_format_str in six.iteritems(config_items): - configs.append(p.Config(name, service, "node", - default_value=1024, priority=1, - config_type="int")) - - configs.append(ENABLE_SWIFT) - configs.append(ENABLE_MYSQL) - configs.append(DECOMMISSIONING_TIMEOUT) - configs.append(DATANODES_STARTUP_TIMEOUT) - if CONF.enable_data_locality: - configs.append(ENABLE_DATA_LOCALITY) - - return configs - -# Initialise plugin Hadoop configurations -PLUGIN_CONFIGS = _initialise_configs() - - -def get_plugin_configs(): - return PLUGIN_CONFIGS - - -def get_general_configs(hive_hostname, passwd_hive_mysql): - config = { - ENABLE_SWIFT.name: { - 'default_value': ENABLE_SWIFT.default_value, - 'conf': extract_name_values(swift.get_swift_configs()) - }, - ENABLE_MYSQL.name: { - 'default_value': ENABLE_MYSQL.default_value, - 'conf': m_h.get_required_mysql_configs( - hive_hostname, passwd_hive_mysql) - } - } - if CONF.enable_data_locality: - config.update({ - ENABLE_DATA_LOCALITY.name: { - 'default_value': ENABLE_DATA_LOCALITY.default_value, - 'conf': 
extract_name_values(topology.vm_awareness_all_config()) - } - }) - return config - - -def get_config_value(service, name, cluster=None): - if cluster: - sahara_configs = generate_sahara_configs(cluster) - if sahara_configs.get(name): - return sahara_configs[name] - - for ng in cluster.node_groups: - if (ng.configuration().get(service) and - ng.configuration()[service].get(name)): - return ng.configuration()[service][name] - - for configs in PLUGIN_CONFIGS: - if configs.applicable_target == service and configs.name == name: - return configs.default_value - - raise ex.ConfigurationError(_("Unable get parameter '%(parameter)s' from " - "service %(service)s") - % {"parameter": name, "service": service}) - - -def generate_cfg_from_general(cfg, configs, general_config, - rest_excluded=False): - if 'general' in configs: - for nm in general_config: - if nm not in configs['general'] and not rest_excluded: - configs['general'][nm] = general_config[nm]['default_value'] - for name, value in configs['general'].items(): - if value: - cfg = _set_config(cfg, general_config, name) - LOG.debug("Applying config: {config}".format(config=name)) - else: - cfg = _set_config(cfg, general_config) - return cfg - - -def get_hadoop_ssh_keys(cluster): - extra = cluster.extra.to_dict() if cluster.extra else {} - private_key = extra.get('hadoop_private_ssh_key') - public_key = extra.get('hadoop_public_ssh_key') - if not private_key or not public_key: - private_key, public_key = crypto.generate_key_pair() - extra['hadoop_private_ssh_key'] = private_key - extra['hadoop_public_ssh_key'] = public_key - conductor.cluster_update(context.ctx(), cluster, {'extra': extra}) - - return private_key, public_key - - -def generate_sahara_configs(cluster, node_group=None): - nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster)) - jt_hostname = vu.get_instance_hostname(vu.get_jobtracker(cluster)) - oozie_hostname = vu.get_instance_hostname(vu.get_oozie(cluster)) - hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster)) - - storage_path = node_group.storage_paths() if node_group else None - - # inserting common configs depends on provisioned VMs and HDFS placement - # TODO(aignatov): should be moved to cluster context - - cfg = { - 'fs.default.name': 'hdfs://%s:8020' % nn_hostname, - 'dfs.name.dir': extract_hadoop_path(storage_path, - '/lib/hadoop/hdfs/namenode'), - 'dfs.data.dir': extract_hadoop_path(storage_path, - '/lib/hadoop/hdfs/datanode'), - 'dfs.hosts': '/etc/hadoop/dn.incl', - 'dfs.hosts.exclude': '/etc/hadoop/dn.excl', - } - - if jt_hostname: - mr_cfg = { - 'mapred.job.tracker': '%s:8021' % jt_hostname, - 'mapred.system.dir': extract_hadoop_path(storage_path, - '/mapred/mapredsystem'), - 'mapred.local.dir': extract_hadoop_path(storage_path, - '/lib/hadoop/mapred'), - 'mapred.hosts': '/etc/hadoop/tt.incl', - 'mapred.hosts.exclude': '/etc/hadoop/tt.excl', - } - cfg.update(mr_cfg) - - if oozie_hostname: - o_cfg = { - 'hadoop.proxyuser.hadoop.hosts': "localhost," + oozie_hostname, - 'hadoop.proxyuser.hadoop.groups': 'hadoop', - } - cfg.update(o_cfg) - LOG.debug('Applied Oozie configs for core-site.xml') - cfg.update(o_h.get_oozie_required_xml_configs()) - LOG.debug('Applied Oozie configs for oozie-site.xml') - - if hive_hostname: - h_cfg = { - 'hive.warehouse.subdir.inherit.perms': True, - 'javax.jdo.option.ConnectionURL': - 'jdbc:derby:;databaseName=/opt/hive/metastore_db;create=true' - } - cfg.update(h_cfg) - LOG.debug('Applied Hive config for hive metastore server') - - return cfg - - -def 
generate_xml_configs(cluster, node_group, hive_mysql_passwd): - oozie_hostname = vu.get_instance_hostname(vu.get_oozie(cluster)) - hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster)) - - ng_configs = node_group.configuration() - - general_cfg = get_general_configs(hive_hostname, hive_mysql_passwd) - - all_cfg = generate_sahara_configs(cluster, node_group) - - # inserting user-defined configs - for key, value in extract_xml_confs(ng_configs): - all_cfg[key] = value - - # applying swift configs if user enabled it - swift_xml_confs = swift.get_swift_configs() - all_cfg = generate_cfg_from_general(all_cfg, ng_configs, general_cfg) - - # invoking applied configs to appropriate xml files - core_all = CORE_DEFAULT + swift_xml_confs - mapred_all = MAPRED_DEFAULT - - if CONF.enable_data_locality: - all_cfg.update(topology.TOPOLOGY_CONFIG) - - # applying vm awareness configs - core_all += topology.vm_awareness_core_config() - mapred_all += topology.vm_awareness_mapred_config() - - xml_configs = { - 'core-site': x.create_hadoop_xml(all_cfg, core_all), - 'mapred-site': x.create_hadoop_xml(all_cfg, mapred_all), - 'hdfs-site': x.create_hadoop_xml(all_cfg, HDFS_DEFAULT) - } - - if hive_hostname: - cfg = all_cfg - cfg_filter = HIVE_DEFAULT - proxy_configs = cluster.cluster_configs.get('proxy_configs') - if CONF.use_identity_api_v3 and proxy_configs: - cfg, cfg_filter = _inject_swift_trust_info(cfg, - cfg_filter, - proxy_configs) - xml_configs.update({'hive-site': - x.create_hadoop_xml(cfg, cfg_filter)}) - LOG.debug('Generated hive-site.xml for hive {host}'.format( - host=hive_hostname)) - - if oozie_hostname: - xml_configs.update({'oozie-site': - x.create_hadoop_xml(all_cfg, o_h.OOZIE_DEFAULT)}) - LOG.debug('Generated oozie-site.xml for oozie {host}'.format( - host=oozie_hostname)) - - return xml_configs - - -def _inject_swift_trust_info(cfg, cfg_filter, proxy_configs): - cfg = cfg.copy() - cfg.update({ - swift.HADOOP_SWIFT_USERNAME: proxy_configs['proxy_username'], - swift.HADOOP_SWIFT_PASSWORD: proxy_configs['proxy_password'], - swift.HADOOP_SWIFT_TRUST_ID: proxy_configs['proxy_trust_id'], - swift.HADOOP_SWIFT_DOMAIN_NAME: CONF.proxy_user_domain_name - }) - - allow_swift_auth_filter = [ - {'name': swift.HADOOP_SWIFT_USERNAME}, - {'name': swift.HADOOP_SWIFT_PASSWORD}, - {'name': swift.HADOOP_SWIFT_TRUST_ID}, - {'name': swift.HADOOP_SWIFT_DOMAIN_NAME} - ] - cfg_filter = cfg_filter + allow_swift_auth_filter - - return cfg, cfg_filter - - -def extract_environment_confs(configs): - """Returns environment specific Hadoop configurations. - - :returns list of Hadoop parameters which should be passed via environment - """ - - lst = [] - for service, srv_confs in configs.items(): - if ENV_CONFS.get(service): - for param_name, param_value in srv_confs.items(): - for cfg_name, cfg_format_str in ENV_CONFS[service].items(): - if param_name == cfg_name and param_value is not None: - lst.append(cfg_format_str % param_value) - else: - LOG.warning(_LW("Plugin received wrong applicable target {service}" - " in environmental configs").format( - service=service)) - return sorted(lst) - - -def extract_xml_confs(configs): - """Returns xml specific Hadoop configurations. 
- - :returns list of Hadoop parameters which should be passed into general - configs like core-site.xml - """ - - lst = [] - for service, srv_confs in configs.items(): - if XML_CONFS.get(service): - for param_name, param_value in srv_confs.items(): - for cfg_list in XML_CONFS[service]: - names = [cfg['name'] for cfg in cfg_list] - if param_name in names and param_value is not None: - lst.append((param_name, param_value)) - else: - LOG.warning(_LW("Plugin received wrong applicable target {service}" - " for xml configs").format(service=service)) - return sorted(lst) - - -def generate_setup_script(storage_paths, env_configs, append_oozie=False): - script_lines = ["#!/bin/bash -x"] - script_lines.append("echo -n > /tmp/hadoop-env.sh") - for line in env_configs: - if 'HADOOP' in line: - script_lines.append('echo "%s" >> /tmp/hadoop-env.sh' % line) - script_lines.append("cat /etc/hadoop/hadoop-env.sh >> /tmp/hadoop-env.sh") - script_lines.append("cp /tmp/hadoop-env.sh /etc/hadoop/hadoop-env.sh") - - hadoop_log = storage_paths[0] + "/log/hadoop/\$USER/" - script_lines.append('sed -i "s,export HADOOP_LOG_DIR=.*,' - 'export HADOOP_LOG_DIR=%s," /etc/hadoop/hadoop-env.sh' - % hadoop_log) - - hadoop_log = storage_paths[0] + "/log/hadoop/hdfs" - script_lines.append('sed -i "s,export HADOOP_SECURE_DN_LOG_DIR=.*,' - 'export HADOOP_SECURE_DN_LOG_DIR=%s," ' - '/etc/hadoop/hadoop-env.sh' % hadoop_log) - - if append_oozie: - o_h.append_oozie_setup(script_lines, env_configs) - - for path in storage_paths: - script_lines.append("chown -R hadoop:hadoop %s" % path) - script_lines.append("chmod -R 755 %s" % path) - return "\n".join(script_lines) - - -def extract_name_values(configs): - return {cfg['name']: cfg['value'] for cfg in configs} - - -def extract_hadoop_path(lst, hadoop_dir): - if lst: - return ",".join([p + hadoop_dir for p in lst]) - - -def _set_config(cfg, gen_cfg, name=None): - if name in gen_cfg: - cfg.update(gen_cfg[name]['conf']) - if name is None: - for name in gen_cfg: - cfg.update(gen_cfg[name]['conf']) - return cfg - - -def _get_general_cluster_config_value(cluster, option): - conf = cluster.cluster_configs - - if 'general' in conf and option.name in conf['general']: - return conf['general'][option.name] - - return option.default_value - - -def is_mysql_enable(cluster): - return _get_general_cluster_config_value(cluster, ENABLE_MYSQL) - - -def is_data_locality_enabled(cluster): - if not CONF.enable_data_locality: - return False - return _get_general_cluster_config_value(cluster, ENABLE_DATA_LOCALITY) - - -def is_swift_enable(cluster): - return _get_general_cluster_config_value(cluster, ENABLE_SWIFT) - - -def get_decommissioning_timeout(cluster): - return _get_general_cluster_config_value(cluster, DECOMMISSIONING_TIMEOUT) - - -def get_port_from_config(service, name, cluster=None): - address = get_config_value(service, name, cluster) - return utils.get_port_from_address(address) diff --git a/sahara/plugins/vanilla/v1_2_1/edp_engine.py b/sahara/plugins/vanilla/v1_2_1/edp_engine.py deleted file mode 100644 index c371af7daa..0000000000 --- a/sahara/plugins/vanilla/v1_2_1/edp_engine.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (c) 2014 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from sahara.plugins.vanilla import confighints_helper as ch_helper -from sahara.plugins.vanilla import edp_engine -from sahara.service.edp import hdfs_helper -from sahara.utils import edp - - -class EdpOozieEngine(edp_engine.EdpOozieEngine): - - def create_hdfs_dir(self, remote, dir_name): - hdfs_helper.create_dir_hadoop1(remote, dir_name, self.get_hdfs_user()) - - @staticmethod - def get_possible_job_config(job_type): - if edp.compare_job_type(job_type, edp.JOB_TYPE_HIVE): - return {'job_config': ch_helper.get_possible_hive_config_from( - 'plugins/vanilla/v1_2_1/resources/hive-default.xml')} - if edp.compare_job_type(job_type, - edp.JOB_TYPE_MAPREDUCE, - edp.JOB_TYPE_MAPREDUCE_STREAMING): - return {'job_config': ch_helper.get_possible_mapreduce_config_from( - 'plugins/vanilla/v1_2_1/resources/mapred-default.xml')} - if edp.compare_job_type(job_type, edp.JOB_TYPE_PIG): - return {'job_config': ch_helper.get_possible_pig_config_from( - 'plugins/vanilla/v1_2_1/resources/mapred-default.xml')} - return edp_engine.EdpOozieEngine.get_possible_job_config(job_type) - - def get_resource_manager_uri(self, cluster): - return cluster['info']['MapReduce']['JobTracker'] diff --git a/sahara/plugins/vanilla/v1_2_1/mysql_helper.py b/sahara/plugins/vanilla/v1_2_1/mysql_helper.py deleted file mode 100644 index 6527e07e8d..0000000000 --- a/sahara/plugins/vanilla/v1_2_1/mysql_helper.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (c) 2013 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
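The EdpOozieEngine deleted above mostly mapped EDP job types onto
configuration hints parsed from the bundled default XMLs. A condensed
sketch of that dispatch; the resource paths match the deleted module,
while "load_hints" is an illustrative stand-in for the
confighints_helper parsing (the original also fell back to the parent
engine for other job types)::

    def load_hints(path):
        # Stand-in for confighints_helper: parse the XML at "path"
        # and return the user-overridable parameters it defines.
        return {'configs': [], 'params': [], 'args': []}

    def get_possible_job_config(job_type):
        hints_by_type = {
            'Hive': 'plugins/vanilla/v1_2_1/resources/hive-default.xml',
            'MapReduce': 'plugins/vanilla/v1_2_1/resources/mapred-default.xml',
            'Pig': 'plugins/vanilla/v1_2_1/resources/mapred-default.xml',
        }
        path = hints_by_type.get(job_type)
        return {'job_config': load_hints(path)} if path else None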
- - -def get_hive_mysql_configs(metastore_host, passwd): - return { - 'javax.jdo.option.ConnectionURL': 'jdbc:mysql://%s/metastore' % - metastore_host, - 'javax.jdo.option.ConnectionDriverName': 'com.mysql.jdbc.Driver', - 'javax.jdo.option.ConnectionUserName': 'hive', - 'javax.jdo.option.ConnectionPassword': passwd, - 'datanucleus.autoCreateSchema': 'false', - 'datanucleus.fixedDatastore': 'true', - 'hive.metastore.uris': 'thrift://%s:9083' % metastore_host, - } - - -def get_oozie_mysql_configs(): - return { - 'oozie.service.JPAService.jdbc.driver': - 'com.mysql.jdbc.Driver', - 'oozie.service.JPAService.jdbc.url': - 'jdbc:mysql://localhost:3306/oozie', - 'oozie.service.JPAService.jdbc.username': 'oozie', - 'oozie.service.JPAService.jdbc.password': 'oozie' - } - - -def get_required_mysql_configs(hive_hostname, passwd_mysql): - configs = get_oozie_mysql_configs() - if hive_hostname: - configs.update(get_hive_mysql_configs(hive_hostname, passwd_mysql)) - return configs diff --git a/sahara/plugins/vanilla/v1_2_1/oozie_helper.py b/sahara/plugins/vanilla/v1_2_1/oozie_helper.py deleted file mode 100644 index 87c740afa8..0000000000 --- a/sahara/plugins/vanilla/v1_2_1/oozie_helper.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) 2013 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
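In use, the MySQL helper above just merged two flat dicts of JDBC
settings. For a Hive metastore on a host named "master-1" (hostname and
password here are illustrative) the result contains entries such as::

    configs = get_required_mysql_configs('master-1', 's3cret')
    # Oozie always gets its local MySQL store:
    #   oozie.service.JPAService.jdbc.url -> jdbc:mysql://localhost:3306/oozie
    # With a Hive host, the metastore settings are merged in:
    #   javax.jdo.option.ConnectionURL -> jdbc:mysql://master-1/metastore
    #   hive.metastore.uris            -> thrift://master-1:9083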
- -from sahara.utils import xmlutils as x - - -OOZIE_DEFAULT = x.load_hadoop_xml_defaults( - 'plugins/vanilla/v1_2_1/resources/oozie-default.xml') - -OOZIE_CORE_DEFAULT = [ - { - 'name': 'hadoop.proxyuser.hadoop.hosts', - 'value': "localhost" - }, - { - 'name': 'hadoop.proxyuser.hadoop.groups', - 'value': 'hadoop' - }] - -OOZIE_HEAPSIZE_DEFAULT = "CATALINA_OPTS -Xmx1024m" - - -def get_oozie_required_xml_configs(): - """Following configs differ from default configs in oozie-default.xml.""" - return { - 'oozie.service.ActionService.executor.ext.classes': - 'org.apache.oozie.action.email.EmailActionExecutor,' - 'org.apache.oozie.action.hadoop.HiveActionExecutor,' - 'org.apache.oozie.action.hadoop.ShellActionExecutor,' - 'org.apache.oozie.action.hadoop.SqoopActionExecutor,' - 'org.apache.oozie.action.hadoop.DistcpActionExecutor', - - 'oozie.service.SchemaService.wf.ext.schemas': - 'shell-action-0.1.xsd,shell-action-0.2.xsd,shell-action-0.3.xsd,' - 'email-action-0.1.xsd,hive-action-0.2.xsd,hive-action-0.3.xsd,' - 'hive-action-0.4.xsd,hive-action-0.5.xsd,sqoop-action-0.2.xsd,' - 'sqoop-action-0.3.xsd,sqoop-action-0.4.xsd,ssh-action-0.1.xsd,' - 'ssh-action-0.2.xsd,distcp-action-0.1.xsd,distcp-action-0.2.xsd,' - 'oozie-sla-0.1.xsd,oozie-sla-0.2.xsd', - - 'oozie.service.JPAService.create.db.schema': 'false', - } - - -def append_oozie_setup(setup_script, env_configs): - for line in env_configs: - if 'CATALINA_OPT' in line: - setup_script.append('sed -i "s,%s,%s," ' - '/opt/oozie/conf/oozie-env.sh' - % (OOZIE_HEAPSIZE_DEFAULT, line)) diff --git a/sahara/plugins/vanilla/v1_2_1/resources/README.rst b/sahara/plugins/vanilla/v1_2_1/resources/README.rst deleted file mode 100644 index d23b81818e..0000000000 --- a/sahara/plugins/vanilla/v1_2_1/resources/README.rst +++ /dev/null @@ -1,27 +0,0 @@ -Apache Hadoop Configurations for Sahara -======================================== - -This directory contains default XML configuration files: - -* core-default.xml -* hdfs-default.xml -* mapred-default.xml -* oozie-default.xml -* hive-default.xml - -These files are applied for Sahara's plugin of Apache Hadoop version 1.2.1, -Oozie 4.0.0, Hive version 0.11.0. - - -Files were taken from here: - -* `core-default.xml `_ -* `hdfs-default.xml `_ -* `mapred-default.xml `_ -* `oozie-default.xml `_ -* `hive-default.xml `_ - -XML configs are used to expose default Hadoop configurations to the users through -the Sahara's REST API. It allows users to override some config values which will -be pushed to the provisioned VMs running Hadoop services as part of appropriate -xml config. diff --git a/sahara/plugins/vanilla/v1_2_1/resources/core-default.xml b/sahara/plugins/vanilla/v1_2_1/resources/core-default.xml deleted file mode 100644 index 613595f79f..0000000000 --- a/sahara/plugins/vanilla/v1_2_1/resources/core-default.xml +++ /dev/null @@ -1,632 +0,0 @@ - - - - - - - - - - - - - hadoop.tmp.dir - /tmp/hadoop-${user.name} - A base for other temporary directories. - - - - hadoop.native.lib - true - Should native hadoop libraries, if present, be used. - - - - hadoop.http.filter.initializers - - A comma separated list of class names. Each class in the list - must extend org.apache.hadoop.http.FilterInitializer. The corresponding - Filter will be initialized. Then, the Filter will be applied to all user - facing jsp and servlet web pages. The ordering of the list defines the - ordering of the filters. 
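Note how append_oozie_setup above works: for every environment line
mentioning CATALINA_OPT it emits a sed expression that swaps the default
heap setting for the user's value in oozie-env.sh. For example::

    script = []
    append_oozie_setup(script, ['CATALINA_OPTS -Xmx2048m'])
    # script now holds:
    #   sed -i "s,CATALINA_OPTS -Xmx1024m,CATALINA_OPTS -Xmx2048m,"
    #       /opt/oozie/conf/oozie-env.sh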
- - - - hadoop.security.group.mapping - org.apache.hadoop.security.ShellBasedUnixGroupsMapping - Class for user to group mapping (get groups for a given user) - - - - - hadoop.security.authorization - false - Is service-level authorization enabled? - - - - hadoop.security.instrumentation.requires.admin - false - - Indicates if administrator ACLs are required to access - instrumentation servlets (JMX, METRICS, CONF, STACKS). - - - - - hadoop.security.authentication - simple - Possible values are simple (no authentication), and kerberos - - - - - hadoop.security.token.service.use_ip - true - Controls whether tokens always use IP addresses. DNS changes - will not be detected if this option is enabled. Existing client connections - that break will always reconnect to the IP of the original host. New clients - will connect to the host's new IP but fail to locate a token. Disabling - this option will allow existing and new clients to detect an IP change and - continue to locate the new host's token. - - - - - hadoop.security.use-weak-http-crypto - false - If enabled, use KSSL to authenticate HTTP connections to the - NameNode. Due to a bug in JDK6, using KSSL requires one to configure - Kerberos tickets to use encryption types that are known to be - cryptographically weak. If disabled, SPNEGO will be used for HTTP - authentication, which supports stronger encryption types. - - - - - - - - - hadoop.logfile.size - 10000000 - The max size of each log file - - - - hadoop.logfile.count - 10 - The max number of log files - - - - - io.file.buffer.size - 4096 - The size of buffer for use in sequence files. - The size of this buffer should probably be a multiple of hardware - page size (4096 on Intel x86), and it determines how much data is - buffered during read and write operations. - - - - io.bytes.per.checksum - 512 - The number of bytes per checksum. Must not be larger than - io.file.buffer.size. - - - - io.skip.checksum.errors - false - If true, when a checksum error is encountered while - reading a sequence file, entries are skipped, instead of throwing an - exception. - - - - io.compression.codecs - org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec,org.apache.hadoop.io.compress.SnappyCodec - A list of the compression codec classes that can be used - for compression/decompression. - - - - io.serializations - org.apache.hadoop.io.serializer.WritableSerialization - A list of serialization classes that can be used for - obtaining serializers and deserializers. - - - - - - fs.default.name - file:/// - The name of the default file system. A URI whose - scheme and authority determine the FileSystem implementation. The - uri's scheme determines the config property (fs.SCHEME.impl) naming - the FileSystem implementation class. The uri's authority is used to - determine the host, port, etc. for a filesystem. - - - - fs.trash.interval - 0 - Number of minutes between trash checkpoints. - If zero, the trash feature is disabled. - - - - - fs.file.impl - org.apache.hadoop.fs.LocalFileSystem - The FileSystem for file: uris. - - - - fs.hdfs.impl - org.apache.hadoop.hdfs.DistributedFileSystem - The FileSystem for hdfs: uris. - - - - fs.s3.impl - org.apache.hadoop.fs.s3.S3FileSystem - The FileSystem for s3: uris. - - - - fs.s3n.impl - org.apache.hadoop.fs.s3native.NativeS3FileSystem - The FileSystem for s3n: (Native S3) uris. - - - - fs.kfs.impl - org.apache.hadoop.fs.kfs.KosmosFileSystem - The FileSystem for kfs: uris. 
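config_helper.py above reads these files through
xmlutils.load_hadoop_xml_defaults, which flattens every property element
into a name/value/description dict. A self-contained sketch of that
parsing, assuming the standard Hadoop defaults layout (this is not
Sahara's actual implementation)::

    import xml.etree.ElementTree as ET

    def load_hadoop_xml_defaults(path):
        # Hadoop default files are a <configuration> root wrapping
        # <property> elements with name/value/description children.
        configs = []
        for prop in ET.parse(path).getroot().iter('property'):
            configs.append({
                'name': prop.findtext('name'),
                'value': prop.findtext('value', default=''),
                'description': prop.findtext('description', default=''),
            })
        return configs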
- - - - fs.hftp.impl - org.apache.hadoop.hdfs.HftpFileSystem - - - - fs.hsftp.impl - org.apache.hadoop.hdfs.HsftpFileSystem - - - - fs.webhdfs.impl - org.apache.hadoop.hdfs.web.WebHdfsFileSystem - - - - fs.ftp.impl - org.apache.hadoop.fs.ftp.FTPFileSystem - The FileSystem for ftp: uris. - - - - fs.ramfs.impl - org.apache.hadoop.fs.InMemoryFileSystem - The FileSystem for ramfs: uris. - - - - fs.har.impl - org.apache.hadoop.fs.HarFileSystem - The filesystem for Hadoop archives. - - - - fs.har.impl.disable.cache - true - Don't cache 'har' filesystem instances. - - - - fs.checkpoint.dir - ${hadoop.tmp.dir}/dfs/namesecondary - Determines where on the local filesystem the DFS secondary - name node should store the temporary images to merge. - If this is a comma-delimited list of directories then the image is - replicated in all of the directories for redundancy. - - - - - fs.checkpoint.edits.dir - ${fs.checkpoint.dir} - Determines where on the local filesystem the DFS secondary - name node should store the temporary edits to merge. - If this is a comma-delimited list of directoires then teh edits is - replicated in all of the directoires for redundancy. - Default value is same as fs.checkpoint.dir - - - - - fs.checkpoint.period - 3600 - The number of seconds between two periodic checkpoints. - - - - - fs.checkpoint.size - 67108864 - The size of the current edit log (in bytes) that triggers - a periodic checkpoint even if the fs.checkpoint.period hasn't expired. - - - - - - - fs.s3.block.size - 67108864 - Block size to use when writing files to S3. - - - - fs.s3.buffer.dir - ${hadoop.tmp.dir}/s3 - Determines where on the local filesystem the S3 filesystem - should store files before sending them to S3 - (or after retrieving them from S3). - - - - - fs.s3.maxRetries - 4 - The maximum number of retries for reading or writing files to S3, - before we signal failure to the application. - - - - - fs.s3.sleepTimeSeconds - 10 - The number of seconds to sleep between each S3 retry. - - - - - - local.cache.size - 10737418240 - The limit on the size of cache you want to keep, set by default - to 10GB. This will act as a soft limit on the cache directory for out of band data. - - - - - io.seqfile.compress.blocksize - 1000000 - The minimum block size for compression in block compressed - SequenceFiles. - - - - - io.seqfile.lazydecompress - true - Should values of block-compressed SequenceFiles be decompressed - only when necessary. - - - - - io.seqfile.sorter.recordlimit - 1000000 - The limit on number of records to be kept in memory in a spill - in SequenceFiles.Sorter - - - - - io.mapfile.bloom.size - 1048576 - The size of BloomFilter-s used in BloomMapFile. Each time this many - keys is appended the next BloomFilter will be created (inside a DynamicBloomFilter). - Larger values minimize the number of filters, which slightly increases the performance, - but may waste too much space if the total number of keys is usually much smaller - than this number. - - - - - io.mapfile.bloom.error.rate - 0.005 - The rate of false positives in BloomFilter-s used in BloomMapFile. - As this value decreases, the size of BloomFilter-s increases exponentially. This - value is the probability of encountering false positives (default is 0.5%). - - - - - hadoop.util.hash.type - murmur - The default implementation of Hash. Currently this can take one of the - two values: 'murmur' to select MurmurHash and 'jenkins' to select JenkinsHash. 
- - - - - - - - ipc.client.idlethreshold - 4000 - Defines the threshold number of connections after which - connections will be inspected for idleness. - - - - - ipc.client.kill.max - 10 - Defines the maximum number of clients to disconnect in one go. - - - - - ipc.client.connection.maxidletime - 10000 - The maximum time in msec after which a client will bring down the - connection to the server. - - - - - ipc.client.connect.max.retries - 10 - Indicates the number of retries a client will make to establish - a server connection. - - - - - ipc.server.listen.queue.size - 128 - Indicates the length of the listen queue for servers accepting - client connections. - - - - - ipc.server.tcpnodelay - false - Turn on/off Nagle's algorithm for the TCP socket connection on - the server. Setting to true disables the algorithm and may decrease latency - with a cost of more/smaller packets. - - - - - ipc.client.tcpnodelay - false - Turn on/off Nagle's algorithm for the TCP socket connection on - the client. Setting to true disables the algorithm and may decrease latency - with a cost of more/smaller packets. - - - - - - - - webinterface.private.actions - false - If set to true, the web interfaces of JT and NN may contain - actions, such as kill job, delete file, etc., that should - not be exposed to public. Enable this option if the interfaces - are only reachable by those who have the right authorization. - - - - - - - hadoop.rpc.socket.factory.class.default - org.apache.hadoop.net.StandardSocketFactory - Default SocketFactory to use. This parameter is expected to be - formatted as "package.FactoryClassName". - - - - - hadoop.rpc.socket.factory.class.ClientProtocol - - SocketFactory to use to connect to a DFS. If null or empty, use - hadoop.rpc.socket.class.default. This socket factory is also used by - DFSClient to create sockets to DataNodes. - - - - - - - hadoop.socks.server - - Address (host:port) of the SOCKS server to be used by the - SocksSocketFactory. - - - - - - - - topology.node.switch.mapping.impl - org.apache.hadoop.net.ScriptBasedMapping - The default implementation of the DNSToSwitchMapping. It - invokes a script specified in topology.script.file.name to resolve - node names. If the value for topology.script.file.name is not set, the - default value of DEFAULT_RACK is returned for all node names. - - - - - net.topology.impl - org.apache.hadoop.net.NetworkTopology - The default implementation of NetworkTopology which is classic three layer one. - - - - - topology.script.file.name - - The script name that should be invoked to resolve DNS names to - NetworkTopology names. Example: the script would take host.foo.bar as an - argument, and return /rack1 as the output. - - - - - topology.script.number.args - 100 - The max number of args that the script configured with - topology.script.file.name should be run with. Each arg is an - IP address. - - - - - hadoop.security.uid.cache.secs - 14400 - NativeIO maintains a cache from UID to UserName. This is - the timeout for an entry in that cache. - - - - - - hadoop.http.authentication.type - simple - - Defines authentication used for Oozie HTTP endpoint. - Supported values are: simple | kerberos | #AUTHENTICATION_HANDLER_CLASSNAME# - - - - - hadoop.http.authentication.token.validity - 36000 - - Indicates how long (in seconds) an authentication token is valid before it has - to be renewed. 
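The topology.script.file.name contract described above is what the
deleted resources/topology.sh implemented: Hadoop invokes the script
with up to topology.script.number.args host arguments and expects one
rack path per argument on stdout. A hedged Python equivalent with an
illustrative in-memory map (the real script's data source is not shown
in this patch)::

    import sys

    RACK_MAP = {'host1.example.com': '/rack1'}  # illustrative data

    def main(hosts):
        # Print one rack path per requested host; unknown hosts fall
        # back to the conventional /default-rack.
        for host in hosts:
            print(RACK_MAP.get(host, '/default-rack'))

    if __name__ == '__main__':
        main(sys.argv[1:])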
- - - - - hadoop.http.authentication.signature.secret.file - ${user.home}/hadoop-http-auth-signature-secret - - The signature secret for signing the authentication tokens. - If not set a random secret is generated at startup time. - The same secret should be used for JT/NN/DN/TT configurations. - - - - - hadoop.http.authentication.cookie.domain - - - The domain to use for the HTTP cookie that stores the authentication token. - In order to authentiation to work correctly across all Hadoop nodes web-consoles - the domain must be correctly set. - IMPORTANT: when using IP addresses, browsers ignore cookies with domain settings. - For this setting to work properly all nodes in the cluster must be configured - to generate URLs with hostname.domain names on it. - - - - - hadoop.http.authentication.simple.anonymous.allowed - true - - Indicates if anonymous requests are allowed when using 'simple' authentication. - - - - - hadoop.http.authentication.kerberos.principal - HTTP/localhost@LOCALHOST - - Indicates the Kerberos principal to be used for HTTP endpoint. - The principal MUST start with 'HTTP/' as per Kerberos HTTP SPNEGO specification. - - - - - hadoop.http.authentication.kerberos.keytab - ${user.home}/hadoop.keytab - - Location of the keytab file with the credentials for the principal. - Referring to the same keytab file Oozie uses for its Kerberos credentials for Hadoop. - - - - - hadoop.relaxed.worker.version.check - false - - By default datanodes refuse to connect to namenodes if their build - revision (svn revision) do not match, and tasktrackers refuse to - connect to jobtrackers if their build version (version, revision, - user, and source checksum) do not match. This option changes the - behavior of hadoop workers to only check for a version match (eg - "1.0.2") but ignore the other build fields (revision, user, and - source checksum). - - - - - hadoop.skip.worker.version.check - false - - By default datanodes refuse to connect to namenodes if their build - revision (svn revision) do not match, and tasktrackers refuse to - connect to jobtrackers if their build version (version, revision, - user, and source checksum) do not match. This option changes the - behavior of hadoop workers to skip doing a version check at all. - This option supersedes the 'hadoop.relaxed.worker.version.check' - option. - - - - - hadoop.jetty.logs.serve.aliases - true - - Enable/Disable aliases serving from jetty - - - - - ipc.client.fallback-to-simple-auth-allowed - false - - When a client is configured to attempt a secure connection, but attempts to - connect to an insecure server, that server may instruct the client to - switch to SASL SIMPLE (unsecure) authentication. This setting controls - whether or not the client will accept this instruction from the server. - When false (the default), the client will not allow the fallback to SIMPLE - authentication, and will abort the connection. 
- - - - diff --git a/sahara/plugins/vanilla/v1_2_1/resources/create_hive_db.sql b/sahara/plugins/vanilla/v1_2_1/resources/create_hive_db.sql deleted file mode 100644 index 741800dc95..0000000000 --- a/sahara/plugins/vanilla/v1_2_1/resources/create_hive_db.sql +++ /dev/null @@ -1,9 +0,0 @@ -CREATE DATABASE metastore; -USE metastore; -SOURCE /opt/hive/scripts/metastore/upgrade/mysql/hive-schema-0.10.0.mysql.sql; -CREATE USER 'hive'@'localhost' IDENTIFIED BY 'pass'; -REVOKE ALL PRIVILEGES, GRANT OPTION FROM 'hive'@'localhost'; -GRANT ALL PRIVILEGES ON metastore.* TO 'hive'@'localhost' IDENTIFIED BY 'pass'; -GRANT ALL PRIVILEGES ON metastore.* TO 'hive'@'%' IDENTIFIED BY 'pass'; -FLUSH PRIVILEGES; -exit \ No newline at end of file diff --git a/sahara/plugins/vanilla/v1_2_1/resources/create_oozie_db.sql b/sahara/plugins/vanilla/v1_2_1/resources/create_oozie_db.sql deleted file mode 100644 index ad45051829..0000000000 --- a/sahara/plugins/vanilla/v1_2_1/resources/create_oozie_db.sql +++ /dev/null @@ -1,4 +0,0 @@ -create database oozie; -grant all privileges on oozie.* to 'oozie'@'localhost' identified by 'oozie'; -grant all privileges on oozie.* to 'oozie'@'%' identified by 'oozie'; -exit \ No newline at end of file diff --git a/sahara/plugins/vanilla/v1_2_1/resources/hdfs-default.xml b/sahara/plugins/vanilla/v1_2_1/resources/hdfs-default.xml deleted file mode 100644 index a4a7783fee..0000000000 --- a/sahara/plugins/vanilla/v1_2_1/resources/hdfs-default.xml +++ /dev/null @@ -1,709 +0,0 @@ - - - - - - - - - - - dfs.namenode.logging.level - info - The logging level for dfs namenode. Other values are "dir"(trac -e namespace mutations), "block"(trace block under/over replications and block -creations/deletions), or "all". - - - - dfs.namenode.rpc-address - - - RPC address that handles all clients requests. If empty then we'll get the - value from fs.default.name. - The value of this property will take the form of hdfs://nn-host1:rpc-port. - - - - - dfs.secondary.http.address - 0.0.0.0:50090 - - The secondary namenode http server address and port. - If the port is 0 then the server will start on a free port. - - - - - dfs.datanode.address - 0.0.0.0:50010 - - The datanode server address and port for data transfer. - If the port is 0 then the server will start on a free port. - - - - - dfs.datanode.http.address - 0.0.0.0:50075 - - The datanode http server address and port. - If the port is 0 then the server will start on a free port. - - - - - dfs.datanode.ipc.address - 0.0.0.0:50020 - - The datanode ipc server address and port. - If the port is 0 then the server will start on a free port. - - - - - dfs.datanode.handler.count - 3 - The number of server threads for the datanode. - - - - dfs.http.address - 0.0.0.0:50070 - - The address and the base port where the dfs namenode web ui will listen on. - If the port is 0 then the server will start on a free port. 
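The create_hive_db.sql and create_oozie_db.sql scripts deleted above
were applied by the (also deleted) run_scripts.py by copying them to the
node and piping them into the mysql client. A sketch of that pattern;
the remote method names follow Sahara's remote API, but treat the exact
calls as assumptions::

    def create_oozie_db(remote, script_text):
        # Push the bootstrap SQL to the instance, then feed it to
        # mysql as root; the script creates the database and grants.
        remote.write_file_to('/tmp/create_oozie_db.sql', script_text)
        remote.execute_command('mysql -u root < /tmp/create_oozie_db.sql')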
- - - - - dfs.https.enable - false - Decide if HTTPS(SSL) is supported on HDFS - - - - - dfs.https.need.client.auth - false - Whether SSL client certificate authentication is required - - - - - dfs.https.server.keystore.resource - ssl-server.xml - Resource file from which ssl server keystore - information will be extracted - - - - - dfs.https.client.keystore.resource - ssl-client.xml - Resource file from which ssl client keystore - information will be extracted - - - - - dfs.datanode.https.address - 0.0.0.0:50475 - - - - dfs.https.address - 0.0.0.0:50470 - - - - dfs.datanode.dns.interface - default - The name of the Network Interface from which a data node should - report its IP address. - - - - - dfs.datanode.dns.nameserver - default - The host name or IP address of the name server (DNS) - which a DataNode should use to determine the host name used by the - NameNode for communication and display purposes. - - - - - - - dfs.replication.considerLoad - true - Decide if chooseTarget considers the target's load or not - - - - dfs.default.chunk.view.size - 32768 - The number of bytes to view for a file on the browser. - - - - - dfs.datanode.du.reserved - 0 - Reserved space in bytes per volume. Always leave this much space free for non dfs use. - - - - - dfs.name.dir - ${hadoop.tmp.dir}/dfs/name - Determines where on the local filesystem the DFS name node - should store the name table(fsimage). If this is a comma-delimited list - of directories then the name table is replicated in all of the - directories, for redundancy. - - - - dfs.name.edits.dir - ${dfs.name.dir} - Determines where on the local filesystem the DFS name node - should store the transaction (edits) file. If this is a comma-delimited list - of directories then the transaction file is replicated in all of the - directories, for redundancy. Default value is same as dfs.name.dir - - - - - dfs.namenode.edits.toleration.length - 0 - - The length in bytes that namenode is willing to tolerate when the edit log - is corrupted. The edit log toleration feature checks the entire edit log. - It computes read length (the length of valid data), corruption length and - padding length. In case that corruption length is non-zero, the corruption - will be tolerated only if the corruption length is less than or equal to - the toleration length. - - For disabling edit log toleration feature, set this property to -1. When - the feature is disabled, the end of edit log will not be checked. In this - case, namenode will startup normally even if the end of edit log is - corrupted. - - - - - dfs.web.ugi - webuser,webgroup - The user account used by the web interface. - Syntax: USERNAME,GROUP1,GROUP2, ... - - - - - dfs.permissions - true - - If "true", enable permission checking in HDFS. - If "false", permission checking is turned off, - but all other behavior is unchanged. - Switching from one parameter value to the other does not change the mode, - owner or group of files or directories. - - - - - dfs.permissions.supergroup - supergroup - The name of the group of super-users. - - - - dfs.block.access.token.enable - false - - If "true", access tokens are used as capabilities for accessing datanodes. - If "false", no access tokens are checked on accessing datanodes. - - - - - dfs.block.access.key.update.interval - 600 - - Interval in minutes at which namenode updates its access keys. - - - - - dfs.block.access.token.lifetime - 600 - The lifetime of access tokens in minutes. 
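dfs.name.dir above (and dfs.data.dir below) accept comma-delimited
lists, which is exactly what the deleted extract_hadoop_path helper
produced from a node group's storage mount points (paths below are
illustrative)::

    def extract_hadoop_path(lst, hadoop_dir):
        # Join every storage mount point with the HDFS-specific
        # suffix, as in the deleted config_helper.
        if lst:
            return ",".join([p + hadoop_dir for p in lst])

    print(extract_hadoop_path(['/volumes/disk1', '/volumes/disk2'],
                              '/lib/hadoop/hdfs/datanode'))
    # -> /volumes/disk1/lib/hadoop/hdfs/datanode,/volumes/disk2/lib/hadoop/hdfs/datanode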
- - - - - dfs.data.dir - ${hadoop.tmp.dir}/dfs/data - Determines where on the local filesystem an DFS data node - should store its blocks. If this is a comma-delimited - list of directories, then data will be stored in all named - directories, typically on different devices. - Directories that do not exist are ignored. - - - - - dfs.datanode.data.dir.perm - 755 - Permissions for the directories on on the local filesystem where - the DFS data node store its blocks. The permissions can either be octal or - symbolic. - - - - dfs.replication - 3 - Default block replication. - The actual number of replications can be specified when the file is created. - The default is used if replication is not specified in create time. - - - - - dfs.replication.max - 512 - Maximal block replication. - - - - - dfs.replication.min - 1 - Minimal block replication. - - - - - dfs.block.size - 67108864 - The default block size for new files. - - - - dfs.df.interval - 60000 - Disk usage statistics refresh interval in msec. - - - - dfs.client.block.write.retries - 3 - The number of retries for writing blocks to the data nodes, - before we signal failure to the application. - - - - - dfs.blockreport.intervalMsec - 3600000 - Determines block reporting interval in milliseconds. - - - - dfs.blockreport.initialDelay 0 - Delay for first block report in seconds. - - - - dfs.heartbeat.interval - 3 - Determines datanode heartbeat interval in seconds. - - - - dfs.namenode.handler.count - 10 - The number of server threads for the namenode. - - - - dfs.safemode.threshold.pct - 0.999f - - Specifies the percentage of blocks that should satisfy - the minimal replication requirement defined by dfs.replication.min. - Values less than or equal to 0 mean not to wait for any particular - percentage of blocks before exiting safemode. - Values greater than 1 will make safe mode permanent. - - - - - dfs.namenode.safemode.min.datanodes - 0 - - Specifies the number of datanodes that must be considered alive - before the name node exits safemode. - Values less than or equal to 0 mean not to take the number of live - datanodes into account when deciding whether to remain in safe mode - during startup. - Values greater than the number of datanodes in the cluster - will make safe mode permanent. - - - - - dfs.safemode.extension - 30000 - - Determines extension of safe mode in milliseconds - after the threshold level is reached. - - - - - dfs.balance.bandwidthPerSec - 1048576 - - Specifies the maximum amount of bandwidth that each datanode - can utilize for the balancing purpose in term of - the number of bytes per second. - - - - - dfs.hosts - - Names a file that contains a list of hosts that are - permitted to connect to the namenode. The full pathname of the file - must be specified. If the value is empty, all hosts are - permitted. - - - - dfs.hosts.exclude - - Names a file that contains a list of hosts that are - not permitted to connect to the namenode. The full pathname of the - file must be specified. If the value is empty, no hosts are - excluded. - - - - dfs.max.objects - 0 - The maximum number of files, directories and blocks - dfs supports. A value of zero indicates no limit to the number - of objects that dfs supports. - - - - - dfs.namenode.decommission.interval - 30 - Namenode periodicity in seconds to check if decommission is - complete. - - - - dfs.namenode.decommission.nodes.per.interval - 5 - The number of nodes namenode checks if decommission is complete - in each dfs.namenode.decommission.interval. 
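The dfs.hosts / dfs.hosts.exclude files above are the hook the deleted
scaling.py used when removing datanodes: excluded nodes are polled until
the namenode reports them decommissioned, bounded by the plugin's
Decommissioning Timeout (86400 seconds by default, per config_helper
above). A simplified sketch of such a wait loop, not the deleted
implementation itself::

    import time

    def await_decommission(is_decommissioned, instances, timeout=86400):
        # Poll until every excluded datanode reports Decommissioned;
        # give up once the configured timeout expires.
        deadline = time.time() + timeout
        while time.time() < deadline:
            if all(is_decommissioned(i) for i in instances):
                return
            time.sleep(30)
        raise RuntimeError('Datanodes failed to decommission in time')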
- - - - dfs.replication.interval - 3 - The periodicity in seconds with which the namenode computes - repliaction work for datanodes. - - - - dfs.access.time.precision - 3600000 - The access time for HDFS file is precise upto this value. - The default value is 1 hour. Setting a value of 0 disables - access times for HDFS. - - - - - dfs.support.append - - This option is no longer supported. HBase no longer requires that - this option be enabled as sync is now enabled by default. See - HADOOP-8230 for additional information. - - - - - dfs.namenode.delegation.key.update-interval - 86400000 - The update interval for master key for delegation tokens - in the namenode in milliseconds. - - - - - dfs.namenode.delegation.token.max-lifetime - 604800000 - The maximum lifetime in milliseconds for which a delegation - token is valid. - - - - - dfs.namenode.delegation.token.renew-interval - 86400000 - The renewal interval for delegation token in milliseconds. - - - - - dfs.datanode.failed.volumes.tolerated - 0 - The number of volumes that are allowed to - fail before a datanode stops offering service. By default - any volume failure will cause a datanode to shutdown. - - - - - dfs.datanode.max.xcievers - 4096 - Specifies the maximum number of threads to use for transferring data - in and out of the DN. - - - - - dfs.datanode.readahead.bytes - 4193404 - - While reading block files, if the Hadoop native libraries are available, - the datanode can use the posix_fadvise system call to explicitly - page data into the operating system buffer cache ahead of the current - reader's position. This can improve performance especially when - disks are highly contended. - - This configuration specifies the number of bytes ahead of the current - read position which the datanode will attempt to read ahead. This - feature may be disabled by configuring this property to 0. - - If the native libraries are not available, this configuration has no - effect. - - - - - dfs.datanode.drop.cache.behind.reads - false - - In some workloads, the data read from HDFS is known to be significantly - large enough that it is unlikely to be useful to cache it in the - operating system buffer cache. In this case, the DataNode may be - configured to automatically purge all data from the buffer cache - after it is delivered to the client. This behavior is automatically - disabled for workloads which read only short sections of a block - (e.g HBase random-IO workloads). - - This may improve performance for some workloads by freeing buffer - cache spage usage for more cacheable data. - - If the Hadoop native libraries are not available, this configuration - has no effect. - - - - - dfs.datanode.drop.cache.behind.writes - false - - In some workloads, the data written to HDFS is known to be significantly - large enough that it is unlikely to be useful to cache it in the - operating system buffer cache. In this case, the DataNode may be - configured to automatically purge all data from the buffer cache - after it is written to disk. - - This may improve performance for some workloads by freeing buffer - cache spage usage for more cacheable data. - - If the Hadoop native libraries are not available, this configuration - has no effect. - - - - - dfs.datanode.sync.behind.writes - false - - If this configuration is enabled, the datanode will instruct the - operating system to enqueue all written data to the disk immediately - after it is written. This differs from the usual OS policy which - may wait for up to 30 seconds before triggering writeback. 
- - This may improve performance for some workloads by smoothing the - IO profile for data written to disk. - - If the Hadoop native libraries are not available, this configuration - has no effect. - - - - - dfs.client.use.datanode.hostname - false - Whether clients should use datanode hostnames when - connecting to datanodes. - - - - - dfs.datanode.use.datanode.hostname - false - Whether datanodes should use datanode hostnames when - connecting to other datanodes for data transfer. - - - - - dfs.client.local.interfaces - - A comma separated list of network interface names to use - for data transfer between the client and datanodes. When creating - a connection to read from or write to a datanode, the client - chooses one of the specified interfaces at random and binds its - socket to the IP of that interface. Individual names may be - specified as either an interface name (eg "eth0"), a subinterface - name (eg "eth0:0"), or an IP address (which may be specified using - CIDR notation to match a range of IPs). - - - - - dfs.image.transfer.bandwidthPerSec - 0 - - Specifies the maximum amount of bandwidth that can be utilized - for image transfer in term of the number of bytes per second. - A default value of 0 indicates that throttling is disabled. - - - - - dfs.webhdfs.enabled - false - - Enable WebHDFS (REST API) in Namenodes and Datanodes. - - - - - dfs.namenode.kerberos.internal.spnego.principal - ${dfs.web.authentication.kerberos.principal} - - - - dfs.secondary.namenode.kerberos.internal.spnego.principal - ${dfs.web.authentication.kerberos.principal} - - - - dfs.namenode.invalidate.work.pct.per.iteration - 0.32f - - *Note*: Advanced property. Change with caution. - This determines the percentage amount of block - invalidations (deletes) to do over a single DN heartbeat - deletion command. The final deletion count is determined by applying this - percentage to the number of live nodes in the system. - The resultant number is the number of blocks from the deletion list - chosen for proper invalidation over a single heartbeat of a single DN. - Value should be a positive, non-zero percentage in float notation (X.Yf), - with 1.0f meaning 100%. - - - - - dfs.namenode.replication.work.multiplier.per.iteration - 2 - - *Note*: Advanced property. Change with caution. - This determines the total amount of block transfers to begin in - parallel at a DN, for replication, when such a command list is being - sent over a DN heartbeat by the NN. The actual number is obtained by - multiplying this multiplier with the total number of live nodes in the - cluster. The result number is the number of blocks to begin transfers - immediately for, per DN heartbeat. This number can be any positive, - non-zero integer. - - - - - dfs.namenode.avoid.read.stale.datanode - false - - Indicate whether or not to avoid reading from "stale" datanodes whose - heartbeat messages have not been received by the namenode - for more than a specified time interval. Stale datanodes will be - moved to the end of the node list returned for reading. See - dfs.namenode.avoid.write.stale.datanode for a similar setting for writes. - - - - - dfs.namenode.avoid.write.stale.datanode - false - - Indicate whether or not to avoid writing to "stale" datanodes whose - heartbeat messages have not been received by the namenode - for more than a specified time interval. Writes will avoid using - stale datanodes unless more than a configured ratio - (dfs.namenode.write.stale.datanode.ratio) of datanodes are marked as - stale. 
-    See dfs.namenode.avoid.read.stale.datanode for a similar setting
-    for reads.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.stale.datanode.interval</name>
-  <value>30000</value>
-  <description>
-    Default time interval for marking a datanode as "stale", i.e., if
-    the namenode has not received a heartbeat message from a datanode for
-    more than this time interval, the datanode will be marked and treated
-    as "stale" by default. The stale interval cannot be too small since
-    otherwise this may cause too frequent changes of stale state. We thus
-    set a minimum stale interval value (the default value is 3 times the
-    heartbeat interval) and guarantee that the stale interval cannot be
-    less than the minimum value.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.write.stale.datanode.ratio</name>
-  <value>0.5f</value>
-  <description>
-    When the ratio of stale datanodes to total datanodes marked is
-    greater than this ratio, stop avoiding writing to stale nodes so
-    as to prevent causing hotspots.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.plugins</name>
-  <value></value>
-  <description>Comma-separated list of datanode plug-ins to be activated.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.plugins</name>
-  <value></value>
-  <description>Comma-separated list of namenode plug-ins to be activated.
-  </description>
-</property>
-
-</configuration>
diff --git a/sahara/plugins/vanilla/v1_2_1/resources/hive-default.xml b/sahara/plugins/vanilla/v1_2_1/resources/hive-default.xml
deleted file mode 100644
index 6ffe7c990a..0000000000
--- a/sahara/plugins/vanilla/v1_2_1/resources/hive-default.xml
+++ /dev/null
@@ -1,1873 +0,0 @@
-<?xml version="1.0"?>
-<configuration>
-
-<property>
-  <name>mapred.reduce.tasks</name>
-  <value>-1</value>
-  <description>The default number of reduce tasks per job. Typically set
-    to a prime close to the number of available hosts. Ignored when
-    mapred.job.tracker is "local". Hadoop sets this to 1 by default,
-    whereas Hive uses -1 as its default value. By setting this property
-    to -1, Hive will automatically figure out what should be the number
-    of reducers.
-  </description>
-</property>
-
-<property>
-  <name>hive.exec.reducers.bytes.per.reducer</name>
-  <value>1000000000</value>
-  <description>Size per reducer. The default is 1G, i.e. if the input
-    size is 10G, it will use 10 reducers.
-  </description>
-</property>
-
-<property>
-  <name>hive.exec.reducers.max</name>
-  <value>999</value>
-  <description>Max number of reducers that will be used. If the one
-    specified in the configuration parameter mapred.reduce.tasks is
-    negative, Hive will use this as the max number of reducers when
-    automatically determining the number of reducers.
-  </description>
-</property>
-
-<property>
-  <name>hive.cli.print.header</name>
-  <value>false</value>
-  <description>Whether to print the names of the columns in query output.
-  </description>
-</property>
-
-<property>
-  <name>hive.cli.print.current.db</name>
-  <value>false</value>
-  <description>Whether to include the current database in the hive prompt.
-  </description>
-</property>
-
-<property>
-  <name>hive.cli.prompt</name>
-  <value>hive</value>
-  <description>Command line prompt configuration value. Other hiveconf
-    variables can be used in this configuration value. Variable
-    substitution will only be invoked at the hive cli startup.
-  </description>
-</property>
-
-<property>
-  <name>hive.cli.pretty.output.num.cols</name>
-  <value>-1</value>
-  <description>The number of columns to use when formatting output
-    generated by the DESCRIBE PRETTY table_name command. If the value of
-    this property is -1, then Hive will use the auto-detected terminal
-    width.
-  </description>
-</property>
-
-<property>
-  <name>hive.exec.scratchdir</name>
-  <value>/tmp/hive-${user.name}</value>
-  <description>Scratch space for Hive jobs</description>
-</property>
-
-<property>
-  <name>hive.exec.local.scratchdir</name>
-  <value>/tmp/${user.name}</value>
-  <description>Local scratch space for Hive jobs</description>
-</property>
-
-<property>
-  <name>hive.test.mode</name>
-  <value>false</value>
-  <description>Whether Hive is running in test mode.
If yes, it turns on sampling and prefixes the output tablename - - - - hive.test.mode.prefix - test_ - if hive is running in test mode, prefixes the output table by this string - - - - - - - - - - - hive.test.mode.samplefreq - 32 - if hive is running in test mode and table is not bucketed, sampling frequency - - - - hive.test.mode.nosamplelist - - if hive is running in test mode, dont sample the above comma seperated list of tables - - - - hive.metastore.uris - - Thrift uri for the remote metastore. Used by metastore client to connect to remote metastore. - - - - javax.jdo.option.ConnectionURL - jdbc:derby:;databaseName=metastore_db;create=true - JDBC connect string for a JDBC metastore - - - - javax.jdo.option.ConnectionDriverName - org.apache.derby.jdbc.EmbeddedDriver - Driver class name for a JDBC metastore - - - - javax.jdo.PersistenceManagerFactoryClass - org.datanucleus.jdo.JDOPersistenceManagerFactory - class implementing the jdo persistence - - - - javax.jdo.option.DetachAllOnCommit - true - detaches all objects from session so that they can be used after transaction is committed - - - - javax.jdo.option.NonTransactionalRead - true - reads outside of transactions - - - - javax.jdo.option.ConnectionUserName - APP - username to use against metastore database - - - - javax.jdo.option.ConnectionPassword - mine - password to use against metastore database - - - - javax.jdo.option.Multithreaded - true - Set this to true if multiple threads access metastore through JDO concurrently. - - - - datanucleus.connectionPoolingType - DBCP - Uses a DBCP connection pool for JDBC metastore - - - - datanucleus.validateTables - false - validates existing schema against code. turn this on if you want to verify existing schema - - - - datanucleus.validateColumns - false - validates existing schema against code. turn this on if you want to verify existing schema - - - - datanucleus.validateConstraints - false - validates existing schema against code. turn this on if you want to verify existing schema - - - - datanucleus.storeManagerType - rdbms - metadata store type - - - - datanucleus.autoCreateSchema - true - creates necessary schema on a startup if one doesn't exist. set this to false, after creating it once - - - - datanucleus.autoStartMechanismMode - checked - throw exception if metadata tables are incorrect - - - - datanucleus.transactionIsolation - read-committed - Default transaction isolation level for identity generation. - - - - datanucleus.cache.level2 - false - Use a level 2 cache. Turn this off if metadata is changed independently of hive metastore server - - - - datanucleus.cache.level2.type - SOFT - SOFT=soft reference based cache, WEAK=weak reference based cache. - - - - datanucleus.identifierFactory - datanucleus - Name of the identifier factory to use when generating table/column names etc. 'datanucleus' is used for backward compatibility - - - - datanucleus.plugin.pluginRegistryBundleCheck - LOG - Defines what happens when plugin bundles are found and are duplicated [EXCEPTION|LOG|NONE] - - - - hive.metastore.warehouse.dir - /user/hive/warehouse - location of default database for the warehouse - - - - hive.metastore.execute.setugi - false - In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that its best effort. If client sets its to true and server sets it to false, client setting will be ignored. 
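The javax.jdo.* defaults above use an embedded Derby database, which supports only a single metastore session. A common override, sketched here with a placeholder host and placeholder credentials, points the metastore at MySQL instead (the MySQL driver jar must be on the Hive classpath):

<configuration>
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://db.example.com:3306/metastore?createDatabaseIfNotExist=true</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>hive</value>
  </property>
  <property>
    <!-- placeholder only; real deployments should not template credentials in plain text -->
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>hive-password</value>
  </property>
</configuration>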
- - - - hive.metastore.event.listeners - - list of comma seperated listeners for metastore events. - - - - hive.metastore.partition.inherit.table.properties - - list of comma seperated keys occurring in table properties which will get inherited to newly created partitions. * implies all the keys will get inherited. - - - - hive.metadata.export.location - - When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, it is the location to which the metadata will be exported. The default is an empty string, which results in the metadata being exported to the current user's home directory on HDFS. - - - - hive.metadata.move.exported.metadata.to.trash - - When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, this setting determines if the metadata that is exported will subsequently be moved to the user's trash directory alongside the dropped table data. This ensures that the metadata will be cleaned up along with the dropped table data. - - - - hive.metastore.partition.name.whitelist.pattern - - Partition names will be checked against this regex pattern and rejected if not matched. - - - - hive.metastore.end.function.listeners - - list of comma separated listeners for the end of metastore functions. - - - - hive.metastore.event.expiry.duration - 0 - Duration after which events expire from events table (in seconds) - - - - hive.metastore.event.clean.freq - 0 - Frequency at which timer task runs to purge expired events in metastore(in seconds). - - - - hive.metastore.connect.retries - 5 - Number of retries while opening a connection to metastore - - - - hive.metastore.failure.retries - 3 - Number of retries upon failure of Thrift metastore calls - - - - hive.metastore.client.connect.retry.delay - 1 - Number of seconds for the client to wait between consecutive connection attempts - - - - hive.metastore.client.socket.timeout - 20 - MetaStore Client socket timeout in seconds - - - - hive.metastore.rawstore.impl - org.apache.hadoop.hive.metastore.ObjectStore - Name of the class that implements org.apache.hadoop.hive.metastore.rawstore interface. This class is used to store and retrieval of raw metadata objects such as table, database - - - - hive.metastore.batch.retrieve.max - 300 - Maximum number of objects (tables/partitions) can be retrieved from metastore in one batch. The higher the number, the less the number of round trips is needed to the Hive metastore server, but it may also cause higher memory requirement at the client side. - - - - hive.metastore.batch.retrieve.table.partition.max - 1000 - Maximum number of table partitions that metastore internally retrieves in one batch. - - - - hive.default.fileformat - TextFile - Default file format for CREATE TABLE statement. Options are TextFile and SequenceFile. Users can explicitly say CREATE TABLE ... STORED AS <TEXTFILE|SEQUENCEFILE> to override - - - - hive.fileformat.check - true - Whether to check file format or not when loading data files - - - - hive.map.aggr - true - Whether to use map-side aggregation in Hive Group By queries - - - - hive.groupby.skewindata - false - Whether there is skew in data to optimize group by queries - - - - hive.optimize.multigroupby.common.distincts - true - Whether to optimize a multi-groupby query with the same distinct. 
- Consider a query like: - - from src - insert overwrite table dest1 select col1, count(distinct colx) group by col1 - insert overwrite table dest2 select col2, count(distinct colx) group by col2; - - With this parameter set to true, first we spray by the distinct value (colx), and then - perform the 2 groups bys. This makes sense if map-side aggregation is turned off. However, - with maps-side aggregation, it might be useful in some cases to treat the 2 inserts independently, - thereby performing the query above in 2MR jobs instead of 3 (due to spraying by distinct key first). - If this parameter is turned off, we dont consider the fact that the distinct key is the same across - different MR jobs. - - - - - hive.groupby.mapaggr.checkinterval - 100000 - Number of rows after which size of the grouping keys/aggregation classes is performed - - - - hive.mapred.local.mem - 0 - For local mode, memory of the mappers/reducers - - - - hive.mapjoin.followby.map.aggr.hash.percentmemory - 0.3 - Portion of total memory to be used by map-side grup aggregation hash table, when this group by is followed by map join - - - - hive.map.aggr.hash.force.flush.memory.threshold - 0.9 - The max memory to be used by map-side grup aggregation hash table, if the memory usage is higher than this number, force to flush data - - - - hive.map.aggr.hash.percentmemory - 0.5 - Portion of total memory to be used by map-side grup aggregation hash table - - - - hive.map.aggr.hash.min.reduction - 0.5 - Hash aggregation will be turned off if the ratio between hash - table size and input rows is bigger than this number. Set to 1 to make sure - hash aggregation is never turned off. - - - - hive.optimize.cp - true - Whether to enable column pruner - - - - hive.optimize.index.filter - false - Whether to enable automatic use of indexes - - - - hive.optimize.index.groupby - false - Whether to enable optimization of group-by queries using Aggregate indexes. - - - - hive.optimize.ppd - true - Whether to enable predicate pushdown - - - - hive.optimize.ppd.storage - true - Whether to push predicates down into storage handlers. Ignored when hive.optimize.ppd is false. - - - - hive.ppd.recognizetransivity - true - Whether to transitively replicate predicate filters over equijoin conditions. - - - - hive.optimize.groupby - true - Whether to enable the bucketed group by from bucketed partitions/tables. - - - - hive.optimize.skewjoin.compiletime - false - Whether to create a separate plan for skewed keys for the tables in the join. - This is based on the skewed keys stored in the metadata. At compile time, the plan is broken - into different joins: one for the skewed keys, and the other for the remaining keys. And then, - a union is performed for the 2 joins generated above. So unless the same skewed key is present - in both the joined tables, the join for the skewed key will be performed as a map-side join. - - The main difference between this paramater and hive.optimize.skewjoin is that this parameter - uses the skew information stored in the metastore to optimize the plan at compile time itself. - If there is no skew information in the metadata, this parameter will not have any affect. - Both hive.optimize.skewjoin.compiletime and hive.optimize.skewjoin should be set to true. - Ideally, hive.optimize.skewjoin should be renamed as hive.optimize.skewjoin.runtime, but not doing - so for backward compatibility. 
- - If the skew information is correctly stored in the metadata, hive.optimize.skewjoin.compiletime - would change the query plan to take care of it, and hive.optimize.skewjoin will be a no-op. - - - - - hive.optimize.union.remove - false - - Whether to remove the union and push the operators between union and the filesink above - union. This avoids an extra scan of the output by union. This is independently useful for union - queries, and specially useful when hive.optimize.skewjoin.compiletime is set to true, since an - extra union is inserted. - - The merge is triggered if either of hive.merge.mapfiles or hive.merge.mapredfiles is set to true. - If the user has set hive.merge.mapfiles to true and hive.merge.mapredfiles to false, the idea was the - number of reducers are few, so the number of files anyway are small. However, with this optimization, - we are increasing the number of files possibly by a big margin. So, we merge aggresively. - - - - hive.mapred.supports.subdirectories - false - Whether the version of hadoop which is running supports sub-directories for tables/partitions. - Many hive optimizations can be applied if the hadoop version supports sub-directories for - tables/partitions. It was added by MAPREDUCE-1501 - - - - hive.multigroupby.singlemr - false - Whether to optimize multi group by query to generate single M/R - job plan. If the multi group by query has common group by keys, it will be - optimized to generate single M/R job. - - - - hive.map.groupby.sorted - false - If the bucketing/sorting properties of the table exactly match the grouping key, whether to - perform the group by in the mapper by using BucketizedHiveInputFormat. The only downside to this - is that it limits the number of mappers to the number of files. - - - - - hive.map.groupby.sorted.testmode - false - If the bucketing/sorting properties of the table exactly match the grouping key, whether to - perform the group by in the mapper by using BucketizedHiveInputFormat. If the test mode is set, the plan - is not converted, but a query property is set to denote the same. - - - - - hive.new.job.grouping.set.cardinality - 30 - - Whether a new map-reduce job should be launched for grouping sets/rollups/cubes. - For a query like: select a, b, c, count(1) from T group by a, b, c with rollup; - 4 rows are created per row: (a, b, c), (a, b, null), (a, null, null), (null, null, null). - This can lead to explosion across map-reduce boundary if the cardinality of T is very high, - and map-side aggregation does not do a very good job. - - This parameter decides if hive should add an additional map-reduce job. If the grouping set - cardinality (4 in the example above), is more than this value, a new MR job is added under the - assumption that the orginal group by will reduce the data size. - - - - - hive.join.emit.interval - 1000 - How many rows in the right-most join operand Hive should buffer before emitting the join result. - - - - hive.join.cache.size - 25000 - How many rows in the joining tables (except the streaming table) should be cached in memory. - - - - hive.mapjoin.bucket.cache.size - 100 - How many values in each keys in the map-joined table should be cached in memory. - - - - hive.mapjoin.cache.numrows - 25000 - How many rows should be cached by jdbm for map join. - - - - hive.optimize.skewjoin - false - Whether to enable skew join optimization. - The algorithm is as follows: At runtime, detect the keys with a large skew. Instead of - processing those keys, store them temporarily in a hdfs directory. 
In a follow-up map-reduce - job, process those skewed keys. The same key need not be skewed for all the tables, and so, - the follow-up map-reduce job (for the skewed keys) would be much faster, since it would be a - map-join. - - - - - hive.skewjoin.key - 100000 - Determine if we get a skew key in join. If we see more - than the specified number of rows with the same key in join operator, - we think the key as a skew join key. - - - - hive.skewjoin.mapjoin.map.tasks - 10000 - Determine the number of map task used in the follow up map join job - for a skew join. It should be used together with hive.skewjoin.mapjoin.min.split - to perform a fine grained control. - - - - hive.skewjoin.mapjoin.min.split - 33554432 - Determine the number of map task at most used in the follow up map join job - for a skew join by specifying the minimum split size. It should be used together with - hive.skewjoin.mapjoin.map.tasks to perform a fine grained control. - - - - hive.mapred.mode - nonstrict - The mode in which the hive operations are being performed. - In strict mode, some risky queries are not allowed to run. They include: - Cartesian Product. - No partition being picked up for a query. - Comparing bigints and strings. - Comparing bigints and doubles. - Orderby without limit. - - - - - hive.enforce.bucketmapjoin - false - If the user asked for bucketed map-side join, and it cannot be performed, - should the query fail or not ? For eg, if the buckets in the tables being joined are - not a multiple of each other, bucketed map-side join cannot be performed, and the - query will fail if hive.enforce.bucketmapjoin is set to true. - - - - - hive.exec.script.maxerrsize - 100000 - Maximum number of bytes a script is allowed to emit to standard error (per map-reduce task). This prevents runaway scripts from filling logs partitions to capacity - - - - hive.exec.script.allow.partial.consumption - false - When enabled, this option allows a user script to exit successfully without consuming all the data from the standard input. - - - - - hive.script.operator.id.env.var - HIVE_SCRIPT_OPERATOR_ID - Name of the environment variable that holds the unique script operator ID in the user's transform function (the custom mapper/reducer that the user has specified in the query) - - - - - hive.script.operator.truncate.env - false - Truncate each environment variable for external script in scripts operator to 20KB (to fit system limits) - - - - hive.exec.compress.output - false - This controls whether the final outputs of a query (to a local/hdfs file or a hive table) is compressed. The compression codec and other options are determined from hadoop config variables mapred.output.compress* - - - - hive.exec.compress.intermediate - false - This controls whether intermediate files produced by hive between multiple map-reduce jobs are compressed. The compression codec and other options are determined from hadoop config variables mapred.output.compress* - - - - hive.exec.parallel - false - Whether to execute jobs in parallel - - - - hive.exec.parallel.thread.number - 8 - How many jobs at most can be executed in parallel - - - - hive.exec.rowoffset - false - Whether to provide the row offset virtual column - - - - hive.task.progress - false - Whether Hive should periodically update task progress counters during execution. Enabling this allows task progress to be monitored more closely in the job tracker, but may impose a performance penalty. 
This flag is automatically set to true for jobs with hive.exec.dynamic.partition set to true. - - - - hive.hwi.war.file - lib/hive-hwi-0.11.0.war - This sets the path to the HWI war file, relative to ${HIVE_HOME}. - - - - hive.hwi.listen.host - 0.0.0.0 - This is the host address the Hive Web Interface will listen on - - - - hive.hwi.listen.port - 9999 - This is the port the Hive Web Interface will listen on - - - - hive.exec.pre.hooks - - Comma-separated list of pre-execution hooks to be invoked for each statement. A pre-execution hook is specified as the name of a Java class which implements the org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface. - - - - hive.exec.post.hooks - - Comma-separated list of post-execution hooks to be invoked for each statement. A post-execution hook is specified as the name of a Java class which implements the org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface. - - - - hive.exec.failure.hooks - - Comma-separated list of on-failure hooks to be invoked for each statement. An on-failure hook is specified as the name of Java class which implements the org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface. - - - - hive.metastore.init.hooks - - A comma separated list of hooks to be invoked at the beginning of HMSHandler initialization. Aninit hook is specified as the name of Java class which extends org.apache.hadoop.hive.metastore.MetaStoreInitListener. - - - - hive.client.stats.publishers - - Comma-separated list of statistics publishers to be invoked on counters on each job. A client stats publisher is specified as the name of a Java class which implements the org.apache.hadoop.hive.ql.stats.ClientStatsPublisher interface. - - - - hive.client.stats.counters - - Subset of counters that should be of interest for hive.client.stats.publishers (when one wants to limit their publishing). Non-display names should be used - - - - hive.merge.mapfiles - true - Merge small files at the end of a map-only job - - - - hive.merge.mapredfiles - false - Merge small files at the end of a map-reduce job - - - - hive.heartbeat.interval - 1000 - Send a heartbeat after this interval - used by mapjoin and filter operators - - - - hive.merge.size.per.task - 256000000 - Size of merged files at the end of the job - - - - hive.merge.smallfiles.avgsize - 16000000 - When the average output file size of a job is less than this number, Hive will start an additional map-reduce job to merge the output files into bigger files. This is only done for map-only jobs if hive.merge.mapfiles is true, and for map-reduce jobs if hive.merge.mapredfiles is true. - - - - hive.mapjoin.smalltable.filesize - 25000000 - The threshold for the input file size of the small tables; if the file size is smaller than this threshold, it will try to convert the common join into map join - - - - hive.ignore.mapjoin.hint - true - Ignore the mapjoin hint - - - - hive.mapjoin.localtask.max.memory.usage - 0.90 - This number means how much memory the local task can take to hold the key/value into in-memory hash table; If the local task's memory usage is more than this number, the local task will be abort by themself. It means the data of small table is too large to be hold in the memory. 
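The hive.merge.* and hive.mapjoin.smalltable.filesize settings above work as a pair: the former compacts small output files after a job, the latter decides when a common join is rewritten as a map join. An illustrative tuning (the thresholds are examples, not recommendations):

<configuration>
  <!-- also merge outputs of full map-reduce jobs, not only map-only jobs -->
  <property>
    <name>hive.merge.mapredfiles</name>
    <value>true</value>
  </property>
  <!-- treat tables under ~50 MB as the small side of a map join -->
  <property>
    <name>hive.mapjoin.smalltable.filesize</name>
    <value>50000000</value>
  </property>
</configuration>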
- - - - hive.mapjoin.followby.gby.localtask.max.memory.usage - 0.55 - This number means how much memory the local task can take to hold the key/value into in-memory hash table when this map join followed by a group by; If the local task's memory usage is more than this number, the local task will be abort by themself. It means the data of small table is too large to be hold in the memory. - - - - hive.mapjoin.check.memory.rows - 100000 - The number means after how many rows processed it needs to check the memory usage - - - - hive.auto.convert.join - false - Whether Hive enable the optimization about converting common join into mapjoin based on the input file size - - - - hive.auto.convert.join.noconditionaltask - true - Whether Hive enable the optimization about converting common join into mapjoin based on the input file - size. If this paramater is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than the - specified size, the join is directly converted to a mapjoin (there is no conditional task). - - - - - hive.auto.convert.join.noconditionaltask.size - 10000000 - If hive.auto.convert.join.noconditionaltask is off, this parameter does not take affect. However, if it - is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than this size, the join is directly - converted to a mapjoin(there is no conditional task). The default is 10MB - - - - - hive.optimize.mapjoin.mapreduce - false - If hive.auto.convert.join is off, this parameter does not take - affect. If it is on, and if there are map-join jobs followed by a map-reduce - job (for e.g a group by), each map-only job is merged with the following - map-reduce job. - - - - - hive.script.auto.progress - false - Whether Hive Tranform/Map/Reduce Clause should automatically send progress information to TaskTracker to avoid the task getting killed because of inactivity. Hive sends progress information when the script is outputting to stderr. This option removes the need of periodically producing stderr messages, but users should be cautious because this may prevent infinite loops in the scripts to be killed by TaskTracker. - - - - hive.script.serde - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - The default serde for trasmitting input data to and reading output data from the user scripts. - - - - hive.binary.record.max.length - 1000 - Read from a binary stream and treat each hive.binary.record.max.length bytes as a record. - The last record before the end of stream can have less than hive.binary.record.max.length bytes - - - - - hive.script.recordreader - org.apache.hadoop.hive.ql.exec.TextRecordReader - The default record reader for reading data from the user scripts. - - - - hive.script.recordwriter - org.apache.hadoop.hive.ql.exec.TextRecordWriter - The default record writer for writing data to the user scripts. - - - - hive.input.format - org.apache.hadoop.hive.ql.io.CombineHiveInputFormat - The default input format. Set this to HiveInputFormat if you encounter problems with CombineHiveInputFormat. - - - - hive.udtf.auto.progress - false - Whether Hive should automatically send progress information to TaskTracker when using UDTF's to prevent the task getting killed because of inactivity. Users should be cautious because this may prevent TaskTracker from killing tasks with infinte loops. - - - - hive.mapred.reduce.tasks.speculative.execution - true - Whether speculative execution for reducers should be turned on. 
- - - - hive.exec.counters.pull.interval - 1000 - The interval with which to poll the JobTracker for the counters the running job. The smaller it is the more load there will be on the jobtracker, the higher it is the less granular the caught will be. - - - - hive.querylog.location - /tmp/${user.name} - - Location of Hive run time structured log file - - - - - hive.querylog.enable.plan.progress - true - - Whether to log the plan's progress every time a job's progress is checked. - These logs are written to the location specified by hive.querylog.location - - - - - hive.querylog.plan.progress.interval - 60000 - - The interval to wait between logging the plan's progress in milliseconds. - If there is a whole number percentage change in the progress of the mappers or the reducers, - the progress is logged regardless of this value. - The actual interval will be the ceiling of (this value divided by the value of - hive.exec.counters.pull.interval) multiplied by the value of hive.exec.counters.pull.interval - I.e. if it is not divide evenly by the value of hive.exec.counters.pull.interval it will be - logged less frequently than specified. - This only has an effect if hive.querylog.enable.plan.progress is set to true. - - - - - hive.enforce.bucketing - false - Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced. - - - - hive.enforce.sorting - false - Whether sorting is enforced. If true, while inserting into the table, sorting is enforced. - - - - hive.optimize.bucketingsorting - true - If hive.enforce.bucketing or hive.enforce.sorting is true, dont create a reducer for enforcing - bucketing/sorting for queries of the form: - insert overwrite table T2 select * from T1; - where T1 and T2 are bucketed/sorted by the same keys into the same number of buckets. - - - - - hive.enforce.sortmergebucketmapjoin - false - If the user asked for sort-merge bucketed map-side join, and it cannot be performed, - should the query fail or not ? - - - - - hive.auto.convert.sortmerge.join - false - Will the join be automatically converted to a sort-merge join, if the joined tables pass - the criteria for sort-merge join. - - - - - hive.auto.convert.sortmerge.join.bigtable.selection.policy - org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ - The policy to choose the big table for automatic conversion to sort-merge join. - By default, the table with the largest partitions is assigned the big table. All policies are: - . based on position of the table - the leftmost table is selected - org.apache.hadoop.hive.ql.optimizer.LeftmostBigTableSMJ. - . based on total size (all the partitions selected in the query) of the table - org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ. - . based on average size (all the partitions selected in the query) of the table - org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ. - New policies can be added in future. - - - - - hive.metastore.ds.connection.url.hook - - Name of the hook to use for retriving the JDO connection URL. If empty, the value in javax.jdo.option.ConnectionURL is used - - - - hive.metastore.ds.retry.attempts - 1 - The number of times to retry a metastore call if there were a connection error - - - - hive.metastore.ds.retry.interval - 1000 - The number of miliseconds between metastore retry attempts - - - - hive.metastore.server.min.threads - 200 - Minimum number of worker threads in the Thrift server's pool. 
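hive.enforce.bucketing and hive.enforce.sorting above gate whether inserts into bucketed tables actually honor the declared bucketing; with enforcement on, Hive sizes the reducer count to the table's bucket count. A minimal sketch of enabling both:

<configuration>
  <property>
    <name>hive.enforce.bucketing</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.enforce.sorting</name>
    <value>true</value>
  </property>
</configuration>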
- - - - hive.metastore.server.max.threads - 100000 - Maximum number of worker threads in the Thrift server's pool. - - - - hive.metastore.server.tcp.keepalive - true - Whether to enable TCP keepalive for the metastore server. Keepalive will prevent accumulation of half-open connections. - - - - hive.metastore.sasl.enabled - false - If true, the metastore thrift interface will be secured with SASL. Clients must authenticate with Kerberos. - - - - hive.metastore.thrift.framed.transport.enabled - false - If true, the metastore thrift interface will use TFramedTransport. When false (default) a standard TTransport is used. - - - - hive.metastore.kerberos.keytab.file - - The path to the Kerberos Keytab file containing the metastore thrift server's service principal. - - - - hive.metastore.kerberos.principal - hive-metastore/_HOST@EXAMPLE.COM - The service principal for the metastore thrift server. The special string _HOST will be replaced automatically with the correct host name. - - - - hive.cluster.delegation.token.store.class - org.apache.hadoop.hive.thrift.MemoryTokenStore - The delegation token store implementation. Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for load-balanced cluster. - - - - hive.cluster.delegation.token.store.zookeeper.connectString - localhost:2181 - The ZooKeeper token store connect string. - - - - hive.cluster.delegation.token.store.zookeeper.znode - /hive/cluster/delegation - The root path for token store data. - - - - hive.cluster.delegation.token.store.zookeeper.acl - sasl:hive/host1@EXAMPLE.COM:cdrwa,sasl:hive/host2@EXAMPLE.COM:cdrwa - ACL for token store entries. List comma separated all server principals for the cluster. - - - - hive.metastore.cache.pinobjtypes - Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order - List of comma separated metastore object types that should be pinned in the cache - - - - hive.optimize.reducededuplication - true - Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again. This should always be set to true. Since it is a new feature, it has been made configurable. - - - - hive.optimize.reducededuplication.min.reducer - 4 - Reduce deduplication merges two RSs by moving key/parts/reducer-num of the child RS to parent RS. - That means if reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can make very slow, single MR. - The optimization will be disabled if number of reducers is less than specified value. - - - - hive.exec.dynamic.partition - true - Whether or not to allow dynamic partitions in DML/DDL. - - - - hive.exec.dynamic.partition.mode - strict - In strict mode, the user must specify at least one static partition in case the user accidentally overwrites all partitions. - - - - hive.exec.max.dynamic.partitions - 1000 - Maximum number of dynamic partitions allowed to be created in total. - - - - hive.exec.max.dynamic.partitions.pernode - 100 - Maximum number of dynamic partitions allowed to be created in each mapper/reducer node. - - - - hive.exec.max.created.files - 100000 - Maximum number of HDFS files created by all mappers/reducers in a MapReduce job. - - - - hive.exec.default.partition.name - __HIVE_DEFAULT_PARTITION__ - The default partition name in case the dynamic partition column value is null/empty string or anyother values that cannot be escaped. This value must not contain any special character used in HDFS URI (e.g., ':', '%', '/' etc). 
The user has to be aware that the dynamic partition value should not contain this value to avoid confusions. - - - - hive.stats.dbclass - jdbc:derby - The default database that stores temporary hive statistics. - - - - hive.stats.autogather - true - A flag to gather statistics automatically during the INSERT OVERWRITE command. - - - - hive.stats.jdbcdriver - org.apache.derby.jdbc.EmbeddedDriver - The JDBC driver for the database that stores temporary hive statistics. - - - - hive.stats.dbconnectionstring - jdbc:derby:;databaseName=TempStatsStore;create=true - The default connection string for the database that stores temporary hive statistics. - - - - hive.stats.default.publisher - - The Java class (implementing the StatsPublisher interface) that is used by default if hive.stats.dbclass is not JDBC or HBase. - - - - hive.stats.default.aggregator - - The Java class (implementing the StatsAggregator interface) that is used by default if hive.stats.dbclass is not JDBC or HBase. - - - - hive.stats.jdbc.timeout - 30 - Timeout value (number of seconds) used by JDBC connection and statements. - - - - hive.stats.retries.max - 0 - Maximum number of retries when stats publisher/aggregator got an exception updating intermediate database. Default is no tries on failures. - - - - hive.stats.retries.wait - 3000 - The base waiting window (in milliseconds) before the next retry. The actual wait time is calculated by baseWindow * failues baseWindow * (failure 1) * (random number between [0.0,1.0]). - - - - hive.stats.reliable - false - Whether queries will fail because stats cannot be collected completely accurately. - If this is set to true, reading/writing from/into a partition may fail becuase the stats - could not be computed accurately. - - - - - hive.stats.collect.tablekeys - false - Whether join and group by keys on tables are derived and maintained in the QueryPlan. - This is useful to identify how tables are accessed and to determine if they should be bucketed. - - - - - hive.stats.collect.scancols - false - Whether column accesses are tracked in the QueryPlan. - This is useful to identify how tables are accessed and to determine if there are wasted columns that can be trimmed. - - - - - hive.stats.ndv.error - 20.0 - Standard error expressed in percentage. Provides a tradeoff between accuracy and compute cost.A lower value for error indicates higher accuracy and a higher compute cost. - - - - - hive.stats.key.prefix.max.length - 200 - - Determines if when the prefix of the key used for intermediate stats collection - exceeds a certain length, a hash of the key is used instead. If the value < 0 then hashing - is never used, if the value >= 0 then hashing is used only when the key prefixes length - exceeds that value. The key prefix is defined as everything preceding the task ID in the key. - - - - - hive.support.concurrency - false - Whether hive supports concurrency or not. A zookeeper instance must be up and running for the default hive lock manager to support read-write locks. - - - - hive.lock.numretries - 100 - The number of times you want to try to get all the locks - - - - hive.unlock.numretries - 10 - The number of times you want to retry to do one unlock - - - - hive.lock.sleep.between.retries - 60 - The sleep time (in seconds) between various retries - - - - hive.zookeeper.quorum - - The list of zookeeper servers to talk to. This is only needed for read/write locks. - - - - hive.zookeeper.client.port - 2181 - The port of zookeeper servers to talk to. 
This is only needed for read/write locks. - - - - hive.zookeeper.session.timeout - 600000 - Zookeeper client's session timeout. The client is disconnected, and as a result, all locks released, if a heartbeat is not sent in the timeout. - - - - hive.zookeeper.namespace - hive_zookeeper_namespace - The parent node under which all zookeeper nodes are created. - - - - hive.zookeeper.clean.extra.nodes - false - Clean extra nodes at the end of the session. - - - - fs.har.impl - org.apache.hadoop.hive.shims.HiveHarFileSystem - The implementation for accessing Hadoop Archives. Note that this won't be applicable to Hadoop vers less than 0.20 - - - - hive.archive.enabled - false - Whether archiving operations are permitted - - - - hive.fetch.output.serde - org.apache.hadoop.hive.serde2.DelimitedJSONSerDe - The serde used by FetchTask to serialize the fetch output. - - - - hive.exec.mode.local.auto - false - Let hive determine whether to run in local mode automatically - - - - hive.exec.drop.ignorenonexistent - true - - Do not report an error if DROP TABLE/VIEW specifies a non-existent table/view - - - - - hive.exec.show.job.failure.debug.info - true - - If a job fails, whether to provide a link in the CLI to the task with the - most failures, along with debugging hints if applicable. - - - - - hive.auto.progress.timeout - 0 - - How long to run autoprogressor for the script/UDTF operators (in seconds). - Set to 0 for forever. - - - - - - - hive.hbase.wal.enabled - true - Whether writes to HBase should be forced to the write-ahead log. Disabling this improves HBase write performance at the risk of lost writes in case of a crash. - - - - hive.table.parameters.default - - Default property values for newly created tables - - - - hive.entity.separator - @ - Separator used to construct names of tables and partitions. For example, dbname@tablename@partitionname - - - - hive.ddl.createtablelike.properties.whitelist - - Table Properties to copy over when executing a Create Table Like. - - - - hive.variable.substitute - true - This enables substitution using syntax like ${var} ${system:var} and ${env:var}. - - - - hive.variable.substitute.depth - 40 - The maximum replacements the substitution engine will do. - - - - hive.conf.validation - true - Eables type checking for registered hive configurations - - - - hive.security.authorization.enabled - false - enable or disable the hive client authorization - - - - hive.security.authorization.manager - org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider - the hive client authorization manager class name. - The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider. - - - - - hive.security.metastore.authorization.manager - org.apache.hadoop.hive.ql.security.authorization.DefaultHiveMetastoreAuthorizationProvider - authorization manager class name to be used in the metastore for authorization. - The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider. - - - - - hive.security.authenticator.manager - org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator - hive client authenticator manager class name. - The user defined authenticator should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider. 
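The concurrency and ZooKeeper settings above are only meaningful as a set: the default lock manager needs a reachable quorum before read/write locks work at all. A sketch with placeholder quorum hosts:

<configuration>
  <property>
    <name>hive.support.concurrency</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.zookeeper.quorum</name>
    <value>zk1.example.com,zk2.example.com,zk3.example.com</value>
  </property>
</configuration>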
- - - - hive.security.metastore.authenticator.manager - org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator - authenticator manager class name to be used in the metastore for authentication. - The user defined authenticator should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider. - - - - hive.security.authorization.createtable.user.grants - - the privileges automatically granted to some users whenever a table gets created. - An example like "userX,userY:select;userZ:create" will grant select privilege to userX and userY, - and grant create privilege to userZ whenever a new table created. - - - - hive.security.authorization.createtable.group.grants - - the privileges automatically granted to some groups whenever a table gets created. - An example like "groupX,groupY:select;groupZ:create" will grant select privilege to groupX and groupY, - and grant create privilege to groupZ whenever a new table created. - - - - hive.security.authorization.createtable.role.grants - - the privileges automatically granted to some roles whenever a table gets created. - An example like "roleX,roleY:select;roleZ:create" will grant select privilege to roleX and roleY, - and grant create privilege to roleZ whenever a new table created. - - - - hive.security.authorization.createtable.owner.grants - - the privileges automatically granted to the owner whenever a table gets created. - An example like "select,drop" will grant select and drop privilege to the owner of the table - - - - hive.metastore.authorization.storage.checks - false - Should the metastore do authorization checks against the underlying storage - for operations like drop-partition (disallow the drop-partition if the user in - question doesn't have permissions to delete the corresponding directory - on the storage). - - - - hive.error.on.empty.partition - false - Whether to throw an excpetion if dynamic partition insert generates empty results. - - - - hive.index.compact.file.ignore.hdfs - false - True the hdfs location stored in the index file will be igbored at runtime. - If the data got moved or the name of the cluster got changed, the index data should still be usable. - - - - hive.optimize.index.filter.compact.minsize - 5368709120 - Minimum size (in bytes) of the inputs on which a compact index is automatically used. - - - - hive.optimize.index.filter.compact.maxsize - -1 - Maximum size (in bytes) of the inputs on which a compact index is automatically used. - A negative number is equivalent to infinity. - - - - hive.index.compact.query.max.size - 10737418240 - The maximum number of bytes that a query using the compact index can read. Negative value is equivalent to infinity. - - - - hive.index.compact.query.max.entries - 10000000 - The maximum number of index entries to read during a query that uses the compact index. Negative value is equivalent to infinity. - - - - hive.index.compact.binary.search - true - Whether or not to use a binary search to find the entries in an index table that match the filter, where possible - - - - hive.exim.uri.scheme.whitelist - hdfs,pfile - A comma separated list of acceptable URI schemes for import and export. - - - - hive.lock.mapred.only.operation - false - This param is to control whether or not only do lock on queries - that need to execute at least one mapred job. - - - - hive.limit.row.max.size - 100000 - When trying a smaller subset of data for simple LIMIT, how much size we need to guarantee - each row to have at least. 
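The hive.security.authorization.createtable.*.grants properties above all share the "principal:privilege" list syntax shown in their descriptions. One hedged example, using the documented "select,drop" form to grant the creating owner those privileges on new tables:

<configuration>
  <property>
    <name>hive.security.authorization.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.security.authorization.createtable.owner.grants</name>
    <value>select,drop</value>
  </property>
</configuration>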
- - - - hive.limit.optimize.limit.file - 10 - When trying a smaller subset of data for simple LIMIT, maximum number of files we can - sample. - - - - hive.limit.optimize.enable - false - Whether to enable to optimization to trying a smaller subset of data for simple LIMIT first. - - - - hive.limit.optimize.fetch.max - 50000 - Maximum number of rows allowed for a smaller subset of data for simple LIMIT, if it is a fetch query. - Insert queries are not restricted by this limit. - - - - hive.rework.mapredwork - false - should rework the mapred work or not. - This is first introduced by SymlinkTextInputFormat to replace symlink files with real paths at compile time. - - - - hive.exec.concatenate.check.index - true - If this sets to true, hive will throw error when doing - 'alter table tbl_name [partSpec] concatenate' on a table/partition - that has indexes on it. The reason the user want to set this to true - is because it can help user to avoid handling all index drop, recreation, - rebuild work. This is very helpful for tables with thousands of partitions. - - - - hive.sample.seednumber - 0 - A number used to percentage sampling. By changing this number, user will change the subsets - of data sampled. - - - - hive.io.exception.handlers - - A list of io exception handler class names. This is used - to construct a list exception handlers to handle exceptions thrown - by record readers - - - - hive.autogen.columnalias.prefix.label - _c - String used as a prefix when auto generating column alias. - By default the prefix label will be appended with a column position number to form the column alias. Auto generation would happen if an aggregate function is used in a select clause without an explicit alias. - - - - hive.autogen.columnalias.prefix.includefuncname - false - Whether to include function name in the column alias auto generated by hive. - - - - hive.exec.perf.logger - org.apache.hadoop.hive.ql.log.PerfLogger - The class responsible logging client side performance metrics. Must be a subclass of org.apache.hadoop.hive.ql.log.PerfLogger - - - - hive.start.cleanup.scratchdir - false - To cleanup the hive scratchdir while starting the hive server - - - - hive.output.file.extension - - String used as a file extension for output files. If not set, defaults to the codec extension for text files (e.g. ".gz"), or no extension otherwise. - - - - hive.insert.into.multilevel.dirs - false - Where to insert into multilevel directories like - "insert directory '/HIVEFT25686/chinna/' from table" - - - - hive.warehouse.subdir.inherit.perms - false - Set this to true if the the table directories should inherit the - permission of the warehouse or database directory instead of being created - with the permissions derived from dfs umask - - - - hive.exec.job.debug.capture.stacktraces - true - Whether or not stack traces parsed from the task logs of a sampled failed task for - each failed job should be stored in the SessionState - - - - - hive.exec.driver.run.hooks - - A comma separated list of hooks which implement HiveDriverRunHook and will be run at the - beginning and end of Driver.run, these will be run in the order specified - - - - - hive.ddl.output.format - text - - The data format to use for DDL output. One of "text" (for human - readable text) or "json" (for a json object). - - - - - hive.transform.escape.input - false - - This adds an option to escape special chars (newlines, carriage returns and - tabs) when they are passed to the user script. 
This is useful if the hive tables - can contain data that contains special characters. - - - - - hive.exec.rcfile.use.explicit.header - true - - If this is set the header for RC Files will simply be RCF. If this is not - set the header will be that borrowed from sequence files, e.g. SEQ- followed - by the input and output RC File formats. - - - - - hive.multi.insert.move.tasks.share.dependencies - false - - If this is set all move tasks for tables/partitions (not directories) at the end of a - multi-insert query will only begin once the dependencies for all these move tasks have been - met. - Advantages: If concurrency is enabled, the locks will only be released once the query has - finished, so with this config enabled, the time when the table/partition is - generated will be much closer to when the lock on it is released. - Disadvantages: If concurrency is not enabled, with this disabled, the tables/partitions which - are produced by this query and finish earlier will be available for querying - much earlier. Since the locks are only released once the query finishes, this - does not apply if concurrency is enabled. - - - - - hive.fetch.task.conversion - minimal - - Some select queries can be converted to single FETCH task minimizing latency. - Currently the query should be single sourced not having any subquery and should not have - any aggregations or distincts (which incurrs RS), lateral views and joins. - 1. minimal : SELECT STAR, FILTER on partition columns, LIMIT only - 2. more : SELECT, FILTER, LIMIT only (TABLESAMPLE, virtual columns) - - - - - hive.hmshandler.retry.attempts - 1 - The number of times to retry a HMSHandler call if there were a connection error - - - - hive.hmshandler.retry.interval - 1000 - The number of miliseconds between HMSHandler retry attempts - - - - hive.server.read.socket.timeout - 10 - Timeout for the HiveServer to close the connection if no response from the client in N seconds, defaults to 10 seconds. - - - - hive.server.tcp.keepalive - true - Whether to enable TCP keepalive for the Hive server. Keepalive will prevent accumulation of half-open connections. - - - - hive.decode.partition.name - false - Whether to show the unquoted partition names in query results. - - - - hive.log4j.file - - Hive log4j configuration file. - If the property is not set, then logging will be initialized using hive-log4j.properties found on the classpath. - If the property is set, the value must be a valid URI (java.net.URI, e.g. "file:///tmp/my-logging.properties"), which you can then extract a URL from and pass to PropertyConfigurator.configure(URL). - - - - hive.exec.log4j.file - - Hive log4j configuration file for execution mode(sub command). - If the property is not set, then logging will be initialized using hive-exec-log4j.properties found on the classpath. - If the property is set, the value must be a valid URI (java.net.URI, e.g. "file:///tmp/my-logging.properties"), which you can then extract a URL from and pass to PropertyConfigurator.configure(URL). 
- - - - hive.exec.infer.bucket.sort - false - - If this is set, when writing partitions, the metadata will include the bucketing/sorting - properties with which the data was written if any (this will not overwrite the metadata - inherited from the table if the table is bucketed/sorted) - - - - - hive.exec.infer.bucket.sort.num.buckets.power.two - false - - If this is set, when setting the number of reducers for the map reduce task which writes the - final output files, it will choose a number which is a power of two, unless the user specifies - the number of reducers to use using mapred.reduce.tasks. The number of reducers - may be set to a power of two, only to be followed by a merge task, preventing - anything from being inferred. - With hive.exec.infer.bucket.sort set to true: - Advantages: If this is not set, the number of buckets for partitions will seem arbitrary, - which means that the number of mappers used for optimized joins, for example, will - be very low. With this set, since the number of buckets used for any partition is - a power of two, the number of mappers used for optimized joins will be the least - number of buckets used by any partition being joined. - Disadvantages: This may mean a much larger or much smaller number of reducers being used in the - final map reduce job, e.g. if a job was originally going to take 257 reducers, - it will now take 512 reducers; similarly if the max number of reducers is 511, - and a job was going to use this many, it will now use 256 reducers. - - - - - - hive.groupby.orderby.position.alias - false - Whether to enable using column position aliases in Group By or Order By - - - - hive.server2.thrift.min.worker.threads - 5 - Minimum number of Thrift worker threads - - - - hive.server2.thrift.max.worker.threads - 100 - Maximum number of Thrift worker threads - - - - hive.server2.thrift.port - 10000 - Port number of the HiveServer2 Thrift interface. - Can be overridden by setting $HIVE_SERVER2_THRIFT_PORT - - - - hive.server2.thrift.bind.host - localhost - Bind host on which to run the HiveServer2 Thrift interface. - Can be overridden by setting $HIVE_SERVER2_THRIFT_BIND_HOST - - - - hive.server2.authentication - NONE - - Client authentication types. - NONE: no authentication check - LDAP: LDAP/AD based authentication - KERBEROS: Kerberos/GSSAPI authentication - CUSTOM: Custom authentication provider - (Use with property hive.server2.custom.authentication.class) - - - - - hive.server2.custom.authentication.class - - - Custom authentication class. Used when property - 'hive.server2.authentication' is set to 'CUSTOM'. The provided class - must be a proper implementation of the interface - org.apache.hive.service.auth.PasswdAuthenticationProvider. HiveServer2 - will call its Authenticate(user, passed) method to authenticate requests. - The implementation may optionally extend Hadoop's - org.apache.hadoop.conf.Configured class to grab Hive's Configuration object. - - - - - hive.server2.authentication.kerberos.principal - - - Kerberos server principal - - - - - hive.server2.authentication.kerberos.keytab - - - Kerberos keytab file for the server principal - - - - - hive.server2.authentication.ldap.url - - - LDAP connection URL - - - - - hive.server2.authentication.ldap.baseDN - - - LDAP base DN - - - - - hive.server2.enable.doAs - true - - Setting this property to true will have HiveServer2 execute - hive operations as the user making the calls to it.
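A hedged example of how these HiveServer2 defaults would typically be overridden in hive-site.xml; the bind address is illustrative, and the property names are taken from the deleted defaults above:

    <configuration>
      <property>
        <name>hive.server2.thrift.port</name>
        <value>10000</value>
      </property>
      <property>
        <name>hive.server2.thrift.bind.host</name>
        <!-- illustrative; the deleted default is localhost -->
        <value>0.0.0.0</value>
      </property>
      <property>
        <name>hive.server2.enable.doAs</name>
        <!-- run operations as the calling user, as described above -->
        <value>true</value>
      </property>
    </configuration>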
- - - - - - diff --git a/sahara/plugins/vanilla/v1_2_1/resources/mapred-default.xml b/sahara/plugins/vanilla/v1_2_1/resources/mapred-default.xml deleted file mode 100644 index f7d25549f0..0000000000 --- a/sahara/plugins/vanilla/v1_2_1/resources/mapred-default.xml +++ /dev/null @@ -1,1328 +0,0 @@ - - - - - - - - - - - hadoop.job.history.location - - The location where jobtracker history files are stored. - The value for this key is treated as a URI, meaning that the files - can be stored either on HDFS or the local file system. If no value is - set here, the location defaults to the local file system, at - file:///${hadoop.log.dir}/history. If the URI is missing a scheme, - fs.default.name is used for the file system. - - - - - hadoop.job.history.user.location - - User can specify a location to store the history files of - a particular job. If nothing is specified, the logs are stored in - output directory. The files are stored in "_logs/history/" in the directory. - User can stop logging by giving the value "none". - - - - - mapred.job.tracker.history.completed.location - - The completed job history files are stored at this single well - known location. If nothing is specified, the files are stored at - ${hadoop.job.history.location}/done. - - - - - mapreduce.jobhistory.max-age-ms - 2592000000 - Job history files older than this many milliseconds will - be deleted when the history cleaner runs. Defaults to 2592000000 (30 - days). - - - - - mapreduce.jobhistory.cleaner.interval-ms - 86400000 - How often the job history cleaner checks for files to delete, - in milliseconds. Defaults to 86400000 (one day). Files are only deleted if - they are older than mapreduce.jobhistory.max-age-ms. - - - - - - - io.sort.factor - 10 - The number of streams to merge at once while sorting - files. This determines the number of open file handles. - - - - io.sort.mb - 100 - The total amount of buffer memory to use while sorting - files, in megabytes. By default, gives each merge stream 1MB, which - should minimize seeks. - - - - io.sort.record.percent - 0.05 - The percentage of io.sort.mb dedicated to tracking record - boundaries. Let this value be r, io.sort.mb be x. The maximum number - of records collected before the collection thread must block is equal - to (r * x) / 4 - - - - io.sort.spill.percent - 0.80 - The soft limit in either the buffer or record collection - buffers. Once reached, a thread will begin to spill the contents to disk - in the background. Note that this does not imply any chunking of data to - the spill. A value less than 0.5 is not recommended. - - - - io.map.index.skip - 0 - Number of index entries to skip between each entry. - Zero by default. Setting this to values larger than zero can - facilitate opening large map files using less memory. - - - - mapred.job.tracker - local - The host and port that the MapReduce job tracker runs - at. If "local", then jobs are run in-process as a single map - and reduce task. - - - - - mapred.job.tracker.http.address - 0.0.0.0:50030 - - The job tracker http server address and port the server will listen on. - If the port is 0 then the server will start on a free port. - - - - - mapred.job.tracker.handler.count - 10 - - The number of server threads for the JobTracker. This should be roughly - 4% of the number of tasktracker nodes. - - - - - mapred.task.tracker.report.address - 127.0.0.1:0 - The interface and port that task tracker server listens on. - Since it is only connected to by the tasks, it uses the local interface. - EXPERT ONLY. 
Should only be changed if your host does not have the loopback - interface. - - - - mapred.local.dir - ${hadoop.tmp.dir}/mapred/local - The local directory where MapReduce stores intermediate - data files. May be a comma-separated list of - directories on different devices in order to spread disk i/o. - Directories that do not exist are ignored. - - - - - mapred.system.dir - ${hadoop.tmp.dir}/mapred/system - The directory where MapReduce stores control files. - - - - - mapreduce.jobtracker.staging.root.dir - ${hadoop.tmp.dir}/mapred/staging - The root of the staging area for users' job files - In practice, this should be the directory where users' home - directories are located (usually /user) - - - - - mapred.temp.dir - ${hadoop.tmp.dir}/mapred/temp - A shared directory for temporary files. - - - - - mapred.local.dir.minspacestart - 0 - If the space in mapred.local.dir drops under this, - do not ask for more tasks. - Value in bytes. - - - - - mapred.local.dir.minspacekill - 0 - If the space in mapred.local.dir drops under this, - do not ask more tasks until all the current ones have finished and - cleaned up. Also, to save the rest of the tasks we have running, - kill one of them, to clean up some space. Start with the reduce tasks, - then go with the ones that have finished the least. - Value in bytes. - - - - - mapred.tasktracker.expiry.interval - 600000 - Expert: The time-interval, in miliseconds, after which - a tasktracker is declared 'lost' if it doesn't send heartbeats. - - - - - - - mapred.tasktracker.resourcecalculatorplugin - - - Name of the class whose instance will be used to query resource information - on the tasktracker. - - The class must be an instance of - org.apache.hadoop.util.ResourceCalculatorPlugin. If the value is null, the - tasktracker attempts to use a class appropriate to the platform. - Currently, the only platform supported is Linux. - - - - - mapred.tasktracker.taskmemorymanager.monitoring-interval - 5000 - The interval, in milliseconds, for which the tasktracker waits - between two cycles of monitoring its tasks' memory usage. Used only if - tasks' memory management is enabled via mapred.tasktracker.tasks.maxmemory. - - - - - mapred.tasktracker.tasks.sleeptime-before-sigkill - 5000 - The time, in milliseconds, the tasktracker waits for sending a - SIGKILL to a process, after it has been sent a SIGTERM. - - - - mapred.map.tasks - 2 - The default number of map tasks per job. - Ignored when mapred.job.tracker is "local". - - - - - mapred.reduce.tasks - 1 - The default number of reduce tasks per job. Typically set to 99% - of the cluster's reduce capacity, so that if a node fails the reduces can - still be executed in a single wave. - Ignored when mapred.job.tracker is "local". - - - - - mapreduce.tasktracker.outofband.heartbeat - false - Expert: Set this to true to let the tasktracker send an - out-of-band heartbeat on task-completion for better latency. - - - - - mapreduce.tasktracker.outofband.heartbeat.damper - 1000000 - When out-of-band heartbeats are enabled, provides - damping to avoid overwhelming the JobTracker if too many out-of-band - heartbeats would occur. The damping is calculated such that the - heartbeat interval is divided by (T*D + 1) where T is the number - of completed tasks and D is the damper value. - - Setting this to a high value like the default provides no damping -- - as soon as any task finishes, a heartbeat will be sent. Setting this - parameter to 0 is equivalent to disabling the out-of-band heartbeat feature. 
- A value of 1 would indicate that, after one task has completed, the - time to wait before the next heartbeat would be 1/2 the usual time. - After two tasks have finished, it would be 1/3 the usual time, etc. - - - - - mapred.jobtracker.restart.recover - false - "true" to enable (job) recovery upon restart, - "false" to start afresh - - - - - mapreduce.job.restart.recover - true - A per-job override for job recovery. If set to false for a - job then job recovery will not be attempted for that job upon restart - even if mapred.jobtracker.restart.recover is enabled. Defaults to true - so that jobs are recovered by default if - mapred.jobtracker.restart.recover is enabled. - - - - - mapred.jobtracker.job.history.block.size - 3145728 - The block size of the job history file. Since the job recovery - uses job history, its important to dump job history to disk as - soon as possible. Note that this is an expert level parameter. - The default value is set to 3 MB. - - - - - mapreduce.job.split.metainfo.maxsize - 10000000 - The maximum permissible size of the split metainfo file. - The JobTracker won't attempt to read split metainfo files bigger than - the configured value. - No limits if set to -1. - - - - - mapred.jobtracker.taskScheduler - org.apache.hadoop.mapred.JobQueueTaskScheduler - The class responsible for scheduling the tasks. - - - - mapred.jobtracker.nodegroup.aware - false - Identify if jobtracker is aware of nodegroup layer. - - - - mapred.jobtracker.jobSchedulable - org.apache.hadoop.mapred.JobSchedulable - The class responsible for an entity in FairScheduler that can - launch tasks. - - - - - mapred.jobtracker.taskScheduler.maxRunningTasksPerJob - - The maximum number of running tasks for a job before - it gets preempted. No limits if undefined. - - - - - mapred.map.max.attempts - 4 - Expert: The maximum number of attempts per map task. - In other words, framework will try to execute a map task these many number - of times before giving up on it. - - - - - mapred.reduce.max.attempts - 4 - Expert: The maximum number of attempts per reduce task. - In other words, framework will try to execute a reduce task these many number - of times before giving up on it. - - - - - mapred.reduce.parallel.copies - 5 - The default number of parallel transfers run by reduce - during the copy(shuffle) phase. - - - - - mapreduce.reduce.shuffle.maxfetchfailures - 10 - The maximum number of times a reducer tries to - fetch a map output before it reports it. - - - - mapreduce.reduce.shuffle.connect.timeout - 180000 - Expert: The maximum amount of time (in milli seconds) a reduce - task spends in trying to connect to a tasktracker for getting map output. - - - - - mapreduce.reduce.shuffle.read.timeout - 180000 - Expert: The maximum amount of time (in milli seconds) a reduce - task waits for map output data to be available for reading after obtaining - connection. - - - - - mapred.task.timeout - 600000 - The number of milliseconds before a task will be - terminated if it neither reads an input, writes an output, nor - updates its status string. - - - - - mapred.tasktracker.map.tasks.maximum - 2 - The maximum number of map tasks that will be run - simultaneously by a task tracker. - - - - - mapred.tasktracker.reduce.tasks.maximum - 2 - The maximum number of reduce tasks that will be run - simultaneously by a task tracker. - - - - - mapred.jobtracker.completeuserjobs.maximum - 100 - The maximum number of complete jobs per user to keep around - before delegating them to the job history. 
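As a usage sketch for the slot limits described above, a mapred-site.xml override raising per-tasktracker concurrency (the values 8 and 4 are arbitrary illustrations, not recommendations):

    <property>
      <name>mapred.tasktracker.map.tasks.maximum</name>
      <value>8</value>
    </property>
    <property>
      <name>mapred.tasktracker.reduce.tasks.maximum</name>
      <value>4</value>
    </property>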
- - - - mapreduce.reduce.input.limit - -1 - The limit on the input size of the reduce. If the estimated - input size of the reduce is greater than this value, job is failed. A - value of -1 means that there is no limit set. - - - - mapred.job.tracker.retiredjobs.cache.size - 1000 - The number of retired job status to keep in the cache. - - - - - mapred.job.tracker.jobhistory.lru.cache.size - 5 - The number of job history files loaded in memory. The jobs are - loaded when they are first accessed. The cache is cleared based on LRU. - - - - - - - mapred.child.java.opts - -Xmx200m - Java opts for the task tracker child processes. - The following symbol, if present, will be interpolated: @taskid@ is replaced - by current TaskID. Any other occurrences of '@' will go unchanged. - For example, to enable verbose gc logging to a file named for the taskid in - /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of: - -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc - - The configuration variable mapred.child.ulimit can be used to control the - maximum virtual memory of the child processes. - - - - - mapred.child.env - - User added environment variables for the task tracker child - processes. Example : - 1) A=foo This will set the env variable A to foo - 2) B=$B:c This is inherit tasktracker's B env variable. - - - - - mapred.child.ulimit - - The maximum virtual memory, in KB, of a process launched by the - Map-Reduce framework. This can be used to control both the Mapper/Reducer - tasks and applications using Hadoop Pipes, Hadoop Streaming etc. - By default it is left unspecified to let cluster admins control it via - limits.conf and other such relevant mechanisms. - - Note: mapred.child.ulimit must be greater than or equal to the -Xmx passed to - JavaVM, else the VM might not start. - - - - - mapred.cluster.map.memory.mb - -1 - The size, in terms of virtual memory, of a single map slot - in the Map-Reduce framework, used by the scheduler. - A job can ask for multiple slots for a single map task via - mapred.job.map.memory.mb, upto the limit specified by - mapred.cluster.max.map.memory.mb, if the scheduler supports the feature. - The value of -1 indicates that this feature is turned off. - - - - - mapred.cluster.reduce.memory.mb - -1 - The size, in terms of virtual memory, of a single reduce slot - in the Map-Reduce framework, used by the scheduler. - A job can ask for multiple slots for a single reduce task via - mapred.job.reduce.memory.mb, upto the limit specified by - mapred.cluster.max.reduce.memory.mb, if the scheduler supports the feature. - The value of -1 indicates that this feature is turned off. - - - - - mapred.cluster.max.map.memory.mb - -1 - The maximum size, in terms of virtual memory, of a single map - task launched by the Map-Reduce framework, used by the scheduler. - A job can ask for multiple slots for a single map task via - mapred.job.map.memory.mb, upto the limit specified by - mapred.cluster.max.map.memory.mb, if the scheduler supports the feature. - The value of -1 indicates that this feature is turned off. - - - - - mapred.cluster.max.reduce.memory.mb - -1 - The maximum size, in terms of virtual memory, of a single reduce - task launched by the Map-Reduce framework, used by the scheduler. - A job can ask for multiple slots for a single reduce task via - mapred.job.reduce.memory.mb, upto the limit specified by - mapred.cluster.max.reduce.memory.mb, if the scheduler supports the feature. - The value of -1 indicates that this feature is turned off. 
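The @taskid@ interpolation for mapred.child.java.opts can be shown concretely; this property block simply restates the example value already given in the description above:

    <property>
      <name>mapred.child.java.opts</name>
      <!-- @taskid@ is replaced with the current TaskID when the child JVM is launched -->
      <value>-Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc</value>
    </property>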
- - - - - mapred.job.map.memory.mb - -1 - The size, in terms of virtual memory, of a single map task - for the job. - A job can ask for multiple slots for a single map task, rounded up to the - next multiple of mapred.cluster.map.memory.mb and upto the limit - specified by mapred.cluster.max.map.memory.mb, if the scheduler supports - the feature. - The value of -1 indicates that this feature is turned off iff - mapred.cluster.map.memory.mb is also turned off (-1). - - - - - mapred.job.reduce.memory.mb - -1 - The size, in terms of virtual memory, of a single reduce task - for the job. - A job can ask for multiple slots for a single map task, rounded up to the - next multiple of mapred.cluster.reduce.memory.mb and upto the limit - specified by mapred.cluster.max.reduce.memory.mb, if the scheduler supports - the feature. - The value of -1 indicates that this feature is turned off iff - mapred.cluster.reduce.memory.mb is also turned off (-1). - - - - - mapred.child.tmp - ./tmp - To set the value of tmp directory for map and reduce tasks. - If the value is an absolute path, it is directly assigned. Otherwise, it is - prepended with task's working directory. The java tasks are executed with - option -Djava.io.tmpdir='the absolute path of the tmp dir'. Pipes and - streaming are set with environment variable, - TMPDIR='the absolute path of the tmp dir' - - - - - mapred.inmem.merge.threshold - 1000 - The threshold, in terms of the number of files - for the in-memory merge process. When we accumulate threshold number of files - we initiate the in-memory merge and spill to disk. A value of 0 or less than - 0 indicates we want to DON'T have any threshold and instead depend only on - the ramfs's memory consumption to trigger the merge. - - - - - mapred.job.shuffle.merge.percent - 0.66 - The usage threshold at which an in-memory merge will be - initiated, expressed as a percentage of the total memory allocated to - storing in-memory map outputs, as defined by - mapred.job.shuffle.input.buffer.percent. - - - - - mapred.job.shuffle.input.buffer.percent - 0.70 - The percentage of memory to be allocated from the maximum heap - size to storing map outputs during the shuffle. - - - - - mapred.job.reduce.input.buffer.percent - 0.0 - The percentage of memory- relative to the maximum heap size- to - retain map outputs during the reduce. When the shuffle is concluded, any - remaining map outputs in memory must consume less than this threshold before - the reduce can begin. - - - - - mapred.map.tasks.speculative.execution - true - If true, then multiple instances of some map tasks - may be executed in parallel. - - - - mapred.reduce.tasks.speculative.execution - true - If true, then multiple instances of some reduce tasks - may be executed in parallel. - - - - mapred.job.reuse.jvm.num.tasks - 1 - How many tasks to run per jvm. If set to -1, there is - no limit. - - - - - mapred.min.split.size - 0 - The minimum size chunk that map input should be split - into. Note that some file formats may have minimum split sizes that - take priority over this setting. - - - - mapred.jobtracker.maxtasks.per.job - -1 - The maximum number of tasks for a single job. - A value of -1 indicates that there is no maximum. - - - - mapred.submit.replication - 10 - The replication level for submitted job files. This - should be around the square root of the number of nodes. - - - - - - mapred.tasktracker.dns.interface - default - The name of the Network Interface from which a task - tracker should report its IP address. 
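To make the rounding rule above concrete: with a 1024 MB cluster map slot, a job requesting 1536 MB is rounded up to the next multiple, 2048 MB, i.e. two slots. A hypothetical pairing of the two properties:

    <property>
      <name>mapred.cluster.map.memory.mb</name>
      <value>1024</value>
    </property>
    <property>
      <name>mapred.job.map.memory.mb</name>
      <!-- 1536 MB requested; accounted as ceil(1536/1024) = 2 slots -->
      <value>1536</value>
    </property>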
- - - - - mapred.tasktracker.dns.nameserver - default - The host name or IP address of the name server (DNS) - which a TaskTracker should use to determine the host name used by - the JobTracker for communication and display purposes. - - - - - tasktracker.http.threads - 40 - The number of worker threads that for the http server. This is - used for map output fetching - - - - - mapred.task.tracker.http.address - 0.0.0.0:50060 - - The task tracker http server address and port. - If the port is 0 then the server will start on a free port. - - - - - keep.failed.task.files - false - Should the files for failed tasks be kept. This should only be - used on jobs that are failing, because the storage is never - reclaimed. It also prevents the map outputs from being erased - from the reduce directory as they are consumed. - - - - - - - mapred.output.compress - false - Should the job outputs be compressed? - - - - - mapred.output.compression.type - RECORD - If the job outputs are to compressed as SequenceFiles, how should - they be compressed? Should be one of NONE, RECORD or BLOCK. - - - - - mapred.output.compression.codec - org.apache.hadoop.io.compress.DefaultCodec - If the job outputs are compressed, how should they be compressed? - - - - - mapred.compress.map.output - false - Should the outputs of the maps be compressed before being - sent across the network. Uses SequenceFile compression. - - - - - mapred.map.output.compression.codec - org.apache.hadoop.io.compress.DefaultCodec - If the map outputs are compressed, how should they be - compressed? - - - - - map.sort.class - org.apache.hadoop.util.QuickSort - The default sort class for sorting keys. - - - - - mapred.userlog.limit.kb - 0 - The maximum size of user-logs of each task in KB. 0 disables the cap. - - - - - mapred.userlog.retain.hours - 24 - The maximum time, in hours, for which the user-logs are to be - retained after the job completion. - - - - - mapred.user.jobconf.limit - 5242880 - The maximum allowed size of the user jobconf. The - default is set to 5 MB - - - - mapred.hosts - - Names a file that contains the list of nodes that may - connect to the jobtracker. If the value is empty, all hosts are - permitted. - - - - mapred.hosts.exclude - - Names a file that contains the list of hosts that - should be excluded by the jobtracker. If the value is empty, no - hosts are excluded. - - - - mapred.heartbeats.in.second - 100 - Expert: Approximate number of heart-beats that could arrive - at JobTracker in a second. Assuming each RPC can be processed - in 10msec, the default value is made 100 RPCs in a second. - - - - - mapred.max.tracker.blacklists - 4 - The number of blacklists for a tasktracker by various jobs - after which the tasktracker will be marked as potentially - faulty and is a candidate for graylisting across all jobs. - (Unlike blacklisting, this is advisory; the tracker remains - active. However, it is reported as graylisted in the web UI, - with the expectation that chronically graylisted trackers - will be manually decommissioned.) This value is tied to - mapred.jobtracker.blacklist.fault-timeout-window; faults - older than the window width are forgiven, so the tracker - will recover from transient problems. It will also become - healthy after a restart. - - - - - mapred.jobtracker.blacklist.fault-timeout-window - 180 - The timeout (in minutes) after which per-job tasktracker - faults are forgiven. 
The window is logically a circular - buffer of time-interval buckets whose width is defined by - mapred.jobtracker.blacklist.fault-bucket-width; when the - "now" pointer moves across a bucket boundary, the previous - contents (faults) of the new bucket are cleared. In other - words, the timeout's granularity is determined by the bucket - width. - - - - - mapred.jobtracker.blacklist.fault-bucket-width - 15 - The width (in minutes) of each bucket in the tasktracker - fault timeout window. Each bucket is reused in a circular - manner after a full timeout-window interval (defined by - mapred.jobtracker.blacklist.fault-timeout-window). - - - - - mapred.max.tracker.failures - 4 - The number of task-failures on a tasktracker of a given job - after which new tasks of that job aren't assigned to it. - - - - - jobclient.output.filter - FAILED - The filter for controlling the output of the task's userlogs sent - to the console of the JobClient. - The permissible options are: NONE, KILLED, FAILED, SUCCEEDED and - ALL. - - - - - mapred.job.tracker.persist.jobstatus.active - false - Indicates if persistency of job status information is - active or not. - - - - - mapred.job.tracker.persist.jobstatus.hours - 0 - The number of hours job status information is persisted in DFS. - The job status information will be available after it drops of the memory - queue and between jobtracker restarts. With a zero value the job status - information is not persisted at all in DFS. - - - - - mapred.job.tracker.persist.jobstatus.dir - /jobtracker/jobsInfo - The directory where the job status information is persisted - in a file system to be available after it drops of the memory queue and - between jobtracker restarts. - - - - - mapreduce.job.complete.cancel.delegation.tokens - true - if false - do not unregister/cancel delegation tokens - from renewal, because same tokens may be used by spawned jobs - - - - - mapred.task.profile - false - To set whether the system should collect profiler - information for some of the tasks in this job? The information is stored - in the user log directory. The value is "true" if task profiling - is enabled. - - - - mapred.task.profile.maps - 0-2 - To set the ranges of map tasks to profile. - mapred.task.profile has to be set to true for the value to be accounted. - - - - - mapred.task.profile.reduces - 0-2 - To set the ranges of reduce tasks to profile. - mapred.task.profile has to be set to true for the value to be accounted. - - - - - mapred.line.input.format.linespermap - 1 - Number of lines per split in NLineInputFormat. - - - - - mapred.skip.attempts.to.start.skipping - 2 - The number of Task attempts AFTER which skip mode - will be kicked off. When skip mode is kicked off, the - tasks reports the range of records which it will process - next, to the TaskTracker. So that on failures, TT knows which - ones are possibly the bad records. On further executions, - those are skipped. - - - - - mapred.skip.map.auto.incr.proc.count - true - The flag which if set to true, - SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS is incremented - by MapRunner after invoking the map function. This value must be set to - false for applications which process the records asynchronously - or buffer the input records. For example streaming. - In such cases applications should increment this counter on their own. 
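The profiling switches described above act together; a minimal sketch enabling profiles for the first three map attempts (the range merely restates the default):

    <property>
      <name>mapred.task.profile</name>
      <value>true</value>
    </property>
    <property>
      <name>mapred.task.profile.maps</name>
      <!-- only honored while mapred.task.profile is true -->
      <value>0-2</value>
    </property>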
- - - - - mapred.skip.reduce.auto.incr.proc.count - true - The flag which if set to true, - SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS is incremented - by framework after invoking the reduce function. This value must be set to - false for applications which process the records asynchronously - or buffer the input records. For example streaming. - In such cases applications should increment this counter on their own. - - - - - mapred.skip.out.dir - - If no value is specified here, the skipped records are - written to the output directory at _logs/skip. - User can stop writing skipped records by giving the value "none". - - - - - mapred.skip.map.max.skip.records - 0 - The number of acceptable skip records surrounding the bad - record PER bad record in mapper. The number includes the bad record as well. - To turn the feature of detection/skipping of bad records off, set the - value to 0. - The framework tries to narrow down the skipped range by retrying - until this threshold is met OR all attempts get exhausted for this task. - Set the value to Long.MAX_VALUE to indicate that framework need not try to - narrow down. Whatever records(depends on application) get skipped are - acceptable. - - - - - mapred.skip.reduce.max.skip.groups - 0 - The number of acceptable skip groups surrounding the bad - group PER bad group in reducer. The number includes the bad group as well. - To turn the feature of detection/skipping of bad groups off, set the - value to 0. - The framework tries to narrow down the skipped range by retrying - until this threshold is met OR all attempts get exhausted for this task. - Set the value to Long.MAX_VALUE to indicate that framework need not try to - narrow down. Whatever groups(depends on application) get skipped are - acceptable. - - - - - mapreduce.ifile.readahead - true - Configuration key to enable/disable IFile readahead. - - - - - mapreduce.ifile.readahead.bytes - 4194304 - Configuration key to set the IFile readahead length in bytes. - - - - - - - - - job.end.retry.attempts - 0 - Indicates how many times hadoop should attempt to contact the - notification URL - - - - job.end.retry.interval - 30000 - Indicates time in milliseconds between notification URL retry - calls - - - - - hadoop.rpc.socket.factory.class.JobSubmissionProtocol - - SocketFactory to use to connect to a Map/Reduce master - (JobTracker). If null or empty, then use hadoop.rpc.socket.class.default. - - - - - mapred.task.cache.levels - 2 - This is the max level of the task cache. For example, if - the level is 2, the tasks cached are at the host level and at the rack - level. - - - - - mapred.queue.names - default - Comma separated list of queues configured for this jobtracker. - Jobs are added to queues and schedulers can configure different - scheduling properties for the various queues. To configure a property - for a queue, the name of the queue must match the name specified in this - value. Queue properties that are common to all schedulers are configured - here with the naming convention, mapred.queue.$QUEUE-NAME.$PROPERTY-NAME, - for e.g. mapred.queue.default.submit-job-acl. - The number of queues configured in this parameter could depend on the - type of scheduler being used, as specified in - mapred.jobtracker.taskScheduler. For example, the JobQueueTaskScheduler - supports only a single queue, which is the default configured here. - Before adding more queues, ensure that the scheduler you've configured - supports multiple queues. 
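The mapred.queue.$QUEUE-NAME.$PROPERTY-NAME convention described above composes as follows; the "etl" queue and its principals are invented for illustration, and a scheduler supporting multiple queues would be required:

    <property>
      <name>mapred.queue.names</name>
      <value>default,etl</value>
    </property>
    <property>
      <name>mapred.queue.etl.submit-job-acl</name>
      <!-- ACL value format: "user1,user2 group1,group2" -->
      <value>alice,bob etl-users</value>
    </property>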
- - - - - mapred.acls.enabled - false - Specifies whether ACLs should be checked - for authorization of users for doing various queue and job level operations. - ACLs are disabled by default. If enabled, access control checks are made by - JobTracker and TaskTracker when requests are made by users for queue - operations like submit job to a queue and kill a job in the queue and job - operations like viewing the job-details (See mapreduce.job.acl-view-job) - or for modifying the job (See mapreduce.job.acl-modify-job) using - Map/Reduce APIs, RPCs or via the console and web user interfaces. - - - - - mapred.queue.default.state - RUNNING - - This values defines the state , default queue is in. - the values can be either "STOPPED" or "RUNNING" - This value can be changed at runtime. - - - - - mapred.job.queue.name - default - Queue to which a job is submitted. This must match one of the - queues defined in mapred.queue.names for the system. Also, the ACL setup - for the queue must allow the current user to submit a job to the queue. - Before specifying a queue, ensure that the system is configured with - the queue, and access is allowed for submitting jobs to the queue. - - - - - mapreduce.job.acl-modify-job - - Job specific access-control list for 'modifying' the job. It - is only used if authorization is enabled in Map/Reduce by setting the - configuration property mapred.acls.enabled to true. - This specifies the list of users and/or groups who can do modification - operations on the job. For specifying a list of users and groups the - format to use is "user1,user2 group1,group". If set to '*', it allows all - users/groups to modify this job. If set to ' '(i.e. space), it allows - none. This configuration is used to guard all the modifications with respect - to this job and takes care of all the following operations: - o killing this job - o killing a task of this job, failing a task of this job - o setting the priority of this job - Each of these operations are also protected by the per-queue level ACL - "acl-administer-jobs" configured via mapred-queues.xml. So a caller should - have the authorization to satisfy either the queue-level ACL or the - job-level ACL. - - Irrespective of this ACL configuration, job-owner, the user who started the - cluster, cluster administrators configured via - mapreduce.cluster.administrators and queue administrators of the queue to - which this job is submitted to configured via - mapred.queue.queue-name.acl-administer-jobs in mapred-queue-acls.xml can - do all the modification operations on a job. - - By default, nobody else besides job-owner, the user who started the cluster, - cluster administrators and queue administrators can perform modification - operations on a job. - - - - - mapreduce.job.acl-view-job - - Job specific access-control list for 'viewing' the job. It is - only used if authorization is enabled in Map/Reduce by setting the - configuration property mapred.acls.enabled to true. - This specifies the list of users and/or groups who can view private details - about the job. For specifying a list of users and groups the - format to use is "user1,user2 group1,group". If set to '*', it allows all - users/groups to modify this job. If set to ' '(i.e. space), it allows - none. 
This configuration is used to guard some of the job-views and at - present only protects APIs that can return possibly sensitive information - of the job-owner like - o job-level counters - o task-level counters - o tasks' diagnostic information - o task-logs displayed on the TaskTracker web-UI and - o job.xml showed by the JobTracker's web-UI - Every other piece of information of jobs is still accessible by any other - user, for e.g., JobStatus, JobProfile, list of jobs in the queue, etc. - - Irrespective of this ACL configuration, job-owner, the user who started the - cluster, cluster administrators configured via - mapreduce.cluster.administrators and queue administrators of the queue to - which this job is submitted to configured via - mapred.queue.queue-name.acl-administer-jobs in mapred-queue-acls.xml can do - all the view operations on a job. - - By default, nobody else besides job-owner, the user who started the - cluster, cluster administrators and queue administrators can perform - view operations on a job. - - - - - mapred.tasktracker.indexcache.mb - 10 - The maximum memory that a task tracker allows for the - index cache that is used when serving map outputs to reducers. - - - - - mapred.combine.recordsBeforeProgress - 10000 - The number of records to process during combine output collection - before sending a progress notification to the TaskTracker. - - - - - mapred.merge.recordsBeforeProgress - 10000 - The number of records to process during merge before - sending a progress notification to the TaskTracker. - - - - - mapred.reduce.slowstart.completed.maps - 0.05 - Fraction of the number of maps in the job which should be - complete before reduces are scheduled for the job. - - - - - mapred.task.tracker.task-controller - org.apache.hadoop.mapred.DefaultTaskController - TaskController which is used to launch and manage task execution - - - - - mapreduce.tasktracker.group - - Expert: Group to which TaskTracker belongs. If - LinuxTaskController is configured via mapreduce.tasktracker.taskcontroller, - the group owner of the task-controller binary should be same as this group. - - - - - mapred.disk.healthChecker.interval - 60000 - How often the TaskTracker checks the health of its - local directories. Configuring this to a value smaller than the - heartbeat interval is equivalent to setting this to heartbeat - interval value. - - - - - - - mapred.healthChecker.script.path - - Absolute path to the script which is - periodicallyrun by the node health monitoring service to determine if - the node is healthy or not. If the value of this key is empty or the - file does not exist in the location configured here, the node health - monitoring service is not started. - - - - mapred.healthChecker.interval - 60000 - Frequency of the node health script to be run, - in milliseconds - - - - mapred.healthChecker.script.timeout - 600000 - Time after node health script should be killed if - unresponsive and considered that the script has failed. - - - - mapred.healthChecker.script.args - - List of arguments which are to be passed to - node health script when it is being launched comma seperated. - - - - - - - mapreduce.job.counters.max - 120 - Limit on the number of counters allowed per job. - - - - - mapreduce.job.counters.groups.max - 50 - Limit on the number of counter groups allowed per job. - - - - - mapreduce.job.counters.counter.name.max - 64 - Limit on the length of counter names in jobs. Names - exceeding this limit will be truncated. 
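Tying the job-level ACLs above together, a hedged example with hypothetical principals; note the ACLs are only consulted once mapred.acls.enabled is true:

    <property>
      <name>mapred.acls.enabled</name>
      <value>true</value>
    </property>
    <property>
      <name>mapreduce.job.acl-view-job</name>
      <!-- "user1,user2 group1,group2" format; a single space would allow nobody -->
      <value>alice analysts</value>
    </property>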
- - - - - mapreduce.job.counters.group.name.max - 128 - Limit on the length of counter group names in jobs. Names - exceeding this limit will be truncated. - - - - diff --git a/sahara/plugins/vanilla/v1_2_1/resources/oozie-default.xml b/sahara/plugins/vanilla/v1_2_1/resources/oozie-default.xml deleted file mode 100644 index 455ef9d924..0000000000 --- a/sahara/plugins/vanilla/v1_2_1/resources/oozie-default.xml +++ /dev/null @@ -1,1929 +0,0 @@ - - - - - - - - - - - - oozie.action.ship.launcher.jar - true - - If true, Oozie will create and ship a "launcher jar" that contains classes necessary for the launcher job. If false, - Oozie will not do this, and it is assumed that the necessary classes are in their respective sharelib jars or the - "oozie" sharelib instead. When false, the sharelib is required for ALL actions; when true, the sharelib is only - required for actions that need additional jars (e.g. Pig). The main advantage of setting this to false is that - launching jobs should be slightly faster. - - - - - oozie.action.mapreduce.uber.jar.enable - false - - If true, enables the oozie.mapreduce.uber.jar mapreduce workflow configuration property, which is used to specify an - uber jar in HDFS. Submitting a workflow with an uber jar requires at least Hadoop 2.2.0 or 1.2.0. If false, workflows - which specify the oozie.mapreduce.uber.jar configuration property will fail. - - - - - oozie.processing.timezone - UTC - - Oozie server timezone. Valid values are UTC and GMT(+/-)####, for example 'GMT+0530' would be India - timezone. All dates parsed and generated by Oozie Coordinator/Bundle will be in the specified - timezone. The default value of 'UTC' should not be changed under normal circumstances. If for any reason it - is changed, note that GMT(+/-)#### timezones do not observe DST changes. - - - - - - - oozie.base.url - http://localhost:8080/oozie - - Base Oozie URL. - - - - - - - oozie.system.id - oozie-${user.name} - - The Oozie system ID. - - - - - oozie.systemmode - NORMAL - - System mode for Oozie at startup. - - - - - oozie.delete.runtime.dir.on.shutdown - true - - If the runtime directory should be deleted when Oozie shuts down.
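These server-level Oozie defaults are the kind an oozie-site.xml would override; the host below is illustrative, and the timezone deliberately stays at the recommended UTC:

    <configuration>
      <property>
        <name>oozie.base.url</name>
        <value>http://oozie.example.com:8080/oozie</value>
      </property>
      <property>
        <name>oozie.processing.timezone</name>
        <!-- per the description above, UTC should normally not be changed -->
        <value>UTC</value>
      </property>
    </configuration>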
- - - - - oozie.services - - org.apache.oozie.service.SchedulerService, - org.apache.oozie.service.InstrumentationService, - org.apache.oozie.service.CallableQueueService, - org.apache.oozie.service.UUIDService, - org.apache.oozie.service.ELService, - org.apache.oozie.service.AuthorizationService, - org.apache.oozie.service.UserGroupInformationService, - org.apache.oozie.service.HadoopAccessorService, - org.apache.oozie.service.URIHandlerService, - org.apache.oozie.service.MemoryLocksService, - org.apache.oozie.service.DagXLogInfoService, - org.apache.oozie.service.SchemaService, - org.apache.oozie.service.LiteWorkflowAppService, - org.apache.oozie.service.JPAService, - org.apache.oozie.service.StoreService, - org.apache.oozie.service.CoordinatorStoreService, - org.apache.oozie.service.SLAStoreService, - org.apache.oozie.service.DBLiteWorkflowStoreService, - org.apache.oozie.service.CallbackService, - org.apache.oozie.service.ActionService, - org.apache.oozie.service.ActionCheckerService, - org.apache.oozie.service.RecoveryService, - org.apache.oozie.service.PurgeService, - org.apache.oozie.service.CoordinatorEngineService, - org.apache.oozie.service.BundleEngineService, - org.apache.oozie.service.DagEngineService, - org.apache.oozie.service.CoordMaterializeTriggerService, - org.apache.oozie.service.StatusTransitService, - org.apache.oozie.service.PauseTransitService, - org.apache.oozie.service.GroupsService, - org.apache.oozie.service.ProxyUserService - - - All services to be created and managed by Oozie Services singleton. - Class names must be separated by commas. - - - - - oozie.services.ext - - - To add/replace services defined in 'oozie.services' with custom implementations. - Class names must be separated by commas. - - - - - - oozie.service.HCatAccessorService.jmsconnections - - default=java.naming.factory.initial#org.apache.activemq.jndi.ActiveMQInitialContextFactory;java.naming.provider.url#tcp://localhost:61616;connectionFactoryNames#ConnectionFactory - - - Specify the map of endpoints to JMS configuration properties. In general, endpoint - identifies the HCatalog server URL. "default" is used if no endpoint is mentioned - in the query. If some JMS property is not defined, the system will use the property - defined jndi.properties. jndi.properties files is retrieved from the application classpath. - Mapping rules can also be provided for mapping Hcatalog servers to corresponding JMS providers. - hcat://${1}.${2}.server.com:8020=java.naming.factory.initial#Dummy.Factory;java.naming.provider.url#tcp://broker.${2}:61616 - - - - - - - oozie.service.JMSTopicService.topic.name - - default=${username} - - - Topic options are ${username} or ${jobId} or a fixed string which can be specified as default or for a - particular job type. - For e.g To have a fixed string topic for workflows, coordinators and bundles, - specify in the following comma-separated format: {jobtype1}={some_string1}, {jobtype2}={some_string2} - where job type can be WORKFLOW, COORDINATOR or BUNDLE. - e.g. 
The following defines topics for workflow job, workflow action, coordinator job, coordinator action, - bundle job and bundle action - WORKFLOW=workflow, - COORDINATOR=coordinator, - BUNDLE=bundle - For jobs with no defined topic, the default topic will be ${username} - - - - - - oozie.jms.producer.connection.properties - java.naming.factory.initial#org.apache.activemq.jndi.ActiveMQInitialContextFactory;java.naming.provider.url#tcp://localhost:61616;connectionFactoryNames#ConnectionFactory - - - - - oozie.service.JMSAccessorService.connectioncontext.impl - - org.apache.oozie.jms.DefaultConnectionContext - - - Specifies the Connection Context implementation - - - - - - - - oozie.service.ConfigurationService.ignore.system.properties - - oozie.service.AuthorizationService.security.enabled - - - Specifies "oozie.*" properties that cannot be overridden via Java system properties. - Property names must be separated by commas. - - - - - - - oozie.service.SchedulerService.threads - 10 - - The number of threads to be used by the SchedulerService to run daemon tasks. - If maxed out, scheduled daemon tasks will be queued up and delayed until threads become available. - - - - - - - oozie.service.AuthorizationService.authorization.enabled - false - - Specifies whether security (user name/admin role) is enabled or not. - If disabled any user can manage the Oozie system and manage any job. - - - - - oozie.service.AuthorizationService.default.group.as.acl - false - - Enables old behavior where the user's default group is the job's ACL. - - - - - - - oozie.service.InstrumentationService.logging.interval - 60 - - Interval, in seconds, at which instrumentation should be logged by the InstrumentationService. - If set to 0 it will not log instrumentation data. - - - - - - oozie.service.PurgeService.older.than - 30 - - Completed workflow jobs older than this value, in days, will be purged by the PurgeService. - - - - - oozie.service.PurgeService.coord.older.than - 7 - - Completed coordinator jobs older than this value, in days, will be purged by the PurgeService. - - - - - oozie.service.PurgeService.bundle.older.than - 7 - - Completed bundle jobs older than this value, in days, will be purged by the PurgeService. - - - - - oozie.service.PurgeService.purge.limit - 100 - - Completed Actions purge - limit each purge to this value - - - - - oozie.service.PurgeService.purge.interval - 3600 - - Interval at which the purge service will run, in seconds. - - - - - - - oozie.service.RecoveryService.wf.actions.older.than - 120 - - Age of the actions which are eligible to be queued for recovery, in seconds. - - - - - oozie.service.RecoveryService.callable.batch.size - 10 - - This value determines the number of callables which will be batched together - to be executed by a single thread. - - - - - oozie.service.RecoveryService.push.dependency.interval - 200 - - This value determines the delay for push missing dependency command queueing - in the Recovery Service - - - - - oozie.service.RecoveryService.interval - 60 - - Interval at which the RecoveryService will run, in seconds. - - - - - oozie.service.RecoveryService.coord.older.than - 600 - - Age of the Coordinator jobs or actions which are eligible to be queued for recovery, in seconds. - - - - - oozie.service.RecoveryService.bundle.older.than - 600 - - Age of the Bundle jobs which are eligible to be queued for recovery, in seconds.
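As a usage sketch for the purge thresholds above, shortening the workflow purge window from the 30-day default (the value 7 is an arbitrary illustration):

    <property>
      <name>oozie.service.PurgeService.older.than</name>
      <!-- completed workflow jobs older than 7 days will be purged -->
      <value>7</value>
    </property>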
- - - - - - oozie.service.CallableQueueService.queue.size - 10000 - Max callable queue size - - - - oozie.service.CallableQueueService.threads - 10 - Number of threads used for executing callables - - - - oozie.service.CallableQueueService.callable.concurrency - 3 - - Maximum concurrency for a given callable type. - Each command is a callable type (submit, start, run, signal, job, jobs, suspend, resume, etc). - Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc). - All commands that use action executors (action-start, action-end, action-kill and action-check) use - the action type as the callable type. - - - - - oozie.service.CallableQueueService.callable.next.eligible - true - - If true, when a callable in the queue has already reached max concurrency, - Oozie continuously finds the next one which has not yet reached max concurrency. - - - - - oozie.service.CallableQueueService.InterruptMapMaxSize - 500 - - Maximum size of the interrupt map; the interrupt element will not be inserted in the map if the size is exceeded. - - - - - oozie.service.CallableQueueService.InterruptTypes - kill,resume,suspend,bundle_kill,bundle_resume,bundle_suspend,coord_kill,coord_change,coord_resume,coord_suspend - - The types of XCommands that are considered to be of interrupt type - - - - - - - oozie.service.CoordMaterializeTriggerService.lookup.interval - - 300 - The Coordinator Job Lookup trigger command is scheduled at - this "interval" (in seconds). - - - - oozie.service.CoordMaterializeTriggerService.materialization.window - - 3600 - The Coordinator Job Lookup command materializes each job for - this next "window" duration - - - - oozie.service.CoordMaterializeTriggerService.callable.batch.size - 10 - - This value determines the number of callables which will be batched together - to be executed by a single thread. - - - - - oozie.service.CoordMaterializeTriggerService.materialization.system.limit - 50 - - This value determines the number of coordinator jobs to be materialized at a given time. - - - - - oozie.service.coord.normal.default.timeout - - 10080 - Default timeout for a coordinator action input check (in minutes) for a normal job. - - - - - oozie.service.coord.default.max.timeout - - 86400 - Default maximum timeout for a coordinator action input check (in minutes). 86400 = 60 days - - - - - oozie.service.coord.input.check.requeue.interval - - 60000 - Command re-queue interval for coordinator data input checks (in milliseconds). - - - - - oozie.service.coord.push.check.requeue.interval - - 600000 - Command re-queue interval for push dependencies (in milliseconds). - - - - - oozie.service.coord.default.concurrency - - 1 - Default concurrency for a coordinator job, determining the maximum number of actions that should - be executed at the same time. -1 means infinite concurrency. - - - - oozie.service.coord.default.throttle - - 12 - Default throttle for a coordinator job, determining the maximum number of actions that should - be in WAITING state at the same time. - - - - oozie.service.coord.materialization.throttling.factor - - 0.05 - Determines the maximum number of actions that should be in WAITING state for a single job at any time. The value is calculated as - this factor multiplied by the total queue size.
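Spelling out the throttling formula above: the per-job WAITING cap is the factor multiplied by the total queue size, so with the defaults shown it is 0.05 x 10000 = 500 actions. Expressed as the two cooperating properties:

    <property>
      <name>oozie.service.CallableQueueService.queue.size</name>
      <value>10000</value>
    </property>
    <property>
      <name>oozie.service.coord.materialization.throttling.factor</name>
      <!-- 0.05 * 10000 = at most 500 WAITING actions per job -->
      <value>0.05</value>
    </property>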
- - - - - - oozie.service.ELService.groups - job-submit,workflow,wf-sla-submit,coord-job-submit-freq,coord-job-submit-nofuncs,coord-job-submit-data,coord-job-submit-instances,coord-sla-submit,coord-action-create,coord-action-create-inst,coord-sla-create,coord-action-start - List of groups for different ELServices - - - - oozie.service.ELService.constants.job-submit - - - - EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT. - - - - - oozie.service.ELService.functions.job-submit - - - - EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD. - - - - - - oozie.service.ELService.constants.workflow - - KB=org.apache.oozie.util.ELConstantsFunctions#KB, - MB=org.apache.oozie.util.ELConstantsFunctions#MB, - GB=org.apache.oozie.util.ELConstantsFunctions#GB, - TB=org.apache.oozie.util.ELConstantsFunctions#TB, - PB=org.apache.oozie.util.ELConstantsFunctions#PB, - RECORDS=org.apache.oozie.action.hadoop.HadoopELFunctions#RECORDS, - MAP_IN=org.apache.oozie.action.hadoop.HadoopELFunctions#MAP_IN, - MAP_OUT=org.apache.oozie.action.hadoop.HadoopELFunctions#MAP_OUT, - REDUCE_IN=org.apache.oozie.action.hadoop.HadoopELFunctions#REDUCE_IN, - REDUCE_OUT=org.apache.oozie.action.hadoop.HadoopELFunctions#REDUCE_OUT, - GROUPS=org.apache.oozie.action.hadoop.HadoopELFunctions#GROUPS - - - EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT. - - - - - oozie.service.ELService.ext.constants.workflow - - - EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT. - This property is a convenience property to add extensions to the built in executors without having to - include all the built in ones. - - - - - oozie.service.ELService.functions.workflow - - firstNotNull=org.apache.oozie.util.ELConstantsFunctions#firstNotNull, - concat=org.apache.oozie.util.ELConstantsFunctions#concat, - replaceAll=org.apache.oozie.util.ELConstantsFunctions#replaceAll, - appendAll=org.apache.oozie.util.ELConstantsFunctions#appendAll, - trim=org.apache.oozie.util.ELConstantsFunctions#trim, - timestamp=org.apache.oozie.util.ELConstantsFunctions#timestamp, - urlEncode=org.apache.oozie.util.ELConstantsFunctions#urlEncode, - toJsonStr=org.apache.oozie.util.ELConstantsFunctions#toJsonStr, - toPropertiesStr=org.apache.oozie.util.ELConstantsFunctions#toPropertiesStr, - toConfigurationStr=org.apache.oozie.util.ELConstantsFunctions#toConfigurationStr, - wf:id=org.apache.oozie.DagELFunctions#wf_id, - wf:name=org.apache.oozie.DagELFunctions#wf_name, - wf:appPath=org.apache.oozie.DagELFunctions#wf_appPath, - wf:conf=org.apache.oozie.DagELFunctions#wf_conf, - wf:user=org.apache.oozie.DagELFunctions#wf_user, - wf:group=org.apache.oozie.DagELFunctions#wf_group, - wf:callback=org.apache.oozie.DagELFunctions#wf_callback, - wf:transition=org.apache.oozie.DagELFunctions#wf_transition, - wf:lastErrorNode=org.apache.oozie.DagELFunctions#wf_lastErrorNode, - wf:errorCode=org.apache.oozie.DagELFunctions#wf_errorCode, - wf:errorMessage=org.apache.oozie.DagELFunctions#wf_errorMessage, - wf:run=org.apache.oozie.DagELFunctions#wf_run, - wf:actionData=org.apache.oozie.DagELFunctions#wf_actionData, - wf:actionExternalId=org.apache.oozie.DagELFunctions#wf_actionExternalId, - wf:actionTrackerUri=org.apache.oozie.DagELFunctions#wf_actionTrackerUri, - wf:actionExternalStatus=org.apache.oozie.DagELFunctions#wf_actionExternalStatus, - hadoop:counters=org.apache.oozie.action.hadoop.HadoopELFunctions#hadoop_counters, - 
fs:exists=org.apache.oozie.action.hadoop.FsELFunctions#fs_exists, - fs:isDir=org.apache.oozie.action.hadoop.FsELFunctions#fs_isDir, - fs:dirSize=org.apache.oozie.action.hadoop.FsELFunctions#fs_dirSize, - fs:fileSize=org.apache.oozie.action.hadoop.FsELFunctions#fs_fileSize, - fs:blockSize=org.apache.oozie.action.hadoop.FsELFunctions#fs_blockSize, - hcat:exists=org.apache.oozie.coord.HCatELFunctions#hcat_exists - - - EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD. - - - - - oozie.service.WorkflowAppService.WorkflowDefinitionMaxLength - 100000 - - The maximum length of the workflow definition in bytes. - An error will be reported if the length exceeds the given maximum. - - - - - oozie.service.ELService.ext.functions.workflow - - - - EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD. - This property is a convenience property to add extensions to the built in executors without having to - include all the built in ones. - - - - - - oozie.service.ELService.constants.wf-sla-submit - - MINUTES=org.apache.oozie.util.ELConstantsFunctions#SUBMIT_MINUTES, - HOURS=org.apache.oozie.util.ELConstantsFunctions#SUBMIT_HOURS, - DAYS=org.apache.oozie.util.ELConstantsFunctions#SUBMIT_DAYS - - - EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT. - - - - - oozie.service.ELService.ext.constants.wf-sla-submit - - - EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT. - This property is a convenience property to add extensions to the built in executors without having to - include all the built in ones. - - - - - oozie.service.ELService.functions.wf-sla-submit - - - EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD. - - - - oozie.service.ELService.ext.functions.wf-sla-submit - - - - EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD. - This property is a convenience property to add extensions to the built in executors without having to - include all the built in ones. - - - - - - - - oozie.service.ELService.constants.coord-job-submit-freq - - - EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT. - - - - - oozie.service.ELService.ext.constants.coord-job-submit-freq - - - EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT. - This property is a convenience property to add extensions to the built in executors without having to - include all the built in ones. - - - - - oozie.service.ELService.functions.coord-job-submit-freq - - coord:days=org.apache.oozie.coord.CoordELFunctions#ph1_coord_days, - coord:months=org.apache.oozie.coord.CoordELFunctions#ph1_coord_months, - coord:hours=org.apache.oozie.coord.CoordELFunctions#ph1_coord_hours, - coord:minutes=org.apache.oozie.coord.CoordELFunctions#ph1_coord_minutes, - coord:endOfDays=org.apache.oozie.coord.CoordELFunctions#ph1_coord_endOfDays, - coord:endOfMonths=org.apache.oozie.coord.CoordELFunctions#ph1_coord_endOfMonths, - coord:conf=org.apache.oozie.coord.CoordELFunctions#coord_conf, - coord:user=org.apache.oozie.coord.CoordELFunctions#coord_user - - - EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD. - - - - - oozie.service.ELService.ext.functions.coord-job-submit-freq - - - - EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
- This property is a convenience property to add extensions to the built in executors without having to - include all the built in ones. - - - - - - oozie.service.ELService.constants.coord-job-submit-nofuncs - - MINUTE=org.apache.oozie.coord.CoordELConstants#SUBMIT_MINUTE, - HOUR=org.apache.oozie.coord.CoordELConstants#SUBMIT_HOUR, - DAY=org.apache.oozie.coord.CoordELConstants#SUBMIT_DAY, - MONTH=org.apache.oozie.coord.CoordELConstants#SUBMIT_MONTH, - YEAR=org.apache.oozie.coord.CoordELConstants#SUBMIT_YEAR - - - EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT. - - - - - oozie.service.ELService.ext.constants.coord-job-submit-nofuncs - - - EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT. - This property is a convenience property to add extensions to the built in executors without having to - include all the built in ones. - - - - - oozie.service.ELService.functions.coord-job-submit-nofuncs - - coord:conf=org.apache.oozie.coord.CoordELFunctions#coord_conf, - coord:user=org.apache.oozie.coord.CoordELFunctions#coord_user - - - EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD. - - - - - oozie.service.ELService.ext.functions.coord-job-submit-nofuncs - - - EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD. - This property is a convenience property to add extensions to the built in executors without having to - include all the built in ones. - - - - - - oozie.service.ELService.constants.coord-job-submit-instances - - - EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT. - - - - - oozie.service.ELService.ext.constants.coord-job-submit-instances - - - EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT. - This property is a convenience property to add extensions to the built in executors without having to - include all the built in ones. - - - - - oozie.service.ELService.functions.coord-job-submit-instances - - coord:hoursInDay=org.apache.oozie.coord.CoordELFunctions#ph1_coord_hoursInDay_echo, - coord:daysInMonth=org.apache.oozie.coord.CoordELFunctions#ph1_coord_daysInMonth_echo, - coord:tzOffset=org.apache.oozie.coord.CoordELFunctions#ph1_coord_tzOffset_echo, - coord:current=org.apache.oozie.coord.CoordELFunctions#ph1_coord_current_echo, - coord:currentRange=org.apache.oozie.coord.CoordELFunctions#ph1_coord_currentRange_echo, - coord:offset=org.apache.oozie.coord.CoordELFunctions#ph1_coord_offset_echo, - coord:latest=org.apache.oozie.coord.CoordELFunctions#ph1_coord_latest_echo, - coord:latestRange=org.apache.oozie.coord.CoordELFunctions#ph1_coord_latestRange_echo, - coord:future=org.apache.oozie.coord.CoordELFunctions#ph1_coord_future_echo, - coord:futureRange=org.apache.oozie.coord.CoordELFunctions#ph1_coord_futureRange_echo, - coord:formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo, - coord:conf=org.apache.oozie.coord.CoordELFunctions#coord_conf, - coord:user=org.apache.oozie.coord.CoordELFunctions#coord_user - - - EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD. - - - - - oozie.service.ELService.ext.functions.coord-job-submit-instances - - - - EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD. - This property is a convenience property to add extensions to the built in executors without having to - include all the built in ones. 
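Every ELService entry above and below uses the same declaration grammar: [PREFIX:]NAME=CLASS#CONSTANT for constants and [PREFIX:]NAME=CLASS#METHOD for functions. A minimal sketch of how such a declaration decomposes; the parse_el_declaration helper is illustrative only and exists in neither Oozie nor Sahara:

.. sourcecode:: python

    def parse_el_declaration(decl):
        """Split '[PREFIX:]NAME=CLASS#MEMBER' into its four parts."""
        name_part, _, target = decl.partition('=')
        prefix, sep, name = name_part.partition(':')
        if not sep:  # no 'PREFIX:' segment present
            prefix, name = None, name_part
        cls, _, member = target.partition('#')
        return prefix, name, cls, member

    print(parse_el_declaration(
        'coord:days=org.apache.oozie.coord.CoordELFunctions#ph1_coord_days'))
    # ('coord', 'days', 'org.apache.oozie.coord.CoordELFunctions',
    #  'ph1_coord_days')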
- - - - - - - oozie.service.ELService.constants.coord-job-submit-data - - - EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT. - - - - - oozie.service.ELService.ext.constants.coord-job-submit-data - - - EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT. - This property is a convenience property to add extensions to the built in executors without having to - include all the built in ones. - - - - - oozie.service.ELService.functions.coord-job-submit-data - - coord:dataIn=org.apache.oozie.coord.CoordELFunctions#ph1_coord_dataIn_echo, - coord:dataOut=org.apache.oozie.coord.CoordELFunctions#ph1_coord_dataOut_echo, - coord:nominalTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_wrap, - coord:actualTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_actualTime_echo_wrap, - coord:dateOffset=org.apache.oozie.coord.CoordELFunctions#ph1_coord_dateOffset_echo, - coord:formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo, - coord:actionId=org.apache.oozie.coord.CoordELFunctions#ph1_coord_actionId_echo, - coord:name=org.apache.oozie.coord.CoordELFunctions#ph1_coord_name_echo, - coord:conf=org.apache.oozie.coord.CoordELFunctions#coord_conf, - coord:user=org.apache.oozie.coord.CoordELFunctions#coord_user, - coord:databaseIn=org.apache.oozie.coord.HCatELFunctions#ph1_coord_databaseIn_echo, - coord:databaseOut=org.apache.oozie.coord.HCatELFunctions#ph1_coord_databaseOut_echo, - coord:tableIn=org.apache.oozie.coord.HCatELFunctions#ph1_coord_tableIn_echo, - coord:tableOut=org.apache.oozie.coord.HCatELFunctions#ph1_coord_tableOut_echo, - coord:dataInPartitionFilter=org.apache.oozie.coord.HCatELFunctions#ph1_coord_dataInPartitionFilter_echo, - coord:dataInPartitionMin=org.apache.oozie.coord.HCatELFunctions#ph1_coord_dataInPartitionMin_echo, - coord:dataInPartitionMax=org.apache.oozie.coord.HCatELFunctions#ph1_coord_dataInPartitionMax_echo, - coord:dataOutPartitions=org.apache.oozie.coord.HCatELFunctions#ph1_coord_dataOutPartitions_echo, - coord:dataOutPartitionValue=org.apache.oozie.coord.HCatELFunctions#ph1_coord_dataOutPartitionValue_echo - - - EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD. - - - - - oozie.service.ELService.ext.functions.coord-job-submit-data - - - - EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD. - This property is a convenience property to add extensions to the built in executors without having to - include all the built in ones. - - - - - - oozie.service.ELService.constants.coord-sla-submit - - MINUTES=org.apache.oozie.coord.CoordELConstants#SUBMIT_MINUTES, - HOURS=org.apache.oozie.coord.CoordELConstants#SUBMIT_HOURS, - DAYS=org.apache.oozie.coord.CoordELConstants#SUBMIT_DAYS - - - EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT. - - - - - oozie.service.ELService.ext.constants.coord-sla-submit - - - EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT. - This property is a convenience property to add extensions to the built in executors without having to - include all the built in ones. 
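The recurring '...ext.*' properties are additive: conceptually, Oozie unions each extension list with its built-in counterpart, so deployers declare only their additions. A rough sketch of that merge, assuming comma-separated declaration strings like the values above (simplified, not Oozie's actual ELService code):

.. sourcecode:: python

    def effective_declarations(builtin_csv, ext_csv=''):
        """Union a built-in declaration list with its '...ext.*' additions."""
        merged = (builtin_csv + ',' + ext_csv).split(',')
        return [d.strip() for d in merged if d.strip()]

    builtin = 'MINUTES=a.b.C#SUBMIT_MINUTES,HOURS=a.b.C#SUBMIT_HOURS'
    print(effective_declarations(builtin, 'WEEKS=my.Ext#SUBMIT_WEEKS'))
    # ['MINUTES=a.b.C#SUBMIT_MINUTES', 'HOURS=a.b.C#SUBMIT_HOURS',
    #  'WEEKS=my.Ext#SUBMIT_WEEKS']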
- - - - - oozie.service.ELService.functions.coord-sla-submit - - coord:nominalTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_fixed, - coord:conf=org.apache.oozie.coord.CoordELFunctions#coord_conf, - coord:user=org.apache.oozie.coord.CoordELFunctions#coord_user - - - EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD. - - - - oozie.service.ELService.ext.functions.coord-sla-submit - - - - EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD. - This property is a convenience property to add extensions to the built in executors without having to - include all the built in ones. - - - - - - oozie.service.ELService.constants.coord-action-create - - - - EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT. - - - - - oozie.service.ELService.ext.constants.coord-action-create - - - EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT. - This property is a convenience property to add extensions to the built in executors without having to - include all the built in ones. - - - - - oozie.service.ELService.functions.coord-action-create - - coord:hoursInDay=org.apache.oozie.coord.CoordELFunctions#ph2_coord_hoursInDay, - coord:daysInMonth=org.apache.oozie.coord.CoordELFunctions#ph2_coord_daysInMonth, - coord:tzOffset=org.apache.oozie.coord.CoordELFunctions#ph2_coord_tzOffset, - coord:current=org.apache.oozie.coord.CoordELFunctions#ph2_coord_current, - coord:currentRange=org.apache.oozie.coord.CoordELFunctions#ph2_coord_currentRange, - coord:offset=org.apache.oozie.coord.CoordELFunctions#ph2_coord_offset, - coord:latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo, - coord:latestRange=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latestRange_echo, - coord:future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo, - coord:futureRange=org.apache.oozie.coord.CoordELFunctions#ph2_coord_futureRange_echo, - coord:actionId=org.apache.oozie.coord.CoordELFunctions#ph2_coord_actionId, - coord:name=org.apache.oozie.coord.CoordELFunctions#ph2_coord_name, - coord:formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime, - coord:conf=org.apache.oozie.coord.CoordELFunctions#coord_conf, - coord:user=org.apache.oozie.coord.CoordELFunctions#coord_user - - - EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD. - - - - - oozie.service.ELService.ext.functions.coord-action-create - - - - EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD. - This property is a convenience property to add extensions to the built in executors without having to - include all the built in ones. - - - - - - - oozie.service.ELService.constants.coord-action-create-inst - - - - EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT. - - - - - oozie.service.ELService.ext.constants.coord-action-create-inst - - - EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT. - This property is a convenience property to add extensions to the built in executors without having to - include all the built in ones. 
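The ph1_*_echo and ph2_*_echo method names above mark expressions that are only validated and echoed back at that phase, left for a later phase (submission, then materialization, then action start) to resolve. A toy analogue of the echo behaviour; ph1_echo is a made-up name, not an Oozie API:

.. sourcecode:: python

    def ph1_echo(func_name):
        """Return a stand-in that re-emits the expression unevaluated."""
        def echoed(*args):
            return '%s(%s)' % (func_name, ','.join(str(a) for a in args))
        return echoed

    current = ph1_echo('coord:current')
    print(current(0))  # 'coord:current(0)', resolved only at a later phase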
- - - - - oozie.service.ELService.functions.coord-action-create-inst - - coord:hoursInDay=org.apache.oozie.coord.CoordELFunctions#ph2_coord_hoursInDay, - coord:daysInMonth=org.apache.oozie.coord.CoordELFunctions#ph2_coord_daysInMonth, - coord:tzOffset=org.apache.oozie.coord.CoordELFunctions#ph2_coord_tzOffset, - coord:current=org.apache.oozie.coord.CoordELFunctions#ph2_coord_current_echo, - coord:currentRange=org.apache.oozie.coord.CoordELFunctions#ph2_coord_currentRange_echo, - coord:offset=org.apache.oozie.coord.CoordELFunctions#ph2_coord_offset_echo, - coord:latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo, - coord:latestRange=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latestRange_echo, - coord:future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo, - coord:futureRange=org.apache.oozie.coord.CoordELFunctions#ph2_coord_futureRange_echo, - coord:formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime, - coord:conf=org.apache.oozie.coord.CoordELFunctions#coord_conf, - coord:user=org.apache.oozie.coord.CoordELFunctions#coord_user - - - EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD. - - - - - oozie.service.ELService.ext.functions.coord-action-create-inst - - - - EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD. - This property is a convenience property to add extensions to the built in executors without having to - include all the built in ones. - - - - - - oozie.service.ELService.constants.coord-sla-create - - - EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT. - - - - - oozie.service.ELService.ext.constants.coord-sla-create - - MINUTES=org.apache.oozie.coord.CoordELConstants#SUBMIT_MINUTES, - HOURS=org.apache.oozie.coord.CoordELConstants#SUBMIT_HOURS, - DAYS=org.apache.oozie.coord.CoordELConstants#SUBMIT_DAYS - - EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT. - This property is a convenience property to add extensions to the built in executors without having to - include all the built in ones. - - - - - oozie.service.ELService.functions.coord-sla-create - - coord:nominalTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_nominalTime, - coord:conf=org.apache.oozie.coord.CoordELFunctions#coord_conf, - coord:user=org.apache.oozie.coord.CoordELFunctions#coord_user - - - EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD. - - - - oozie.service.ELService.ext.functions.coord-sla-create - - - - EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD. - This property is a convenience property to add extensions to the built in executors without having to - include all the built in ones. - - - - - - oozie.service.ELService.constants.coord-action-start - - - - EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT. - - - - - oozie.service.ELService.ext.constants.coord-action-start - - - EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT. - This property is a convenience property to add extensions to the built in executors without having to - include all the built in ones. 
- - - - - oozie.service.ELService.functions.coord-action-start - - coord:hoursInDay=org.apache.oozie.coord.CoordELFunctions#ph3_coord_hoursInDay, - coord:daysInMonth=org.apache.oozie.coord.CoordELFunctions#ph3_coord_daysInMonth, - coord:tzOffset=org.apache.oozie.coord.CoordELFunctions#ph3_coord_tzOffset, - coord:latest=org.apache.oozie.coord.CoordELFunctions#ph3_coord_latest, - coord:latestRange=org.apache.oozie.coord.CoordELFunctions#ph3_coord_latestRange, - coord:future=org.apache.oozie.coord.CoordELFunctions#ph3_coord_future, - coord:futureRange=org.apache.oozie.coord.CoordELFunctions#ph3_coord_futureRange, - coord:dataIn=org.apache.oozie.coord.CoordELFunctions#ph3_coord_dataIn, - coord:dataOut=org.apache.oozie.coord.CoordELFunctions#ph3_coord_dataOut, - coord:nominalTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_nominalTime, - coord:actualTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_actualTime, - coord:dateOffset=org.apache.oozie.coord.CoordELFunctions#ph3_coord_dateOffset, - coord:formatTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_formatTime, - coord:actionId=org.apache.oozie.coord.CoordELFunctions#ph3_coord_actionId, - coord:name=org.apache.oozie.coord.CoordELFunctions#ph3_coord_name, - coord:conf=org.apache.oozie.coord.CoordELFunctions#coord_conf, - coord:user=org.apache.oozie.coord.CoordELFunctions#coord_user, - coord:databaseIn=org.apache.oozie.coord.HCatELFunctions#ph3_coord_databaseIn, - coord:databaseOut=org.apache.oozie.coord.HCatELFunctions#ph3_coord_databaseOut, - coord:tableIn=org.apache.oozie.coord.HCatELFunctions#ph3_coord_tableIn, - coord:tableOut=org.apache.oozie.coord.HCatELFunctions#ph3_coord_tableOut, - coord:dataInPartitionFilter=org.apache.oozie.coord.HCatELFunctions#ph3_coord_dataInPartitionFilter, - coord:dataInPartitionMin=org.apache.oozie.coord.HCatELFunctions#ph3_coord_dataInPartitionMin, - coord:dataInPartitionMax=org.apache.oozie.coord.HCatELFunctions#ph3_coord_dataInPartitionMax, - coord:dataOutPartitions=org.apache.oozie.coord.HCatELFunctions#ph3_coord_dataOutPartitions, - coord:dataOutPartitionValue=org.apache.oozie.coord.HCatELFunctions#ph3_coord_dataOutPartitionValue - - - EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD. - - - - - oozie.service.ELService.ext.functions.coord-action-start - - - - EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD. - This property is a convenience property to add extensions to the built in executors without having to - include all the built in ones. - - - - - oozie.service.ELService.latest-el.use-current-time - false - - Determine whether to use the current time to determine the latest dependency or the action creation time. - This is for backward compatibility with older oozie behaviour. - - - - - - - oozie.service.UUIDService.generator - counter - - random : generated UUIDs will be random strings. - counter: generated UUIDs generated will be a counter postfixed with the system startup time. - - - - - - - oozie.service.DBLiteWorkflowStoreService.status.metrics.collection.interval - 5 - Workflow Status metrics collection interval in minutes. - - - - oozie.service.DBLiteWorkflowStoreService.status.metrics.window - 3600 - - Workflow Status metrics collection window in seconds. Workflow status will be instrumented for the window. - - - - - - - oozie.db.schema.name - oozie - - Oozie DataBase Name - - - - - - - oozie.service.JPAService.create.db.schema - true - - Creates Oozie DB. 
- - If set to true, it creates the DB schema if it does not exist. If the DB schema exists is a NOP. - If set to false, it does not create the DB schema. If the DB schema does not exist it fails start up. - - - - - oozie.service.JPAService.validate.db.connection - false - - Validates DB connections from the DB connection pool. - If the 'oozie.service.JPAService.create.db.schema' property is set to true, this property is ignored. - - - - - oozie.service.JPAService.validate.db.connection.eviction.interval - 300000 - - Validates DB connections from the DB connection pool. - When validate db connection 'TestWhileIdle' is true, the number of milliseconds to sleep - between runs of the idle object evictor thread. - - - - - oozie.service.JPAService.validate.db.connection.eviction.num - 10 - - Validates DB connections from the DB connection pool. - When validate db connection 'TestWhileIdle' is true, the number of objects to examine during - each run of the idle object evictor thread. - - - - - - oozie.service.JPAService.connection.data.source - org.apache.commons.dbcp.BasicDataSource - - DataSource to be used for connection pooling. - - - - - oozie.service.JPAService.jdbc.driver - org.apache.derby.jdbc.EmbeddedDriver - - JDBC driver class. - - - - - oozie.service.JPAService.jdbc.url - jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true - - JDBC URL. - - - - - oozie.service.JPAService.jdbc.username - sa - - DB user name. - - - - - oozie.service.JPAService.jdbc.password - - - DB user password. - - IMPORTANT: if password is emtpy leave a 1 space string, the service trims the value, - if empty Configuration assumes it is NULL. - - IMPORTANT: if the StoreServicePasswordService is active, it will reset this value with the value given in - the console. - - - - - oozie.service.JPAService.pool.max.active.conn - 10 - - Max number of connections. - - - - - - - oozie.service.SchemaService.wf.ext.schemas - oozie-sla-0.1.xsd,oozie-sla-0.2.xsd - - Schemas for additional actions types. - - IMPORTANT: if there are no schemas leave a 1 space string, the service trims the value, - if empty Configuration assumes it is NULL. - - - - - oozie.service.SchemaService.coord.ext.schemas - oozie-sla-0.1.xsd,oozie-sla-0.2.xsd - - Schemas for additional actions types. - - IMPORTANT: if there are no schemas leave a 1 space string, the service trims the value, - if empty Configuration assumes it is NULL. - - - - - oozie.service.SchemaService.sla.ext.schemas - - - Schemas for semantic validation for GMS SLA. - - IMPORTANT: if there are no schemas leave a 1 space string, the service trims the value, - if empty Configuration assumes it is NULL. - - - - - - oozie.service.CallbackService.base.url - ${oozie.base.url}/callback - - Base callback URL used by ActionExecutors. - - - - - - - oozie.servlet.CallbackServlet.max.data.len - 2048 - - Max size in characters for the action completion data output. - - - - - - - oozie.external.stats.max.size - -1 - - Max size in bytes for action stats. -1 means infinite value. - - - - - - - oozie.JobCommand.job.console.url - ${oozie.base.url}?job= - - Base console URL for a workflow job. 
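The JPAService block above defaults to an embedded Derby database. When Sahara enabled MySQL for Oozie instead (the deleted run_scripts.py below starts MySQL and creates the Oozie database), overrides along these lines would be applied; the driver class is the standard MySQL Connector/J one, while the host, port, database name and credentials below are placeholders, not values taken from this patch:

.. sourcecode:: python

    # Hypothetical oozie-site overrides for a MySQL-backed JPAService.
    OOZIE_MYSQL_OVERRIDES = {
        'oozie.service.JPAService.jdbc.driver': 'com.mysql.jdbc.Driver',
        'oozie.service.JPAService.jdbc.url':
            'jdbc:mysql://localhost:3306/oozie',
        'oozie.service.JPAService.jdbc.username': 'oozie',
        'oozie.service.JPAService.jdbc.password': 'oozie',
    }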
- - - - - - - - oozie.service.ActionService.executor.classes - - org.apache.oozie.action.decision.DecisionActionExecutor, - org.apache.oozie.action.hadoop.JavaActionExecutor, - org.apache.oozie.action.hadoop.FsActionExecutor, - org.apache.oozie.action.hadoop.MapReduceActionExecutor, - org.apache.oozie.action.hadoop.PigActionExecutor, - org.apache.oozie.action.ssh.SshActionExecutor, - org.apache.oozie.action.oozie.SubWorkflowActionExecutor - - - List of ActionExecutors classes (separated by commas). - Only action types with associated executors can be used in workflows. - - - - - oozie.service.ActionService.executor.ext.classes - - - List of ActionExecutors extension classes (separated by commas). Only action types with associated - executors can be used in workflows. This property is a convenience property to add extensions to the built - in executors without having to include all the built in ones. - - - - - - - oozie.service.ActionCheckerService.action.check.interval - 60 - - The frequency at which the ActionCheckService will run. - - - - - oozie.service.ActionCheckerService.action.check.delay - 600 - - The time, in seconds, between an ActionCheck for the same action. - - - - - oozie.service.ActionCheckerService.callable.batch.size - 10 - - This value determines the number of actions which will be batched together - to be executed by a single thread. - - - - - - oozie.service.StatusTransitService.statusTransit.interval - 60 - - The frequency in seconds at which the StatusTransitService will run. - - - - - oozie.service.StatusTransitService.backward.support.for.coord.status - false - - true, if coordinator job submits using 'uri:oozie:coordinator:0.1' and wants to keep Oozie 2.x status transit. - if set true, - 1. SUCCEEDED state in coordinator job means materialization done. - 2. No DONEWITHERROR state in coordinator job - 3. No PAUSED or PREPPAUSED state in coordinator job - 4. PREPSUSPENDED becomes SUSPENDED in coordinator job - - - - - oozie.service.StatusTransitService.backward.support.for.states.without.error - true - - true, if you want to keep Oozie 3.2 status transit. - Change it to false for Oozie 4.x releases. - if set true, - No states like RUNNINGWITHERROR, SUSPENDEDWITHERROR and PAUSEDWITHERROR - for coordinator and bundle - - - - - - oozie.service.PauseTransitService.PauseTransit.interval - 60 - - The frequency in seconds at which the PauseTransitService will run. - - - - - - - - oozie.action.launcher.mapreduce.job.ubertask.enable - false - - Enables Uber Mode for the launcher job in YARN/Hadoop 2 (no effect in Hadoop 1). - Setting oozie.launcher.mapreduce.job.ubertask.enable in a an action's configuration section overrides this for that - action. - - - - - - - - oozie.action.retries.max - 3 - - The number of retries for executing an action in case of failure - - - - - oozie.action.hadoop.delete.hdfs.tmp.dir - false - - If set to true, it will delete temporary directory at the end of execution of map reduce action. - - - - - - - oozie.action.pig.delete.hdfs.tmp.dir - false - - If set to true, it will delete temporary directory at the end of execution of pig action. - - - - - - - oozie.action.ssh.delete.remote.tmp.dir - false - - If set to true, it will delete temporary directory at the end of execution of ssh action. - - - - - oozie.action.ssh.http.command - curl - - Command to use for callback to oozie, normally is 'curl' or 'wget'. - The command must available in PATH environment variable of the USER@HOST box shell. 
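oozie.action.retries.max above caps how many times a failed action is re-attempted. A conceptual sketch of that policy, simplified and not Oozie code (Oozie reschedules the retry rather than sleeping in place):

.. sourcecode:: python

    import time

    def run_with_retries(action, retries_max=3, interval=60):
        """Re-attempt 'action' up to retries_max times before giving up."""
        for attempt in range(1, retries_max + 1):
            try:
                return action()
            except Exception:
                if attempt == retries_max:
                    raise
                time.sleep(interval)  # stand-in for Oozie's rescheduling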
- - - - - oozie.action.ssh.http.command.post.options - --data-binary @#stdout --request POST --header "content-type:text/plain" - - The callback command POST options. - Used when the ouptut of the ssh action is captured. - - - - - oozie.action.ssh.allow.user.at.host - true - - Specifies whether the user specified by the ssh action is allowed or is to be replaced - by the Job user - - - - - - - oozie.service.HadoopAccessorService.kerberos.enabled - false - - Indicates if Oozie is configured to use Kerberos. - - - - - local.realm - LOCALHOST - - Kerberos Realm used by Oozie and Hadoop. Using 'local.realm' to be aligned with Hadoop configuration - - - - - oozie.service.HadoopAccessorService.keytab.file - ${user.home}/oozie.keytab - - Location of the Oozie user keytab file. - - - - - oozie.service.HadoopAccessorService.kerberos.principal - ${user.name}/localhost@${local.realm} - - Kerberos principal for Oozie service. - - - - - oozie.service.HadoopAccessorService.jobTracker.whitelist - - - Whitelisted job tracker for Oozie service. - - - - - oozie.service.HadoopAccessorService.nameNode.whitelist - - - Whitelisted job tracker for Oozie service. - - - - - oozie.service.HadoopAccessorService.hadoop.configurations - *=hadoop-conf - - Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of - the Hadoop service (JobTracker, YARN, HDFS). The wildcard '*' configuration is - used when there is no exact match for an authority. The HADOOP_CONF_DIR contains - the relevant Hadoop *-site.xml files. If the path is relative is looked within - the Oozie configuration directory; though the path can be absolute (i.e. to point - to Hadoop client conf/ directories in the local filesystem. - - - - - - oozie.service.HadoopAccessorService.action.configurations - *=action-conf - - Comma separated AUTHORITY=ACTION_CONF_DIR, where AUTHORITY is the HOST:PORT of - the Hadoop MapReduce service (JobTracker, YARN). The wildcard '*' configuration is - used when there is no exact match for an authority. The ACTION_CONF_DIR may contain - ACTION.xml files where ACTION is the action type ('java', 'map-reduce', 'pig', - 'hive', 'sqoop', etc.). If the ACTION.xml file exists, its properties will be used - as defaults properties for the action. If the path is relative is looked within - the Oozie configuration directory; though the path can be absolute (i.e. to point - to Hadoop client conf/ directories in the local filesystem. - - - - - - oozie.credentials.credentialclasses - - - A list of credential class mapping for CredentialsProvider - - - - - oozie.actions.main.classnames - distcp=org.apache.hadoop.tools.DistCp - - A list of class name mapping for Action classes - - - - - - oozie.service.WorkflowAppService.system.libpath - /user/${user.name}/share/lib - - System library path to use for workflow applications. - This path is added to workflow application if their job properties sets - the property 'oozie.use.system.libpath' to true. - - - - - use.system.libpath.for.mapreduce.and.pig.jobs - false - - If set to true, submissions of MapReduce and Pig jobs will include - automatically the system library path, thus not requiring users to - specify where the Pig JAR files are. Instead, the ones from the system - library path are used. - - - - - oozie.command.default.lock.timeout - 5000 - - Default timeout (in milliseconds) for commands for acquiring an exclusive lock on an entity. 
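The HadoopAccessorService mappings above resolve an authority (HOST:PORT) to a configuration directory, with '*' as the fallback. A hypothetical illustration of that lookup; resolve_conf_dir is ours, not Oozie's:

.. sourcecode:: python

    def resolve_conf_dir(mappings_csv, authority):
        """Exact 'host:port' matches win; the '*' entry is the fallback."""
        mappings = dict(item.split('=', 1)
                        for item in mappings_csv.split(',') if item)
        return mappings.get(authority, mappings.get('*'))

    print(resolve_conf_dir('*=hadoop-conf,nn1:8020=custom-conf', 'nn1:8020'))
    # custom-conf
    print(resolve_conf_dir('*=hadoop-conf,nn1:8020=custom-conf', 'nn2:8020'))
    # hadoop-conf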
- - - - - - - oozie.service.LiteWorkflowStoreService.user.retry.max - 3 - - Automatic retry max count for workflow action is 3 in default. - - - - - oozie.service.LiteWorkflowStoreService.user.retry.inteval - 10 - - Automatic retry interval for workflow action is in minutes and the default value is 10 minutes. - - - - - oozie.service.LiteWorkflowStoreService.user.retry.error.code - JA008,JA009,JA017,JA018,JA019,FS009,FS008 - - Automatic retry interval for workflow action is handled for these specified error code: - FS009, FS008 is file exists error when using chmod in fs action. - JA018 is output directory exists error in workflow map-reduce action. - JA019 is error while executing distcp action. - JA017 is job not exists error in action executor. - JA008 is FileNotFoundException in action executor. - JA009 is IOException in action executor. - - - - - oozie.service.LiteWorkflowStoreService.user.retry.error.code.ext - - - Automatic retry interval for workflow action is handled for these specified extra error code. - - - - - oozie.service.LiteWorkflowStoreService.node.def.version - _oozie_inst_v_1 - - NodeDef default version, _oozie_inst_v_0 or _oozie_inst_v_1 - - - - - - - oozie.authentication.type - simple - - Defines authentication used for Oozie HTTP endpoint. - Supported values are: simple | kerberos | #AUTHENTICATION_HANDLER_CLASSNAME# - - - - - oozie.authentication.token.validity - 36000 - - Indicates how long (in seconds) an authentication token is valid before it has - to be renewed. - - - - - oozie.authentication.signature.secret - oozie - - The signature secret for signing the authentication tokens. - If not set a random secret is generated at startup time. - In order to authentiation to work correctly across multiple hosts - the secret must be the same across al the hosts. - - - - - oozie.authentication.cookie.domain - - - The domain to use for the HTTP cookie that stores the authentication token. - In order to authentiation to work correctly across multiple hosts - the domain must be correctly set. - - - - - oozie.authentication.simple.anonymous.allowed - true - - Indicates if anonymous requests are allowed when using 'simple' authentication. - - - - - oozie.authentication.kerberos.principal - HTTP/localhost@${local.realm} - - Indicates the Kerberos principal to be used for HTTP endpoint. - The principal MUST start with 'HTTP/' as per Kerberos HTTP SPNEGO specification. - - - - - oozie.authentication.kerberos.keytab - ${oozie.service.HadoopAccessorService.keytab.file} - - Location of the keytab file with the credentials for the principal. - Referring to the same keytab file Oozie uses for its Kerberos credentials for Hadoop. - - - - - oozie.authentication.kerberos.name.rules - DEFAULT - - The kerberos names rules is to resolve kerberos principal names, refer to Hadoop's - KerberosName for more details. - - - - - - oozie.coord.actions.default.length - 1000 - - Default number of coordinator actions to be retrieved by the info command - - - - - - oozie.validate.ForkJoin - true - - If true, fork and join should be validated at wf submission time. - - - - - oozie.coord.action.get.all.attributes - false - - Setting to true is not recommended as coord job/action info will bring all columns of the action in memory. - Set it true only if backward compatibility for action/job info is required. - - - - - oozie.service.HadoopAccessorService.supported.filesystems - hdfs,hftp,webhdfs - - Enlist the different filesystems supported for federation. 
If wildcard "*" is specified, - then ALL file schemes will be allowed. - - - - - oozie.service.URIHandlerService.uri.handlers - org.apache.oozie.dependency.FSURIHandler - - Enlist the different uri handlers supported for data availability checks. - - - - - - oozie.notification.url.connection.timeout - 10000 - - Defines the timeout, in milliseconds, for Oozie HTTP notification callbacks. Oozie does - HTTP notifications for workflow jobs which set the 'oozie.wf.action.notification.url', - 'oozie.wf.worklfow.notification.url' and/or 'oozie.coord.action.notification.url' - properties in their job.properties. Refer to section '5 Oozie Notifications' in the - Workflow specification for details. - - - - - - - oozie.hadoop-2.0.2-alpha.workaround.for.distributed.cache - false - - Due to a bug in Hadoop 2.0.2-alpha, MAPREDUCE-4820, launcher jobs fail to set - the distributed cache for the action job because the local JARs are implicitly - included triggering a duplicate check. - This flag removes the distributed cache files for the action as they'll be - included from the local JARs of the JobClient (MRApps) submitting the action - job from the launcher. - - - - - oozie.service.EventHandlerService.filter.app.types - workflow_job, coordinator_action - - The app-types among workflow/coordinator/bundle job/action for which - for which events system is enabled. - - - - - oozie.service.EventHandlerService.event.queue - org.apache.oozie.event.MemoryEventQueue - - The implementation for EventQueue in use by the EventHandlerService. - - - - - oozie.service.EventHandlerService.event.listeners - org.apache.oozie.jms.JMSJobEventListener - - - - oozie.service.EventHandlerService.queue.size - 10000 - - Maximum number of events to be contained in the event queue. - - - - - oozie.service.EventHandlerService.worker.interval - 30 - - The default interval (seconds) at which the worker threads will be scheduled to run - and process events. - - - - - oozie.service.EventHandlerService.batch.size - 10 - - The batch size for batched draining per thread from the event queue. - - - - - oozie.service.EventHandlerService.worker.threads - 3 - - Number of worker threads to be scheduled to run and process events. - - - - - oozie.sla.service.SLAService.capacity - 5000 - - Maximum number of sla records to be contained in the memory structure. - - - - - oozie.sla.service.SLAService.alert.events - END_MISS - - Default types of SLA events for being alerted of. - - - - - oozie.sla.service.SLAService.calculator.impl - org.apache.oozie.sla.SLACalculatorMemory - - The implementation for SLACalculator in use by the SLAService. 
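The EventHandlerService settings above describe worker threads that wake every worker.interval seconds and drain up to batch.size events per pass. A bare-bones stand-in for that draining step, using a plain in-memory queue rather than Oozie's MemoryEventQueue:

.. sourcecode:: python

    import queue

    def drain_batch(event_queue, batch_size=10):
        """Pull at most batch_size events without blocking."""
        batch = []
        while len(batch) < batch_size:
            try:
                batch.append(event_queue.get_nowait())
            except queue.Empty:
                break
        return batch

    q = queue.Queue()
    for event in ('wf-start', 'wf-end', 'coord-action'):
        q.put(event)
    print(drain_batch(q, batch_size=2))  # ['wf-start', 'wf-end']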
- - - - - oozie.sla.service.SLAService.job.event.latency - 90000 - - Time in milliseconds to account of latency of getting the job status event - to compare against and decide sla miss/met - - - - diff --git a/sahara/plugins/vanilla/v1_2_1/resources/topology.sh b/sahara/plugins/vanilla/v1_2_1/resources/topology.sh deleted file mode 100755 index 405af1547f..0000000000 --- a/sahara/plugins/vanilla/v1_2_1/resources/topology.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -HADOOP_CONF=/etc/hadoop - -while [ $# -gt 0 ] ; do - nodeArg=$1 - exec< ${HADOOP_CONF}/topology.data - result="" - while read line ; do - ar=( $line ) - if [ "${ar[0]}" = "$nodeArg" ] ; then - result="${ar[1]}" - fi - done - shift - if [ -z "$result" ] ; then - echo -n "/default/rack " - else - echo -n "$result " - fi -done - diff --git a/sahara/plugins/vanilla/v1_2_1/run_scripts.py b/sahara/plugins/vanilla/v1_2_1/run_scripts.py deleted file mode 100644 index 12a887c7dd..0000000000 --- a/sahara/plugins/vanilla/v1_2_1/run_scripts.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright (c) 2013 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_log import log as logging - -from sahara.utils import files - - -LOG = logging.getLogger(__name__) - - -def start_processes(remote, *processes): - for proc in processes: - remote.execute_command('sudo su -c "/usr/sbin/hadoop-daemon.sh ' - 'start %s" hadoop' % proc) - - -def refresh_nodes(remote, service): - remote.execute_command("sudo su -c 'hadoop %s -refreshNodes' hadoop" - % service) - - -def format_namenode(remote): - remote.execute_command("sudo su -c 'hadoop namenode -format' hadoop") - - -def hive_create_warehouse_dir(remote): - LOG.debug("Creating Hive warehouse dir") - remote.execute_command("sudo su - -c 'hadoop fs -mkdir " - "/user/hive/warehouse' hadoop") - - -def hive_copy_shared_conf(remote, dest): - LOG.debug("Copying shared Hive conf") - remote.execute_command( - "sudo su - -c 'hadoop fs -put /opt/hive/conf/hive-site.xml " - "%s' hadoop" % dest) - - -def oozie_share_lib(remote, nn_hostname): - LOG.debug("Sharing Oozie libs to hdfs://{host}:8020".format( - host=nn_hostname)) - # remote.execute_command('sudo su - -c "/opt/oozie/bin/oozie-setup.sh ' - # 'sharelib create -fs hdfs://%s:8020" hadoop' - # % nn_hostname) - - # TODO(alazarev) return 'oozie-setup.sh sharelib create' back - # when #1262023 is resolved - remote.execute_command( - 'sudo su - -c "mkdir /tmp/oozielib && ' - 'tar zxf /opt/oozie/oozie-sharelib-4.0.0.tar.gz -C /tmp/oozielib && ' - 'hadoop fs -put /tmp/oozielib/share share && ' - 'rm -rf /tmp/oozielib" hadoop') - - LOG.debug("Creating sqlfile for Oozie") - remote.execute_command('sudo su - -c "/opt/oozie/bin/ooziedb.sh ' - 'create -sqlfile oozie.sql ' - '-run Validate DB Connection" hadoop') - - -def check_datanodes_count(remote, count): - if count < 1: - return True - - LOG.debug("Checking datanode count") - exit_code, stdout = remote.execute_command( - 'sudo su -c "hadoop dfsadmin -report | ' - 'grep \'Datanodes available:\' | ' - 'awk 
\'{print \\$3}\'" hadoop') - LOG.debug("Datanode count={count}".format(count=stdout.rstrip())) - - return exit_code == 0 and stdout and int(stdout) == count - - -def mysql_start(remote): - LOG.debug("Starting MySQL") - remote.execute_command("/opt/start-mysql.sh") - - -def oozie_create_db(remote): - LOG.debug("Creating Oozie DB Schema") - sql_script = files.get_file_text( - 'plugins/vanilla/v1_2_1/resources/create_oozie_db.sql') - script_location = "create_oozie_db.sql" - remote.write_file_to(script_location, sql_script) - remote.execute_command('mysql -u root < %(script_location)s && ' - 'rm %(script_location)s' % - {"script_location": script_location}) - - -def start_oozie(remote): - remote.execute_command( - 'sudo su - -c "/opt/oozie/bin/oozied.sh start" hadoop') - - -def hive_create_db(remote, hive_mysql_passwd): - LOG.debug("Creating Hive metastore db") - sql_script = files.get_file_text( - 'plugins/vanilla/v1_2_1/resources/create_hive_db.sql') - sql_script = sql_script.replace('pass', hive_mysql_passwd) - script_location = "create_hive_db.sql" - remote.write_file_to(script_location, sql_script) - remote.execute_command('mysql -u root < %(script_location)s && ' - 'rm %(script_location)s' % - {"script_location": script_location}) - - -def hive_metastore_start(remote): - LOG.debug("Starting Hive Metastore Server") - remote.execute_command("sudo su - -c 'nohup /opt/hive/bin/hive" - " --service metastore > /dev/null &' hadoop") diff --git a/sahara/plugins/vanilla/v1_2_1/scaling.py b/sahara/plugins/vanilla/v1_2_1/scaling.py deleted file mode 100644 index f76141650f..0000000000 --- a/sahara/plugins/vanilla/v1_2_1/scaling.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright (c) 2013 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
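The scaling module below decommissions Hadoop 1 daemons by writing the excluded hosts to a file and asking the master daemon to re-read it. The skeleton of that flow, with run_cmd and write_file as hypothetical stand-ins for the remote helpers the real code uses:

.. sourcecode:: python

    def decommission_datanodes(run_cmd, write_file, fqdns):
        """Write the exclude list, then have the NameNode re-read it."""
        write_file('/etc/hadoop/dn.excl', '\n'.join(fqdns))
        run_cmd("sudo su -c 'hadoop dfsadmin -refreshNodes' hadoop")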
- -import os - -import six - -from sahara import context -from sahara.i18n import _ -from sahara.plugins import utils -from sahara.plugins.vanilla.v1_2_1 import config_helper -from sahara.plugins.vanilla.v1_2_1 import run_scripts as run -from sahara.utils import cluster_progress_ops as cpo -from sahara.utils import poll_utils -from sahara.utils import remote - - -@cpo.event_wrapper(True, step=_("Decommission %s") % "TaskTrackers") -def decommission_tt(jt, inst_to_be_deleted, survived_inst): - with remote.get_remote(jt) as r: - r.write_file_to('/etc/hadoop/tt.excl', - utils.generate_fqdn_host_names( - inst_to_be_deleted)) - run.refresh_nodes(remote.get_remote(jt), "mradmin") - context.sleep(3) - r.write_files_to({'/etc/hadoop/tt.incl': - utils.generate_fqdn_host_names(survived_inst), - '/etc/hadoop/tt.excl': "", - }) - - -def is_decommissioned(r, inst_to_be_deleted): - cmd = r.execute_command("sudo su -c 'hadoop dfsadmin -report' hadoop") - datanodes_info = parse_dfs_report(cmd[1]) - for inst in inst_to_be_deleted: - for dn in datanodes_info: - if (dn["Name"].startswith(inst.internal_ip)) and ( - dn["Decommission Status"] != "Decommissioned"): - return False - return True - - -@cpo.event_wrapper(True, step=_("Decommission %s") % "DataNodes") -def decommission_dn(nn, inst_to_be_deleted, survived_inst): - with remote.get_remote(nn) as r: - r.write_file_to('/etc/hadoop/dn.excl', - utils.generate_fqdn_host_names( - inst_to_be_deleted)) - run.refresh_nodes(remote.get_remote(nn), "dfsadmin") - context.sleep(3) - - poll_utils.plugin_option_poll( - nn.cluster, is_decommissioned, - config_helper.DECOMMISSIONING_TIMEOUT, - _("Decommission %s") % "DataNodes", 3, - {'r': r, 'inst_to_be_deleted': inst_to_be_deleted}) - - r.write_files_to({'/etc/hadoop/dn.incl': - utils.generate_fqdn_host_names(survived_inst), - '/etc/hadoop/dn.excl': ""}) - - -def parse_dfs_report(cmd_output): - report = cmd_output.rstrip().split(os.linesep) - array = [] - started = False - for line in report: - if started: - array.append(line) - if line.startswith("Datanodes available"): - started = True - - res = [] - datanode_info = {} - for i in six.moves.xrange(0, len(array)): - if array[i]: - idx = str.find(array[i], ':') - name = array[i][0:idx] - value = array[i][idx + 2:] - datanode_info[name.strip()] = value.strip() - if not array[i] and datanode_info: - res.append(datanode_info) - datanode_info = {} - if datanode_info: - res.append(datanode_info) - return res diff --git a/sahara/plugins/vanilla/v1_2_1/versionhandler.py b/sahara/plugins/vanilla/v1_2_1/versionhandler.py deleted file mode 100644 index 53d31d33c8..0000000000 --- a/sahara/plugins/vanilla/v1_2_1/versionhandler.py +++ /dev/null @@ -1,596 +0,0 @@ -# Copyright (c) 2013 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
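For orientation, the parse_dfs_report helper in the scaling module above consumed the text of 'hadoop dfsadmin -report', collecting the key/value lines that follow the "Datanodes available" header into one dict per datanode. An abridged, illustrative sample and the shape of its result:

.. sourcecode:: python

    sample = (
        'Datanodes available: 2 (2 total, 0 dead)\n'
        '\n'
        'Name: 10.0.0.5:50010\n'
        'Decommission Status : Normal\n'
        '\n'
        'Name: 10.0.0.6:50010\n'
        'Decommission Status : Decommissioned\n'
    )
    # parse_dfs_report(sample) would yield, roughly:
    # [{'Name': '10.0.0.5:50010', 'Decommission Status': 'Normal'},
    #  {'Name': '10.0.0.6:50010', 'Decommission Status': 'Decommissioned'}]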
- -import uuid - -from oslo_config import cfg -from oslo_log import log as logging -import six - -from sahara import conductor -from sahara import context -from sahara.i18n import _ -from sahara.i18n import _LI -from sahara.plugins import exceptions as ex -from sahara.plugins import utils -from sahara.plugins.vanilla import abstractversionhandler as avm -from sahara.plugins.vanilla import utils as vu -from sahara.plugins.vanilla.v1_2_1 import config_helper as c_helper -from sahara.plugins.vanilla.v1_2_1 import edp_engine -from sahara.plugins.vanilla.v1_2_1 import run_scripts as run -from sahara.plugins.vanilla.v1_2_1 import scaling as sc -from sahara.topology import topology_helper as th -from sahara.utils import cluster_progress_ops as cpo -from sahara.utils import edp -from sahara.utils import files as f -from sahara.utils import general as g -from sahara.utils import poll_utils -from sahara.utils import proxy -from sahara.utils import remote - - -conductor = conductor.API -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - - -class VersionHandler(avm.AbstractVersionHandler): - def get_plugin_configs(self): - return c_helper.get_plugin_configs() - - def get_node_processes(self): - return { - "HDFS": ["namenode", "datanode", "secondarynamenode"], - "MapReduce": ["tasktracker", "jobtracker"], - "JobFlow": ["oozie"], - "Hive": ["hiveserver"] - } - - def validate(self, cluster): - nn_count = sum([ng.count for ng - in utils.get_node_groups(cluster, "namenode")]) - if nn_count != 1: - raise ex.InvalidComponentCountException("namenode", 1, nn_count) - - snn_count = sum( - [ng.count for ng - in utils.get_node_groups(cluster, 'secondarynamenode')]) - if snn_count > 1: - raise ex.InvalidComponentCountException('secondarynamenode', - _('0 or 1'), snn_count) - - jt_count = sum([ng.count for ng - in utils.get_node_groups(cluster, "jobtracker")]) - - if jt_count > 1: - raise ex.InvalidComponentCountException("jobtracker", _('0 or 1'), - jt_count) - - oozie_count = sum([ng.count for ng - in utils.get_node_groups(cluster, "oozie")]) - - if oozie_count > 1: - raise ex.InvalidComponentCountException("oozie", _('0 or 1'), - oozie_count) - - hive_count = sum([ng.count for ng - in utils.get_node_groups(cluster, "hiveserver")]) - if jt_count == 0: - - tt_count = sum([ng.count for ng - in utils.get_node_groups(cluster, "tasktracker")]) - if tt_count > 0: - raise ex.RequiredServiceMissingException( - "jobtracker", required_by="tasktracker") - - if oozie_count > 0: - raise ex.RequiredServiceMissingException( - "jobtracker", required_by="oozie") - - if hive_count > 0: - raise ex.RequiredServiceMissingException( - "jobtracker", required_by="hive") - - if hive_count > 1: - raise ex.InvalidComponentCountException("hive", _('0 or 1'), - hive_count) - - def configure_cluster(self, cluster): - instances = utils.get_instances(cluster) - self._setup_instances(cluster, instances) - - def start_namenode(self, cluster): - nn = vu.get_namenode(cluster) - self._start_namenode(nn) - - @cpo.event_wrapper( - True, step=utils.start_process_event_message("NameNode")) - def _start_namenode(self, nn_instance): - with remote.get_remote(nn_instance) as r: - run.format_namenode(r) - run.start_processes(r, "namenode") - - def start_secondarynamenode(self, cluster): - snn = vu.get_secondarynamenode(cluster) - if snn is None: - return - - self._start_secondarynamenode(snn) - - @cpo.event_wrapper( - True, step=utils.start_process_event_message("SecondaryNameNode")) - def _start_secondarynamenode(self, snn): - 
run.start_processes(remote.get_remote(snn), "secondarynamenode") - - def start_jobtracker(self, cluster): - jt = vu.get_jobtracker(cluster) - if jt: - self._start_jobtracker(jt) - - @cpo.event_wrapper( - True, step=utils.start_process_event_message("JobTracker")) - def _start_jobtracker(self, jt_instance): - run.start_processes(remote.get_remote(jt_instance), "jobtracker") - - def start_oozie(self, cluster): - oozie = vu.get_oozie(cluster) - if oozie: - self._start_oozie(cluster, oozie) - - @cpo.event_wrapper( - True, step=utils.start_process_event_message("Oozie")) - def _start_oozie(self, cluster, oozie): - nn_instance = vu.get_namenode(cluster) - - with remote.get_remote(oozie) as r: - with context.set_current_instance_id(oozie.instance_id): - if c_helper.is_mysql_enable(cluster): - run.mysql_start(r) - run.oozie_create_db(r) - run.oozie_share_lib(r, nn_instance.hostname()) - run.start_oozie(r) - LOG.info( - _LI("Oozie service has been started")) - - def start_hiveserver(self, cluster): - hs = vu.get_hiveserver(cluster) - if hs: - self._start_hiveserver(cluster, hs) - - @cpo.event_wrapper( - True, step=utils.start_process_event_message("HiveServer")) - def _start_hiveserver(self, cluster, hive_server): - oozie = vu.get_oozie(cluster) - - with remote.get_remote(hive_server) as r: - with context.set_current_instance_id(hive_server.instance_id): - run.hive_create_warehouse_dir(r) - run.hive_copy_shared_conf( - r, edp.get_hive_shared_conf_path('hadoop')) - - if c_helper.is_mysql_enable(cluster): - if not oozie or hive_server.hostname() != oozie.hostname(): - run.mysql_start(r) - run.hive_create_db(r, cluster.extra['hive_mysql_passwd']) - run.hive_metastore_start(r) - LOG.info(_LI("Hive Metastore server has been started")) - - def start_cluster(self, cluster): - self.start_namenode(cluster) - - self.start_secondarynamenode(cluster) - - self.start_jobtracker(cluster) - - self._start_tt_dn_processes(utils.get_instances(cluster)) - - self._await_datanodes(cluster) - - LOG.info(_LI("Hadoop services in cluster have been started")) - - self.start_oozie(cluster) - - self.start_hiveserver(cluster) - - LOG.info(_LI('Cluster has been started successfully')) - self._set_cluster_info(cluster) - - @cpo.event_wrapper( - True, step=_("Await %s start up") % "DataNodes", param=('cluster', 1)) - def _await_datanodes(self, cluster): - datanodes_count = len(vu.get_datanodes(cluster)) - if datanodes_count < 1: - return - - l_message = _("Waiting on %s datanodes to start up") % datanodes_count - LOG.info(l_message) - with remote.get_remote(vu.get_namenode(cluster)) as r: - poll_utils.plugin_option_poll( - cluster, run.check_datanodes_count, - c_helper.DATANODES_STARTUP_TIMEOUT, l_message, 1, { - 'remote': r, - 'count': datanodes_count}) - - def _generate_hive_mysql_password(self, cluster): - extra = cluster.extra.to_dict() if cluster.extra else {} - password = extra.get('hive_mysql_passwd') - if not password: - password = six.text_type(uuid.uuid4()) - extra['hive_mysql_passwd'] = password - conductor.cluster_update(context.ctx(), cluster, {'extra': extra}) - return password - - def _extract_configs_to_extra(self, cluster): - oozie = vu.get_oozie(cluster) - hive = vu.get_hiveserver(cluster) - - extra = dict() - - if hive: - extra['hive_mysql_passwd'] = self._generate_hive_mysql_password( - cluster) - - for ng in cluster.node_groups: - extra[ng.id] = { - 'xml': c_helper.generate_xml_configs( - cluster, ng, extra['hive_mysql_passwd'] if hive else None), - 'setup_script': c_helper.generate_setup_script( - 
ng.storage_paths(), - c_helper.extract_environment_confs(ng.configuration()), - append_oozie=( - oozie and oozie.node_group.id == ng.id) - ) - } - - if c_helper.is_data_locality_enabled(cluster): - topology_data = th.generate_topology_map( - cluster, CONF.enable_hypervisor_awareness) - extra['topology_data'] = "\n".join( - [k + " " + v for k, v in topology_data.items()]) + "\n" - - return extra - - def decommission_nodes(self, cluster, instances): - tts = vu.get_tasktrackers(cluster) - dns = vu.get_datanodes(cluster) - decommission_dns = False - decommission_tts = False - - for i in instances: - if 'datanode' in i.node_group.node_processes: - dns.remove(i) - decommission_dns = True - if 'tasktracker' in i.node_group.node_processes: - tts.remove(i) - decommission_tts = True - - nn = vu.get_namenode(cluster) - jt = vu.get_jobtracker(cluster) - - if decommission_tts: - sc.decommission_tt(jt, instances, tts) - if decommission_dns: - sc.decommission_dn(nn, instances, dns) - - def validate_scaling(self, cluster, existing, additional): - self._validate_existing_ng_scaling(cluster, existing) - self._validate_additional_ng_scaling(cluster, additional) - - def scale_cluster(self, cluster, instances): - self._setup_instances(cluster, instances) - - run.refresh_nodes(remote.get_remote( - vu.get_namenode(cluster)), "dfsadmin") - jt = vu.get_jobtracker(cluster) - if jt: - run.refresh_nodes(remote.get_remote(jt), "mradmin") - - self._start_tt_dn_processes(instances) - - def _start_tt_dn_processes(self, instances): - tt_dn_names = ["datanode", "tasktracker"] - - instances = utils.instances_with_services(instances, tt_dn_names) - - if not instances: - return - - cpo.add_provisioning_step( - instances[0].cluster_id, - utils.start_process_event_message("DataNodes, TaskTrackers"), - len(instances)) - - with context.ThreadGroup() as tg: - for i in instances: - processes = set(i.node_group.node_processes) - tt_dn_procs = processes.intersection(tt_dn_names) - tg.spawn('vanilla-start-tt-dn-%s' % i.instance_name, - self._start_tt_dn, i, list(tt_dn_procs)) - - @cpo.event_wrapper(True) - def _start_tt_dn(self, instance, tt_dn_procs): - with instance.remote() as r: - run.start_processes(r, *tt_dn_procs) - - @cpo.event_wrapper(True, step=_("Setup instances and push configs"), - param=('cluster', 1)) - def _setup_instances(self, cluster, instances): - if (CONF.use_identity_api_v3 and CONF.use_domain_for_proxy_users and - vu.get_hiveserver(cluster) and - c_helper.is_swift_enable(cluster)): - cluster = proxy.create_proxy_user_for_cluster(cluster) - instances = utils.get_instances(cluster) - - extra = self._extract_configs_to_extra(cluster) - cluster = conductor.cluster_get(context.ctx(), cluster) - self._push_configs_to_nodes(cluster, extra, instances) - - def _push_configs_to_nodes(self, cluster, extra, new_instances): - all_instances = utils.get_instances(cluster) - new_ids = set([instance.id for instance in new_instances]) - with context.ThreadGroup() as tg: - for instance in all_instances: - if instance.id in new_ids: - tg.spawn('vanilla-configure-%s' % instance.instance_name, - self._push_configs_to_new_node, cluster, - extra, instance) - else: - tg.spawn('vanilla-reconfigure-%s' % instance.instance_name, - self._push_configs_to_existing_node, cluster, - extra, instance) - - def _push_configs_to_new_node(self, cluster, extra, instance): - ng_extra = extra[instance.node_group.id] - private_key, public_key = c_helper.get_hadoop_ssh_keys(cluster) - - files = { - '/etc/hadoop/core-site.xml': 
ng_extra['xml']['core-site'],
-            '/etc/hadoop/mapred-site.xml': ng_extra['xml']['mapred-site'],
-            '/etc/hadoop/hdfs-site.xml': ng_extra['xml']['hdfs-site'],
-            '/tmp/sahara-hadoop-init.sh': ng_extra['setup_script'],
-            'id_rsa': private_key,
-            'authorized_keys': public_key
-        }
-
-        key_cmd = ('sudo mkdir -p /home/hadoop/.ssh/ && '
-                   'sudo mv id_rsa authorized_keys /home/hadoop/.ssh && '
-                   'sudo chown -R hadoop:hadoop /home/hadoop/.ssh && '
-                   'sudo chmod 600 /home/hadoop/.ssh/{id_rsa,authorized_keys}')
-
-        with remote.get_remote(instance) as r:
-            # TODO(aignatov): sudo chown is wrong solution. But it works.
-            r.execute_command(
-                'sudo chown -R $USER:$USER /etc/hadoop'
-            )
-            r.execute_command(
-                'sudo chown -R $USER:$USER /opt/oozie/conf'
-            )
-            r.write_files_to(files)
-            r.execute_command(
-                'sudo chmod 0500 /tmp/sahara-hadoop-init.sh'
-            )
-            r.execute_command(
-                'sudo /tmp/sahara-hadoop-init.sh '
-                '>> /tmp/sahara-hadoop-init.log 2>&1')
-
-            r.execute_command(key_cmd)
-
-            if c_helper.is_data_locality_enabled(cluster):
-                r.write_file_to(
-                    '/etc/hadoop/topology.sh',
-                    f.get_file_text(
-                        'plugins/vanilla/v1_2_1/resources/topology.sh'))
-                r.execute_command(
-                    'sudo chmod +x /etc/hadoop/topology.sh'
-                )
-
-            self._write_topology_data(r, cluster, extra)
-            self._push_master_configs(r, cluster, extra, instance)
-
-    def _push_configs_to_existing_node(self, cluster, extra, instance):
-        node_processes = instance.node_group.node_processes
-        need_update = (c_helper.is_data_locality_enabled(cluster) or
-                       'namenode' in node_processes or
-                       'jobtracker' in node_processes or
-                       'oozie' in node_processes or
-                       'hiveserver' in node_processes)
-
-        if not need_update:
-            return
-
-        with remote.get_remote(instance) as r:
-            self._write_topology_data(r, cluster, extra)
-            self._push_master_configs(r, cluster, extra, instance)
-
-    def _write_topology_data(self, r, cluster, extra):
-        if c_helper.is_data_locality_enabled(cluster):
-            topology_data = extra['topology_data']
-            r.write_file_to('/etc/hadoop/topology.data', topology_data)
-
-    def _push_master_configs(self, r, cluster, extra, instance):
-        ng_extra = extra[instance.node_group.id]
-        node_processes = instance.node_group.node_processes
-
-        if 'namenode' in node_processes:
-            self._push_namenode_configs(cluster, r)
-
-        if 'jobtracker' in node_processes:
-            self._push_jobtracker_configs(cluster, r)
-
-        if 'oozie' in node_processes:
-            self._push_oozie_configs(ng_extra, r)
-
-        if 'hiveserver' in node_processes:
-            self._push_hive_configs(ng_extra, r)
-
-    def _push_namenode_configs(self, cluster, r):
-        r.write_file_to('/etc/hadoop/dn.incl',
-                        utils.generate_fqdn_host_names(
-                            vu.get_datanodes(cluster)))
-
-    def _push_jobtracker_configs(self, cluster, r):
-        r.write_file_to('/etc/hadoop/tt.incl',
-                        utils.generate_fqdn_host_names(
-                            vu.get_tasktrackers(cluster)))
-
-    def _push_oozie_configs(self, ng_extra, r):
-        r.write_file_to('/opt/oozie/conf/oozie-site.xml',
-                        ng_extra['xml']['oozie-site'])
-
-    def _push_hive_configs(self, ng_extra, r):
-        files = {
-            '/opt/hive/conf/hive-site.xml':
-                ng_extra['xml']['hive-site']
-        }
-        r.write_files_to(files)
-
-    def _set_cluster_info(self, cluster):
-        nn = vu.get_namenode(cluster)
-        jt = vu.get_jobtracker(cluster)
-        oozie = vu.get_oozie(cluster)
-        info = {}
-
-        if jt:
-            ui_port = c_helper.get_port_from_config(
-                'MapReduce', 'mapred.job.tracker.http.address', cluster)
-            jt_port = c_helper.get_port_from_config(
-                'MapReduce', 'mapred.job.tracker', cluster)
-
-            info['MapReduce'] = {
-                'Web UI': 'http://%s:%s' % (jt.management_ip, ui_port),
-                'JobTracker': '%s:%s' % (jt.hostname(), jt_port)
-            }
-
-        if nn:
-            ui_port = c_helper.get_port_from_config('HDFS', 'dfs.http.address',
-                                                    cluster)
-            nn_port = c_helper.get_port_from_config('HDFS', 'fs.default.name',
-                                                    cluster)
-
-            info['HDFS'] = {
-                'Web UI': 'http://%s:%s' % (nn.management_ip, ui_port),
-                'NameNode': 'hdfs://%s:%s' % (nn.hostname(), nn_port)
-            }
-
-        if oozie:
-            # TODO(yrunts) change from hardcode value
-            info['JobFlow'] = {
-                'Oozie': 'http://%s:11000' % oozie.management_ip
-            }
-
-        ctx = context.ctx()
-        conductor.cluster_update(ctx, cluster, {'info': info})
-
-    def _get_scalable_processes(self):
-        return ["datanode", "tasktracker"]
-
-    def _validate_additional_ng_scaling(self, cluster, additional):
-        jt = vu.get_jobtracker(cluster)
-        scalable_processes = self._get_scalable_processes()
-
-        for ng_id in additional:
-            ng = g.get_by_id(cluster.node_groups, ng_id)
-            if not set(ng.node_processes).issubset(scalable_processes):
-                raise ex.NodeGroupCannotBeScaled(
-                    ng.name, _("Vanilla plugin cannot scale nodegroup"
-                               " with processes: %s") %
-                    ' '.join(ng.node_processes))
-            if not jt and 'tasktracker' in ng.node_processes:
-                raise ex.NodeGroupCannotBeScaled(
-                    ng.name, _("Vanilla plugin cannot scale node group with "
-                               "processes which have no master-processes run "
-                               "in cluster"))
-
-    def _validate_existing_ng_scaling(self, cluster, existing):
-        scalable_processes = self._get_scalable_processes()
-        dn_to_delete = 0
-        for ng in cluster.node_groups:
-            if ng.id in existing:
-                if (ng.count > existing[ng.id] and "datanode" in
-                        ng.node_processes):
-                    dn_to_delete += ng.count - existing[ng.id]
-                if not set(ng.node_processes).issubset(scalable_processes):
-                    raise ex.NodeGroupCannotBeScaled(
-                        ng.name, _("Vanilla plugin cannot scale nodegroup"
-                                   " with processes: %s") %
-                        ' '.join(ng.node_processes))
-
-        dn_amount = len(vu.get_datanodes(cluster))
-        rep_factor = c_helper.get_config_value('HDFS', 'dfs.replication',
-                                               cluster)
-
-        if dn_to_delete > 0 and dn_amount - dn_to_delete < rep_factor:
-            raise ex.ClusterCannotBeScaled(
-                cluster.name, _("Vanilla plugin cannot shrink cluster because "
-                                "it would be not enough nodes for replicas "
-                                "(replication factor is %s)") % rep_factor)
-
-    def get_edp_engine(self, cluster, job_type):
-        if job_type in edp_engine.EdpOozieEngine.get_supported_job_types():
-            return edp_engine.EdpOozieEngine(cluster)
-        return None
-
-    def get_edp_job_types(self):
-        return edp_engine.EdpOozieEngine.get_supported_job_types()
-
-    def get_edp_config_hints(self, job_type):
-        return edp_engine.EdpOozieEngine.get_possible_job_config(job_type)
-
-    def get_open_ports(self, node_group):
-        cluster = node_group.cluster
-
-        ports = []
-
-        if "namenode" in node_group.node_processes:
-            ports.append(c_helper.get_port_from_config(
-                'HDFS', 'dfs.http.address', cluster))
-            ports.append(8020)
-
-        if "datanode" in node_group.node_processes:
-            ports.append(c_helper.get_port_from_config(
-                'HDFS', 'dfs.datanode.http.address', cluster))
-            ports.append(c_helper.get_port_from_config(
-                'HDFS', 'dfs.datanode.address', cluster))
-            ports.append(c_helper.get_port_from_config(
-                'HDFS', 'dfs.datanode.ipc.address', cluster))
-
-        if "jobtracker" in node_group.node_processes:
-            ports.append(c_helper.get_port_from_config(
-                'MapReduce', 'mapred.job.tracker.http.address', cluster))
-            ports.append(8021)
-
-        if "tasktracker" in node_group.node_processes:
-            ports.append(c_helper.get_port_from_config(
-                'MapReduce', 'mapred.task.tracker.http.address', cluster))
-
-        if "secondarynamenode" in node_group.node_processes:
-            ports.append(c_helper.get_port_from_config(
-                'HDFS', 'dfs.secondary.http.address', cluster))
-
-        if "oozie" in node_group.node_processes:
-            ports.append(11000)
-
-        if "hive" in node_group.node_processes:
-            ports.append(9999)
-            ports.append(10000)
-
-        return ports
-
-    def on_terminate_cluster(self, cluster):
-        proxy.delete_proxy_user_for_cluster(cluster)
-
-    def recommend_configs(self, cluster, scaling):
-        # We don't support any recommendations in Vanilla 1 plugin
-        pass
diff --git a/sahara/service/edp/oozie/workflow_creator/workflow_factory.py b/sahara/service/edp/oozie/workflow_creator/workflow_factory.py
index d91694218f..6076583d7d 100644
--- a/sahara/service/edp/oozie/workflow_creator/workflow_factory.py
+++ b/sahara/service/edp/oozie/workflow_creator/workflow_factory.py
@@ -317,13 +317,13 @@ def get_possible_job_config(job_type):
                           edp.JOB_TYPE_MAPREDUCE, edp.JOB_TYPE_PIG):
         # TODO(nmakhotkin): Here we need return config based on specific plugin
         cfg = xmlutils.load_hadoop_xml_defaults(
-            'plugins/vanilla/v1_2_1/resources/mapred-default.xml')
+            'plugins/vanilla/v2_6_0/resources/mapred-default.xml')
         if edp.compare_job_type(job_type, edp.JOB_TYPE_MAPREDUCE):
             cfg += get_possible_mapreduce_configs()
     elif edp.compare_job_type(job_type, edp.JOB_TYPE_HIVE):
         # TODO(nmakhotkin): Here we need return config based on specific plugin
         cfg = xmlutils.load_hadoop_xml_defaults(
-            'plugins/vanilla/v1_2_1/resources/hive-default.xml')
+            'plugins/vanilla/v2_6_0/resources/hive-default.xml')
 
     config = {'configs': cfg}
     if edp.compare_job_type(job_type, edp.JOB_TYPE_PIG, edp.JOB_TYPE_HIVE):
diff --git a/sahara/tests/scenario/README.rst b/sahara/tests/scenario/README.rst
index 1a355bdd90..a3b782117c 100644
--- a/sahara/tests/scenario/README.rst
+++ b/sahara/tests/scenario/README.rst
@@ -33,7 +33,7 @@ and use the following tox env:
 
 .. sourcecode:: console
 
-    $ tox -e scenario -- -V templatevars.ini etc/scenario/sahara-ci/vanilla-1.2.1.yaml.mako
+    $ tox -e scenario -- -V templatevars.ini etc/scenario/sahara-ci/vanilla-2.7.1.yaml.mako
 ..
 
 If you want to run scenario tests for a few plugins or their versions, you
@@ -41,7 +41,7 @@ should use the several YAML and/or YAML Mako template files:
 
 .. sourcecode:: console
 
-    $ tox -e scenario -- -V templatevars.ini etc/scenario/sahara-ci/vanilla-1.2.1.yaml.mako etc/scenario/sahara-ci/vanilla-2.7.1.yaml.mako ...
+    $ tox -e scenario -- -V templatevars.ini etc/scenario/sahara-ci/cdh-5.4.0.yaml.mako etc/scenario/sahara-ci/vanilla-2.7.1.yaml.mako ...
 ..
 
 Here are a few more examples.
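
The two hunks above retarget the generic EDP config hints from the removed
Hadoop 1 resources to the Hadoop 2 copies bundled with the v2_6_0 plugin. A
minimal sketch of the call that now resolves the new path (illustrative only,
not part of the patch; it assumes the ``xmlutils`` import already used by
``workflow_factory.py``):

.. sourcecode:: python

    # Sketch: load_hadoop_xml_defaults() reads an XML resource shipped
    # inside the sahara package and returns the name/value/description
    # entries that are exposed as EDP job config hints.
    from sahara.utils import xmlutils

    # After this change, MapReduce/Pig hints come from the Hadoop 2
    # defaults instead of the deleted v1_2_1 resources.
    configs = xmlutils.load_hadoop_xml_defaults(
        'plugins/vanilla/v2_6_0/resources/mapred-default.xml')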
diff --git a/sahara/tests/unit/db/templates/test_update.py b/sahara/tests/unit/db/templates/test_update.py
index fcc27b8fd5..5ad0b3bcc1 100644
--- a/sahara/tests/unit/db/templates/test_update.py
+++ b/sahara/tests/unit/db/templates/test_update.py
@@ -268,13 +268,6 @@ class TemplateUpdateTestCase(base.ConductorManagerTestCase):
         self.assertEqual(1, len(cl_templates))
         self.assertEqual(2, len(ng_templates))
 
-        option_values = {"plugin_name": "vanilla",
-                         "plugin_version": "1.2.1"}
-        template_api.set_conf(Config(option_values))
-        ng_templates, cl_templates = template_api.process_files(tempdir, files)
-        self.assertEqual(0, len(cl_templates))
-        self.assertEqual(0, len(ng_templates))
-
         option_values = {"plugin_name": "hdp",
                          "plugin_version": "2.7.1"}
         template_api.set_conf(Config(option_values))
diff --git a/sahara/tests/unit/plugins/general/test_utils.py b/sahara/tests/unit/plugins/general/test_utils.py
index 74c9b6022e..e848d08695 100644
--- a/sahara/tests/unit/plugins/general/test_utils.py
+++ b/sahara/tests/unit/plugins/general/test_utils.py
@@ -34,7 +34,7 @@ class GeneralUtilsTest(testtools.TestCase):
                               [i2, i3, i4])
         ng3 = tu.make_ng_dict("sn", "f1", ["dn"], 1, [i5])
 
-        self.c1 = tu.create_cluster("cluster1", "tenant1", "general", "1.2.1",
+        self.c1 = tu.create_cluster("cluster1", "tenant1", "general", "2.6.0",
                                     [ng1, ng2, ng3])
 
         self.ng1 = self.c1.node_groups[0]
diff --git a/sahara/tests/unit/plugins/vanilla/test_utils.py b/sahara/tests/unit/plugins/vanilla/test_utils.py
index 7453f99737..9b92a07e67 100644
--- a/sahara/tests/unit/plugins/vanilla/test_utils.py
+++ b/sahara/tests/unit/plugins/vanilla/test_utils.py
@@ -31,17 +31,17 @@ class TestUtils(base.SaharaWithDbTestCase):
         self.ng_namenode = tu.make_ng_dict(
             'nn', 'f1', ['namenode'], 1,
             [tu.make_inst_dict('nn1', 'namenode')])
-        self.ng_jobtracker = tu.make_ng_dict(
-            'jt', 'f1', ['jobtracker'], 1,
-            [tu.make_inst_dict('jt1', 'jobtracker')])
+        self.ng_resourcemanager = tu.make_ng_dict(
+            'jt', 'f1', ['resourcemanager'], 1,
+            [tu.make_inst_dict('jt1', 'resourcemanager')])
         self.ng_datanode = tu.make_ng_dict(
             'dn', 'f1', ['datanode'], 2,
             [tu.make_inst_dict('dn1', 'datanode-1'),
             tu.make_inst_dict('dn2', 'datanode-2')])
-        self.ng_tasktracker = tu.make_ng_dict(
-            'tt', 'f1', ['tasktracker'], 2,
-            [tu.make_inst_dict('tt1', 'tasktracker-1'),
-            tu.make_inst_dict('tt2', 'tasktracker-2')])
+        self.ng_nodemanager = tu.make_ng_dict(
+            'tt', 'f1', ['nodemanager'], 2,
+            [tu.make_inst_dict('tt1', 'nodemanager-1'),
+            tu.make_inst_dict('tt2', 'nodemanager-2')])
         self.ng_oozie = tu.make_ng_dict(
             'ooz1', 'f1', ['oozie'], 1,
             [tu.make_inst_dict('ooz1', 'oozie')])
@@ -53,43 +53,34 @@ class TestUtils(base.SaharaWithDbTestCase):
             [tu.make_inst_dict('snn1', 'secondarynamenode')])
 
     def test_get_namenode(self):
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                                [self.ng_manager, self.ng_namenode])
         self.assertEqual('nn1', u.get_namenode(cl).instance_id)
 
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                                [self.ng_manager])
         self.assertIsNone(u.get_namenode(cl))
 
-    def test_get_jobtracker(self):
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
-                               [self.ng_manager, self.ng_jobtracker])
-        self.assertEqual('jt1', u.get_jobtracker(cl).instance_id)
-
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
-                               [self.ng_manager])
-        self.assertIsNone(u.get_jobtracker(cl))
-
     def test_get_oozie(self):
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                                [self.ng_manager, self.ng_oozie])
         self.assertEqual('ooz1', u.get_oozie(cl).instance_id)
 
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                                [self.ng_manager])
         self.assertIsNone(u.get_oozie(cl))
 
     def test_get_hiveserver(self):
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                                [self.ng_manager, self.ng_hiveserver])
         self.assertEqual('hs1', u.get_hiveserver(cl).instance_id)
 
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                                [self.ng_manager])
         self.assertIsNone(u.get_hiveserver(cl))
 
     def test_get_datanodes(self):
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                                [self.ng_manager, self.ng_namenode,
                                 self.ng_datanode])
         datanodes = u.get_datanodes(cl)
@@ -98,30 +89,16 @@ class TestUtils(base.SaharaWithDbTestCase):
                          set([datanodes[0].instance_id,
                               datanodes[1].instance_id]))
 
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                                [self.ng_manager])
         self.assertEqual([], u.get_datanodes(cl))
 
-    def test_get_tasktrackers(self):
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
-                               [self.ng_manager, self.ng_jobtracker,
-                                self.ng_tasktracker])
-        tasktrackers = u.get_tasktrackers(cl)
-        self.assertEqual(2, len(tasktrackers))
-        self.assertEqual(set(['tt1', 'tt2']),
-                         set([tasktrackers[0].instance_id,
-                              tasktrackers[1].instance_id]))
-
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
-                               [self.ng_manager])
-        self.assertEqual([], u.get_tasktrackers(cl))
-
     def test_get_secondarynamenodes(self):
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                                [self.ng_manager, self.ng_namenode,
                                 self.ng_secondarynamenode])
         self.assertEqual('snn1', u.get_secondarynamenode(cl).instance_id)
 
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '1.2.1',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                                [self.ng_manager])
         self.assertEqual(None, u.get_secondarynamenode(cl))
diff --git a/sahara/tests/unit/plugins/vanilla/v1_2_1/__init__.py b/sahara/tests/unit/plugins/vanilla/v1_2_1/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/sahara/tests/unit/plugins/vanilla/v1_2_1/test_dfsadmin_parsing.py b/sahara/tests/unit/plugins/vanilla/v1_2_1/test_dfsadmin_parsing.py
deleted file mode 100644
index ea3ab60f10..0000000000
--- a/sahara/tests/unit/plugins/vanilla/v1_2_1/test_dfsadmin_parsing.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright (c) 2013 Mirantis Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import pkg_resources as pkg
-import testtools
-
-from sahara.plugins.vanilla.v1_2_1 import scaling as sc
-from sahara import version
-
-
-class ProvisioningPluginBaseTest(testtools.TestCase):
-    def test_result_for_3_nodes(self):
-        ins = open(pkg.resource_filename(
-            version.version_info.package, "tests/unit/resources/"
-                                          "dfs_admin_3_nodes.txt"), "r")
-        big_string = ins.read()
-
-        exp1 = {"Name": "10.155.0.94:50010", "Decommission Status": "Normal"}
-        exp2 = {"Name": "10.155.0.90:50010", "Last contact": "Tue Jul 16 12:"
-                                                             "00:07 UTC 2013"}
-        exp3 = {"Configured Capacity": "10568916992 (9.84 GB)", "DFS "
-                "Remaining%": "93.42%"}
-        expected = [exp1, exp2, exp3]
-        res = sc.parse_dfs_report(big_string)
-        self.assertEqual(expected, res)
-
-    def test_result_for_0_nodes(self):
-        ins = open(pkg.resource_filename(
-            version.version_info.package, "tests/unit/resources/"
-                                          "dfs_admin_0_nodes.txt"), "r")
-        big_string = ins.read()
-        res = sc.parse_dfs_report(big_string)
-        self.assertEqual(0, len(res))
-
-    def test_result_for_1_node(self):
-        ins = open(pkg.resource_filename(
-            version.version_info.package, "tests/unit/resources/"
-                                          "dfs_admin_1_nodes.txt"), "r")
-        big_string = ins.read()
-        exp = {"Name": "10.155.0.94:50010", "Decommission Status": "Normal"}
-        res = sc.parse_dfs_report(big_string)
-        self.assertIn(exp, res)
diff --git a/sahara/tests/unit/plugins/vanilla/v1_2_1/test_edp_engine.py b/sahara/tests/unit/plugins/vanilla/v1_2_1/test_edp_engine.py
deleted file mode 100644
index ba8d2bf9e3..0000000000
--- a/sahara/tests/unit/plugins/vanilla/v1_2_1/test_edp_engine.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright (c) 2015 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-
-from sahara.plugins.vanilla.v1_2_1 import edp_engine
-from sahara.tests.unit import base as sahara_base
-from sahara.utils import edp
-
-
-class VanillaConfigHintsTest(sahara_base.SaharaTestCase):
-    @mock.patch(
-        'sahara.plugins.vanilla.confighints_helper.'
-        'get_possible_hive_config_from',
-        return_value={})
-    def test_get_possible_job_config_hive(
-            self, get_possible_hive_config_from):
-        expected_config = {'job_config': {}}
-        actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
-            edp.JOB_TYPE_HIVE)
-        get_possible_hive_config_from.assert_called_once_with(
-            'plugins/vanilla/v1_2_1/resources/hive-default.xml')
-        self.assertEqual(expected_config, actual_config)
-
-    @mock.patch('sahara.plugins.vanilla.edp_engine.EdpOozieEngine')
-    def test_get_possible_job_config_java(self, BaseVanillaEdpOozieEngine):
-        expected_config = {'job_config': {}}
-        BaseVanillaEdpOozieEngine.get_possible_job_config.return_value = (
-            expected_config)
-        actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
-            edp.JOB_TYPE_JAVA)
-        (BaseVanillaEdpOozieEngine.get_possible_job_config.
-            assert_called_once_with(edp.JOB_TYPE_JAVA))
-        self.assertEqual(expected_config, actual_config)
-
-    @mock.patch(
-        'sahara.plugins.vanilla.confighints_helper.'
-        'get_possible_mapreduce_config_from',
-        return_value={})
-    def test_get_possible_job_config_mapreduce(
-            self, get_possible_mapreduce_config_from):
-        expected_config = {'job_config': {}}
-        actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
-            edp.JOB_TYPE_MAPREDUCE)
-        get_possible_mapreduce_config_from.assert_called_once_with(
-            'plugins/vanilla/v1_2_1/resources/mapred-default.xml')
-        self.assertEqual(expected_config, actual_config)
-
-    @mock.patch(
-        'sahara.plugins.vanilla.confighints_helper.'
-        'get_possible_mapreduce_config_from',
-        return_value={})
-    def test_get_possible_job_config_mapreduce_streaming(
-            self, get_possible_mapreduce_config_from):
-        expected_config = {'job_config': {}}
-        actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
-            edp.JOB_TYPE_MAPREDUCE_STREAMING)
-        get_possible_mapreduce_config_from.assert_called_once_with(
-            'plugins/vanilla/v1_2_1/resources/mapred-default.xml')
-        self.assertEqual(expected_config, actual_config)
-
-    @mock.patch(
-        'sahara.plugins.vanilla.confighints_helper.'
-        'get_possible_pig_config_from',
-        return_value={})
-    def test_get_possible_job_config_pig(
-            self, get_possible_pig_config_from):
-        expected_config = {'job_config': {}}
-        actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
-            edp.JOB_TYPE_PIG)
-        get_possible_pig_config_from.assert_called_once_with(
-            'plugins/vanilla/v1_2_1/resources/mapred-default.xml')
-        self.assertEqual(expected_config, actual_config)
-
-    @mock.patch('sahara.plugins.vanilla.edp_engine.EdpOozieEngine')
-    def test_get_possible_job_config_shell(self, BaseVanillaEdpOozieEngine):
-        expected_config = {'job_config': {}}
-        BaseVanillaEdpOozieEngine.get_possible_job_config.return_value = (
-            expected_config)
-        actual_config = edp_engine.EdpOozieEngine.get_possible_job_config(
-            edp.JOB_TYPE_SHELL)
-        (BaseVanillaEdpOozieEngine.get_possible_job_config.
-            assert_called_once_with(edp.JOB_TYPE_SHELL))
-        self.assertEqual(expected_config, actual_config)
diff --git a/sahara/tests/unit/plugins/vanilla/v1_2_1/test_plugin.py b/sahara/tests/unit/plugins/vanilla/v1_2_1/test_plugin.py
deleted file mode 100644
index 4c3ca9eb29..0000000000
--- a/sahara/tests/unit/plugins/vanilla/v1_2_1/test_plugin.py
+++ /dev/null
@@ -1,312 +0,0 @@
-# Copyright (c) 2013 Mirantis Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-import testtools
-
-from sahara import conductor as cond
-from sahara import context
-from sahara import exceptions as e
-from sahara.plugins import base as pb
-from sahara.plugins import exceptions as ex
-from sahara.plugins.vanilla import plugin as p
-from sahara.plugins.vanilla.v1_2_1 import config_helper as c_h
-from sahara.plugins.vanilla.v1_2_1 import mysql_helper as m_h
-from sahara.plugins.vanilla.v1_2_1 import versionhandler as v_h
-from sahara.tests.unit import base
-from sahara.tests.unit import testutils as tu
-from sahara.utils import edp
-
-
-conductor = cond.API
-
-
-class VanillaPluginTest(base.SaharaWithDbTestCase):
-    def setUp(self):
-        super(VanillaPluginTest, self).setUp()
-        pb.setup_plugins()
-        self.pl = p.VanillaProvider()
-
-    def test_validate(self):
-        self.ng = []
-        self.ng.append(tu.make_ng_dict("nn", "f1", ["namenode"], 0))
-        self.ng.append(tu.make_ng_dict("jt", "f1", ["jobtracker"], 0))
-        self.ng.append(tu.make_ng_dict("tt", "f1", ["tasktracker"], 0))
-        self.ng.append(tu.make_ng_dict("oozie", "f1", ["oozie"], 0))
-
-        self._validate_case(1, 1, 10, 1)
-
-        with testtools.ExpectedException(ex.InvalidComponentCountException):
-            self._validate_case(0, 1, 10, 1)
-        with testtools.ExpectedException(ex.InvalidComponentCountException):
-            self._validate_case(2, 1, 10, 1)
-
-        with testtools.ExpectedException(ex.RequiredServiceMissingException):
-            self._validate_case(1, 0, 10, 1)
-        with testtools.ExpectedException(ex.InvalidComponentCountException):
-            self._validate_case(1, 2, 10, 1)
-
-        with testtools.ExpectedException(ex.InvalidComponentCountException):
-            self._validate_case(1, 1, 0, 2)
-        with testtools.ExpectedException(ex.RequiredServiceMissingException):
-            self._validate_case(1, 0, 0, 1)
-
-    def _validate_case(self, *args):
-        lst = []
-        for i in range(0, len(args)):
-            self.ng[i]['count'] = args[i]
-            lst.append(self.ng[i])
-
-        cl = tu.create_cluster("cluster1", "tenant1", "vanilla", "1.2.1", lst)
-
-        self.pl.validate(cl)
-
-    def test_get_configs(self):
-        cl_configs = self.pl.get_configs("1.2.1")
-        for cfg in cl_configs:
-            if cfg.config_type is "bool":
-                self.assertIsInstance(cfg.default_value, bool)
-            elif cfg.config_type is "int":
-                try:
-                    self.assertIsInstance(cfg.default_value, int)
-                except AssertionError:
-                    self.assertIsInstance(cfg.default_value, long)
-            else:
-                self.assertIsInstance(cfg.default_value, str)
-            self.assertNotIn(cfg.name, c_h.HIDDEN_CONFS)
-
-    def test_extract_environment_configs(self):
-        env_configs = {
-            "JobFlow": {
-                'Oozie Heap Size': 4000
-            },
-            "MapReduce": {
-                'Job Tracker Heap Size': 1000,
-                'Task Tracker Heap Size': "2000"
-            },
-            "HDFS": {
-                'Name Node Heap Size': 3000,
-                'Data Node Heap Size': "4000"
-            },
-            "Wrong-applicable-target": {
-                't1': 4
-            }}
-        self.assertEqual(['CATALINA_OPTS -Xmx4000m',
-                          'HADOOP_DATANODE_OPTS=\\"-Xmx4000m\\"',
-                          'HADOOP_JOBTRACKER_OPTS=\\"-Xmx1000m\\"',
-                          'HADOOP_NAMENODE_OPTS=\\"-Xmx3000m\\"',
-                          'HADOOP_TASKTRACKER_OPTS=\\"-Xmx2000m\\"'],
-                         c_h.extract_environment_confs(env_configs))
-
-    def test_extract_xml_configs(self):
-        xml_configs = {
-            "HDFS": {
-                'dfs.replication': 3,
-                'fs.default.name': 'hdfs://',
-                'key': 'value'
-            },
-            "MapReduce": {
-                'io.sort.factor': 10,
-                'mapred.reduce.tasks': 2
-            },
-            "Wrong-applicable-target": {
-                'key': 'value'
-            }
-        }
-
-        self.assertEqual([('dfs.replication', 3),
-                          ('fs.default.name', 'hdfs://'),
-                          ('io.sort.factor', 10),
-                          ('mapred.reduce.tasks', 2)],
-                         c_h.extract_xml_confs(xml_configs))
-
-    def test_general_configs(self):
-        gen_config = {
-            c_h.ENABLE_SWIFT.name: {
-                'default_value': c_h.ENABLE_SWIFT.default_value,
-                'conf': {
-                    'fs.swift.enabled': True
-                }
-            },
-            c_h.ENABLE_MYSQL.name: {
-                'default_value': c_h.ENABLE_MYSQL.default_value,
-                'conf': {
-                    'oozie.service.JPAService.jdbc.username': 'oozie'
-                }
-            }
-        }
-        all_configured = {
-            'fs.swift.enabled': True,
-            'oozie.service.JPAService.jdbc.username': 'oozie'
-        }
-        configs = {
-            'general': {
-                'Enable Swift': True
-            }
-        }
-        cfg = c_h.generate_cfg_from_general({}, configs, gen_config)
-        self.assertEqual(all_configured, cfg)
-        configs['general'].update({'Enable MySQL': False})
-        cfg = c_h.generate_cfg_from_general({}, configs, gen_config)
-        self.assertEqual({'fs.swift.enabled': True}, cfg)
-        configs['general'].update({
-            'Enable Swift': False,
-            'Enable MySQL': False
-        })
-        cfg = c_h.generate_cfg_from_general({}, configs, gen_config)
-        self.assertEqual({}, cfg)
-        configs = {}
-        cfg = c_h.generate_cfg_from_general({}, configs, gen_config)
-        self.assertEqual(all_configured, cfg)
-
-    def test_get_mysql_configs(self):
-        cfg = m_h.get_required_mysql_configs(None, None)
-        self.assertEqual(cfg, m_h.get_oozie_mysql_configs())
-        cfg = m_h.get_required_mysql_configs("metastore_host", "passwd")
-        cfg_to_compare = m_h.get_oozie_mysql_configs()
-        cfg_to_compare.update(m_h.get_hive_mysql_configs(
-            "metastore_host", "passwd"))
-        self.assertEqual(cfg, cfg_to_compare)
-
-    @mock.patch('sahara.conductor.api.LocalApi.cluster_get')
-    def test_get_config_value(self, cond_get_cluster):
-        cluster = self._get_fake_cluster()
-        cond_get_cluster.return_value = cluster
-
-        self.assertEqual(
-            'hdfs://inst1:8020',
-            c_h.get_config_value('HDFS', 'fs.default.name', cluster))
-        self.assertEqual(
-            'eggs', c_h.get_config_value('HDFS', 'spam', cluster))
-        self.assertEqual(
-            30000, c_h.get_config_value('HDFS', 'dfs.safemode.extension'))
-        self.assertRaises(e.ConfigurationError,
-                          c_h.get_config_value,
-                          'MapReduce', 'spam', cluster)
-
-    @mock.patch('sahara.plugins.vanilla.v1_2_1.versionhandler.context')
-    @mock.patch('sahara.conductor.api.LocalApi.cluster_update')
-    def test_set_cluster_info(self, cond_cluster_update, context_mock):
-        cluster = self._get_fake_cluster()
-        v_h.VersionHandler()._set_cluster_info(cluster)
-        expected_info = {
-            'HDFS': {
-                'NameNode': 'hdfs://inst1:8020',
-                'Web UI': 'http://127.0.0.1:50070'
-            },
-            'MapReduce': {
-                'Web UI': 'http://127.0.0.1:50030',
-                'JobTracker': 'inst1:8021'
-            },
-            'JobFlow': {
-                'Oozie': 'http://127.0.0.1:11000'
-            }
-        }
-        cond_cluster_update.assert_called_with(context_mock.ctx(), cluster,
-                                               {'info': expected_info})
-
-    def _get_fake_cluster(self):
-        class FakeNG(object):
-            def __init__(self, name, flavor, processes, count, instances=None,
-                         configuration=None, cluster_id=None):
-                self.name = name
-                self.flavor = flavor
-                self.node_processes = processes
-                self.count = count
-                self.instances = instances or []
-                self.ng_configuration = configuration
-                self.cluster_id = cluster_id
-
-            def configuration(self):
-                return self.ng_configuration
-
-            def storage_paths(self):
-                return ['/mnt']
-
-        class FakeCluster(object):
-            def __init__(self, name, tenant, plugin, version, node_groups):
-                self.name = name
-                self.tenant = tenant
-                self.plugin = plugin
-                self.version = version
-                self.node_groups = node_groups
-
-        class FakeInst(object):
-            def __init__(self, inst_name, inst_id, management_ip):
-                self.instance_name = inst_name
-                self.instance_id = inst_id
-                self.management_ip = management_ip
-
-            def hostname(self):
-                return self.instance_name
-
-        ms_inst = FakeInst('inst1', 'id1', '127.0.0.1')
-        wk_inst = FakeInst('inst2', 'id2', '127.0.0.1')
-
-        conf = {
-            "MapReduce": {},
-            "HDFS": {
-                "spam": "eggs"
-            },
-            "JobFlow": {},
-        }
-
-        ng1 = FakeNG('master', 'fl1', ['namenode', 'jobtracker', 'oozie'], 1,
-                     [ms_inst], conf, 'id1')
-        ng2 = FakeNG('worker', 'fl1', ['datanode', 'tasktracker'], 1,
-                     [wk_inst], conf, 'id1')
-        return FakeCluster('cl1', 'ten1', 'vanilla', '1.2.1', [ng1, ng2])
-
-    def test_get_hadoop_ssh_keys(self):
-        cluster_dict = {
-            'name': 'cluster1',
-            'plugin_name': 'mock_plugin',
-            'hadoop_version': 'mock_version',
-            'default_image_id': 'initial',
-            'node_groups': [tu.make_ng_dict("ng1", "f1", ["s1"], 1)],
-            'extra': {'test': '1'}}
-
-        cluster1 = conductor.cluster_create(context.ctx(), cluster_dict)
-        (private_key1, public_key1) = c_h.get_hadoop_ssh_keys(cluster1)
-
-        # should store keys for old cluster
-        cluster1 = conductor.cluster_get(context.ctx(), cluster1)
-        (private_key2, public_key2) = c_h.get_hadoop_ssh_keys(cluster1)
-
-        self.assertEqual(public_key1, public_key2)
-        self.assertEqual(private_key1, private_key2)
-
-        # should generate new keys for new cluster
-        cluster_dict.update({'name': 'cluster2'})
-        cluster2 = conductor.cluster_create(context.ctx(), cluster_dict)
-        (private_key3, public_key3) = c_h.get_hadoop_ssh_keys(cluster2)
-
-        self.assertNotEqual(public_key1, public_key3)
-        self.assertNotEqual(private_key1, private_key3)
-
-    @mock.patch('sahara.service.edp.hdfs_helper.create_dir_hadoop1')
-    def test_edp_calls_hadoop1_create_dir(self, create_dir):
-        cluster_dict = {
-            'name': 'cluster1',
-            'plugin_name': 'vanilla',
-            'hadoop_version': '1.2.1',
-            'default_image_id': 'image'}
-
-        cluster = conductor.cluster_create(context.ctx(), cluster_dict)
-        plugin = pb.PLUGINS.get_plugin(cluster.plugin_name)
-        plugin.get_edp_engine(cluster, edp.JOB_TYPE_PIG).create_hdfs_dir(
-            mock.Mock(), '/tmp')
-
-        self.assertEqual(1, create_dir.call_count)
diff --git a/sahara/tests/unit/plugins/vanilla/v1_2_1/test_run_scripts.py b/sahara/tests/unit/plugins/vanilla/v1_2_1/test_run_scripts.py
deleted file mode 100644
index 0f8746b8d9..0000000000
--- a/sahara/tests/unit/plugins/vanilla/v1_2_1/test_run_scripts.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright (c) 2013 Mirantis Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-import testtools
-
-from sahara.plugins.vanilla.v1_2_1 import run_scripts
-
-
-class RunScriptsTest(testtools.TestCase):
-
-    def test_check_datanodes_count_positive(self):
-        remote = mock.Mock()
-        remote.execute_command.return_value = (0, "1")
-        self.assertTrue(run_scripts.check_datanodes_count(remote, 1))
-
-    def test_check_datanodes_count_negative(self):
-        remote = mock.Mock()
-        remote.execute_command.return_value = (0, "1")
-        self.assertFalse(run_scripts.check_datanodes_count(remote, 2))
-
-    def test_check_datanodes_count_nonzero_exitcode(self):
-        remote = mock.Mock()
-        remote.execute_command.return_value = (1, "1")
-        self.assertFalse(run_scripts.check_datanodes_count(remote, 1))
-
-    def test_check_datanodes_count_expects_zero(self):
-        remote = mock.Mock()
-        self.assertTrue(run_scripts.check_datanodes_count(remote, 0))
-        self.assertEqual(0, remote.execute_command.call_count)
diff --git a/sahara/tests/unit/service/edp/edp_test_utils.py b/sahara/tests/unit/service/edp/edp_test_utils.py
index 9b3a8ec654..100fa76dab 100644
--- a/sahara/tests/unit/service/edp/edp_test_utils.py
+++ b/sahara/tests/unit/service/edp/edp_test_utils.py
@@ -64,7 +64,7 @@ def create_job_binary(id, type):
     return binary
 
 
-def create_cluster(plugin_name='vanilla', hadoop_version='1.2.1'):
+def create_cluster(plugin_name='vanilla', hadoop_version='2.6.0'):
     cluster = mock.Mock()
     cluster.plugin_name = plugin_name
     cluster.hadoop_version = hadoop_version
diff --git a/sahara/tests/unit/service/edp/test_job_possible_configs.py b/sahara/tests/unit/service/edp/test_job_possible_configs.py
index 1a9e06fc06..23388fdb1b 100644
--- a/sahara/tests/unit/service/edp/test_job_possible_configs.py
+++ b/sahara/tests/unit/service/edp/test_job_possible_configs.py
@@ -24,10 +24,11 @@ class TestJobPossibleConfigs(testtools.TestCase):
     def test_possible_configs(self):
         res = w_f.get_possible_job_config(edp.JOB_TYPE_MAPREDUCE)
         sample_config_property = {
-            'name': 'mapred.map.tasks',
-            'value': '2',
-            'description': 'The default number of map tasks per job.'
-                           'Ignored when mapred.job.tracker is "local". '
+            'name': 'mapreduce.jobtracker.expire.trackers.interval',
+            'value': '600000',
+            'description': "Expert: The time-interval, in miliseconds, after "
+                           "whicha tasktracker is declared 'lost' if it "
+                           "doesn't send heartbeats."
         }
         self.assertIn(sample_config_property, res['job_config']["configs"])
 
diff --git a/sahara/tests/unit/service/heat/test_templates.py b/sahara/tests/unit/service/heat/test_templates.py
index 01fbdb7ae2..2f144b610b 100644
--- a/sahara/tests/unit/service/heat/test_templates.py
+++ b/sahara/tests/unit/service/heat/test_templates.py
@@ -43,7 +43,7 @@ class TestClusterTemplate(base.SaharaWithDbTestCase):
 
     def _make_cluster(self, mng_network, ng1, ng2, anti_affinity=[]):
         return tu.create_cluster("cluster", "tenant1", "general",
-                                 "1.2.1", [ng1, ng2],
+                                 "2.6.0", [ng1, ng2],
                                  user_keypair_id='user_key',
                                  neutron_management_network=mng_network,
                                  default_image_id='1', image_id=None,
@@ -84,7 +84,6 @@ class TestClusterTemplate(base.SaharaWithDbTestCase):
         ng1 = [ng for ng in cluster.node_groups if ng.name == "master"][0]
         ng2 = [ng for ng in cluster.node_groups if ng.name == "worker"][0]
 
-        expected = ['1', '2']
         actual = heat_template._get_security_groups(ng1)
         self.assertEqual(expected, actual)
 
diff --git a/sahara/tests/unit/service/validation/edp/test_job_executor.py b/sahara/tests/unit/service/validation/edp/test_job_executor.py
index 6af0811a2a..ccf779fdf2 100644
--- a/sahara/tests/unit/service/validation/edp/test_job_executor.py
+++ b/sahara/tests/unit/service/validation/edp/test_job_executor.py
@@ -64,7 +64,7 @@ class TestJobExecCreateValidation(u.ValidationTestCase):
         ng = tu.make_ng_dict('master', 42, ['oozie'], 1,
                              instances=[tu.make_inst_dict('id', 'name')])
         get_cluster.return_value = tu.create_cluster("cluster", "tenant1",
-                                                     "vanilla", "1.2.1", [ng])
+                                                     "vanilla", "2.6.0", [ng])
 
         self._assert_create_object_validation(
             data={
@@ -113,7 +113,7 @@ class TestJobExecCreateValidation(u.ValidationTestCase):
         ng = tu.make_ng_dict('master', 42, ['oozie'], 1,
                              instances=[tu.make_inst_dict('id', 'name')])
         get_cluster.return_value = tu.create_cluster("cluster", "tenant1",
-                                                     "vanilla", "1.2.1", [ng])
+                                                     "vanilla", "2.6.0", [ng])
 
         self._assert_create_object_validation(
             data={
@@ -156,7 +156,7 @@ class TestJobExecCreateValidation(u.ValidationTestCase):
         ng = tu.make_ng_dict('master', 42, ['namenode'], 1,
                              instances=[tu.make_inst_dict('id', 'name')])
         get_cluster.return_value = tu.create_cluster("cluster", "tenant1",
-                                                     "vanilla", "1.2.1", [ng])
+                                                     "vanilla", "2.6.0", [ng])
 
         self._assert_create_object_validation(
             data={
@@ -199,7 +199,7 @@ class TestJobExecCreateValidation(u.ValidationTestCase):
         ng = tu.make_ng_dict('master', 42, ['namenode', 'oozie'], 1,
                              instances=[tu.make_inst_dict('id', 'name')])
         cluster_get.return_value = tu.create_cluster("cluster", "tenant1",
-                                                     "vanilla", "1.2.1", [ng])
+                                                     "vanilla", "2.6.0", [ng])
 
         self._assert_create_object_validation(
             data={
diff --git a/sahara/tests/unit/service/validation/test_cluster_create_validation.py b/sahara/tests/unit/service/validation/test_cluster_create_validation.py
index bb1f3d1e3a..f66f69537c 100644
--- a/sahara/tests/unit/service/validation/test_cluster_create_validation.py
+++ b/sahara/tests/unit/service/validation/test_cluster_create_validation.py
@@ -71,7 +71,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
         data = {
             'name': "testname",
            'plugin_name': "vanilla",
-            'hadoop_version': "1.2.1"
+            'hadoop_version': "2.6.0"
         }
 
         self._assert_types(data)
@@ -79,7 +79,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
         data = {
             'name': "testname",
             'plugin_name': "vanilla",
-            'hadoop_version': "1.2.1"
+            'hadoop_version': "2.6.0"
         }
 
         self._assert_valid_name_hostname_validation(data)
@@ -87,7 +87,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
         data = {
             'name': 'test',
             'plugin_name': 'vanilla',
-            'hadoop_version': '1.2.1'
+            'hadoop_version': '2.6.0'
         }
 
         self._assert_create_object_validation(
             data=data,
@@ -101,7 +101,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
         data = {
             'name': 'test-heat',
             'plugin_name': 'vanilla',
-            'hadoop_version': '1.2.1'
+            'hadoop_version': '2.6.0'
         }
 
         self._assert_create_object_validation(
             data=data,
@@ -115,7 +115,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
             data={
                 'name': "testname",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'user_keypair_id': 'wrong_keypair'
             },
             bad_req_i=(1, 'NOT_FOUND',
@@ -127,7 +127,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
             data={
                 'name': "test-name",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'user_keypair_id': '!'},
             bad_req_i=(1, 'VALIDATION_ERROR',
                        "'!' is not a 'valid_keypair_name'")
@@ -138,7 +138,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
             data={
                 'name': "test-name",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'default_image_id': '550e8400-e29b-41d4-a616-446655440000'
             },
             bad_req_i=(1, 'INVALID_REFERENCE',
@@ -151,7 +151,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
             data={
                 'name': "test-name",
                 'plugin_name': "wrong_plugin",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
             },
             bad_req_i=(1, 'INVALID_REFERENCE',
                        "Sahara doesn't contain plugin "
@@ -164,7 +164,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
             data={
                 'name': "test-name",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'default_image_id': '550e8400-e29b-41d4-a716-446655440000',
                 'neutron_management_network': '53a36917-ab9f-4589-'
                                               '94ce-b6df85a68332'
@@ -178,7 +178,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
             data={
                 'name': "test-name",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'default_image_id': '550e8400-e29b-41d4-a716-446655440000',
                 'neutron_management_network': '53a36917-ab9f-4589-'
                                               '94ce-b6df85a68332'
@@ -194,7 +194,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
             data={
                 'name': "test-name",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'default_image_id': '550e8400-e29b-41d4-a716-446655440000'
             },
             bad_req_i=(1, 'NOT_FOUND',
@@ -206,7 +206,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
             data={
                 'name': "long-long-cluster-name",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'default_image_id': '550e8400-e29b-41d4-a716-446655440000',
                 'neutron_management_network': 'd9a3bebc-f788-4b81-'
                                               '9a93-aa048022c1ca',
@@ -235,11 +235,11 @@ class TestClusterCreateValidation(u.ValidationTestCase):
             data={
                 'name': "testname",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'user_keypair_id': 'test_keypair',
                 'cluster_configs': {
                     'HDFS': {
-                        u'hadoop.tmp.dir': '/temp/'
+                        u'hadoop.hdfs.configuration.version': '2'
                     }
                 },
                 'default_image_id': '550e8400-e29b-41d4-a716-446655440000',
@@ -257,7 +257,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
             data={
                 'name': "testname",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'user_keypair_id': 'test_keypair',
                 'default_image_id': '550e8400-e29b-41d4-a716-446655440000',
                 'neutron_management_network': 'd9a3bebc-f788-4b81-'
@@ -282,7 +282,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
             data={
'name': "testname", 'plugin_name': "vanilla", - 'hadoop_version': "1.2.1", + 'hadoop_version': "2.6.0", 'user_keypair_id': 'test_keypair', 'default_image_id': '550e8400-e29b-41d4-a716-446655440000', 'neutron_management_network': 'd9a3bebc-f788-4b81-' @@ -317,7 +317,7 @@ class TestClusterCreateValidation(u.ValidationTestCase): data={ 'name': "testname", 'plugin_name': "vanilla", - 'hadoop_version': "1.2.1", + 'hadoop_version': "2.6.0", 'user_keypair_id': 'test_keypair', 'default_image_id': '550e8400-e29b-41d4-a716-446655440000', 'neutron_management_network': 'd9a3bebc-f788-4b81-' @@ -350,7 +350,7 @@ class TestClusterCreateValidation(u.ValidationTestCase): data={ 'name': "testname", 'plugin_name': "vanilla", - 'hadoop_version': "1.2.1", + 'hadoop_version': "2.6.0", 'user_keypair_id': 'test_keypair', 'default_image_id': '550e8400-e29b-41d4-a716-446655440000', 'neutron_management_network': 'd9a3bebc-f788-4b81-' @@ -375,7 +375,7 @@ class TestClusterCreateValidation(u.ValidationTestCase): data={ 'name': "testname", 'plugin_name': "vanilla", - 'hadoop_version': "1.2.1", + 'hadoop_version': "2.6.0", 'user_keypair_id': 'test_keypair', 'default_image_id': '550e8400-e29b-41d4-a716-446655440000', 'neutron_management_network': 'd9a3bebc-f788-4b81-' @@ -402,7 +402,7 @@ class TestClusterCreateValidation(u.ValidationTestCase): data={ 'name': 'testname', 'plugin_name': 'vanilla', - 'hadoop_version': '1.2.1', + 'hadoop_version': '2.6.0', 'user_keypair_id': 'test_keypair', 'default_image_id': '550e8400-e29b-41d4-a716-446655440000', 'neutron_management_network': 'd9a3bebc-f788-4b81-' @@ -430,7 +430,7 @@ class TestClusterCreateValidation(u.ValidationTestCase): data={ 'name': 'testname', 'plugin_name': 'vanilla', - 'hadoop_version': '1.2.1', + 'hadoop_version': '2.6.0', 'user_keypair_id': 'test_keypair', 'default_image_id': '550e8400-e29b-41d4-a716-446655440000', 'neutron_management_network': 'd9a3bebc-f788-4b81-' @@ -458,7 +458,7 @@ class TestClusterCreateValidation(u.ValidationTestCase): data={ 'name': 'testname', 'plugin_name': 'vanilla', - 'hadoop_version': '1.2.1', + 'hadoop_version': '2.6.0', 'user_keypair_id': 'test_keypair', 'default_image_id': '550e8400-e29b-41d4-a716-446655440000', 'neutron_management_network': 'd9a3bebc-f788-4b81-' @@ -521,7 +521,7 @@ class TestClusterCreateFlavorValidation(base.SaharaWithDbTestCase): def _create_node_group_template(self, flavor='42'): ng_tmpl = { "plugin_name": "vanilla", - "hadoop_version": "1.2.1", + "hadoop_version": "2.6.0", "node_processes": ["namenode"], "name": "master", "flavor_id": flavor @@ -531,7 +531,7 @@ class TestClusterCreateFlavorValidation(base.SaharaWithDbTestCase): def _create_cluster_template(self, ng_id): cl_tmpl = { "plugin_name": "vanilla", - "hadoop_version": "1.2.1", + "hadoop_version": "2.6.0", "node_groups": [ {"name": "master", "count": 1, @@ -548,7 +548,7 @@ class TestClusterCreateFlavorValidation(base.SaharaWithDbTestCase): data = { "name": "testname", "plugin_name": "vanilla", - "hadoop_version": "1.2.1", + "hadoop_version": "2.6.0", "cluster_template_id": '%s' % ctmpl_id, 'default_image_id': '550e8400-e29b-41d4-a716-446655440000' } @@ -559,7 +559,7 @@ class TestClusterCreateFlavorValidation(base.SaharaWithDbTestCase): data1 = { "name": "testwithnodegroups", "plugin_name": "vanilla", - "hadoop_version": "1.2.1", + "hadoop_version": "2.6.0", "node_groups": [ { "name": "allinone", @@ -567,9 +567,9 @@ class TestClusterCreateFlavorValidation(base.SaharaWithDbTestCase): "flavor_id": "42", "node_processes": [ "namenode", - "jobtracker", + 
"resourcemanager", "datanode", - "tasktracker" + "nodemanager" ] } ], @@ -586,14 +586,14 @@ class TestClusterCreateFlavorValidation(base.SaharaWithDbTestCase): data = { "name": "testname", "plugin_name": "vanilla", - "hadoop_version": "1.2.1", + "hadoop_version": "2.6.0", "cluster_template_id": '%s' % ctmpl_id, 'default_image_id': '550e8400-e29b-41d4-a716-446655440000' } data1 = { "name": "testwithnodegroups", "plugin_name": "vanilla", - "hadoop_version": "1.2.1", + "hadoop_version": "2.6.0", "node_groups": [ { "name": "allinone", @@ -601,9 +601,9 @@ class TestClusterCreateFlavorValidation(base.SaharaWithDbTestCase): "flavor_id": "10", "node_processes": [ "namenode", - "jobtracker", + "resourcemanager", "datanode", - "tasktracker" + "nodemanager" ] } ], @@ -630,7 +630,7 @@ class TestClusterCreateFlavorValidation(base.SaharaWithDbTestCase): data = { "name": "testtmplnodegroups", "plugin_name": "vanilla", - "hadoop_version": "1.2.1", + "hadoop_version": "2.6.0", "cluster_template_id": '%s' % ctmpl_id, 'default_image_id': '550e8400-e29b-41d4-a716-446655440000', "node_groups": [ @@ -640,9 +640,9 @@ class TestClusterCreateFlavorValidation(base.SaharaWithDbTestCase): "flavor_id": "42", "node_processes": [ "namenode", - "jobtracker", + "resourcemanager", "datanode", - "tasktracker" + "nodemanager" ] } ] @@ -656,7 +656,7 @@ class TestClusterCreateFlavorValidation(base.SaharaWithDbTestCase): data = { "name": "testtmplnodegroups", "plugin_name": "vanilla", - "hadoop_version": "1.2.1", + "hadoop_version": "2.6.0", "node_groups": [ { "node_group_template_id": '%s' % ng_id, @@ -665,9 +665,9 @@ class TestClusterCreateFlavorValidation(base.SaharaWithDbTestCase): "flavor_id": "42", "node_processes": [ "namenode", - "jobtracker", + "resourcemanager", "datanode", - "tasktracker" + "nodemanager" ] }, ], diff --git a/sahara/tests/unit/service/validation/test_cluster_scaling_validation.py b/sahara/tests/unit/service/validation/test_cluster_scaling_validation.py index c48600a8dc..6f8fe69387 100644 --- a/sahara/tests/unit/service/validation/test_cluster_scaling_validation.py +++ b/sahara/tests/unit/service/validation/test_cluster_scaling_validation.py @@ -66,7 +66,7 @@ class TestScalingValidation(u.ValidationTestCase): def test_check_cluster_scaling_resize_ng(self, ops): ops.get_engine_type_and_version.return_value = "direct.1.1" ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1) - cluster = tu.create_cluster("cluster1", "tenant1", "vanilla", "1.2.1", + cluster = tu.create_cluster("cluster1", "tenant1", "vanilla", "2.6.0", [ng1], status='Validating', id='12321') self._assert_check_scaling( @@ -76,7 +76,7 @@ class TestScalingValidation(u.ValidationTestCase): "status. 
                                   "Validating")
 
-        cluster = tu.create_cluster("cluster1", "tenant1", "vanilla", "1.2.1",
+        cluster = tu.create_cluster("cluster1", "tenant1", "vanilla", "2.6.0",
                                     [ng1], status='Active', id='12321')
         data = {
             'resize_node_groups': [
@@ -113,7 +113,7 @@ class TestScalingValidation(u.ValidationTestCase):
         ops.get_engine_type_and_version.return_value = "direct.1.1"
         ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
         cluster = tu.create_cluster("test-cluster", "tenant1", "vanilla",
-                                    "1.2.1", [ng1], status='Active',
+                                    "2.7.1", [ng1], status='Active',
                                     id='12321')
         data = {
             'add_node_groups': [
@@ -359,7 +359,7 @@ class TestScalingValidation(u.ValidationTestCase):
         ops.get_engine_type_and_version.return_value = "direct.1.1"
         ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
         cluster = tu.create_cluster(
-            "cluster1", "tenant1", "vanilla", "1.2.1", [ng1],
+            "cluster1", "tenant1", "vanilla", "2.6.0", [ng1],
             status='Active', id='12321',
             sahara_info={"infrastructure_engine": "heat.1.1"})
 
@@ -372,7 +372,7 @@ class TestScalingValidation(u.ValidationTestCase):
     def test_check_heat_cluster_scaling_missing_engine(self, ops):
         ops.get_engine_type_and_version.return_value = "heat.1.1"
         ng1 = tu.make_ng_dict('ng', '42', ['namenode'], 1)
-        cluster = tu.create_cluster("cluster1", "tenant1", "vanilla", "1.2.1",
+        cluster = tu.create_cluster("cluster1", "tenant1", "vanilla", "2.6.0",
                                     [ng1], status='Active', id='12321')
 
         self._assert_check_scaling(
diff --git a/sahara/tests/unit/service/validation/test_cluster_template_create_validation.py b/sahara/tests/unit/service/validation/test_cluster_template_create_validation.py
index 36e2427a0e..9df2e24ae8 100644
--- a/sahara/tests/unit/service/validation/test_cluster_template_create_validation.py
+++ b/sahara/tests/unit/service/validation/test_cluster_template_create_validation.py
@@ -38,7 +38,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
             data={
                 'name': "test-name",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'node_groups': [
                     {'name': 'a'}
                ]
@@ -51,7 +51,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
             data={
                 'name': "test-name",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'node_groups': [
                     {'name': 'a',
                      'flavor_id': '42'}
@@ -66,7 +66,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
             data={
                 'name': "test-name",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'node_groups': [
                     {'name': 'a',
                      'flavor_id': '42',
@@ -83,7 +83,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
             data={
                 'name': "test-name",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'node_groups': [
                     {
                         'name': 'a',
@@ -108,7 +108,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
             data={
                 'name': "test-name",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'node_groups': [
                     {
                         "node_group_template_id": "",
@@ -125,7 +125,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
             data={
                 'name': "test-name",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'node_groups': [
                     {
                         "node_group_template_id": "test",
@@ -147,7 +147,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
             data={
                 'name': "test-name",
                 'plugin_name': "vanilla",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
                 'node_groups': [
                     {
                         "node_group_template_id": "550e8400-e29b-41d4-a716-"
@@ -164,7 +164,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
         data = {
             'name': "testname",
             'plugin_name': "vanilla",
-            'hadoop_version': "1.2.1"
+            'hadoop_version': "2.6.0"
         }
 
         self._assert_valid_name_hostname_validation(data)
@@ -172,7 +172,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
         data = {
             'name': "testname",
             'plugin_name': "vanilla",
-            'hadoop_version': "1.2.1"
+            'hadoop_version': "2.6.0"
         }
 
         self._assert_types(data)
@@ -208,7 +208,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
             data={
                 'name': 'testname',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
+                'hadoop_version': '2.6.0',
                 'default_image_id': str(uuid.uuid4()),
                 'cluster_configs': {
                     "service_1": {
@@ -237,7 +237,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
             data={
                 'name': 'testname',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
+                'hadoop_version': '2.6.0',
                 'default_image_id': None,
                 'cluster_configs': None,
                 'node_groups': None,
@@ -251,7 +251,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
             data={
                 'name': "test-name",
                 'plugin_name': "wrong_plugin",
-                'hadoop_version': "1.2.1",
+                'hadoop_version': "2.6.0",
             },
             bad_req_i=(1, 'INVALID_REFERENCE',
                        "Sahara doesn't contain plugin "
@@ -262,7 +262,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
         data = {
             'name': 'test',
             'plugin_name': 'vanilla',
-            'hadoop_version': '1.2.1'
+            'hadoop_version': '2.6.0'
         }
         self._assert_create_object_validation(
             data=data,
@@ -274,7 +274,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
         data = {
             'name': 'test-template',
             'plugin_name': 'vanilla',
-            'hadoop_version': '1.2.1',
+            'hadoop_version': '2.6.0',
             'neutron_management_network': '53a36917-ab9f-4589'
                                           '-94ce-b6df85a68332'
         }
diff --git a/sahara/tests/unit/service/validation/test_cluster_template_update_validation.py b/sahara/tests/unit/service/validation/test_cluster_template_update_validation.py
index 406ad0258d..59da91faba 100644
--- a/sahara/tests/unit/service/validation/test_cluster_template_update_validation.py
+++ b/sahara/tests/unit/service/validation/test_cluster_template_update_validation.py
@@ -24,7 +24,7 @@ from sahara.tests.unit.service.validation import utils as u
 SAMPLE_DATA = {
     'name': 'testname',
     'plugin_name': 'vanilla',
-    'hadoop_version': '1.2.1',
+    'hadoop_version': '2.7.1',
     'is_public': False,
     'is_protected': False
 }
diff --git a/sahara/tests/unit/service/validation/test_ng_template_validation_create.py b/sahara/tests/unit/service/validation/test_ng_template_validation_create.py
index 9ac8aa746f..aa4670beb3 100644
--- a/sahara/tests/unit/service/validation/test_ng_template_validation_create.py
+++ b/sahara/tests/unit/service/validation/test_ng_template_validation_create.py
@@ -56,7 +56,7 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': 'a',
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1'
+                'hadoop_version': '2.6.0'
             },
             bad_req_i=(1, "VALIDATION_ERROR",
                        u"'node_processes' is a required property")
@@ -66,7 +66,7 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': "a",
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
+                'hadoop_version': '2.6.0',
                 'node_processes': []
             },
             bad_req_i=(1, 'VALIDATION_ERROR',
@@ -78,7 +78,7 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
             'name': 'a',
             'flavor_id': '42',
             'plugin_name': 'vanilla',
-            'hadoop_version': '1.2.1',
+            'hadoop_version': '2.6.0',
             'node_processes': ['namenode']
         }
         self._assert_valid_name_hostname_validation(data)
@@ -89,7 +89,7 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': "a",
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
+                'hadoop_version': '2.6.0',
                 'node_processes': ["namenode", "namenode"]
             },
             bad_req_i=(1, 'INVALID_DATA',
@@ -100,7 +100,7 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': 'a',
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
+                'hadoop_version': '2.6.0',
                 'node_processes': ['wrong_process']
             },
             bad_req_i=(1, 'INVALID_REFERENCE',
@@ -121,16 +121,16 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': 'a',
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
+                'hadoop_version': '2.6.0',
                 'node_processes': ['namenode',
                                    'datanode',
                                    'secondarynamenode',
-                                   'tasktracker',
-                                   'jobtracker'],
+                                   'nodemanager',
+                                   'resourcemanager'],
                 'image_id': '550e8400-e29b-41d4-a716-446655440000',
                 'node_configs': {
                     'HDFS': {
-                        u'hadoop.tmp.dir': '/temp/'
+                        u'hadoop.hdfs.configuration.version': '2'
                     }
                 },
                 'volumes_per_node': 2,
@@ -156,12 +156,12 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': 'a',
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
+                'hadoop_version': '2.6.0',
                 'node_processes': ['namenode',
                                    'datanode',
                                    'secondarynamenode',
-                                   'tasktracker',
-                                   'jobtracker'],
+                                   'nodemanager',
+                                   'resourcemanager'],
                 'image_id': None,
                 'node_configs': None,
@@ -187,7 +187,7 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': 'a',
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
+                'hadoop_version': '2.6.0',
                 'node_processes': ['wrong_process'],
                 'volumes_per_node': -1
             },
@@ -199,7 +199,7 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': 'a',
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
+                'hadoop_version': '2.6.0',
                 'node_processes': ['wrong_process'],
                 'volumes_size': 0
             },
@@ -211,7 +211,7 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
         default_data = {
             'name': 'a', 'flavor_id': '42',
             'plugin_name': 'vanilla',
-            'hadoop_version': '1.2.1',
+            'hadoop_version': '2.6.0',
             'node_processes': ['namenode']
         }
         self._assert_types(default_data)
@@ -221,7 +221,7 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
             'name': 'test',
             'flavor_id': '42',
             'plugin_name': 'vanilla',
-            'hadoop_version': '1.2.1',
+            'hadoop_version': '2.6.0',
             'node_processes': ['namenode']}
         self._assert_create_object_validation(
             data=data,
@@ -235,7 +235,7 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': 'test-ng',
                 'flavor_id': '1',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
+                'hadoop_version': '2.6.0',
                 'node_processes': ['namenode']
             },
            bad_req_i=(1, 'NOT_FOUND',
@@ -248,11 +248,11 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': 'test-ng',
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
+                'hadoop_version': '2.6.0',
                 'node_processes': ['namenode'],
                 'node_configs': {
                     'wrong_target': {
-                        u'hadoop.tmp.dir': '/temp/'
+                        u'mapreduce.task.tmp.dir': '/temp/'
                     }
                }},
            bad_req_i=(1, 'INVALID_REFERENCE',
@@ -264,7 +264,7 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': 'test-ng',
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
+                'hadoop_version': '2.6.0',
                 'node_processes': ['namenode'],
                 'node_configs': {
                     'HDFS': {
@@ -283,7 +283,7 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': 'a',
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
+                'hadoop_version': '2.6.0',
                 'node_processes': ['wrong_process'],
                 'volumes_per_node': -1
             },
@@ -295,7 +295,7 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': 'a',
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
+                'hadoop_version': '2.6.0',
                 'node_processes': ['wrong_process'],
                 'volumes_size': 0
             },
@@ -307,8 +307,8 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': 'a',
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
-                'node_processes': ['datanode', 'tasktracker'],
+                'hadoop_version': '2.6.0',
+                'node_processes': ['datanode', 'nodemanager'],
                 'volumes_per_node': 1,
                 'volumes_size': 1,
                 'volume_mount_prefix': '/mnt/volume'
@@ -318,8 +318,8 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': 'a',
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
-                'node_processes': ['datanode', 'tasktracker'],
+                'hadoop_version': '2.6.0',
+                'node_processes': ['datanode', 'nodemanager'],
                 'volumes_per_node': 1,
                 'volumes_size': 1,
                 'volume_mount_prefix': 'qwerty'
@@ -335,8 +335,8 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                 'name': 'a',
                 'flavor_id': '42',
                 'plugin_name': 'vanilla',
-                'hadoop_version': '1.2.1',
-                'node_processes': ['datanode', 'tasktracker'],
+                'hadoop_version': '2.6.0',
+                'node_processes': ['datanode', 'nodemanager'],
                 'floating_ip_pool': 'network_bad'
            },
            bad_req_i=(1, 'NOT_FOUND', "Floating IP pool network_bad "
diff --git a/sahara/tests/unit/service/validation/test_ng_template_validation_update.py b/sahara/tests/unit/service/validation/test_ng_template_validation_update.py
index c56223d6da..3f0ed7ca3a 100644
--- a/sahara/tests/unit/service/validation/test_ng_template_validation_update.py
+++ b/sahara/tests/unit/service/validation/test_ng_template_validation_update.py
@@ -28,15 +28,15 @@ SAMPLE_DATA = {
     'name': 'a',
     'flavor_id': '42',
     'plugin_name': 'vanilla',
-    'hadoop_version': '1.2.1',
+    'hadoop_version': '2.6.0',
     'node_processes': ['namenode',
                        'datanode',
                        'secondarynamenode',
-                       'tasktracker',
-                       'jobtracker'],
+                       'nodemanager',
+                       'resourcemanager'],
     'node_configs': {
         'HDFS': {
-            u'hadoop.tmp.dir': '/temp/'
+            u'mapreduce.task.tmp.dir': '/temp/'
         }
     },
     'image_id': '550e8400-e29b-41d4-a716-446655440000',
diff --git a/sahara/tests/unit/service/validation/utils.py b/sahara/tests/unit/service/validation/utils.py
index 0dbde3e013..a11ab1cc53 100644
--- a/sahara/tests/unit/service/validation/utils.py
+++ b/sahara/tests/unit/service/validation/utils.py
@@ -186,7 +186,7 @@ def start_patch(patch_templates=True):
         @property
         def tags(self):
             if self.name == 'test':
-                return ['vanilla', '1.2.1']
+                return ['vanilla', '2.6.0']
             else:
                 return ['vanilla', 'wrong_tag']
 
@@ -200,7 +200,7 @@ def start_patch(patch_templates=True):
     nova().images.list_registered.return_value = [Image(),
                                                   Image(name='wrong_name')]
     ng_dict = tu.make_ng_dict('ng', '42', ['namenode'], 1)
-    cluster = tu.create_cluster('test', 't', 'vanilla', '1.2.1', [ng_dict],
+    cluster = tu.create_cluster('test', 't', 'vanilla', '2.6.0', [ng_dict],
                                 id=1, status='Active')
     # stub clusters list
     get_clusters.return_value = [cluster]
@@ -209,14 +209,14 @@ def start_patch(patch_templates=True):
     # stub node templates
     if patch_templates:
         ngt_dict = {'name': 'test', 'tenant_id': 't', 'flavor_id': '42',
-                    'plugin_name': 'vanilla', 'hadoop_version': '1.2.1',
+                    'plugin_name': 'vanilla', 'hadoop_version': '2.6.0',
                    'id': '550e8400-e29b-41d4-a716-446655440000',
                    'node_processes': ['namenode']}
 
         get_ng_templates.return_value = [r.NodeGroupTemplateResource(ngt_dict)]
 
         ct_dict = {'name': 'test', 'tenant_id': 't',
-                   'plugin_name': 'vanilla', 'hadoop_version': '1.2.1'}
+                   'plugin_name': 'vanilla', 'hadoop_version': '2.6.0'}
 
         get_cl_templates.return_value = [r.ClusterTemplateResource(ct_dict)]
 
@@ -370,10 +370,10 @@ class ValidationTestCase(base.SaharaTestCase):
         data = {
             'name': 'test-cluster',
             'plugin_name': 'vanilla',
-            'hadoop_version': '1.2.1',
+            'hadoop_version': '2.6.0',
             'cluster_configs': {
                 'HDFS': {
-                    u'hadoop.tmp.dir': '/temp/'
+                    u'mapreduce.task.tmp.dir': '/temp/'
                 }
             },
             'default_image_id': '550e8400-e29b-41d4-a716-446655440000'
@@ -390,7 +390,7 @@ class ValidationTestCase(base.SaharaTestCase):
             data=_update_data(data.copy(), {
                 'cluster_configs': {
                     'wrong_target': {
-                        u'hadoop.tmp.dir': '/temp/'
+                        u'mapreduce.task.tmp.dir': '/temp/'
                     }
                }}),
            bad_req_i=(1, 'INVALID_REFERENCE',
@@ -414,14 +414,14 @@ class ValidationTestCase(base.SaharaTestCase):
         data = {
             'name': 'test-cluster',
             'plugin_name': 'vanilla',
-            'hadoop_version': '1.2.1',
+            'hadoop_version': '2.6.0',
             'default_image_id': '550e8400-e29b-41d4-a716-446655440000'
         }
         self._assert_create_object_validation(data=data)
         data = {
             'name': 'test-cluster',
             'plugin_name': 'vanilla',
-            'hadoop_version': '1.2.1',
+            'hadoop_version': '2.6.0',
             'default_image_id': '813fe450-40d2-4acc-ade5-ea753a1bd5bc'
         }
         self._assert_create_object_validation(
@@ -430,7 +430,7 @@ class ValidationTestCase(base.SaharaTestCase):
                        "Requested image "
                        "'813fe450-40d2-4acc-ade5-ea753a1bd5bc' "
                        "doesn't contain required tags: "
-                       "['1.2.1']"))
+                       "['2.6.0']"))
 
     def assert_protected_resource_exception(self, ex):
         self.assertIn("marked as protected", six.text_type(ex))
diff --git a/sahara/tests/unit/utils/test_api_validator.py b/sahara/tests/unit/utils/test_api_validator.py
index 4e30f74f18..80b9db7a6b 100644
--- a/sahara/tests/unit/utils/test_api_validator.py
+++ b/sahara/tests/unit/utils/test_api_validator.py
@@ -169,9 +169,9 @@ class ApiValidatorTest(testtools.TestCase):
         self._validate_failure(schema, "_123")
         self._validate_success(schema, "a" * 64)
         self._validate_failure(schema, "")
-        self._validate_success(schema, "hadoop-examples-1.2.1.jar")
-        self._validate_success(schema, "hadoop-examples-1.2.1")
-        self._validate_success(schema, "hadoop-examples-1.2.1.")
+        self._validate_success(schema, "hadoop-examples-2.6.0.jar")
+        self._validate_success(schema, "hadoop-examples-2.6.0")
+        self._validate_success(schema, "hadoop-examples-2.6.0.")
         self._validate_success(schema, "1")
         self._validate_success(schema, "1a")
         self._validate_success(schema, "a1")
@@ -202,9 +202,9 @@ class ApiValidatorTest(testtools.TestCase):
         self._validate_success(schema, "_123")
         self._validate_success(schema, "a" * 64)
         self._validate_failure(schema, "")
-        self._validate_failure(schema, "hadoop-examples-1.2.1.jar")
-        self._validate_failure(schema, "hadoop-examples-1.2.1")
-        self._validate_failure(schema, "hadoop-examples-1.2.1.")
+        self._validate_failure(schema, "hadoop-examples-2.6.0.jar")
+        self._validate_failure(schema, "hadoop-examples-2.6.0")
+        self._validate_failure(schema, "hadoop-examples-2.6.0.")
         self._validate_success(schema, "1")
         self._validate_success(schema, "1a")
         self._validate_success(schema, "a1")
@@ -238,9 +238,9 @@ class ApiValidatorTest(testtools.TestCase):
         self._validate_failure(schema, "_123")
         self._validate_success(schema, "a" * 64)
         self._validate_failure(schema, "")
-        self._validate_failure(schema, "hadoop-examples-1.2.1.jar")
-        self._validate_failure(schema, "hadoop-examples-1.2.1")
-        self._validate_failure(schema, "hadoop-examples-1.2.1.")
+        self._validate_failure(schema, "hadoop-examples-2.6.0.jar")
+        self._validate_failure(schema, "hadoop-examples-2.6.0")
+        self._validate_failure(schema, "hadoop-examples-2.6.0.")
         self._validate_failure(schema, "1")
         self._validate_failure(schema, "1a")
         self._validate_success(schema, "a1")